kenken999 committed
Commit abfee45
1 Parent(s): 73eedaf

create vector function and insert products function

Files changed (1)
  1. babyagi/babyagi.py +72 -6
babyagi/babyagi.py CHANGED
@@ -54,11 +54,57 @@ JOIN_EXISTING_OBJECTIVE = False
 # Goal configuration
 #OBJECTIVE = os.getenv("OBJECTIVE", "")
 OBJECTIVE = "ボットの性能をよくする方法 日本語で説明"  # "How to improve the bot's performance, explained in Japanese"
+OBJECTIVE = f"""チャットボットでの広告展開"""  # "Advertising rollout via the chatbot"
+
 INITIAL_TASK = os.getenv("INITIAL_TASK", os.getenv("FIRST_TASK", ""))
 
 # Model configuration
 OPENAI_TEMPERATURE = float(os.getenv("OPENAI_TEMPERATURE", 0.0))
 
+def create_vector():
+    inputs = tokenizer(result, return_tensors="pt", max_length=512, truncation=True)
+    outputs = model(**inputs)
+    # Get the output of the [CLS] token
+    embeddings = outputs.last_hidden_state[:,0,:].squeeze().detach().cpu().numpy().tolist()
+    print(embeddings)
+    import requests
+
+    url = "https://kenken999-php.hf.space/api/v1.php"
+
+    payload = f"""model_name={embeddings}&vector_text={result}&table=products&action=insert"""
+    headers = {
+        'X-Auth-Token': 'admin',
+        'Content-Type': 'application/x-www-form-urlencoded',
+        'Cookie': 'runnerSession=muvclb78zpsdjbm7y9c3; pD1lszvk6ratOZhmmgvkp=13767810ebf0782b0b51bf72dedb63b3'
+    }
+
+    response = requests.request("POST", url, headers=headers, data=payload)
+
+    print(response.text)
+    return True
+
+def insert_product():
+    inputs = tokenizer(result, return_tensors="pt", max_length=512, truncation=True)
+    outputs = model(**inputs)
+    # Get the output of the [CLS] token
+    embeddings = outputs.last_hidden_state[:,0,:].squeeze().detach().cpu().numpy().tolist()
+    print(embeddings)
+    import requests
+
+    url = "https://kenken999-php.hf.space/api/v1.php"
+
+    payload = f"""model_name={embeddings}&vector_text={result}&table=products&action=insert"""
+    headers = {
+        'X-Auth-Token': 'admin',
+        'Content-Type': 'application/x-www-form-urlencoded',
+        'Cookie': 'runnerSession=muvclb78zpsdjbm7y9c3; pD1lszvk6ratOZhmmgvkp=13767810ebf0782b0b51bf72dedb63b3'
+    }
+
+    response = requests.request("POST", url, headers=headers, data=payload)
+
+    print(response.text)
+    return True
+
 
 # Extensions support begin
 
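The two new helpers are byte-for-byte identical and reference `tokenizer`, `model`, and `result`, none of which is defined in their scope, so neither function runs as committed. Below is a minimal self-contained sketch of the same embed-then-POST flow with the text passed in as a parameter and the form fields URL-encoded by requests; the model name, helper name, and use of `json.dumps` are assumptions, not part of this commit.

import json
import requests
import torch
from transformers import AutoTokenizer, AutoModel

MODEL_NAME = "bert-base-multilingual-cased"  # assumption: any BERT-style encoder
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModel.from_pretrained(MODEL_NAME)

def embed_and_insert(text: str) -> bool:
    # Tokenize with the same 512-token cap the diff adds, then take the [CLS] vector
    inputs = tokenizer(text, return_tensors="pt", max_length=512, truncation=True)
    with torch.no_grad():  # inference only, no gradients needed
        outputs = model(**inputs)
    embeddings = outputs.last_hidden_state[:, 0, :].squeeze().cpu().numpy().tolist()

    # A dict payload lets requests URL-encode each field, unlike the raw f-string body
    response = requests.post(
        "https://kenken999-php.hf.space/api/v1.php",
        headers={"X-Auth-Token": "admin"},
        data={
            "model_name": json.dumps(embeddings),
            "vector_text": text,
            "table": "products",
            "action": "insert",
        },
    )
    print(response.text)
    return response.ok

With a parameterized helper like this, `create_vector` and `insert_product` would collapse into a single function.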
@@ -231,7 +277,7 @@ class DefaultResultsStorage:
         # Break the function if LLM_MODEL starts with "human" (case-insensitive)
         if LLM_MODEL.startswith("human"):
             return
-        return
+        #return
         #from langchain_community.chat_models import ChatOpenAI
         # Continue with the rest of the function
         #llm_embed = ChatOpenAI(model_name="lama3-70b-8192",
@@ -244,10 +290,26 @@
         #response = openai.embeddings.create(input=result,
         # model="lama3-70b-8192",
         #
-        inputs = tokenizer(result, return_tensors="pt")
+        inputs = tokenizer(result, return_tensors="pt", max_length=512, truncation=True)
         outputs = model(**inputs)
         # Get the output of the [CLS] token
         embeddings = outputs.last_hidden_state[:,0,:].squeeze().detach().cpu().numpy().tolist()
+        print(embeddings)
+        import requests
+
+        url = "https://kenken999-php.hf.space/api/v1.php"
+
+        payload = f"""model_name={embeddings}&vector_text={result}&table=products&action=insert"""
+        headers = {
+            'X-Auth-Token': 'admin',
+            'Content-Type': 'application/x-www-form-urlencoded',
+            'Cookie': 'runnerSession=muvclb78zpsdjbm7y9c3; pD1lszvk6ratOZhmmgvkp=13767810ebf0782b0b51bf72dedb63b3'
+        }
+
+        response = requests.request("POST", url, headers=headers, data=payload)
+
+        print(response.text)
+
         #cls_embedding = outputs.last_hidden_state[:, 0, :].squeeze()
         # Make sure the tensor is on the CPU before converting it to a NumPy array
         #cls_embedding_np = cls_embedding.detach().cpu().numpy()
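The only functional change to the existing line here is the added `max_length=512, truncation=True`. BERT-style encoders have a fixed positional-embedding table (typically 512 positions), so an untruncated long `result` would make `model(**inputs)` fail with an index error. A quick check of the cap, with an illustrative input:

long_result = "word " * 2000  # far beyond the encoder's context window
inputs = tokenizer(long_result, return_tensors="pt", max_length=512, truncation=True)
assert inputs["input_ids"].shape[1] <= 512  # truncation caps the sequence length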
@@ -374,14 +436,18 @@ def openai_call(
     max_tokens: int = 100,
 ):
     while True:
+        print("--------------------------------------------------------------------------------------")
         messages=[
             {
                 "role": "user",
                 "content": "prompt"
             }
         ],
+        print(prompt)
+        #return
         client = Groq(api_key=os.getenv("api_key"))
         res = ""
+        print("--------------------------------------------------------------------------------------")
         print(prompt)
         completion = client.chat.completions.create(
             model="llama3-8b-8192",
@@ -392,7 +458,7 @@ def openai_call(
                 }
             ],
             temperature=1,
-            max_tokens=1024,
+            max_tokens=4024,
             top_p=1,
             stream=True,
             stop=None,
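Since the call keeps `stream=True`, `completion` is an iterator of chunks rather than a single response, and the raised `max_tokens=4024` only lengthens that stream. A sketch of how such a stream is typically drained into the `res` string initialized above (the accumulation loop itself is not part of this diff):

res = ""
for chunk in completion:
    # each streamed chunk carries an incremental text delta, which can be None
    res += chunk.choices[0].delta.content or ""
print(res)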
@@ -644,11 +710,11 @@ def main():
             }
             # extract the actual result from the dictionary
             # since we don't do enrichment currently
-            # vector = enriched_result["data"]
+            vector = enriched_result["data"]
 
             result_id = f"result_{task['task_id']}"
 
-            #results_storage.add(task, result, result_id)
+            results_storage.add(task, result, result_id)
 
             # Step 3: Create new tasks and re-prioritize task list
             # only the main instance in cooperative mode does that
@@ -671,7 +737,7 @@
                     tasks_storage.replace(prioritized_tasks)
 
             # Sleep a bit before checking the task list again
-            time.sleep(5)
+            time.sleep(15)
         else:
             print('Done.')
             loop = False
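Re-enabling `results_storage.add(task, result, result_id)` makes results persist again, and the loop pause triples from 5 s to 15 s, presumably to reduce pressure on Groq rate limits. An alternative, not in this commit, would be to back off only after throttled attempts instead of always waiting longer; a small sketch:

import time

def sleep_with_backoff(attempt: int, base: float = 5.0, cap: float = 60.0) -> None:
    # exponential backoff: 5 s, 10 s, 20 s, ... capped at 60 s
    time.sleep(min(cap, base * (2 ** attempt)))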
 