KingNish committed on
Commit 15584ad
1 Parent(s): 50e18af

Update chatbot.py

Files changed (1): chatbot.py +6 -0
chatbot.py CHANGED
@@ -278,6 +278,7 @@ def model_inference( user_prompt, chat_history):
     message_groq.append({"role": "user", "content": f"{str(msg[0])}"})
     message_groq.append({"role": "assistant", "content": f"{str(msg[1])}"})
     message_groq.append({"role": "user", "content": f"[USER] {str(message_text)} , [WEB RESULTS] {str(web2)}"})
+    # its meta-llama/Meta-Llama-3.1-8B-Instruct
     stream = client_groq.chat.completions.create(model="llama-3.1-8b-instant", messages=message_groq, max_tokens=4096, stream=True)
     output = ""
     for chunk in stream:
@@ -334,6 +335,7 @@ def model_inference( user_prompt, chat_history):
     message_groq.append({"role": "user", "content": f"{str(msg[0])}"})
     message_groq.append({"role": "assistant", "content": f"{str(msg[1])}"})
     message_groq.append({"role": "user", "content": f"{str(message_text)}"})
+    # its meta-llama/Meta-Llama-3.1-70B-Instruct
     stream = client_groq.chat.completions.create(model="llama-3.1-70b-versatile", messages=message_groq, max_tokens=4096, stream=True)
     output = ""
     for chunk in stream:
@@ -350,6 +352,7 @@ def model_inference( user_prompt, chat_history):
     message_groq.append({"role": "user", "content": f"{str(msg[0])}"})
     message_groq.append({"role": "assistant", "content": f"{str(msg[1])}"})
     message_groq.append({"role": "user", "content": f"{str(message_text)}"})
+    # its meta-llama/Meta-Llama-3-70B-Instruct
     stream = client_groq.chat.completions.create(model="llama3-70b-8192", messages=message_groq, max_tokens=4096, stream=True)
     output = ""
     for chunk in stream:
@@ -380,6 +383,7 @@ def model_inference( user_prompt, chat_history):
     message_groq.append({"role": "user", "content": f"{str(msg[0])}"})
     message_groq.append({"role": "assistant", "content": f"{str(msg[1])}"})
     message_groq.append({"role": "user", "content": f"{str(message_text)}"})
+    # its meta-llama/Meta-Llama-3-70B-Instruct
     stream = client_groq.chat.completions.create(model="llama3-70b-8192", messages=message_groq, max_tokens=4096, stream=True)
     output = ""
     for chunk in stream:
@@ -396,6 +400,7 @@ def model_inference( user_prompt, chat_history):
     message_groq.append({"role": "user", "content": f"{str(msg[0])}"})
     message_groq.append({"role": "assistant", "content": f"{str(msg[1])}"})
     message_groq.append({"role": "user", "content": f"{str(message_text)}"})
+    # its meta-llama/Meta-Llama-3-8B-Instruct
     stream = client_groq.chat.completions.create(model="llama3-8b-8192", messages=message_groq, max_tokens=4096, stream=True)
     output = ""
     for chunk in stream:
@@ -425,6 +430,7 @@ def model_inference( user_prompt, chat_history):
     message_groq.append({"role": "user", "content": f"{str(msg[0])}"})
     message_groq.append({"role": "assistant", "content": f"{str(msg[1])}"})
     message_groq.append({"role": "user", "content": f"{str(message_text)}"})
+    # its meta-llama/Meta-Llama-3-8B-Instruct
     stream = client_groq.chat.completions.create(model="llama3-8b-8192", messages=message_groq, max_tokens=4096, stream=True)
     output = ""
     for chunk in stream:
 
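All six annotated call sites share one shape: replay the (user, assistant) history into message_groq, append the new user turn, then stream a completion from a Groq-hosted Llama alias. Below is a minimal sketch of that pattern, assuming the groq Python SDK and a GROQ_API_KEY environment variable; GROQ_ALIAS_TO_CHECKPOINT and stream_reply are illustrative names, not part of chatbot.py, and the mapping simply restates the comments this commit adds.

    import os
    from groq import Groq

    # Groq serving alias -> underlying checkpoint, per the comments added above.
    GROQ_ALIAS_TO_CHECKPOINT = {
        "llama-3.1-8b-instant": "meta-llama/Meta-Llama-3.1-8B-Instruct",
        "llama-3.1-70b-versatile": "meta-llama/Meta-Llama-3.1-70B-Instruct",
        "llama3-70b-8192": "meta-llama/Meta-Llama-3-70B-Instruct",
        "llama3-8b-8192": "meta-llama/Meta-Llama-3-8B-Instruct",
    }

    client_groq = Groq(api_key=os.environ["GROQ_API_KEY"])  # assumed env var

    def stream_reply(message_text, chat_history, model="llama-3.1-8b-instant"):
        # Replay prior (user, assistant) turns, then append the new user message,
        # mirroring the message_groq construction shown in the hunks above.
        message_groq = []
        for msg in chat_history:
            message_groq.append({"role": "user", "content": str(msg[0])})
            message_groq.append({"role": "assistant", "content": str(msg[1])})
        message_groq.append({"role": "user", "content": str(message_text)})
        stream = client_groq.chat.completions.create(
            model=model, messages=message_groq, max_tokens=4096, stream=True
        )
        output = ""
        for chunk in stream:
            # Each chunk carries an incremental delta; the final delta is None.
            piece = chunk.choices[0].delta.content
            if piece:
                output += piece
                yield output  # yield accumulated text so a UI can re-render it

Keeping the alias-to-checkpoint note next to each create() call, as this commit does, makes it easy to see which Meta weights each Groq model ID resolves to without leaving the file.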