MefhigosetH committed on
Commit
0be597b
1 Parent(s): cdba2c8

Migramos a Groq

Browse files
Files changed (3) hide show
  1. Pipfile +1 -0
  2. app.py +11 -15
  3. requirements.txt +1 -4
Pipfile CHANGED
@@ -12,6 +12,7 @@ langchain-core = "*"
12
  transformers = "*"
13
  llama-cpp-python = "*"
14
  langchain-community = "*"
 
15
 
16
  [dev-packages]
17
 
 
12
  transformers = "*"
13
  llama-cpp-python = "*"
14
  langchain-community = "*"
15
+ langchain-groq = "*"
16
 
17
  [dev-packages]
18
 
app.py CHANGED
@@ -1,6 +1,5 @@
1
  import gradio as gr
2
- from huggingface_hub import hf_hub_download
3
- from langchain_community.chat_models import ChatLlamaCpp
4
  from langchain_core.prompts import ChatPromptTemplate
5
  from langchain.globals import set_verbose, set_debug
6
  import os
@@ -31,20 +30,17 @@ def initLLM():
31
  """
32
  Inicializamos el modelo LLM.
33
 
34
- Otros modelos que podríamos usar:
35
- * bartowski/Llama-3.2-1B-Instruct-GGUF
36
- * HuggingFaceH4/zephyr-7b-beta
37
  """
38
-
39
- model_path = hf_hub_download(repo_id="bartowski/Llama-3.2-1B-Instruct-GGUF", filename="Llama-3.2-1B-Instruct-IQ4_XS.gguf")
40
-
41
- llm = ChatLlamaCpp(
42
- model_path=model_path,
43
- temperature=0.7,
44
- max_tokens=500,
45
- top_p=1,
46
- # callback_manager=callback_manager,
47
- # verbose=True, # Verbose is required to pass to the callback manager
48
  )
49
 
50
  return llm
 
1
  import gradio as gr
2
+ from langchain_groq import ChatGroq
 
3
  from langchain_core.prompts import ChatPromptTemplate
4
  from langchain.globals import set_verbose, set_debug
5
  import os
 
30
  """
31
  Inicializamos el modelo LLM.
32
 
33
+ Modelos:
34
+ * llama3-8b-8192
35
+ * mixtral-8x7b-32768
36
  """
37
+ llm = ChatGroq(
38
+ model="mixtral-8x7b-32768",
39
+ temperature=0,
40
+ max_tokens=None,
41
+ timeout=None,
42
+ max_retries=2,
43
+ # other params...
 
 
 
44
  )
45
 
46
  return llm
requirements.txt CHANGED
@@ -1,6 +1,3 @@
1
- huggingface-hub
2
- langchain-huggingface
3
  langchain
4
  langchain-core
5
- langchain-community
6
- llama-cpp-python
 
 
 
1
  langchain
2
  langchain-core
3
+ langchain-groq