KushwanthK committed
Commit 3a6af8e
Parent: 2e6d819

Update app.py

Files changed (1): app.py (+8, -7)
app.py CHANGED
@@ -106,10 +106,10 @@ def prompt_engineer(text, longtext, query):
     # summary = summarizer(prompt, max_length=1024, min_length=50)[0]["summary_text"]
 
     try:
-        llm = HuggingFaceHub(
-            repo_id="meta-llama/Meta-Llama-3-8B-Instruct", model_kwargs={"temperature": 0, "max_new_tokens": 256, "task":"text-generation"}
+        sllm = HuggingFaceHub(
+            repo_id="facebook/bart-large-cnn", model_kwargs={"temperature": 0, "max_new_tokens": 256, "task":"text-generation"}
         )
-        st.write("llm connection started..")
+        st.write("summary llm connection started..")
     except Exception as e:
         st.error(f"Error invoke: {e}")
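This hunk swaps the summarization backend from Meta-Llama-3-8B-Instruct to facebook/bart-large-cnn and renames the handle to sllm. Note that in LangChain's HuggingFaceHub wrapper, task is a top-level constructor argument rather than a model_kwargs entry, and bart-large-cnn is a summarization checkpoint. A minimal standalone sketch of that connection, not part of the commit, assuming HUGGINGFACEHUB_API_TOKEN is set in the environment:

    import streamlit as st
    from langchain_community.llms import HuggingFaceHub

    try:
        # bart-large-cnn is a summarization model, so "summarization" is
        # likely the intended task (the commit passes "text-generation"
        # inside model_kwargs instead, where the wrapper does not read it).
        sllm = HuggingFaceHub(
            repo_id="facebook/bart-large-cnn",
            task="summarization",
            model_kwargs={"temperature": 0, "max_new_tokens": 256},
        )
        st.write("summary llm connection started..")
    except Exception as e:
        st.error(f"Error invoke: {e}")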
 
@@ -126,7 +126,7 @@ def prompt_engineer(text, longtext, query):
     )
 
     # Instantiate chain
-    chain = create_stuff_documents_chain(llm, prompt)
+    chain = create_stuff_documents_chain(sllm, prompt)
 
     # Invoke chain
     summary = chain.invoke({"context": [docs]})
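The chain stuffs the document text into the prompt and runs it through the summarizer. A self-contained sketch of the same pattern; the prompt template and page_text below are illustrative, not taken from app.py:

    from langchain.chains.combine_documents import create_stuff_documents_chain
    from langchain_core.documents import Document
    from langchain_core.prompts import ChatPromptTemplate

    # Hypothetical prompt; app.py builds its own earlier in the function.
    prompt = ChatPromptTemplate.from_template(
        "Write a concise summary of the following:\n\n{context}"
    )

    chain = create_stuff_documents_chain(sllm, prompt)

    # create_stuff_documents_chain expects a list of Documents under the
    # "context" key, which is why app.py wraps its `docs` object in a list.
    page_text = "..."  # illustrative input
    summary = chain.invoke({"context": [Document(page_content=page_text)]})

The `[docs]` wrapping suggests docs is a single Document here rather than a list.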
@@ -158,9 +158,10 @@ def prompt_engineer(text, longtext, query):
     result = ""
 
     try:
-        # llm = HuggingFaceHub(
-        #     repo_id="meta-llama/Meta-Llama-3-8B-Instruct", model_kwargs={"temperature": 0, "max_new_tokens": 256, "task":"text-generation"}
-        # )
+        llm = HuggingFaceHub(
+            repo_id="meta-llama/Meta-Llama-3-8B-Instruct", model_kwargs={"temperature": 0, "max_new_tokens": 256, "task":"text-generation"}
+        )
+        st.write("summary llm connection started..")
         response_text = llm.invoke(prompt)
         escaped_query = re.escape(query)
         result = re.split(f'Answer the question based on the above context: {escaped_query}\n',response_text)[-1]
 
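For the question-answering path, the commit re-enables the Meta-Llama-3-8B-Instruct connection. Because HuggingFaceHub's text-generation output typically echoes the prompt before the completion, the code splits on the final instruction line and keeps only what follows. A worked sketch of that parsing step; the query and response text are made up:

    import re

    query = "What is dharma?"  # illustrative query
    response_text = (
        "Context: ...\n"
        f"Answer the question based on the above context: {query}\n"
        "Dharma refers to duty and righteous conduct."
    )

    # re.escape guards against regex metacharacters in the user's query;
    # [-1] keeps whatever follows the last occurrence of the marker line.
    escaped_query = re.escape(query)
    result = re.split(
        f"Answer the question based on the above context: {escaped_query}\n",
        response_text,
    )[-1]
    print(result)  # Dharma refers to duty and righteous conduct.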