KushwanthK committed on
Commit 24a430f
1 Parent(s): a66244c

Update app.py

Files changed (1)
  1. app.py +31 -26
app.py CHANGED
@@ -88,7 +88,7 @@ def get_pinecone_semantic_index(pinecone):
 
 
 def prompt_engineer(text, longtext, query):
-    summary_prompt_template = """
+    summary_prompt_inst = """
     write a concise summary of the following text delimited by triple backquotes.
     return your response in bullet points which convers the key points of the text.
 
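Review note: the rename frees the name summary_prompt_template for reuse as the ChatPromptTemplate object built in the next hunk. The diff elides the rest of the string; below is a sketch of its assumed shape, where the {context} and {question} placeholders are inferred from the .format(context=..., question=...) call added later and are not shown in this hunk.

# Assumed shape only: the first two lines are this hunk's context lines;
# the {context}/{question} placeholders are inferred from the later
# summary_prompt_template.format(context=longtext, question=...) call.
summary_prompt_inst = """
write a concise summary of the following text delimited by triple backquotes.
return your response in bullet points which convers the key points of the text.

```{context}```

{question}
"""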
@@ -105,37 +105,39 @@ def prompt_engineer(text, longtext, query):
     # Generate the summary
     # summary = summarizer(prompt, max_length=1024, min_length=50)[0]["summary_text"]
 
-    try:
-        sllm = HuggingFaceHub(
-            repo_id="meta-llama/Meta-Llama-3-8B-Instruct", model_kwargs={"temperature": 0.1, "max_new_tokens": 256, "task":"summarization"}
-        )
-        st.write("Summary Chat llm connection started..")
-    except Exception as e:
-        st.error(f"Error invoke: {e}")
-
-    from langchain.chains.combine_documents import create_stuff_documents_chain
-    from langchain.chains.llm import LLMChain
-    from langchain_core.prompts import ChatPromptTemplate
-    from langchain_core.documents import Document
-
-    docs = Document(page_content=longtext, metadata={"source": "pinecone"})
-    st.write(docs)
-    # Define prompt
-    prompt = ChatPromptTemplate.from_messages(
-        [("system", summary_prompt_template)]
-    )
+    # try:
+    #     sllm = HuggingFaceHub(
+    #         repo_id="meta-llama/Meta-Llama-3-8B-Instruct", model_kwargs={"temperature": 0.1, "max_new_tokens": 256, "task":"summarization"}
+    #     )
+    #     st.write("Summary Chat llm connection started..")
+    # except Exception as e:
+    #     st.error(f"Error invoke: {e}")
+
+    # from langchain.chains.combine_documents import create_stuff_documents_chain
+    # from langchain.chains.llm import LLMChain
+    # from langchain_core.prompts import ChatPromptTemplate
+    # from langchain_core.documents import Document
+
+    # docs = Document(page_content=longtext, metadata={"source": "pinecone"})
+    # st.write(docs)
+    # # Define prompt
+    # prompt = ChatPromptTemplate.from_messages(
+    #     [("system", summary_prompt_template)]
+    # )
 
-    # Instantiate chain
-    chain = create_stuff_documents_chain(sllm, prompt)
+    # # Instantiate chain
+    # chain = create_stuff_documents_chain(sllm, prompt)
 
-    # Invoke chain
-    summary = chain.invoke({"context": [docs]})
+    # # Invoke chain
+    # summary = chain.invoke({"context": [docs]})
+
+    summary_prompt_template = ChatPromptTemplate.from_template(summary_prompt_inst)
+    summary_prompt = summary_prompt_template.format(context=longtext, question="generate summary of text?")
 
     with st.sidebar:
         st.divider()
         st.markdown("*:red[Text Summary Generation]* from above Top 5 **:green[similarity search results]**.")
-        st.write(summary)
-        st.divider()
+
 
     GENERATION_PROMPT_TEMPLATE = """
     Instructions:
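Review note: the try/except HuggingFaceHub summarization endpoint and the create_stuff_documents_chain pipeline are commented out rather than deleted; the summary is now produced by formatting the instruction string directly and reusing the single text-generation LLM set up further down. A minimal runnable sketch of the new summary-prompt construction, assuming langchain_core is installed; the sample longtext here stands in for the concatenated top-5 search results:

from langchain_core.prompts import ChatPromptTemplate

# Stand-in for the instruction string defined at the top of prompt_engineer
# (assumed placeholders; see the note after the first hunk).
summary_prompt_inst = """
write a concise summary of the following text delimited by triple backquotes.

```{context}```

{question}
"""

longtext = "passage one ... passage five"  # concatenated similarity-search hits

# from_template infers the input variables from the braces; format() fills
# them and returns a plain string that any llm.invoke(...) can accept.
summary_prompt_template = ChatPromptTemplate.from_template(summary_prompt_inst)
summary_prompt = summary_prompt_template.format(
    context=longtext, question="generate summary of text?"
)
print(summary_prompt)

One consequence of this change: summary no longer exists when the sidebar block runs, so st.write(summary) and st.divider() leave the sidebar and reappear after the LLM call in the last hunk, where the summary is actually generated.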
@@ -162,6 +164,9 @@ def prompt_engineer(text, longtext, query):
         repo_id="meta-llama/Meta-Llama-3-8B-Instruct", model_kwargs={"temperature": 0, "task":"text-generation"}
     )
     st.write("GEN llm connection started..")
+    summary = llm.invoke(summary_prompt)
+    st.write(summary)
+    st.divider()
     response_text = llm.invoke(prompt)
     escaped_query = re.escape(query)
     result = re.split(f'Answer the question based on the above context: {escaped_query}\n',response_text)[-1]
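Review note: summary = llm.invoke(summary_prompt) reuses the GEN llm (temperature 0, task text-generation), so the summarization-specific kwargs from the commented-out block (temperature 0.1, max_new_tokens 256) no longer apply. The re.split post-processing exists because Hub text-generation endpoints often echo the prompt back; splitting on the escaped question line and keeping the last element strips everything up to and including it. A self-contained illustration with a fabricated response_text (real runs get it from llm.invoke(prompt)):

import re

query = "what is dharma?"  # hypothetical user question
# Fabricated echo-style response standing in for llm.invoke(prompt) output.
response_text = (
    "Instructions: ...\n"
    f"Answer the question based on the above context: {query}\n"
    "Dharma is the principle of cosmic order."
)

escaped_query = re.escape(query)  # the question may contain regex metacharacters
result = re.split(
    f"Answer the question based on the above context: {escaped_query}\n",
    response_text,
)[-1]
print(result)  # -> Dharma is the principle of cosmic order.

re.escape matters here: if the user's question contained characters like "?" or "(", an unescaped split pattern would be a malformed or wrong regex, and the answer extraction would silently return the whole echoed prompt.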