ArturG9 committed on
Commit 2bc8ef5
1 Parent(s): fbb9f3e

Update app.py

Files changed (1)
  1. app.py +29 -61
app.py CHANGED
@@ -20,7 +20,7 @@ from HTML_templates import css, bot_template, user_template
 
 
 
-def get_vectorstore(text_chunks):
+def retriever_from_chroma(text_chunks, search_type, k):
     model_name = "sentence-transformers/all-mpnet-base-v2"
     model_kwargs = {'device': 'cpu'}
     encode_kwargs = {'normalize_embeddings': True}
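Note: the context lines above configure the embedding model, but the constructor itself falls outside the hunk. A minimal sketch of how these kwargs are conventionally wired up, assuming the app uses LangChain's HuggingFaceEmbeddings (the actual import and call in app.py are not visible in this diff):

```python
# Sketch only: the embeddings constructor sits outside this hunk, so the exact
# wiring in app.py is an assumption.
from langchain_community.embeddings import HuggingFaceEmbeddings

embeddings = HuggingFaceEmbeddings(
    model_name="sentence-transformers/all-mpnet-base-v2",
    model_kwargs={"device": "cpu"},                # embed on CPU
    encode_kwargs={"normalize_embeddings": True},  # unit-norm vectors, cosine-friendly
)
```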
@@ -34,7 +34,9 @@ def get_vectorstore(text_chunks):
         os.makedirs(vectorstore_path)
     vectorstore = Chroma.from_documents(
         documents=text_chunks, embedding=embeddings, persist_directory="docs/chroma/")
-    return vectorstore
+    retriever = vectorstore.as_retriever(search_type=search_type, search_kwargs={"k": k})
+    return retriever
+
 
 
 
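Note: the helper now hands back a retriever instead of the raw vector store. A hedged usage sketch; the 'mmr' search type and k=7 match what the commit passes further down, while the query string is purely hypothetical:

```python
# Sketch: exercising the refactored helper. "mmr" = maximal marginal relevance,
# which balances relevance against diversity; k caps how many chunks come back.
retriever = retriever_from_chroma(text_chunks, search_type="mmr", k=7)
docs = retriever.invoke("What are the key findings?")  # hypothetical query
for doc in docs:
    print(doc.metadata.get("source"), doc.page_content[:80])
```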
@@ -57,16 +59,16 @@ for filename in os.listdir(data_path):
 
 
 
-docs = split_docs(documents, 350, 40)
+docs = split_docs(documents, 250, 20)
 
-vectorstore = get_vectorstore(docs)
+retriever = retriever_from_chroma(docs, 'mmr', 7)
 
 
 
 
 
 
-def main(vectorstore):
+def main(retriever):
 
     st.set_page_config(page_title="Chat with multiple PDFs",
                        page_icon=":books:")
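Note: split_docs is defined elsewhere in app.py, so only its arguments change here, from (350, 40) to (250, 20). A sketch of what such a helper conventionally looks like, under the assumption that it wraps RecursiveCharacterTextSplitter:

```python
# Assumed implementation of split_docs -- the real body is outside this diff.
from langchain_text_splitters import RecursiveCharacterTextSplitter

def split_docs(documents, chunk_size, chunk_overlap):
    splitter = RecursiveCharacterTextSplitter(
        chunk_size=chunk_size,        # 250 characters per chunk after this commit
        chunk_overlap=chunk_overlap,  # 20 characters shared between neighbours
    )
    return splitter.split_documents(documents)
```

With k=7, the smaller chunks put roughly 7 x 250 characters of retrieved context into each prompt, well inside the n_ctx=22000 window set further down.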
@@ -78,34 +80,28 @@ def main(vectorstore):
         st.session_state.chat_history = None
 
     st.header("Chat with multiple PDFs :books:")
+
+    with st.chat_message("Assistant"):
+        st.write("Hello, my name is Robert. How can I help you?")
     user_question = st.text_input("Ask a question about your documents:")
+    with st.chat_message("User"):
+        st.write(user_question)
     if user_question:
-        handle_userinput(user_question, vectorstore)
+        handle_userinput(user_question, retriever)
 
 
-def handle_userinput(user_question, vectorstore):
-    Rag_chain = create_conversational_rag_chain(vectorstore)
-    msgs = StreamlitChatMessageHistory(key="special_app_key")
-    response = Rag_chain({'question': user_question})
-    st.session_state.chat_history = response['chat_history']
-
-    for i, message in enumerate(st.session_state.chat_history):
-        if i % 2 == 0:
-            st.write(user_template.replace(
-                "{{MSG}}", message.content), unsafe_allow_html=True)
-        else:
-            st.write(bot_template.replace(
-                "{{MSG}}", message.content), unsafe_allow_html=True)
-
-    if 'retrieved_documents' in response:
-        st.subheader("Retrieved Documents")
-        for doc in response['source_documents']:
-            st.write(f"Document: {doc.metadata['source']}")
-            st.write(doc.page_content)
-
+def handle_userinput(user_question, retriever):
+    # Retrieve the chunks most relevant to the question
+    docs = retriever.invoke(user_question)
+
+    doc_txt = [doc.page_content for doc in docs]
+
+    rag_chain = create_conversational_rag_chain(retriever)
+    response = rag_chain.invoke({"context": doc_txt, "question": user_question})
+    with st.chat_message("Assistant"):
+        st.write(response)
 
 
-def create_conversational_rag_chain(vectorstore):
+def create_conversational_rag_chain(retriever):
 
     model_path = ('qwen2-0_5b-instruct-q4_0.gguf')
 
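Note: with st.text_input, the "User" chat bubble above is rendered even before a question is submitted, since user_question is an empty string on first run. A sketch of a guard-first variant, assuming the same retriever and handle_userinput from this commit:

```python
# Sketch: only render the user's bubble once a question actually exists.
import streamlit as st

user_question = st.text_input("Ask a question about your documents:")
if user_question:
    with st.chat_message("User"):
        st.write(user_question)
    handle_userinput(user_question, retriever)
```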
@@ -119,47 +115,19 @@ def create_conversational_rag_chain(vectorstore):
         n_ctx=22000,
         max_tokens=200,
         repeat_penalty=1.7,
-        callback_manager=callback_manager,
+        # callback_manager=callback_manager,
         verbose=False,
     )
 
-    contextualize_q_system_prompt = """Given a context, chat history and the latest user question
-    which maybe reference context in the chat history, formulate a standalone question
-    which can be understood without the chat history. Do NOT answer the question,
-    just reformulate it if needed and otherwise return it as is."""
-
-    contextualize_q_prompt = ChatPromptTemplate.from_messages(
-        [
-            ("system", contextualize_q_system_prompt),
-            MessagesPlaceholder("chat_history"),
-            ("human", "{input}"),
-        ]
-    )
-
-    ha_retriever = create_history_aware_retriever(llm, vectorstore.as_retriever(), contextualize_q_prompt)
-
-    qa_system_prompt = """You are an assistant for question-answering tasks. Use the following pieces of retrieved context to answer the question. If you don't know the answer, just say that you don't know. Be as informative as possible, be polite and formal.\n{context}"""
-
-    qa_prompt = ChatPromptTemplate.from_messages(
-        [
-            ("system", qa_system_prompt),
-            MessagesPlaceholder("chat_history"),
-            ("human", "{input}"),
-        ]
-    )
-
-    question_answer_chain = create_stuff_documents_chain(llm, qa_prompt)
-    rag_chain = create_retrieval_chain(ha_retriever, question_answer_chain)
-    msgs = StreamlitChatMessageHistory(key="special_app_key")
-
-    conversational_rag_chain = RunnableWithMessageHistory(
-        rag_chain,
-        lambda session_id: msgs,
-        input_messages_key="input",
-        history_messages_key="chat_history",
-        output_messages_key="answer",
-        return_source_documents=True
-    )
+    template = """Answer the question based only on the following context:
+    {context}
+
+    Question: {question}
+    """
+    prompt = ChatPromptTemplate.from_template(template)
+
+    rag_chain = prompt | llm | StrOutputParser()
 
     return rag_chain
 
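Note: the history-aware chain (RunnableWithMessageHistory, source-document passthrough) is replaced by a stateless LCEL pipeline, so chat history and sources are no longer threaded through. A sketch of calling the new chain end to end; the question string is hypothetical:

```python
# Sketch: end-to-end call of the simplified chain. StrOutputParser means the
# result is a plain string, not a dict with "answer"/"source_documents" keys.
question = "Summarise the uploaded documents."  # hypothetical question
docs = retriever.invoke(question)
rag_chain = create_conversational_rag_chain(retriever)
answer = rag_chain.invoke({
    "context": "\n\n".join(doc.page_content for doc in docs),  # stuffed context
    "question": question,
})
print(answer)
```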