root committed on
Commit c5ab12e
1 Parent(s): c839b4c

deepnote update

Files changed (1)
  1. app.py +93 -57
app.py CHANGED
@@ -14,10 +14,16 @@ from langchain.vectorstores import Chroma
 from langchain.vectorstores.base import VectorStoreRetriever
 
 
-current_agent = os.environ["AGENT_SHEET"]
+current_agent = "Demo"
 vectordb = None
 
 
+def load_agent():
+    df = pd.read_excel(os.environ["AGENT_SHEET"], header=0, keep_default_na=False)
+    df = df[df["Agent"] == current_agent]
+    return df
+
+
 def load_dialogues():
     df = pd.read_excel(os.environ["DIALOGUE_SHEET"], header=0, keep_default_na=False)
     df = df[df["Agent"] == current_agent]
@@ -27,13 +33,13 @@ def load_dialogues():
 def load_persona():
     df = pd.read_excel(os.environ["PERSONA_SHEET"], header=0, keep_default_na=False)
     df = df[df["Agent"] == current_agent]
-    return df.astype(str)
+    return df
 
 
 def load_prompts():
     df = pd.read_excel(os.environ["PROMPT_SHEET"], header=0, keep_default_na=False)
     df = df[df["Agent"] == current_agent]
-    return df.astype(str)
+    return df
 
 
 def load_documents(df, page_content_column: str):
@@ -87,51 +93,48 @@ def get_retriever(context_state: str, vectordb):
 
 @cl.langchain_factory(use_async=True)
 def factory():
+    df_agent = load_agent()
     load_vectordb()
-    df_prompts = load_prompts()
     user_session.set("context_state", "")
+    user_session.set("df_prompts", load_prompts())
+    user_session.set("df_persona", load_persona())
 
     llm_settings = LLMSettings(
         model_name="text-davinci-003",
-        temperature=df_prompts["Temperature"].values[0],
+        temperature=0.7,
     )
     user_session.set("llm_settings", llm_settings)
 
-    llm = AzureOpenAI(
-        deployment_name="davinci003",
-        model_name=llm_settings.model_name,
-        temperature=llm_settings.temperature,
-        streaming=True,
-    )
-
-    utterance_prompt = PromptTemplate.from_template(df_prompts["Template"].values[0])
-
     chat_memory = ConversationBufferWindowMemory(
         memory_key="History",
         input_key="Utterance",
-        k=df_prompts["History"].values[0],
+        k=df_agent["History"].values[0],
     )
+    user_session.set("chat_memory", chat_memory)
 
-    utterance_chain = LLMChain(
-        prompt=utterance_prompt,
-        llm=llm,
-        verbose=False,
-        memory=chat_memory,
+    llm = AzureOpenAI(
+        deployment_name="davinci003",
+        model_name=llm_settings.model_name,
+        temperature=llm_settings.temperature,
+        streaming=True,
    )
 
-    continuation_prompt = PromptTemplate.from_template(df_prompts["Template"].values[1])
-
-    continuation_chain = LLMChain(
-        prompt=continuation_prompt,
+    default_prompt = """{History}
+##
+System: {Persona}
+##
+Human: {Utterance}
+Response: {Response}
+##
+AI:"""
+
+    return LLMChain(
+        prompt=PromptTemplate.from_template(default_prompt),
         llm=llm,
-        verbose=False,
+        verbose=True,
         memory=chat_memory,
     )
 
-    user_session.set("continuation_chain", continuation_chain)
-
-    return utterance_chain
-
 
 @cl.langchain_run
 async def run(agent, input_str):
@@ -140,48 +143,81 @@ async def run(agent, input_str):
         vectordb = load_vectordb(True)
         return await cl.Message(content="Data loaded").send()
 
-    df_persona = load_persona()
+    df_prompts = user_session.get("df_prompts")
+    df_persona = user_session.get("df_persona")
+    llm_settings = user_session.get("llm_settings")
 
     retriever = get_retriever(user_session.get("context_state"), vectordb)
-
     document = retriever.get_relevant_documents(query=input_str)
 
-    response = await agent.acall(
-        {
-            "Persona": df_persona.loc[df_persona["AI"] == document[0].metadata["AI"]][
-                "Persona"
-            ].values[0],
-            "Utterance": input_str,
-            "Response": document[0].metadata["Response"],
-        },
-        callbacks=[cl.AsyncLangchainCallbackHandler()],
-    )
-    await cl.Message(
-        content=response["text"],
-        author=document[0].metadata["AI"],
-        llm_settings=user_session.get("llm_settings"),
-    ).send()
-    user_session.set("context_state", document[0].metadata["Contextualisation"])
-    continuation = document[0].metadata["Continuation"]
+    prompt = document[0].metadata["Prompt"]
+    if not prompt:
+        await cl.Message(
+            content=document[0].metadata["Response"],
+            author=document[0].metadata["Role"],
+        ).send()
+    else:
+        agent.prompt = PromptTemplate.from_template(
+            df_prompts.loc[df_prompts["Prompt"] == prompt]["Template"].values[0]
+        )
+        llm_settings.temperature = df_prompts.loc[df_prompts["Prompt"] == prompt][
+            "Temperature"
+        ].values[0]
+        agent.llm.temperature = llm_settings.temperature
 
-    while continuation != "":
-        document_continuation = vectordb.get(where={"Intent": continuation})
-        continuation_chain = user_session.get("continuation_chain")
-        response = await continuation_chain.acall(
+        response = await agent.acall(
             {
                 "Persona": df_persona.loc[
-                    df_persona["AI"] == document_continuation["metadatas"][0]["AI"]
+                    df_persona["Role"] == document[0].metadata["Role"]
                 ]["Persona"].values[0],
-                "Utterance": "",
-                "Response": document_continuation["metadatas"][0]["Response"],
+                "Utterance": input_str,
+                "Response": document[0].metadata["Response"],
             },
             callbacks=[cl.AsyncLangchainCallbackHandler()],
         )
         await cl.Message(
             content=response["text"],
-            author=document_continuation["metadatas"][0]["AI"],
-            llm_settings=user_session.get("llm_settings"),
+            author=document[0].metadata["Role"],
+            llm_settings=llm_settings,
         ).send()
+
+    user_session.set("context_state", document[0].metadata["Contextualisation"])
+    continuation = document[0].metadata["Continuation"]
+
+    while continuation != "":
+        document_continuation = vectordb.get(where={"Intent": continuation})
+
+        prompt = document_continuation["metadatas"][0]["Prompt"]
+        if not prompt:
+            await cl.Message(
+                content=document_continuation["metadatas"][0]["Response"],
+                author=document_continuation["metadatas"][0]["Role"],
+            ).send()
+        else:
+            agent.prompt = PromptTemplate.from_template(
+                df_prompts.loc[df_prompts["Prompt"] == prompt]["Template"].values[0]
+            )
+            llm_settings.temperature = df_prompts.loc[df_prompts["Prompt"] == prompt][
+                "Temperature"
+            ].values[0]
+            agent.llm.temperature = llm_settings.temperature
+
+            response = await agent.acall(
+                {
+                    "Persona": df_persona.loc[
+                        df_persona["Role"]
+                        == document_continuation["metadatas"][0]["Role"]
+                    ]["Persona"].values[0],
+                    "Utterance": "",
+                    "Response": document_continuation["metadatas"][0]["Response"],
+                },
+                callbacks=[cl.AsyncLangchainCallbackHandler()],
+            )
+            await cl.Message(
+                content=response["text"],
+                author=document_continuation["metadatas"][0]["Role"],
+                llm_settings=llm_settings,
+            ).send()
         user_session.set(
            "context_state",
            document_continuation["metadatas"][0]["Contextualisation"],
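
Note on the continuation loop: follow-up turns are fetched by exact metadata filter rather than by similarity search. Chroma's get() returns a dict of parallel lists ("ids", "metadatas", "documents", ...) instead of Document objects, which is why the loop indexes result["metadatas"][0] while the first turn uses document[0].metadata. A standalone sketch, assuming a populated vectordb and a hypothetical intent name:

    # vectordb.get() filters by metadata; no embedding lookup is involved.
    result = vectordb.get(where={"Intent": "greeting_followup"})
    metadata = result["metadatas"][0]  # first matching dialogue row
    print(metadata["Response"], metadata["Continuation"])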