angry-meow commited on
Commit
0f09cc9
1 Parent(s): 35c49ca

moving to a graph

Browse files
__pycache__/constants.cpython-311.pyc CHANGED
Binary files a/__pycache__/constants.cpython-311.pyc and b/__pycache__/constants.cpython-311.pyc differ
 
__pycache__/models.cpython-311.pyc CHANGED
Binary files a/__pycache__/models.cpython-311.pyc and b/__pycache__/models.cpython-311.pyc differ
 
agents.py CHANGED
@@ -3,55 +3,6 @@ from operator import itemgetter
3
  from langchain_core.runnables.passthrough import RunnablePassthrough
4
  import models
5
  import prompts
6
- import tools
7
-
8
- search_agent = create_team_agent(
9
- models.gpt4o,
10
- [tools.tavily_tool],
11
- "You are a research assistant who can search for up-to-date info using the tavily search engine.",
12
- "Search",
13
- ["Search", "PaperInformationRetriever"]
14
- )
15
-
16
- research_agent = create_team_agent(
17
- models.gpt4o,
18
- [tools.retrieve_information],
19
- "You are a research assistant who can provide specific information on the provided paper: 'murthy-loneliness.pdf'. You must only respond with information about the paper related to the request.",
20
- "PaperInformationRetriever",
21
- ["Search", "PaperInformationRetriever"]
22
- )
23
-
24
- doc_writer_agent = create_team_agent(
25
- models.gpt4o,
26
- [tools.write_document, tools.edit_document, tools.read_document],
27
- "You are an expert writing technical social media posts.",
28
- "DocWriter",
29
- ["DocWriter", "NoteTaker", "CopyEditor", "VoiceEditor"]
30
- )
31
-
32
- note_taking_agent = create_team_agent(
33
- models.gpt4o,
34
- [tools.create_outline, tools.read_document],
35
- "You are an expert senior researcher tasked with writing a social media post outline and taking notes to craft a social media post.",
36
- "NoteTaker",
37
- ["DocWriter", "NoteTaker", "CopyEditor", "VoiceEditor"]
38
- )
39
-
40
- copy_editor_agent = create_team_agent(
41
- models.gpt4o,
42
- [tools.write_document, tools.edit_document, tools.read_document],
43
- "You are an expert copy editor who focuses on fixing grammar, spelling, and tone issues.",
44
- "CopyEditor",
45
- ["DocWriter", "NoteTaker", "CopyEditor", "VoiceEditor"]
46
- )
47
-
48
- voice_editor_agent = create_team_agent(
49
- models.gpt4o,
50
- [tools.write_document, tools.edit_document, tools.read_document],
51
- "You are an expert in crafting and refining the voice and tone of social media posts. You edit the document to ensure it has a consistent, professional, and engaging voice appropriate for social media platforms.",
52
- "VoiceEditor",
53
- ["DocWriter", "NoteTaker", "CopyEditor", "VoiceEditor"]
54
- )
55
 
56
  simple_rag_chain = (
57
  {
 
3
  from langchain_core.runnables.passthrough import RunnablePassthrough
4
  import models
5
  import prompts
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6
 
7
  simple_rag_chain = (
8
  {
app.py CHANGED
@@ -2,6 +2,7 @@ import chainlit as cl
2
  from helper_functions import process_file, load_documents_from_url
3
  import models
4
  import agents
 
5
  import asyncio
6
 
7
  @cl.on_chat_start
@@ -54,8 +55,10 @@ async def main(message: cl.Message):
54
  await handle_response(res)
55
  else:
56
  # Handle the question as usual
57
- await cl.Message(content="Thinking about it, give me a second...", disable_human_feedback=True).send()
58
- response = await asyncio.to_thread(retrieval_augmented_qa_chain.invoke, {"question": message.content})
 
 
59
  await cl.Message(content=response.content).send()
60
  res = await ask_action()
61
  await handle_response(res)
 
2
  from helper_functions import process_file, load_documents_from_url
3
  import models
4
  import agents
5
+ import graph
6
  import asyncio
7
 
8
  @cl.on_chat_start
 
55
  await handle_response(res)
56
  else:
57
  # Handle the question as usual
58
+ await cl.Message(content="Our specialist is working...", disable_human_feedback=True).send()
59
+ #response = await asyncio.to_thread(retrieval_augmented_qa_chain.invoke, {"question": message.content})
60
+ response = await asyncio.to_thread(graph.getSocialMediaPost, message.content)
61
+ print(response)
62
  await cl.Message(content=response.content).send()
63
  res = await ask_action()
64
  await handle_response(res)
graph.py CHANGED
@@ -1,33 +1,35 @@
1
- from typing import Dict, List, TypedDict, Annotated, Sequence
2
- from langgraph.graph import Graph, StateGraph, END
3
- from langgraph.prebuilt import ToolExecutor
4
  from langchain.schema import StrOutputParser
5
  from langchain.schema.runnable import RunnablePassthrough
6
  from langchain_community.tools.tavily_search import TavilySearchResults
7
  import models
8
  import prompts
9
- from helper_functions import format_docs
10
  from operator import itemgetter
 
11
 
12
- # Define the state structure
13
- class State(TypedDict):
14
- messages: Sequence[str]
 
 
 
15
  topic: str
16
  research_data: Dict[str, str]
17
- team_members: List[str]
18
- draft_posts: Sequence[str]
19
- final_post: str
20
 
21
-
22
- research_members = ["Qdrant_researcher", "Web_researcher"]
23
- # Research Agent Pieces
24
  qdrant_research_chain = (
25
  {"context": itemgetter("topic") | models.compression_retriever, "topic": itemgetter("topic")}
26
  | RunnablePassthrough.assign(context=itemgetter("context"))
27
  | {"response": prompts.research_query_prompt | models.gpt4o_mini | StrOutputParser(), "context": itemgetter("context")}
28
  )
29
 
30
- # Web Search Agent Pieces
31
  tavily_tool = TavilySearchResults(max_results=3)
32
  query_chain = ( prompts.search_query_prompt | models.gpt4o_mini | StrOutputParser() )
33
  tavily_simple = ({"tav_results": tavily_tool} | prompts.tavily_prompt | models.gpt4o_mini | StrOutputParser())
@@ -35,133 +37,304 @@ tavily_chain = (
35
  {"query": query_chain} | tavily_simple
36
  )
37
 
38
- def query_qdrant(state: State) -> State:
39
- # Extract the last message as the input
40
- topic = state["topic"]
41
 
42
- # Run the chain
 
 
 
 
 
43
  result = qdrant_research_chain.invoke({"topic": topic})
44
-
45
- # Update the state with the research results
46
- state["research_data"]["qdrant_results"] = result
 
47
 
48
  return state
49
 
50
- def web_search(state: State) -> State:
 
51
  # Extract the last message as the topic
52
  topic = state["topic"]
53
-
54
  # Get the Qdrant results from the state
55
  qdrant_results = state["research_data"].get("qdrant_results", "No previous results available.")
56
-
57
  # Run the web search chain
58
  result = tavily_chain.invoke({
59
  "topic": topic,
60
  "qdrant_results": qdrant_results
61
  })
62
-
63
  # Update the state with the web search results
64
  state["research_data"]["web_search_results"] = result
65
-
 
66
  return state
67
 
68
  def research_supervisor(state):
69
- # Implement research supervision logic
 
 
 
 
 
 
 
 
 
 
 
 
 
70
  return state
71
 
72
- def post_creation(state):
73
- # Implement post creation logic
 
 
74
  return state
75
 
76
- def copy_editing(state):
77
- # Implement copy editing logic
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
78
  return state
79
 
80
- def voice_editing(state):
81
- # Implement voice editing logic
 
 
 
 
 
 
82
  return state
83
 
84
  def post_review(state):
85
- # Implement post review logic
 
 
 
 
 
 
 
 
 
 
 
 
 
 
86
  return state
87
 
88
  def writing_supervisor(state):
89
- # Implement writing supervision logic
 
 
 
 
 
 
 
 
 
 
 
 
 
 
90
  return state
91
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
92
  def overall_supervisor(state):
 
93
  # Implement overall supervision logic
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
94
  return state
95
 
96
- # Create the research team graph
97
- research_graph = StateGraph(State)
 
98
 
 
 
 
 
99
  research_graph.add_node("query_qdrant", query_qdrant)
100
  research_graph.add_node("web_search", web_search)
101
  research_graph.add_node("research_supervisor", research_supervisor)
102
-
 
 
 
 
103
  research_graph.add_edge("query_qdrant", "research_supervisor")
104
  research_graph.add_edge("web_search", "research_supervisor")
105
  research_graph.add_conditional_edges(
106
  "research_supervisor",
107
  lambda x: x["next"],
108
- {"query_qdrant": "query_qdrant", "web_search": "web_search", "FINISH": END},
109
  )
110
- #research_graph.add_edge("research_supervisor", END)
111
-
112
- research_graph.set_entry_point("research_supervisor")
113
  research_graph_comp = research_graph.compile()
114
 
115
- # Create the writing team graph
116
- writing_graph = StateGraph(State)
117
-
 
118
  writing_graph.add_node("post_creation", post_creation)
119
- writing_graph.add_node("copy_editing", copy_editing)
120
- writing_graph.add_node("voice_editing", voice_editing)
121
  writing_graph.add_node("post_review", post_review)
122
  writing_graph.add_node("writing_supervisor", writing_supervisor)
123
-
124
- writing_graph.add_edge("post_creation", "writing_supervisor")
125
- writing_graph.add_edge("copy_editing", "writing_supervisor")
126
- writing_graph.add_edge("voice_editing", "writing_supervisor")
 
 
 
127
  writing_graph.add_edge("post_review", "writing_supervisor")
128
  writing_graph.add_conditional_edges(
129
  "writing_supervisor",
130
  lambda x: x["next"],
131
- {"post_creation": "post_creation",
132
- "copy_editing": "copy_editing",
133
- "voice_editing": "voice_editing",
134
- "post_review": "post_review",
135
- "FINISH": END},
136
  )
137
- #writing_graph.add_edge("writing_supervisor", END)
138
-
139
- writing_graph.set_entry_point("writing_supervisor")
140
 
141
- writing_graph_comp = research_graph.compile()
142
-
143
- # Create the overall graph
144
  overall_graph = StateGraph(State)
145
-
146
- # Add the research and writing team graphs as nodes
147
- overall_graph.add_node("research_team", research_graph)
148
- overall_graph.add_node("writing_team", writing_graph)
149
-
150
- # Add the overall supervisor node
151
  overall_graph.add_node("overall_supervisor", overall_supervisor)
152
-
 
 
 
 
153
  overall_graph.set_entry_point("overall_supervisor")
154
-
155
- # Connect the nodes
156
- overall_graph.add_edge("research_team", "overall_supervisor")
157
- overall_graph.add_edge("writing_team", "overall_supervisor")
158
  overall_graph.add_conditional_edges(
159
  "overall_supervisor",
160
- lambda x: x["next"],
161
- {"research_team": "research_team",
162
- "writing_team": "writing_team",
163
  "FINISH": END},
164
  )
 
165
 
166
- # Compile the graph
167
- app = overall_graph.compile()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Dict, List, TypedDict, Sequence
2
+ from langgraph.graph import StateGraph, END
 
3
  from langchain.schema import StrOutputParser
4
  from langchain.schema.runnable import RunnablePassthrough
5
  from langchain_community.tools.tavily_search import TavilySearchResults
6
  import models
7
  import prompts
8
+ import json
9
  from operator import itemgetter
10
+ from langgraph.errors import GraphRecursionError
11
 
12
+
13
+ #######################################
14
+ ### Research Team Components ###
15
+ #######################################
16
# State carried through the research subgraph.
class ResearchState(TypedDict):
    workflow: List[str]            # ordered trail of node names visited (debug/trace)
    topic: str                     # topic the team is researching
    research_data: Dict[str, str]  # accumulated results, keyed by source ("qdrant_results", "web_search_results")
    next: str                      # routing key set by research_supervisor ("query_qdrant" | "web_search" | "FINISH")
    message_to_manager: str        # note from this team's supervisor up to the project manager
    message_from_manager: str      # instruction passed down from the project manager
23
 
24
+ #
25
+ # Reserach Chains and Tools
26
+ #
27
# Retrieve topic-relevant context from Qdrant (via the compression retriever),
# then summarize it with gpt4o_mini.  Output is a dict:
# {"response": <summary str>, "context": <retrieved docs>}.
qdrant_research_chain = (
    {"context": itemgetter("topic") | models.compression_retriever, "topic": itemgetter("topic")}
    | RunnablePassthrough.assign(context=itemgetter("context"))
    | {"response": prompts.research_query_prompt | models.gpt4o_mini | StrOutputParser(), "context": itemgetter("context")}
)
32
 
 
33
  tavily_tool = TavilySearchResults(max_results=3)
34
  query_chain = ( prompts.search_query_prompt | models.gpt4o_mini | StrOutputParser() )
35
  tavily_simple = ({"tav_results": tavily_tool} | prompts.tavily_prompt | models.gpt4o_mini | StrOutputParser())
 
37
  {"query": query_chain} | tavily_simple
38
  )
39
 
40
# Decides the research team's next action.  Returns plain text that
# research_supervisor() parses line-by-line ("Next Action: ...", etc.).
research_supervisor_chain = (
    prompts.research_supervisor_prompt | models.gpt4o | StrOutputParser()
)
43
 
44
+ #
45
+ # Reserach Node Defs
46
+ #
47
def query_qdrant(state: ResearchState) -> ResearchState:
    """Research node: query the Qdrant-backed chain for the current topic.

    Stores the chain's text response under research_data["qdrant_results"]
    and records the visit in the workflow trail.
    """
    topic = state["topic"]
    result = qdrant_research_chain.invoke({"topic": topic})
    # The chain returns {"response": <str>, "context": <docs>}; keep only the text.
    state["research_data"]["qdrant_results"] = result["response"]
    state["workflow"].append("query_qdrant")
    return state
57
 
58
def web_search(state: ResearchState) -> ResearchState:
    """Research node: run a Tavily web search that complements the Qdrant results.

    Stores the search summary under research_data["web_search_results"] and
    records the visit in the workflow trail.
    """
    topic = state["topic"]
    # Pass along whatever Qdrant already found so the generated search query
    # targets complementary information instead of duplicating it.
    qdrant_results = state["research_data"].get("qdrant_results", "No previous results available.")
    result = tavily_chain.invoke({
        "topic": topic,
        "qdrant_results": qdrant_results,
    })
    state["research_data"]["web_search_results"] = result
    state["workflow"].append("web_search")
    return state
76
 
77
def research_supervisor(state):
    """Supervisor node for the research team.

    Asks the supervisor chain what to do next and parses its line-oriented
    reply into state["next"] (conditional-edge routing key) and
    state["message_to_manager"].
    """
    supervisor_result = research_supervisor_chain.invoke({
        "message_from_manager": state["message_from_manager"],
        "collected_data": state["research_data"],
        "topic": state["topic"],
    })
    # Expected reply format (see prompts.research_supervisor_prompt):
    #   Next Action: ...
    #   Message to project manager: ...
    for line in supervisor_result.split('\n'):
        if line.startswith('Next Action: '):
            state['next'] = line[len('Next Action: '):].strip()
        elif line.startswith('Message to project manager: '):
            state['message_to_manager'] = line[len('Message to project manager: '):].strip()
    state['workflow'].append("research_supervisor")
    return state
93
 
94
def research_end(state):
    """Terminal node of the research subgraph.

    Records the visit in the workflow trail and passes the state through
    otherwise unchanged.
    """
    trail = state["workflow"]
    trail.append("research_end")
    return state
99
 
100
+ #######################################
101
+ ### Writing Team Components ###
102
+ #######################################
103
# State carried through the writing subgraph.
class WritingState(TypedDict):
    workflow: List[str]            # ordered trail of node names visited
    topic: str                     # topic of the post being written
    research_data: Dict[str, str]  # data collected by the research team
    draft_posts: Sequence[str]     # every draft produced so far (latest last)
    final_post: str                # set by post_review once a draft is accepted
    next: str                      # routing key set by writing_supervisor ("NEW DRAFT" | "FINISH")
    message_to_manager: str        # note up to the project manager
    message_from_manager: str      # instruction from the project manager
    review_comments: str           # latest reviewer feedback on the current draft
    style_checked: bool            # NOTE(review): not read or written by any visible node — confirm still needed
114
+
115
+ #
116
+ # Writing Chains
117
+ #
118
#
# Writing chains: each writing node wraps one of these prompt | model | parser
# pipelines; all of them return plain strings.
#

# Decides the writing team's next action ("NEW DRAFT" / "FINISH").
writing_supervisor_chain = (
    prompts.writing_supervisor_prompt | models.gpt4o | StrOutputParser()
)

# Drafts a new post from the collected research data.
post_creation_chain = (
    prompts.post_creation_prompt | models.gpt4o_mini | StrOutputParser()
)

# Rewrites the latest draft to conform to the style guide.
post_editor_chain = (
    prompts.post_editor_prompt | models.gpt4o | StrOutputParser()
)

# Reviews the latest draft; replies with a JSON verdict parsed in post_review().
post_review_chain = (
    prompts.post_review_prompt | models.gpt4o | StrOutputParser()
)
133
+
134
+ #
135
+ # Writing Node Defs
136
+ #
137
def post_creation(state):
    """Writing node: draft a new post from the research data.

    Appends the new draft to draft_posts (the latest draft is always last)
    and records the visit in the workflow trail.
    """
    results = post_creation_chain.invoke({
        "topic": state["topic"],
        "collected_data": state["research_data"],
        "drafts": state["draft_posts"],
        "review_comments": state["review_comments"],
    })
    state["draft_posts"].append(results)
    state["workflow"].append("post_creation")
    return state
147
 
148
def post_editor(state):
    """Writing node: rewrite the latest draft to follow the style guide.

    Appends the edited version to draft_posts rather than replacing the
    original, preserving the full draft history.
    """
    results = post_editor_chain.invoke({
        "current_draft": state["draft_posts"][-1],
        "styleguide": prompts.style_guide_text,
        "review_comments": state["review_comments"],
    })
    state["draft_posts"].append(results)
    state["workflow"].append("post_editor")
    return state
157
 
158
def post_review(state):
    """Writing node: review the latest draft against the style guide.

    The review chain is instructed to reply with a JSON object containing
    "Draft Acceptable" (Yes/No) and "Comments on current draft".  The
    comments are stored for the next iteration; on acceptance the latest
    draft is promoted to final_post.

    Raises json.JSONDecodeError / KeyError if the model ignores the output
    format required by prompts.post_review_prompt.
    """
    current_draft = state["draft_posts"][-1]
    results = post_review_chain.invoke({
        "current_draft": current_draft,
        "styleguide": prompts.style_guide_text,
    })
    data = json.loads(results.strip())
    state["review_comments"] = data["Comments on current draft"]
    if data["Draft Acceptable"] == 'Yes':
        state["final_post"] = current_draft
    state["workflow"].append("post_review")
    return state
169
+
170
def writing_end(state):
    """Terminal node of the writing subgraph.

    Records the visit in the workflow trail and passes the state through
    otherwise unchanged.  (Debug prints removed.)
    """
    state["workflow"].append("writing_end")
    return state
175
 
176
def writing_supervisor(state):
    """Supervisor node for the writing team.

    Asks the supervisor chain whether to produce another draft or finish,
    and parses its line-oriented reply into state["next"] (routing key)
    and state["message_to_manager"].
    """
    supervisor_result = writing_supervisor_chain.invoke({
        "review_comments": state["review_comments"],
        "message_from_manager": state["message_from_manager"],
        "topic": state["topic"],
        "drafts": state["draft_posts"],
        "final_draft": state["final_post"],
    })
    # Expected reply format (see prompts.writing_supervisor_prompt):
    #   Next Action: ...
    #   Message to project manager: ...
    for line in supervisor_result.split('\n'):
        if line.startswith('Next Action: '):
            state['next'] = line[len('Next Action: '):].strip()
        elif line.startswith('Message to project manager: '):
            state['message_to_manager'] = line[len('Message to project manager: '):].strip()
    state['workflow'].append("writing_supervisor")
    return state
193
 
194
+ #######################################
195
+ ### Overarching Graph Components ###
196
+ #######################################
197
# Top-level state for the overall supervisor graph; a superset of the
# research and writing subgraph states so team results flow through unchanged.
class State(TypedDict):
    workflow: List[str]            # ordered trail of node names visited
    topic: str                     # extracted from the user query by overall_supervisor
    research_data: Dict[str, str]  # filled in by the research team
    draft_posts: Sequence[str]     # filled in by the writing team
    final_post: str                # accepted post, returned to the caller
    next: str                      # routing key used inside the subgraphs
    user_input: str                # raw user query
    message_to_manager: str        # latest note from a team supervisor
    message_from_manager: str      # manager's instruction to the next team
    last_active_team: str          # which team ran most recently (manager context)
    next_team: str                 # routing key set by overall_supervisor
    review_comments: str           # reviewer feedback (writing team)
210
+
211
+ #
212
+ # Complete Graph Chains
213
+ #
214
# Project-manager chain: picks the next team (or FINISH) and extracts the
# topic from the raw user query; plain-text output parsed line-by-line in
# overall_supervisor().
overall_supervisor_chain = (
    prompts.overall_supervisor_prompt | models.gpt4o | StrOutputParser()
)
217
+
218
+ #
219
+ # Complete Graph Node defs
220
+ #
221
def overall_supervisor(state):
    """Project-manager node: route between the research and writing teams.

    Parses the chain's line-oriented reply into state["next_team"] (routing
    key for the conditional edge), state["topic"], and
    state["message_from_manager"].
    """
    supervisor_result = overall_supervisor_chain.invoke({
        "query": state["user_input"],
        "message_to_manager": state["message_to_manager"],
        "last_active_team": state["last_active_team"],
    })
    # Expected reply format (see prompts.overall_supervisor_prompt):
    #   Extracted Topic: ...
    #   Next Action: ...
    #   Message to supervisor: ...
    for line in supervisor_result.split('\n'):
        if line.startswith('Next Action: '):
            state['next_team'] = line[len('Next Action: '):].strip()
        elif line.startswith('Extracted Topic: '):
            state['topic'] = line[len('Extracted Topic: '):].strip()
        elif line.startswith('Message to supervisor: '):
            state['message_from_manager'] = line[len('Message to supervisor: '):].strip()
    state['workflow'].append("overall_supervisor")
    # NOTE: the old debug print of state['next_team'] would raise KeyError
    # whenever the model omitted the "Next Action" line; it has been removed.
    return state
241
 
242
+ #######################################
243
+ ### Graph structures ###
244
+ #######################################
245
 
246
+ #
247
+ # Reserach Graph Nodes
248
+ #
249
#
# Research subgraph: the supervisor loops over query_qdrant / web_search
# until it routes FINISH, which lands on the research_end pass-through node.
#
research_graph = StateGraph(ResearchState)
research_graph.add_node("query_qdrant", query_qdrant)
research_graph.add_node("web_search", web_search)
research_graph.add_node("research_supervisor", research_supervisor)
research_graph.add_node("research_end", research_end)
research_graph.set_entry_point("research_supervisor")
# Worker nodes always report back to the supervisor.
research_graph.add_edge("query_qdrant", "research_supervisor")
research_graph.add_edge("web_search", "research_supervisor")
# The supervisor's parsed "Next Action" value selects the next node.
research_graph.add_conditional_edges(
    "research_supervisor",
    lambda x: x["next"],
    {"query_qdrant": "query_qdrant", "web_search": "web_search", "FINISH": "research_end"},
)
research_graph_comp = research_graph.compile()

#
# Writing subgraph: each NEW DRAFT pass runs create -> edit -> review, then
# returns to the supervisor, which either loops or routes to writing_end.
#
writing_graph = StateGraph(WritingState)
writing_graph.add_node("post_creation", post_creation)
writing_graph.add_node("post_editor", post_editor)
writing_graph.add_node("post_review", post_review)
writing_graph.add_node("writing_supervisor", writing_supervisor)
writing_graph.add_node("writing_end", writing_end)
writing_graph.set_entry_point("writing_supervisor")
writing_graph.add_edge("post_creation", "post_editor")
writing_graph.add_edge("post_editor", "post_review")
writing_graph.add_edge("post_review", "writing_supervisor")
writing_graph.add_conditional_edges(
    "writing_supervisor",
    lambda x: x["next"],
    {"NEW DRAFT": "post_creation",
     "FINISH": "writing_end"},
)
writing_graph_comp = writing_graph.compile()

#
# Overall graph: the project manager alternates between the two compiled
# subgraphs until it routes FINISH (END).
#
overall_graph = StateGraph(State)
overall_graph.add_node("overall_supervisor", overall_supervisor)
overall_graph.add_node("research_team_graph", research_graph_comp)
overall_graph.add_node("writing_team_graph", writing_graph_comp)
overall_graph.set_entry_point("overall_supervisor")
# Both teams hand control back to the manager when their subgraph finishes.
overall_graph.add_edge("research_team_graph", "overall_supervisor")
overall_graph.add_edge("writing_team_graph", "overall_supervisor")
overall_graph.add_conditional_edges(
    "overall_supervisor",
    lambda x: x["next_team"],
    {"research_team": "research_team_graph",
     "writing_team": "writing_team_graph",
     "FINISH": END},
)
app = overall_graph.compile()
312
 
313
+
314
+ #######################################
315
+ ### Run method ###
316
+ #######################################
317
+
318
def getSocialMediaPost(userInput: str) -> str:
    """Run the full supervisor graph for a user request and return the final post.

    Returns the literal string "Recursion Error" if the graph fails to
    converge within the recursion limit (kept as-is for the caller in app.py).

    Fixes over the previous version:
    - app.invoke was called twice (the first call ran the whole graph with
      no recursion limit and its result was discarded); it now runs once.
    - The result of a compiled StateGraph invoke is a state dict, so the
      final post is read with results["final_post"], not results.final_post.
    - str-typed fields (final_post, next, next_team) were initialized with
      empty lists; they are now empty strings per the State TypedDict.
    - recursion_limit of 3 aborted almost immediately for this multi-team
      graph; it now allows enough supervisor round-trips to finish.
    """
    initial_state = State(
        workflow=[],
        topic="",
        research_data={},
        draft_posts=[],
        final_post="",
        next="",
        next_team="",
        user_input=userInput,
        message_to_manager="",
        message_from_manager="",
        last_active_team="",
        review_comments="",
    )
    try:
        results = app.invoke(initial_state, {"recursion_limit": 50})
    except GraphRecursionError:
        return "Recursion Error"
    return results["final_post"]
helper_functions.py CHANGED
@@ -80,7 +80,9 @@ def add_to_qdrant(documents, embeddings, qdrant_client, collection_name):
80
 
81
  def agent_node(state, agent, name):
82
  result = agent.invoke(state)
83
- return {"messages": [HumanMessage(content=result["output"], name=name)]}
 
 
84
 
85
  def create_team_agent(llm, tools, system_prompt, agent_name, team_members):
86
  return create_agent(
 
80
 
81
def agent_node(state, agent, name):
    """Invoke *agent* on *state* and wrap its last message for the graph.

    The agent's reply (result["messages"][-1]) is re-emitted as a single
    HumanMessage tagged with *name*.
    """
    result = agent.invoke(state)
    last_message = result["messages"][-1]
    return {"messages": [HumanMessage(content=last_message.content, name=name)]}
86
 
87
  def create_team_agent(llm, tools, system_prompt, agent_name, team_members):
88
  return create_agent(
models.py CHANGED
@@ -122,10 +122,11 @@ qdrant_client = QdrantClient(url=constants.QDRANT_ENDPOINT, api_key=constants.QD
122
  # collection_name="docs_from_ripped_urls_recursive",
123
  # embedding=te3_small
124
  #)
 
125
 
126
  semantic_tuned_Qdrant_vs = QdrantVectorStore(
127
  client=qdrant_client,
128
- collection_name="docs_from_ripped_urls_semantic_tuned",
129
  embedding=tuned_embeddings
130
  )
131
 
 
122
  # collection_name="docs_from_ripped_urls_recursive",
123
  # embedding=te3_small
124
  #)
125
+ collection_name="docs_from_ripped_urls_semantic_tuned"
126
 
127
  semantic_tuned_Qdrant_vs = QdrantVectorStore(
128
  client=qdrant_client,
129
+ collection_name=collection_name,
130
  embedding=tuned_embeddings
131
  )
132
 
prompts.py CHANGED
@@ -37,14 +37,17 @@ Answer:
37
  )
38
 
39
  search_query_prompt = ChatPromptTemplate.from_template(
40
- """Given the following topic and information from our database, create a search query to find supplementary information:
41
 
42
  Topic: {topic}
43
 
44
  Information from our database:
45
  {qdrant_results}
46
 
47
- Generate a search query to find additional, up-to-date information that complements what we already know:
 
 
 
48
  """
49
  )
50
 
@@ -61,3 +64,154 @@ tavily_prompt = ChatPromptTemplate.from_template(
61
  ...
62
  """
63
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
37
  )
38
 
39
# Builds the web-search query string; fed the topic plus whatever Qdrant
# already returned so the Tavily search targets complementary information.
search_query_prompt = ChatPromptTemplate.from_template(
    """Given the following topic and information from our database, create a search query to find supplementary information about the topic, including facts, statistics, surveys, articles, and quotes:

Topic: {topic}

Information from our database:
{qdrant_results}

Generate a search query to find additional, up-to-date information that complements what we already know. Only include the query you generate, do not include any extraneous language.

Output:
Query
"""
)
53
 
 
64
  ...
65
  """
66
  )
67
+
68
# Research-team supervisor prompt.  The line-oriented output
# ("Next Action: ..." / "Message to project manager: ...") is parsed in
# graph.research_supervisor.
research_supervisor_prompt = ChatPromptTemplate.from_template(
    """You are the supervisor of a team of researchers with different roles.

Your team's task is to collect information about a given topic.

As you are the supervisor, you must determine which of your team members should perform their task next.

Your team members include 'query_qdrant', which can search a database of information that will relate to the topic, and 'web_search', which can search the wider internet for more information about the topic not contained in our database.

The available actions are the names of your team members (to indicate which team member should run next), or 'FINISH' to indicate that sufficient data has been collected about the topic, and you wish to send your team's work back to your own supervisor.

You will have access to the data that the team has collected so far, if any exists. You will use this to determine if any further collection is needed.

You can send a specific message or request to your supervisor when your next action is to send it back to them. When you are called to perform your own task by the project manager, they may have a helpful message or request of you.

Message from project manager: {message_from_manager}
Topic: {topic}
Data collected so far: {collected_data}

Output:
Next Action: ...
Message to project manager: ...
"""
)
92
+
93
# Project-manager prompt.  The line-oriented output ("Extracted Topic:" /
# "Next Action:" / "Message to supervisor:") is parsed in
# graph.overall_supervisor.  Fix: typo "generaete" -> "generate".
overall_supervisor_prompt = ChatPromptTemplate.from_template(
    """You are the project manager of two teams who are working to create factually accurate and emotionally meaningful social media posts regarding emotional health in the workplace.

Your task is to coordinate the two teams. The first team, 'research_team', is tasked with collecting information and data about the specific topic that the user asks about. The second team, 'writing_team', will use that collected data and information to generate posts, and then review and iterate over them until the post is of sufficient quality.
Each team is headed by its own supervisor, who will manage the individual members of that team.

As you are the project manager, you will deal directly with the user and their initial query, so if there is not an existing topic that has been extracted from the user's query, you must analyze the query and determine what topic your team will be dealing with.

After determining the topic, or if a specific topic has already been supplied, you must determine which of the teams should perform their task next.

The available actions are the names of your teams (to indicate which team should run next), or 'FINISH' to indicate that a sufficient quality social media post is ready to share with the user.

To help you determine which team should run next, or if the post is ready to share with the user, you will have access to the latest team supervisor requests and messages, as well as a flag indicating which team was most recently active.

If you have to pass along a message to the next team, such as if the writing team had asked that the research team collect more information, or if you want to give specific direction to a supervisor, you can do so.

User query: {query}
Team message: {message_to_manager}
Last active team: {last_active_team}

Output:
Extracted Topic: ...
Next Action: ...
Message to supervisor: ...
"""
)
119
+
120
# Writing-team supervisor prompt.  Output parsed in graph.writing_supervisor.
# Fix: typo "editting" -> "editing".
writing_supervisor_prompt = ChatPromptTemplate.from_template(
    """You are the supervisor of a team of writers and editors.

Your team's task is to construct a high quality draft of a social media post about the topic, using information and data that has been compiled for you.

Your team members include:
1) 'post_creation' - This team member is in charge of using the collected data to draft the initial social media posts.
2) 'post_editor' - This team member takes the latest draft and ensures that it fits the tone and style requirements set by a writing guide, editing the post if required.
3) 'post_review' - This team member reviews the latest draft, ensuring all quality checks are met, sources are present, and determines if it meets the standards of the final post.

Your available actions are 'NEW DRAFT' to indicate that you want your team to compile a new draft post, or 'FINISH' to indicate that a sufficient final post has been created, and you wish to send your team's work back to your own supervisor.

You will have access to drafts that your team has created so far, as well as the final draft, if either exist. You will use the presence of the final post to determine if your team is finished. If you do not have a final post given to you, you cannot turn your team's work in to your supervisor.

You will also be given the latest comments from the post_review team member regarding the current status of the draft, including if anything needs changed or not.

If you do not have a final post given to you, you cannot turn your team's work in to your supervisor.

You can send a specific message or request to your supervisor when your next action is to send it back to them. When you are called to perform your own task by the project manager, they may have a helpful message or request of you.

Message from project manager: {message_from_manager}
Topic: {topic}
Drafts so far: {drafts}
Final Post: {final_draft}
Comments from Reviewer: {review_comments}

Output:
Next Action: ...
Message to project manager: ...
"""
)
151
+
152
# Drafting prompt used by graph.post_creation.
# Fix: typo "market team's" -> "marketing team's".
post_creation_prompt = ChatPromptTemplate.from_template(
    """You are a writer on a team tasked with creating professional social media posts about mental health in the workplace.

You are tasked with creating drafts of posts that a marketing team can use either as they are, or as bases for posts on various social media sites.

You will be given a set of collected data and information that are related to the marketing team's current post idea, including but not limited to facts, statistics, quotes, articles, surveys, etc. This data will include sources.

You will also be given a set of previous drafts of the post, if any exist, so that you can iterate and improve on what was created previously, as well as the latest set of comments from the post_review team member, if there are any. These comments will help you improve over previous drafts.

The previous review comments will outline what was satisfactory about the last draft and what was not satisfactory. Take the comments into account to ensure that you do not make the same mistakes on the current draft.

Using the data, write a draft of a social media post about the topic. Do not make up new facts or data to base your post on, use only the data provided. Where possible, also include the source of the information you use in the post.

Topic: {topic}
Drafts so far: {drafts}
Collected data: {collected_data}
Comments from previous review: {review_comments}

Output:
(Draft post)
"""
)
174
+
175
# Style-guide editing prompt used by graph.post_editor.
# Fixes: typo "editting" -> "editing"; grammar "adhere the" -> "adhere to the".
post_editor_prompt = ChatPromptTemplate.from_template(
    """You are a writer on a team tasked with creating professional social media posts about mental health in the workplace.

You are tasked with ensuring that a current draft of a social media post follows your organization's style, tone, and voice guidelines as dictated by a style guide.

You will be given the current draft of the post, the organization's writing style guide to review, and any comments from the previous draft's review.

The previous review comments will outline what was satisfactory about the last draft and what was not satisfactory. Take the comments into account to ensure that you do not make the same mistakes on the current draft.

You are to rewrite the draft post, if required, to adhere to the writing style and professional expectations outlined in the style guide. You **MUST** adhere to the style guide.
The underlying message and content of the post should remain unchanged. All factual information, quotes, or sources included in the post should be included in the rewrite.

Your output should not include any of your supplementary comments.

Current draft: {current_draft}
Style guide: {styleguide}
Comments from previous review: {review_comments}

Output:
(Updated draft)
"""
)
197
+
198
# Review prompt used by graph.post_review; the JSON keys here must match the
# keys parsed in that node ("Draft Acceptable", "Comments on current draft").
# Fixes: typos "strucutre" -> "structure", "follwoing" -> "following".
post_review_prompt = ChatPromptTemplate.from_template(
    """You are a writer on a team tasked with creating professional social media posts about mental health in the workplace.

You are tasked with determining if a current draft social media post is of sufficient quality to become the final draft.

You will be given the current draft of the post, and the organization's writing style guide.

Checking the draft's spelling and grammar, as well as the sentence structure, tone, and style against the writing guide, determine if the current draft of the post is acceptable.
If it is not, please give your reasoning why, and what should be changed.

Current draft: {current_draft}
Style guide: {styleguide}

Your output should be a json object in the following format, but should **not** include the triple backticks or the 'json' label:
{{
"Draft Acceptable": (Yes/No),
"Comments on current draft": (comments)
}}
"""
)