HEHEBOIOG committed on
Commit: 3a94433
1 Parent(s): 057c42d

Update app.py

Files changed (1):
  1. app.py +174 -20
app.py CHANGED
@@ -1,11 +1,179 @@
+import os
 import gradio as gr
 from huggingface_hub import InferenceClient
+from langchain_openai import ChatOpenAI
+from crewai_tools import PDFSearchTool
+from langchain_community.tools.tavily_search import TavilySearchResults
+from crewai_tools import tool
+from crewai import Crew, Task, Agent
+from google.colab import userdata
+from sentence_transformers import SentenceTransformer
+
+os.environ['GROQ_API_KEY'] = userdata.get('GROQ_API_KEY')
+os.environ['TAVILY_API_KEY'] = userdata.get('TAVILY_API_KEY')
 
-"""
-For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-"""
 client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
+llm = ChatOpenAI(
+    openai_api_base="https://api.groq.com/openai/v1",
+    openai_api_key=os.environ['GROQ_API_KEY'],
+    model_name="llama3-70b-8192",
+    temperature=0.1,
+    max_tokens=1000
+)
+
+rag_tool = PDFSearchTool(pdf='finance.pdf',
+    config=dict(
+        llm=dict(
+            provider="groq",
+            config=dict(
+                model="llama3-8b-8192",
+            ),
+        ),
+        embedder=dict(
+            provider="huggingface",
+            config=dict(
+                model="BAAI/bge-small-en-v1.5",
+            ),
+        ),
+    )
+)
+
+web_search_tool = TavilySearchResults(k=3)
+
+@tool
+def router_tool(question):
+    """Router Function"""
+    return 'web_search'
+
+Router_Agent = Agent(
+    role='Router',
+    goal='Route user question to a vectorstore or web search',
+    backstory=(
+        "You are an expert at routing a user question to a vectorstore or web search."
+        "Use the vectorstore for questions on concepts related to Retrieval-Augmented Generation."
+        "You do not need to be stringent with the keywords in the question related to these topics. Otherwise, use web-search."
+    ),
+    verbose=True,
+    allow_delegation=False,
+    llm=llm,
+)
+
+Retriever_Agent = Agent(
+    role="Retriever",
+    goal="Use the information retrieved from the vectorstore to answer the question",
+    backstory=(
+        "You are an assistant for question-answering tasks."
+        "Use the information present in the retrieved context to answer the question."
+        "You have to provide a clear concise answer."
+    ),
+    verbose=True,
+    allow_delegation=False,
+    llm=llm,
+)
+
+Grader_agent = Agent(
+    role='Answer Grader',
+    goal='Filter out erroneous retrievals',
+    backstory=(
+        "You are a grader assessing relevance of a retrieved document to a user question."
+        "If the document contains keywords related to the user question, grade it as relevant."
+        "It does not need to be a stringent test. You have to make sure that the answer is relevant to the question."
+    ),
+    verbose=True,
+    allow_delegation=False,
+    llm=llm,
+)
+
+hallucination_grader = Agent(
+    role="Hallucination Grader",
+    goal="Filter out hallucination",
+    backstory=(
+        "You are a hallucination grader assessing whether an answer is grounded in / supported by a set of facts."
+        "Make sure you meticulously review the answer and check if the response provided is in alignment with the question asked."
+    ),
+    verbose=True,
+    allow_delegation=False,
+    llm=llm,
+)
+
+answer_grader = Agent(
+    role="Answer Grader",
+    goal="Filter out hallucination from the answer.",
+    backstory=(
+        "You are a grader assessing whether an answer is useful to resolve a question."
+        "Make sure you meticulously review the answer and check if it makes sense for the question asked."
+        "If the answer is relevant generate a clear and concise response."
+        "If the answer generated is not relevant then perform a websearch using 'web_search_tool'."
+    ),
+    verbose=True,
+    allow_delegation=False,
+    llm=llm,
+)
+
+router_task = Task(
+    description=("Analyse the keywords in the question {question}"
+        "Based on the keywords decide whether it is eligible for a vectorstore search or a web search."
+        "Return a single word 'vectorstore' if it is eligible for vectorstore search."
+        "Return a single word 'websearch' if it is eligible for web search."
+        "Do not provide any other preamble or explanation."
+    ),
+    expected_output=("Give a binary choice 'websearch' or 'vectorstore' based on the question."
+        "Do not provide any other preamble or explanation."),
+    agent=Router_Agent,
+    tools=[router_tool],
+)
+
+retriever_task = Task(
+    description=("Based on the response from the router task extract information for the question {question} with the help of the respective tool."
+        "Use the web_search_tool to retrieve information from the web in case the router task output is 'websearch'."
+        "Use the rag_tool to retrieve information from the vectorstore in case the router task output is 'vectorstore'."
+    ),
+    expected_output=("You should analyse the output of the 'router_task'."
+        "If the response is 'websearch' then use the web_search_tool to retrieve information from the web."
+        "If the response is 'vectorstore' then use the rag_tool to retrieve information from the vectorstore."
+        "Return a clear and concise text as response."),
+    agent=Retriever_Agent,
+    context=[router_task],
+)
+
+grader_task = Task(
+    description=("Based on the response from the retriever task for the question {question} evaluate whether the retrieved content is relevant to the question."
+    ),
+    expected_output=("Binary score 'yes' or 'no' to indicate whether the document is relevant to the question."
+        "You must answer 'yes' if the response from the 'retriever_task' is in alignment with the question asked."
+        "You must answer 'no' if the response from the 'retriever_task' is not in alignment with the question asked."
+        "Do not provide any preamble or explanations except for 'yes' or 'no'."),
+    agent=Grader_agent,
+    context=[retriever_task],
+)
+
+hallucination_task = Task(
+    description=("Based on the response from the grader task for the question {question} evaluate whether the answer is grounded in / supported by a set of facts."),
+    expected_output=("Binary score 'yes' or 'no' to indicate whether the answer is in sync with the question asked."
+        "Respond 'yes' if the answer is useful and contains facts about the question asked."
+        "Respond 'no' if the answer is not useful and does not contain facts about the question asked."
+        "Do not provide any preamble or explanations except for 'yes' or 'no'."),
+    agent=hallucination_grader,
+    context=[grader_task],
+)
+
+answer_task = Task(
+    description=("Based on the response from the hallucination task for the question {question} evaluate whether the answer is useful to resolve the question."
+        "If the answer is 'yes' return a clear and concise answer."
+        "If the answer is 'no' then perform a 'websearch' and return the response."),
+    expected_output=("Return a clear and concise response if the response from 'hallucination_task' is 'yes'."
+        "Perform a web search using 'web_search_tool' and return a clear and concise response only if the response from 'hallucination_task' is 'no'."
+        "Otherwise respond as 'Sorry! unable to find a valid response'."),
+    context=[hallucination_task],
+    agent=answer_grader,
+)
+
+rag_crew = Crew(
+    agents=[Router_Agent, Retriever_Agent, Grader_agent, hallucination_grader, answer_grader],
+    tasks=[router_task, retriever_task, grader_task, hallucination_task, answer_task],
+    verbose=True,
+)
 
 def respond(
     message,
@@ -26,6 +194,8 @@ def respond(
     messages.append({"role": "user", "content": message})
 
     response = ""
+    inputs = {"question": message}
+    result = rag_crew.kickoff(inputs=inputs)
 
     for message in client.chat_completion(
         messages,
@@ -35,29 +205,13 @@ def respond(
         top_p=top_p,
     ):
         token = message.choices[0].delta.content
-
         response += token
         yield response
 
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
 demo = gr.ChatInterface(
     respond,
     additional_inputs=[
         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
-    ],
-)
-
-
-if __name__ == "__main__":
-    demo.launch()
+        gr.S
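
Note on the key handling in the new app.py: the Groq and Tavily keys are read via `google.colab.userdata`, which only exists inside Google Colab. When the same file runs as a Gradio Space, those secrets would normally be supplied as environment variables instead; the snippet below is a minimal sketch of that substitution, assuming GROQ_API_KEY and TAVILY_API_KEY are configured as Space secrets (this is not part of the commit).

import os

# Sketch only: outside Colab, read the same secrets from environment variables
# (e.g. Hugging Face Space secrets) instead of google.colab.userdata.
for key in ("GROQ_API_KEY", "TAVILY_API_KEY"):
    if not os.getenv(key):
        raise RuntimeError(f"{key} is not set; configure it as a Space secret.")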