rohan13 committed on
Commit
028ac25
1 Parent(s): 440deef

Using csv agent, some code for custom vector store based agents too.

Browse files
Files changed (4) hide show
  1. app.py +40 -4
  2. grader.py +7 -4
  3. grader_qa.py +139 -91
  4. utils.py +63 -0
app.py CHANGED
@@ -1,7 +1,9 @@
1
  import asyncio
2
  import glob
3
  import os
 
4
  import time
 
5
 
6
  import gradio as gr
7
  from dotenv import load_dotenv
@@ -40,13 +42,18 @@ def run_model(text):
40
  global grader, grader_qa
41
  start_time = time.time()
42
  print("start time:" + str(start_time))
43
- response = grader_qa.chain(text)
 
 
 
 
 
44
  sources = []
45
- for document in response['source_documents']:
46
- sources.append(str(document.metadata))
47
 
48
  source = ','.join(set(sources))
49
- response = response['answer'] + '\nSources: ' + str(len(sources))
50
  end_time = time.time()
51
  # # If response contains string `SOURCES:`, then add a \n before `SOURCES`
52
  # if "SOURCES:" in response:
@@ -171,6 +178,31 @@ def reset_data(history):
171
  return history
172
 
173
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
174
  def bot(history):
175
  return get_grading_status(history)
176
 
@@ -203,6 +235,7 @@ with gr.Blocks() as demo:
203
  label="Ask questions about how students did on the discussion",
204
  placeholder="Enter text and press enter, or upload an image", lines=1
205
  )
 
206
  ask = gr.Button(value="Ask", variant="secondary", scale=1)
207
 
208
  chatbot.value = get_first_message([])
@@ -231,6 +264,9 @@ with gr.Blocks() as demo:
231
  reset.click(reset_data, inputs=[chatbot], outputs=[chatbot], postprocess=False, show_progress=True, ).success(
232
  bot, chatbot, chatbot)
233
 
 
 
 
234
  if __name__ == "__main__":
235
  demo.queue()
236
  demo.queue(concurrency_count=5)
 
1
  import asyncio
2
  import glob
3
  import os
4
+ import shutil
5
  import time
6
+ import traceback
7
 
8
  import gradio as gr
9
  from dotenv import load_dotenv
 
42
  global grader, grader_qa
43
  start_time = time.time()
44
  print("start time:" + str(start_time))
45
+ try:
46
+ response = grader_qa.agent.run(text)
47
+ except Exception as e:
48
+ response = "I need a break. Please ask me again in a few minutes"
49
+ print(traceback.format_exc())
50
+
51
  sources = []
52
+ # for document in response['source_documents']:
53
+ # sources.append(str(document.metadata))
54
 
55
  source = ','.join(set(sources))
56
+ # response = response['answer'] + '\nSources: ' + str(len(sources))
57
  end_time = time.time()
58
  # # If response contains string `SOURCES:`, then add a \n before `SOURCES`
59
  # if "SOURCES:" in response:
 
178
  return history
179
 
180
 
181
+ def get_output_dir(orig_name):
182
+ script_dir = os.path.dirname(os.path.abspath(__file__))
183
+ output_dir = os.path.join(script_dir, 'output', orig_name)
184
+ return output_dir
185
+
186
+
187
+ def upload_grading_results(file, history):
188
+ global grader, grader_qa
189
+ # Delete output folder and save the file in output folder
190
+ if os.path.isdir('output'):
191
+ shutil.rmtree('output')
192
+ os.mkdir('output')
193
+ if os.path.isdir('vector_stores'):
194
+ shutil.rmtree('vector_stores')
195
+ os.mkdir('vector_stores')
196
+ # get current path
197
+ path = os.path.join("output", os.path.basename(file.name))
198
+ # Copy the uploaded file from its temporary location to the desired location
199
+ shutil.copyfile(file.name, path)
200
+ grader = Grader(qa_model)
201
+ grader_qa = GraderQA(grader, embeddings)
202
+ history = [(None, 'Grading results uploaded successfully')]
203
+ return history
204
+
205
+
206
  def bot(history):
207
  return get_grading_status(history)
208
 
 
235
  label="Ask questions about how students did on the discussion",
236
  placeholder="Enter text and press enter, or upload an image", lines=1
237
  )
238
+ upload = gr.UploadButton(label="Upload grading results", type="file", file_types=["csv"], scale=0.5)
239
  ask = gr.Button(value="Ask", variant="secondary", scale=1)
240
 
241
  chatbot.value = get_first_message([])
 
264
  reset.click(reset_data, inputs=[chatbot], outputs=[chatbot], postprocess=False, show_progress=True, ).success(
265
  bot, chatbot, chatbot)
266
 
267
+ upload.upload(upload_grading_results, inputs=[upload, chatbot], outputs=[chatbot], postprocess=False, ).then(
268
+ bot, chatbot, chatbot)
269
+
270
  if __name__ == "__main__":
271
  demo.queue()
272
  demo.queue(concurrency_count=5)
grader.py CHANGED
@@ -25,7 +25,8 @@ class Grader:
25
  self.model = model
26
  self.rubric_file = 'docs/rubric_data.json'
27
  self.discussions_file_path = "docs/discussion_entries.json"
28
- self.fieldnames = ['student_name', 'total_score', 'student_feedback', 'grader_comments', 'summary']
 
29
  self.docs = self.get_html_files()
30
  self.llm = ChatOpenAI(temperature=0, model_name=model)
31
  self.parser: PydanticOutputParser = self.create_parser()
@@ -42,16 +43,18 @@ class Grader:
42
  class ToolArgsSchema(BaseModel):
43
  student_name: Optional[str] = Field(description="The name of the student")
44
  total_score: int = Field(description="The grade of the student's answer")
45
- student_feedback: Optional[str] = Field(
46
- description="The developmental feedback from Grader's point of view to the student, some examples are: 'Great work, ...', 'Although, your submission is relevant to the question, it doesn't answer the question entirely...'. Give customized feedback based on student's answer")
47
  grader_comments: Optional[str] = Field(
48
  description="The grade split breakup based on rubric added as grader's one liner customized comments to explain how the grade was calculated for that particular student's answer")
 
 
49
  summary: Optional[str] = Field(
50
  description="The overall summary of the student's answer outlining key points from the student's answer based on the rubric which can be used as a portion of a vectorstore, used to answer summary based questions about all the discussions")
51
 
52
  class Config:
53
  schema_extra = {
54
- "required": ["student_name", "total_score", "student_feedback", "grader_comments", "summary"]
 
55
  }
56
 
57
  def create_parser(self):
 
25
  self.model = model
26
  self.rubric_file = 'docs/rubric_data.json'
27
  self.discussions_file_path = "docs/discussion_entries.json"
28
+ self.fieldnames = ['student_name', 'total_score', 'score_breakdown', 'grader_comments', 'student_feedback',
29
+ 'summary']
30
  self.docs = self.get_html_files()
31
  self.llm = ChatOpenAI(temperature=0, model_name=model)
32
  self.parser: PydanticOutputParser = self.create_parser()
 
43
  class ToolArgsSchema(BaseModel):
44
  student_name: Optional[str] = Field(description="The name of the student")
45
  total_score: int = Field(description="The grade of the student's answer")
46
+ score_breakdown: Optional[str] = Field(description="The grade split breakup based on rubric")
 
47
  grader_comments: Optional[str] = Field(
48
  description="The grade split breakup based on rubric added as grader's one liner customized comments to explain how the grade was calculated for that particular student's answer")
49
+ student_feedback: Optional[str] = Field(
50
+ description="The developmental feedback from Grader's point of view to the student, some examples are: 'Great work, ...', 'Although, your submission is relevant to the question, it doesn't answer the question entirely...'. Give customized feedback based on student's answer")
51
  summary: Optional[str] = Field(
52
  description="The overall summary of the student's answer outlining key points from the student's answer based on the rubric which can be used as a portion of a vectorstore, used to answer summary based questions about all the discussions")
53
 
54
  class Config:
55
  schema_extra = {
56
+ "required": ["student_name", "total_score", "score_breakdown", "grader_comments", "student_feedback",
57
+ "summary"]
58
  }
59
 
60
  def create_parser(self):
grader_qa.py CHANGED
@@ -1,19 +1,13 @@
1
- import os
2
-
3
  from langchain import FAISS
4
- from langchain.chains import ConversationalRetrievalChain
5
- from langchain.chat_models import ChatOpenAI
6
- from langchain.document_loaders import CSVLoader
 
7
  from langchain.memory import ConversationBufferMemory
8
  from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate
9
- from langchain.text_splitter import RecursiveCharacterTextSplitter
10
-
11
 
12
- def search_index_from_docs(source_chunks, embeddings):
13
- # print("source chunks: " + str(len(source_chunks)))
14
- # print("embeddings: " + str(embeddings))
15
- search_index = FAISS.from_documents(source_chunks, embeddings)
16
- return search_index
17
 
18
 
19
  def get_chat_history(inputs) -> str:
@@ -27,108 +21,143 @@ class GraderQA:
27
  def __init__(self, grader, embeddings):
28
  self.grader = grader
29
  self.llm = self.grader.llm
30
- self.index_file = "vector_stores/canvas-discussions.faiss"
31
- self.pickle_file = "vector_stores/canvas-discussions.pkl"
 
 
 
 
 
 
 
 
32
  self.rubric_text = grader.rubric_text
33
- self.search_index = self.get_search_index(embeddings)
 
34
  self.chain = self.create_chain(embeddings)
 
 
 
 
 
 
 
35
  self.tokens = None
36
  self.question = None
37
 
 
 
 
38
  def get_search_index(self, embeddings):
39
- if os.path.isfile(self.pickle_file) and os.path.isfile(self.index_file) and os.path.getsize(
40
- self.pickle_file) > 0:
41
  # Load index from pickle file
42
- search_index = self.load_index(embeddings)
43
  else:
44
- search_index = self.create_index(embeddings)
45
  print("Created index")
46
  return search_index
47
 
48
- def load_index(self, embeddings):
49
- # Load index
50
- db = FAISS.load_local(
51
- folder_path="vector_stores/",
52
- index_name="canvas-discussions", embeddings=embeddings,
53
- )
54
- print("Loaded index")
55
- return db
56
-
57
- def create_index(self, embeddings):
58
- source_chunks = self.create_chunk_documents()
59
- search_index = search_index_from_docs(source_chunks, embeddings)
60
- FAISS.save_local(search_index, folder_path="vector_stores/", index_name="canvas-discussions")
61
  return search_index
62
 
63
- def create_chunk_documents(self):
64
- sources = self.fetch_data_for_embeddings()
65
-
66
- splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
67
-
68
- source_chunks = splitter.split_documents(sources)
69
 
70
- print("chunks: " + str(len(source_chunks)))
71
- print("sources: " + str(len(sources)))
 
 
72
 
73
- return source_chunks
 
 
 
 
 
74
 
75
- def fetch_data_for_embeddings(self):
76
- document_list = self.get_csv_files()
77
- print("document list: " + str(len(document_list)))
78
- return document_list
 
 
 
79
 
80
- def get_csv_files(self):
81
- loader = CSVLoader(file_path=self.grader.csv, source_column="student_name")
82
- document_list = loader.load()
83
- return document_list
84
 
85
- def create_chain(self, embeddings):
86
- if not self.search_index:
87
- self.search_index = self.load_index(embeddings)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
88
 
89
- question_prompt, combine_prompt = self.create_map_reduce_prompt()
90
- # create agent, 1 chain for summary based question, 2nd chain for semantic retrieval based question
 
 
 
 
 
 
 
 
 
 
91
 
92
- chain = ConversationalRetrievalChain.from_llm(llm=self.llm, chain_type='map_reduce',
93
- retriever=self.search_index.as_retriever(search_type='mmr',
94
- search_kwargs={
95
- 'lambda_mult': 1,
96
- 'fetch_k': 50,
97
- 'k': 30}),
98
- return_source_documents=True,
99
- verbose=True,
100
- memory=ConversationBufferMemory(memory_key='chat_history',
101
- return_messages=True,
102
- output_key='answer'),
103
- condense_question_llm=ChatOpenAI(temperature=0,
104
- model='gpt-3.5-turbo'),
105
- combine_docs_chain_kwargs={"question_prompt": question_prompt,
106
- "combine_prompt": combine_prompt})
107
- return chain
108
 
109
  def create_map_reduce_prompt(self):
110
- system_template = f"""Use the following portion of a long grading results document to answer the question BUT ONLY FOR THE STUDENT MENTIONED. Use the following examples to take guidance on how to answer the question.
 
 
 
 
 
111
  Examples:
112
  Question: How many students participated in the discussion?
 
113
  Answer: This student participated in the discussion./This student did not participate in the discussion.
114
  Question: What was the average score for the discussion?
 
115
  Answer: This student received a score of 10/10 for the discussion.
116
  Question: How many students received a full score?/How many students did not receive a full score?
 
117
  Answer: This student received a full score./This student did not receive a full score.
118
  Question: How many students lost marks in X category of the rubric?
 
119
  Answer: This student lost marks in X category of the rubric./This student did not lose marks in X category of the rubric.
120
  Question: Give me 3 best responses received for the discussion.
 
121
  Answer: This student gave the following responses for the discussion and received a score of 10/10.
122
-
123
-
124
- ______________________
125
- Grading Result For:
126
- {{context}}
127
  ______________________
128
- Following are the instructions and rubric of the discussion post for reference, used to grade the discussion.
129
- ----------------
130
- Instructions and Rubric:
131
- {self.rubric_text}
132
  """
133
  messages = [
134
  SystemMessagePromptTemplate.from_template(system_template),
@@ -136,7 +165,7 @@ class GraderQA:
136
  ]
137
  CHAT_QUESTION_PROMPT = ChatPromptTemplate.from_messages(messages)
138
  system_template = """You are Canvas Discussions Grading + Feedback QA Bot. Have a conversation with a human, answering the questions about the grading results, feedback, answers as accurately as possible.
139
- Use the following answers for each student to answer the users question as accurately as possible.
140
  You are an expert at basic calculations and answering questions on grading results and can answer the following questions with ease.
141
  If you don't know the answer, just say that you don't know. Don't try to make up an answer.
142
  ______________________
@@ -150,8 +179,8 @@ class GraderQA:
150
 
151
  def create_prompt(self):
152
  system_template = f"""You are Canvas Discussions Grading + Feedback QA Bot. Have a conversation with a human, answering the questions about the grading results, feedback, answers as accurately as possible.
153
- You are a grading assistant who graded the canvas discussions to create the following grading results and feedback.
154
- Use the following instruction, rubric of the discussion which were used to grade the discussions and refine the answer if needed.
155
  ----------------
156
  {self.rubric_text}
157
  ----------------
@@ -165,24 +194,43 @@ class GraderQA:
165
 
166
  def get_tokens(self):
167
  total_tokens = 0
168
- for doc in self.docs:
169
- chat_prompt = self.prompt.format(context=doc, question=self.question)
170
-
171
- num_tokens = self.llm.get_num_tokens(chat_prompt)
172
- total_tokens += num_tokens
173
 
174
- # summary = self.llm(summary_prompt)
175
 
176
- # print (f"Summary: {summary.strip()}")
177
- # print ("\n")
178
  return total_tokens
179
 
180
  def run_qa_chain(self, question):
 
 
 
 
 
 
181
  self.question = question
182
  self.get_tokens()
183
  answer = self.chain(question)
184
  return answer
185
 
 
 
 
 
 
 
 
 
 
 
 
 
 
186
  # system_template = """You are Canvas Discussions Grading + Feedback QA Bot. Have a conversation with a human, answering the following questions as best you can.
187
  # You are a grading assistant who graded the canvas discussions to create the following grading results and feedback. Use the following pieces of the grading results and feedback to answer the users question.
188
  # Use the following pieces of context to answer the users question.
 
 
 
1
  from langchain import FAISS
2
+ from langchain import LLMMathChain
3
+ from langchain.agents import AgentType, create_csv_agent
4
+ from langchain.chains import RetrievalQA
5
+ from langchain.chains.question_answering import load_qa_chain
6
  from langchain.memory import ConversationBufferMemory
7
  from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate
8
+ from langchain.tools import Tool
 
9
 
10
+ import utils
 
 
 
 
11
 
12
 
13
  def get_chat_history(inputs) -> str:
 
21
  def __init__(self, grader, embeddings):
22
  self.grader = grader
23
  self.llm = self.grader.llm
24
+ self.folder_path = "vector_stores/"
25
+ self.summary_index_name = "canvas-discussions-summary"
26
+ self.summary_index_file = "vector_stores/canvas-discussions-summary.faiss"
27
+ self.summary_pickle_file = "vector_stores/canvas-discussions-summary.pkl"
28
+ self.qa_index_name = "canvas-discussions-qa"
29
+ self.qa_index_file = "vector_stores/canvas-discussions-qa.faiss"
30
+ self.qa_pickle_file = "vector_stores/canvas-discussions-qa.pkl"
31
+ self.summary_docs = utils.get_csv_files(self.grader.csv, source_column='student_name')
32
+ self.qa_docs = utils.get_csv_files(self.grader.csv, source_column='student_name',
33
+ field_names=['student_name', 'total_score', 'score_breakdown'])
34
  self.rubric_text = grader.rubric_text
35
+ self.summary_index = self.get_search_index(embeddings)
36
+ self.qa_index = self.get_qa_index(embeddings)
37
  self.chain = self.create_chain(embeddings)
38
+ self.qa_chain = self.create_qa_chain()
39
+ self.math_chain = self.create_math_chain()
40
+ self.tools = self.get_tools()
41
+ self.memory = ConversationBufferMemory(memory_key='chat_history',
42
+ return_messages=True,
43
+ output_key='answer')
44
+ self.agent = self.create_agent()
45
  self.tokens = None
46
  self.question = None
47
 
48
+ def load_all_indexes(self, embeddings):
49
+ return self.get_search_index(embeddings), self.get_qa_index(embeddings)
50
+
51
  def get_search_index(self, embeddings):
52
+ if utils.index_exists(self.summary_pickle_file, self.summary_index_file):
 
53
  # Load index from pickle file
54
+ search_index = utils.load_index(self.folder_path, self.summary_index_name, embeddings)
55
  else:
56
+ search_index = utils.create_index(self.folder_path, self.summary_index_name, embeddings, self.summary_docs)
57
  print("Created index")
58
  return search_index
59
 
60
+ def get_qa_index(self, embeddings):
61
+ if utils.index_exists(self.qa_pickle_file, self.qa_index_file):
62
+ # Load index from pickle file
63
+ search_index = utils.load_index(self.folder_path, self.qa_index_name, embeddings)
64
+ else:
65
+ search_index = utils.create_index(self.folder_path, self.qa_index_name, embeddings, self.qa_docs)
66
+ print("Created index")
 
 
 
 
 
 
67
  return search_index
68
 
69
+ def create_chain(self, embeddings):
70
+ if not self.summary_index:
71
+ self.summary_index = self.get_search_index(embeddings)
 
 
 
72
 
73
+ question_prompt, combine_prompt = self.create_map_reduce_prompt()
74
+ # create agent, 1 chain for summary based question, 2nd chain for semantic retrieval based question
75
+ qa_chain = load_qa_chain(self.llm, chain_type="map_reduce", question_prompt=question_prompt,
76
+ combine_prompt=combine_prompt, verbose=True)
77
 
78
+ chain = RetrievalQA(combine_documents_chain=qa_chain,
79
+ retriever=self.summary_index.as_retriever(search_type='mmr',
80
+ search_kwargs={'lambda_mult': 1, 'fetch_k': 50,
81
+ 'k': 30}),
82
+ return_source_documents=True, verbose=True, )
83
+ return chain
84
 
85
+ def create_qa_chain(self):
86
+ qa = RetrievalQA.from_chain_type(llm=self.llm, chain_type="stuff",
87
+ retriever=self.qa_index.as_retriever(search_type='mmr',
88
+ search_kwargs={'lambda_mult': 1,
89
+ 'fetch_k': 50,
90
+ 'k': 30}), verbose=True)
91
+ return qa
92
 
93
+ def create_math_chain(self):
94
+ return LLMMathChain.from_llm(llm=self.llm, verbose=True)
 
 
95
 
96
+ def get_tools(self):
97
+ tools = [
98
+ Tool(
99
+ name="Grading Score Results",
100
+ func=self.run_qa_chain,
101
+ description="useful when you need to answer questions related to GRADES, SCORING or SCORE BREAKDOWN(INDIVIDUAL OR OVERALL) based questions from the grading results of the canvas discussion. Use this more often because this has a higher accuracy about the SCORING and GRADES of the students."
102
+ ),
103
+ Tool(
104
+ name="Summary",
105
+ func=self.run_summary_chain,
106
+ description="useful when you need to answer summary based questions for all students' grading results for the canvas discussion where the question is complicated and ONLY WHEN the answer is not directly available in the grading score results"
107
+ ),
108
+ Tool(
109
+ name="Calculator",
110
+ func=self.run_math_chain,
111
+ description="Useful for when you need to compute mathematical expressions"
112
+ )
113
+ ]
114
+ return tools
115
 
116
+ def create_agent(self):
117
+ # Initialize a Conversational Agent with the existing chain as a tool
118
+ # planner = load_chat_planner(self.llm)
119
+ #
120
+ # # agent = initialize_agent(self.tools, self.llm, agent=AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION, verbose=True, memory=self.memory)
121
+ # executor = load_agent_executor(self.llm,self.tools, verbose=True)
122
+ #
123
+ #
124
+ # agent = PlanAndExecute(planner=planner, executor=executor, verbose=True)
125
+ # agent = initialize_agent(
126
+ # self.tools, self.llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
127
+ # )
128
 
129
+ agent = create_csv_agent(
130
+ self.llm,
131
+ self.grader.csv,
132
+ verbose=True,
133
+ agent_type=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
134
+ )
135
+ return agent
 
 
 
 
 
 
 
 
 
136
 
137
  def create_map_reduce_prompt(self):
138
+ system_template = f"""Use the following student's grading result document to answer a summary based question. The question will always be related to the overall grading results, feedback, score, summary of student responses for the discussion. But the answer will ALWAYS be specific to the student based on the question. There are examples to help you understand how to answer the question.
139
+ ______________________
140
+ Grading Result For:
141
+ {{context}}
142
+ ______________________
143
+ Use the following examples to take guidance on how to answer the question.
144
  Examples:
145
  Question: How many students participated in the discussion?
146
+ Rephrased question: Did this student participate in the discussion?
147
  Answer: This student participated in the discussion./This student did not participate in the discussion.
148
  Question: What was the average score for the discussion?
149
+ Rephrased question: What was the score for this student for the discussion?
150
  Answer: This student received a score of 10/10 for the discussion.
151
  Question: How many students received a full score?/How many students did not receive a full score?
152
+ Rephrased question: Did this student receive a full score?
153
  Answer: This student received a full score./This student did not receive a full score.
154
  Question: How many students lost marks in X category of the rubric?
155
+ Rephrased question: Did this student lose marks in X category of the rubric?
156
  Answer: This student lost marks in X category of the rubric./This student did not lose marks in X category of the rubric.
157
  Question: Give me 3 best responses received for the discussion.
158
+ Rephrased question: What were the 3 best responses received for the discussion?
159
  Answer: This student gave the following responses for the discussion and received a score of 10/10.
 
 
 
 
 
160
  ______________________
 
 
 
 
161
  """
162
  messages = [
163
  SystemMessagePromptTemplate.from_template(system_template),
 
165
  ]
166
  CHAT_QUESTION_PROMPT = ChatPromptTemplate.from_messages(messages)
167
  system_template = """You are Canvas Discussions Grading + Feedback QA Bot. Have a conversation with a human, answering the questions about the grading results, feedback, answers as accurately as possible.
168
+ Use the following answers for each student to answer the users question as accurately as possible.
169
  You are an expert at basic calculations and answering questions on grading results and can answer the following questions with ease.
170
  If you don't know the answer, just say that you don't know. Don't try to make up an answer.
171
  ______________________
 
179
 
180
  def create_prompt(self):
181
  system_template = f"""You are Canvas Discussions Grading + Feedback QA Bot. Have a conversation with a human, answering the questions about the grading results, feedback, answers as accurately as possible.
182
+ You are a grading assistant who graded the canvas discussions to create the following grading results and feedback.
183
+ Use the following instruction, rubric of the discussion which were used to grade the discussions and refine the answer if needed.
184
  ----------------
185
  {self.rubric_text}
186
  ----------------
 
194
 
195
  def get_tokens(self):
196
  total_tokens = 0
197
+ # for doc in self.docs:
198
+ # chat_prompt = self.prompt.format(context=doc, question=self.question)
199
+ #
200
+ # num_tokens = self.llm.get_num_tokens(chat_prompt)
201
+ # total_tokens += num_tokens
202
 
203
+ # summary = self.llm(summary_prompt)
204
 
205
+ # print (f"Summary: {summary.strip()}")
206
+ # print ("\n")
207
  return total_tokens
208
 
209
  def run_qa_chain(self, question):
210
+ self.question = question
211
+ self.get_tokens()
212
+ answer = self.qa_chain.run(question)
213
+ return answer
214
+
215
+ def run_summary_chain(self, question):
216
  self.question = question
217
  self.get_tokens()
218
  answer = self.chain(question)
219
  return answer
220
 
221
+ def run_math_chain(self, question):
222
+ self.question = question
223
+ self.get_tokens()
224
+ answer = self.math_chain.run(question)
225
+ return answer
226
+
227
+
228
+ def search_index_from_docs(source_chunks, embeddings):
229
+ # print("source chunks: " + str(len(source_chunks)))
230
+ # print("embeddings: " + str(embeddings))
231
+ search_index = FAISS.from_documents(source_chunks, embeddings)
232
+ return search_index
233
+
234
  # system_template = """You are Canvas Discussions Grading + Feedback QA Bot. Have a conversation with a human, answering the following questions as best you can.
235
  # You are a grading assistant who graded the canvas discussions to create the following grading results and feedback. Use the following pieces of the grading results and feedback to answer the users question.
236
  # Use the following pieces of context to answer the users question.
utils.py CHANGED
@@ -2,6 +2,11 @@ import os
2
  import shutil
3
  import time
4
 
 
 
 
 
 
5
 
6
  def reset_folder(destination):
7
  # synchronously and recursively delete the destination folder and all its contents, do not return until done
@@ -12,3 +17,61 @@ def reset_folder(destination):
12
  os.mkdir(destination)
13
  while not os.path.isdir(destination):
14
  time.sleep(4)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
  import shutil
3
  import time
4
 
5
+ from langchain import FAISS
6
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
7
+
8
+ from custom_csv_loader import CSVLoader
9
+
10
 
11
  def reset_folder(destination):
12
  # synchronously and recursively delete the destination folder and all its contents, do not return until done
 
17
  os.mkdir(destination)
18
  while not os.path.isdir(destination):
19
  time.sleep(4)
20
+
21
+
22
+ def search_index_from_docs(source_chunks, embeddings):
23
+ # print("source chunks: " + str(len(source_chunks)))
24
+ # print("embeddings: " + str(embeddings))
25
+ search_index = FAISS.from_documents(source_chunks, embeddings)
26
+ return search_index
27
+
28
+
29
+ def load_index(folder_path, index_name, embeddings):
30
+ # Load index
31
+ db = FAISS.load_local(
32
+ folder_path=folder_path,
33
+ index_name=index_name, embeddings=embeddings,
34
+ )
35
+ print("Loaded index")
36
+ return db
37
+
38
+
39
+ def fetch_data_for_embeddings(document_list):
40
+ print("document list: " + str(len(document_list)))
41
+ return document_list
42
+
43
+
44
+ def create_chunk_documents(document_list):
45
+ sources = fetch_data_for_embeddings(document_list)
46
+
47
+ splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
48
+
49
+ source_chunks = splitter.split_documents(sources)
50
+
51
+ print("chunks: " + str(len(source_chunks)))
52
+ print("sources: " + str(len(sources)))
53
+
54
+ return source_chunks
55
+
56
+
57
+ def create_index(folder_path, index_name, embeddings, document_list):
58
+ source_chunks = create_chunk_documents(document_list)
59
+ search_index = search_index_from_docs(source_chunks, embeddings)
60
+ FAISS.save_local(search_index, folder_path=folder_path, index_name=index_name)
61
+ return search_index
62
+
63
+
64
+ def get_csv_files(csv_file, source_column, field_names=None):
65
+ loader = None
66
+ if field_names:
67
+ loader = CSVLoader(file_path=csv_file, source_column=source_column,
68
+ csv_args={'fieldnames': field_names, 'restkey': 'restkey'})
69
+ else:
70
+ loader = CSVLoader(file_path=csv_file, source_column=source_column, )
71
+ document_list = loader.load()
72
+ return document_list
73
+
74
+
75
+ def index_exists(pickle_file, index_file):
76
+ return os.path.isfile(pickle_file) and os.path.isfile(index_file) and os.path.getsize(
77
+ pickle_file) > 0