ak3ra committed on
Commit 660cea6
1 Parent(s): c6040d0

modified the chatui

.gitignore ADDED
@@ -0,0 +1,2 @@
+rag/__pycache__/rag_pipeline.cpython-311.pyc
+utils/__pycache__/prompts.cpython-311.pyc
app.py CHANGED
@@ -1,8 +1,8 @@
 import gradio as gr
-import json
 from rag.rag_pipeline import RAGPipeline
 from utils.prompts import highlight_prompt, evidence_based_prompt, sample_questions
 from config import STUDY_FILES
+import json
 
 # Cache for RAG pipelines
 rag_cache = {}
@@ -20,14 +20,11 @@ def get_rag_pipeline(study_name):
 
 def chat_function(message, history, study_name, prompt_type):
     rag = get_rag_pipeline(study_name)
-
-    if prompt_type == "Highlight":
-        prompt = highlight_prompt
-    elif prompt_type == "Evidence-based":
-        prompt = evidence_based_prompt
-    else:
-        prompt = None
-
+    prompt = (
+        highlight_prompt
+        if prompt_type == "Highlight"
+        else evidence_based_prompt if prompt_type == "Evidence-based" else None
+    )
     response = rag.query(message, prompt_template=prompt)
     return response.response
 
@@ -42,26 +39,33 @@ def get_study_info(study_name):
     return "Invalid study name"
 
 
-with gr.Blocks() as demo:
+with gr.Blocks(css="#chatbot {height: 600px; overflow-y: auto;}") as demo:
    gr.Markdown("# RAG Pipeline Demo")
 
    with gr.Row():
-        study_dropdown = gr.Dropdown(
-            choices=list(STUDY_FILES.keys()),
-            label="Select Study",
-            value=list(STUDY_FILES.keys())[0],
-        )
-        study_info = gr.Markdown()
-
-        prompt_type = gr.Radio(
-            ["Default", "Highlight", "Evidence-based"],
-            label="Prompt Type",
-            value="Default",
-        )
+        with gr.Column(scale=3):
+            chatbot = gr.Chatbot(elem_id="chatbot")
+            with gr.Row():
+                msg = gr.Textbox(
+                    show_label=False, placeholder="Enter your message here...", scale=4
+                )
+                send_btn = gr.Button("Send", scale=1)
+
+        with gr.Column(scale=1):
+            study_dropdown = gr.Dropdown(
+                choices=list(STUDY_FILES.keys()),
+                label="Select Study",
+                value=list(STUDY_FILES.keys())[0],
+            )
+            study_info = gr.Markdown()
+            prompt_type = gr.Radio(
+                ["Default", "Highlight", "Evidence-based"],
+                label="Prompt Type",
+                value="Default",
+            )
+            clear = gr.Button("Clear Chat")
 
-    chatbot = gr.Chatbot()
-    msg = gr.Textbox()
-    clear = gr.Button("Clear")
+    gr.Examples(examples=sample_questions[list(STUDY_FILES.keys())[0]], inputs=msg)
 
    def user(user_message, history):
        return "", history + [[user_message, None]]
@@ -75,15 +79,16 @@ with gr.Blocks() as demo:
    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
        bot, [chatbot, study_dropdown, prompt_type], chatbot
    )
+    send_btn.click(user, [msg, chatbot], [msg, chatbot], queue=False).then(
+        bot, [chatbot, study_dropdown, prompt_type], chatbot
+    )
    clear.click(lambda: None, None, chatbot, queue=False)
 
    study_dropdown.change(
        fn=get_study_info,
        inputs=study_dropdown,
        outputs=study_info,
-    ).then(lambda: None, None, chatbot, queue=False)
-
-    gr.Examples(examples=sample_questions[list(STUDY_FILES.keys())[0]], inputs=msg)
+    )
 
 if __name__ == "__main__":
    demo.launch(share=True, debug=True)
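
Both the textbox submit handler and the new Send button chain the `user` callback into a `bot` callback that is not touched by this diff. A minimal sketch of what that callback plausibly looks like, inferred only from its wiring (inputs [chatbot, study_dropdown, prompt_type], output chatbot) and from chat_function above; the actual implementation in app.py may differ:

def bot(history, study_name, prompt_type):
    # `user` has just appended [user_message, None] to the chat history.
    user_message = history[-1][0]
    # Query the cached RAG pipeline with the currently selected prompt type.
    answer = chat_function(user_message, history, study_name, prompt_type)
    # Fill the assistant slot of the last turn so gr.Chatbot re-renders it.
    history[-1][1] = answer
    return history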
rag/__pycache__/rag_pipeline.cpython-311.pyc CHANGED
Binary files a/rag/__pycache__/rag_pipeline.cpython-311.pyc and b/rag/__pycache__/rag_pipeline.cpython-311.pyc differ
 
rag/rag_pipeline.py CHANGED
@@ -70,11 +70,12 @@ class RAGPipeline:
             "---------------------\n"
             "{context_str}\n"
             "---------------------\n"
-            "Given this information, please answer the question provided in the context. "
-            "Include all relevant information from the provided context. "
-            "If information comes from multiple sources, please mention all of them. "
-            "If the information is not available in the context, please state that clearly. "
-            "When quoting specific information, please use square brackets to indicate the source, e.g. [1], [2], etc."
+            "Given this information, please answer the question: {query_str}\n"
+            "Provide an answer to the question using evidence from the context above. "
+            "Cite sources using square brackets for EVERY piece of information, e.g. [1], [2], etc. "
+            "Even if there's only one source, still include the citation. "
+            "If you're unsure about a source, use [?]. "
+            "Ensure that EVERY statement from the context is properly cited."
         )
 
         # This is a hack to index all the documents in the store :)
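
The rewritten template now interpolates the question directly through {query_str}, the same placeholder convention as evidence_based_prompt in utils/prompts.py. A sketch, not taken from this repo, of how a two-placeholder template like this behaves (the opening line and the import path are assumptions; the import path varies by llama_index version):

from llama_index.core import PromptTemplate  # older releases: from llama_index import PromptTemplate

qa_template = PromptTemplate(
    "Context information is below.\n"  # assumed header; only the tail of the template is shown in this diff
    "---------------------\n"
    "{context_str}\n"
    "---------------------\n"
    "Given this information, please answer the question: {query_str}\n"
    "Ensure that EVERY statement from the context is properly cited."
)

# The query engine fills both fields at query time; formatting by hand shows the shape:
print(qa_template.format(context_str="[1] Example passage.", query_str="What does source [1] report?"))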
utils/__pycache__/prompts.cpython-311.pyc CHANGED
Binary files a/utils/__pycache__/prompts.cpython-311.pyc and b/utils/__pycache__/prompts.cpython-311.pyc differ
 
utils/prompts.py CHANGED
@@ -111,5 +111,8 @@ evidence_based_prompt = PromptTemplate(
     "---------------------\n"
     "Given this information, please answer the question: {query_str}\n"
     "Provide an answer to the question using evidence from the context above. "
-    "Cite sources using square brackets."
+    "Cite sources using square brackets for EVERY piece of information, e.g. [1], [2], etc. "
+    "Even if there's only one source, still include the citation. "
+    "If you're unsure about a source, use [?]. "
+    "Ensure that EVERY statement from the context is properly cited."
 )
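
For reference, the template body is a series of adjacent string literals, which Python joins into one string at compile time; the trailing space on each fragment is what keeps the instructions from running together. A minimal, self-contained illustration using the new fragments (the variable name is only for this example):

prompt_tail = (
    "Provide an answer to the question using evidence from the context above. "
    "Cite sources using square brackets for EVERY piece of information, e.g. [1], [2], etc. "
    "Even if there's only one source, still include the citation. "
    "If you're unsure about a source, use [?]. "
    "Ensure that EVERY statement from the context is properly cited."
)
print(prompt_tail)  # prints one continuous instruction string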