Commit 1eb5783 by ak3ra
Parent(s): 52a64e1

final version of chat interface

.gitignore CHANGED
@@ -1,2 +1,162 @@
- rag/__pycache__/rag_pipeline.cpython-311.pyc
- utils/__pycache__/prompts.cpython-311.pyc
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py,cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ # .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ #pdm.lock
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+ # in version control.
+ # https://pdm.fming.dev/latest/usage/project/#working-with-version-control
+ .pdm.toml
+ .pdm-python
+ .pdm-build/
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ #.idea/
__pycache__/app.cpython-311.pyc CHANGED
Binary files a/__pycache__/app.cpython-311.pyc and b/__pycache__/app.cpython-311.pyc differ
 
app.py CHANGED
@@ -1,128 +1,218 @@
  import gradio as gr
- from rag.rag_pipeline import RAGPipeline
- from utils.prompts import highlight_prompt, evidence_based_prompt, sample_questions
  from config import STUDY_FILES
- import json

- # Cache for RAG pipelines
  rag_cache = {}


- def get_rag_pipeline(study_name):
      if study_name not in rag_cache:
          study_file = STUDY_FILES.get(study_name)
-         if study_file:
-             rag_cache[study_name] = RAGPipeline(study_file)
-         else:
              raise ValueError(f"Invalid study name: {study_name}")
      return rag_cache[study_name]


- def chat_function(message, history, study_name, prompt_type):
      if not message.strip():
          return "Please enter a valid query."

      rag = get_rag_pipeline(study_name)
-     prompt = (
-         highlight_prompt
-         if prompt_type == "Highlight"
-         else evidence_based_prompt if prompt_type == "Evidence-based" else None
-     )
      response = rag.query(message, prompt_template=prompt)
      return response.response


- def get_study_info(study_name):
      study_file = STUDY_FILES.get(study_name)
-     if study_file:
-         with open(study_file, "r") as f:
-             data = json.load(f)
-         return f"Number of documents: {len(data)}\nFirst document title: {data[0]['title']}"
-     else:
          return "Invalid study name"


- def update_interface(study_name):
      study_info = get_study_info(study_name)
      questions = sample_questions.get(study_name, [])[:3]
-     return (
-         study_info,
-         *[gr.update(visible=True, value=q) for q in questions],
-         *[gr.update(visible=False) for _ in range(3 - len(questions))],
-     )


- def set_question(question):
-     return question


- with gr.Blocks() as demo:
-     gr.Markdown("# ACRES RAG Platform")

-     with gr.Row():
-         with gr.Column(scale=2):
-             chatbot = gr.Chatbot(elem_id="chatbot", show_label=False, height=400)
-             with gr.Row():
-                 msg = gr.Textbox(
                      show_label=False,
-                     placeholder="Type your message here...",
-                     scale=4,
-                     lines=1,
-                     autofocus=True,
                  )
-                 send_btn = gr.Button("Send", scale=1)
-             with gr.Accordion("Sample Questions", open=False):
-                 sample_btn1 = gr.Button("Sample Question 1", visible=False)
-                 sample_btn2 = gr.Button("Sample Question 2", visible=False)
-                 sample_btn3 = gr.Button("Sample Question 3", visible=False)
-
-         with gr.Column(scale=1):
-             gr.Markdown("### Study Information")
-             study_dropdown = gr.Dropdown(
-                 choices=list(STUDY_FILES.keys()),
-                 label="Select Study",
-                 value=list(STUDY_FILES.keys())[0],
              )
-             study_info = gr.Textbox(label="Study Details", lines=4)
-             gr.Markdown("### Settings")
-             prompt_type = gr.Radio(
-                 ["Default", "Highlight", "Evidence-based"],
-                 label="Prompt Type",
-                 value="Default",
              )
-             clear = gr.Button("Clear Chat")
-
-     def user(user_message, history):
-         if not user_message.strip():
-             return "", history  # Return unchanged if the message is empty
-         return "", history + [[user_message, None]]
-
-     def bot(history, study_name, prompt_type):
-         if not history:
-             return history
-         user_message = history[-1][0]
-         bot_message = chat_function(user_message, history, study_name, prompt_type)
-         history[-1][1] = bot_message
-         return history
-
-     msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
-         bot, [chatbot, study_dropdown, prompt_type], chatbot
-     )
-     send_btn.click(user, [msg, chatbot], [msg, chatbot], queue=False).then(
-         bot, [chatbot, study_dropdown, prompt_type], chatbot
-     )
-     clear.click(lambda: None, None, chatbot, queue=False)
-
-     study_dropdown.change(
-         fn=update_interface,
-         inputs=study_dropdown,
-         outputs=[study_info, sample_btn1, sample_btn2, sample_btn3],
-     )
-
-     sample_btn1.click(set_question, inputs=[sample_btn1], outputs=[msg])
-     sample_btn2.click(set_question, inputs=[sample_btn2], outputs=[msg])
-     sample_btn3.click(set_question, inputs=[sample_btn3], outputs=[msg])


  if __name__ == "__main__":
      demo.launch(share=True, debug=True)

+ import json
+ from typing import List, Tuple
+
  import gradio as gr
+
  from config import STUDY_FILES
+ from rag.rag_pipeline import RAGPipeline
+ from utils.helpers import generate_follow_up_questions
+ from utils.prompts import (
+     highlight_prompt,
+     evidence_based_prompt,
+     sample_questions,
+ )

  rag_cache = {}


+ def get_rag_pipeline(study_name: str) -> RAGPipeline:
+     """Get or create a RAGPipeline instance for the given study."""
      if study_name not in rag_cache:
          study_file = STUDY_FILES.get(study_name)
+         if not study_file:
              raise ValueError(f"Invalid study name: {study_name}")
+         rag_cache[study_name] = RAGPipeline(study_file)
      return rag_cache[study_name]


+ def chat_function(
+     message: str, history: List[List[str]], study_name: str, prompt_type: str
+ ) -> str:
+     """Process a chat message and generate a response using the RAG pipeline."""
      if not message.strip():
          return "Please enter a valid query."

      rag = get_rag_pipeline(study_name)
+     prompt = {
+         "Highlight": highlight_prompt,
+         "Evidence-based": evidence_based_prompt,
+     }.get(prompt_type)
+
      response = rag.query(message, prompt_template=prompt)
      return response.response


+ def get_study_info(study_name: str) -> str:
+     """Retrieve information about the specified study."""
      study_file = STUDY_FILES.get(study_name)
+     if not study_file:
          return "Invalid study name"

+     with open(study_file, "r") as f:
+         data = json.load(f)
+     return f"### Number of documents: {len(data)}"
+
+
+ def update_interface(study_name: str) -> Tuple[str, gr.update, gr.update, gr.update]:
+     """Update the interface based on the selected study."""
      study_info = get_study_info(study_name)
      questions = sample_questions.get(study_name, [])[:3]
+     visible_questions = [gr.update(visible=True, value=q) for q in questions]
+     hidden_questions = [gr.update(visible=False) for _ in range(3 - len(questions))]
+     return (study_info, *visible_questions, *hidden_questions)
+
+
+ def set_question(question: str) -> str:
+     return question.lstrip("✨ ")


+ def create_gr_interface() -> gr.Blocks:
+     """
+     Create and configure the Gradio interface for the RAG platform.
+
+     This function sets up the entire user interface, including:
+     - Chat interface with message input and display
+     - Study selection dropdown
+     - Sample and follow-up question buttons
+     - Prompt type selection
+     - Event handlers for user interactions
+
+     Returns:
+         gr.Blocks: The configured Gradio interface ready for launching.
+     """
+     with gr.Blocks() as demo:
+         gr.Markdown("# ACRES RAG Platform")
+
+         with gr.Row():
+             with gr.Column(scale=2):
+                 chatbot = gr.Chatbot(
+                     elem_id="chatbot",
                      show_label=False,
+                     height=600,
+                     container=False,
+                     show_copy_button=False,
+                     layout="bubble",
+                     visible=True,
+                 )
+                 with gr.Row():
+                     msg = gr.Textbox(
+                         show_label=False,
+                         placeholder="Type your message here...",
+                         scale=4,
+                         lines=1,
+                         autofocus=True,
+                     )
+                     send_btn = gr.Button("Send", scale=1)
+
+             with gr.Column(scale=1):
+                 gr.Markdown("### Study Information")
+                 study_dropdown = gr.Dropdown(
+                     choices=list(STUDY_FILES.keys()),
+                     label="Select Study",
+                     value=list(STUDY_FILES.keys())[0],
                  )
+                 study_info = gr.Markdown(label="Study Details")
+                 with gr.Accordion("Sample Questions", open=False):
+                     sample_btns = [
+                         gr.Button(f"Sample Question {i+1}", visible=False)
+                         for i in range(3)
+                     ]
+
+                 gr.Markdown("### Generated Questions")
+                 with gr.Row():
+                     follow_up_btns = [
+                         gr.Button(f"Follow-up {i+1}", visible=False) for i in range(3)
+                     ]
+
+                 gr.Markdown("### Settings")
+                 prompt_type = gr.Radio(
+                     ["Default", "Highlight", "Evidence-based"],
+                     label="Prompt Type",
+                     value="Default",
+                 )
+                 clear = gr.Button("Clear Chat")
+
+         def user(
+             user_message: str, history: List[List[str]]
+         ) -> Tuple[str, List[List[str]]]:
+             return "", (
+                 history + [[user_message, None]] if user_message.strip() else history
              )
+
+         def bot(
+             history: List[List[str]], study_name: str, prompt_type: str
+         ) -> Tuple[List[List[str]], gr.update, gr.update, gr.update]:
+             """
+             Generate bot response and update the interface.
+
+             This function:
+             1. Processes the latest user message
+             2. Generates a response using the RAG pipeline
+             3. Updates the chat history
+             4. Generates follow-up questions
+             5. Prepares interface updates for follow-up buttons
+
+             Args:
+                 history (List[List[str]]): The current chat history.
+                 study_name (str): The name of the current study.
+                 prompt_type (str): The type of prompt being used.
+
+             Returns:
+                 Tuple[List[List[str]], gr.update, gr.update, gr.update]:
+                     Updated chat history and interface components for follow-up questions.
+             """
+             if not history:
+                 # No-op updates keep the three follow-up buttons hidden.
+                 return history, *[gr.update(visible=False)] * 3
+
+             user_message = history[-1][0]
+             bot_message = chat_function(user_message, history, study_name, prompt_type)
+             history[-1][1] = bot_message
+
+             rag = get_rag_pipeline(study_name)
+             follow_up_questions = generate_follow_up_questions(
+                 rag, bot_message, user_message, study_name
              )
+
+             visible_questions = [
+                 gr.update(visible=True, value=q) for q in follow_up_questions
+             ]
+             hidden_questions = [
+                 gr.update(visible=False) for _ in range(3 - len(follow_up_questions))
+             ]
+
+             return (history, *visible_questions, *hidden_questions)
+
+         msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
+             bot,
+             [chatbot, study_dropdown, prompt_type],
+             [chatbot, *follow_up_btns],
+         )
+         send_btn.click(user, [msg, chatbot], [msg, chatbot], queue=False).then(
+             bot,
+             [chatbot, study_dropdown, prompt_type],
+             [chatbot, *follow_up_btns],
+         )
+
+         for btn in follow_up_btns + sample_btns:
+             btn.click(set_question, inputs=[btn], outputs=[msg])
+
+         clear.click(lambda: None, None, chatbot, queue=False)
+
+         study_dropdown.change(
+             fn=update_interface,
+             inputs=study_dropdown,
+             outputs=[study_info, *sample_btns],
+         )
+
+         return demo
+
+
+ demo = create_gr_interface()

  if __name__ == "__main__":
+     # demo = create_gr_interface()
      demo.launch(share=True, debug=True)
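
Note: `app.py` depends only on a narrow surface of `rag/rag_pipeline.py`, which is not shown in this diff: a constructor taking the study's JSON file path, and a `query()` method whose result exposes a `.response` string. A minimal sketch of that assumed interface (hypothetical names and shapes, not the project's actual implementation):

```python
# Sketch of the RAGPipeline surface that app.py assumes (hypothetical;
# the real class lives in rag/rag_pipeline.py and is not part of this commit).
from dataclasses import dataclass
from typing import Optional


@dataclass
class QueryResult:
    response: str  # the only attribute app.py reads


class RAGPipeline:
    def __init__(self, study_file: str):
        # Load and index the study's JSON documents.
        self.study_file = study_file

    def query(
        self, question: str, prompt_template: Optional[object] = None
    ) -> QueryResult:
        # Retrieve relevant chunks and generate an answer; prompt_template
        # is None when the "Default" prompt type is selected.
        raise NotImplementedError
```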
utils/__pycache__/prompts.cpython-311.pyc CHANGED
Binary files a/utils/__pycache__/prompts.cpython-311.pyc and b/utils/__pycache__/prompts.cpython-311.pyc differ
 
utils/helpers.py CHANGED
@@ -1,42 +1,79 @@
  from typing import Dict, Any
  from llama_index.core import Response


- def process_response(response: Response) -> Dict[str, Any]:
-     source_nodes = response.source_nodes
-     sources = {}
-     for i, node in enumerate(source_nodes, 1):
-         source = format_source(node.metadata)
-         if source not in sources.values():
-             sources[i] = source

-     markdown_text = response.response + "\n\n### Sources\n\n"
-     raw_text = response.response + "\n\nSources:\n"
-
-     for i, source in sources.items():
-         markdown_text += f"{i}. {source}\n"
-         raw_text += f"[{i}] {source}\n"
-
-     return {"markdown": markdown_text, "raw": raw_text, "sources": sources}
-
-
- def format_source(metadata: Dict[str, Any]) -> str:
-     authors = metadata.get("authors", "Unknown Author")
-     year = metadata.get("year", "n.d.")
-     title = metadata.get("title", "Untitled")
-
-     author_list = authors.split(",")
-     if len(author_list) > 2:
-         formatted_authors = f"{author_list[0].strip()} et al."
-     elif len(author_list) == 2:
-         formatted_authors = f"{author_list[0].strip()} and {author_list[1].strip()}"
      else:
-         formatted_authors = author_list[0].strip()

-     year = "n.d." if year is None or year == "None" else str(year)

-     max_title_length = 250
-     if len(title) > max_title_length:
-         title = title[:max_title_length] + "..."

-     return f"{formatted_authors} ({year}). {title}"

  from typing import Dict, Any
  from llama_index.core import Response
+ from typing import List
+ from rag.rag_pipeline import RAGPipeline
+ from utils.prompts import (
+     structured_follow_up_prompt,
+     VaccineCoverageVariables,
+     StudyCharacteristics,
+ )


+ def generate_follow_up_questions(
+     rag: RAGPipeline, response: str, query: str, study_name: str
+ ) -> List[str]:
+     """
+     Generate follow-up questions based on the given RAGPipeline, response, query, and study name.
+
+     Args:
+         rag (RAGPipeline): The RAGPipeline object used for generating follow-up questions.
+         response (str): The response to the initial query.
+         query (str): The initial query.
+         study_name (str): The name of the study.
+
+     Returns:
+         List[str]: A list of generated follow-up questions.
+     """
+     # Determine the study type based on the study_name
+     if "Vaccine Coverage" in study_name:
+         study_type = "Vaccine Coverage"
+         key_variables = list(VaccineCoverageVariables.__annotations__.keys())
+     elif "Ebola Virus" in study_name:
+         study_type = "Ebola Virus"
+         key_variables = [
+             "SAMPLE_SIZE",
+             "PLASMA_TYPE",
+             "DOSAGE",
+             "FREQUENCY",
+             "SIDE_EFFECTS",
+             "VIRAL_LOAD_CHANGE",
+             "SURVIVAL_RATE",
+         ]
+     elif "Gene Xpert" in study_name:
+         study_type = "Gene Xpert"
+         key_variables = [
+             "OBJECTIVE",
+             "OUTCOME_MEASURES",
+             "SENSITIVITY",
+             "SPECIFICITY",
+             "COST_COMPARISON",
+             "TURNAROUND_TIME",
+         ]
      else:
+         study_type = "General"
+         key_variables = list(StudyCharacteristics.__annotations__.keys())

+     # Add key variables to the context
+     context = f"Study type: {study_type}\nKey variables to consider: {', '.join(key_variables)}\n\n{response}"

+     follow_up_response = rag.query(
+         structured_follow_up_prompt.format(
+             context_str=context,
+             query_str=query,
+             response_str=response,
+             study_type=study_type,
+         )
+     )

+     questions = follow_up_response.response.strip().split("\n")
+     cleaned_questions = []
+     for q in questions:
+         # Remove leading numbers and periods, and strip whitespace
+         cleaned_q = q.split(". ", 1)[-1].strip()
+         # Ensure the question ends with a question mark
+         if cleaned_q and not cleaned_q.endswith("?"):
+             cleaned_q += "?"
+         if cleaned_q:
+             cleaned_questions.append(f"✨ {cleaned_q}")
+     return cleaned_questions[:3]
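
To make the cleaning loop concrete, here is what it does to a typical numbered model output (the input string is hypothetical, not real model output). The ✨ prefix added here is stripped again by `set_question` in `app.py` before the question lands in the textbox:

```python
# Illustrative run of the cleaning loop above (hypothetical model output).
raw = "1. What was the sample size\n2. How was vaccination status verified?"

cleaned = []
for q in raw.strip().split("\n"):
    cleaned_q = q.split(". ", 1)[-1].strip()  # drop "1. ", "2. ", ...
    if cleaned_q and not cleaned_q.endswith("?"):
        cleaned_q += "?"                      # normalize the ending
    if cleaned_q:
        cleaned.append(f"✨ {cleaned_q}")

print(cleaned)
# ['✨ What was the sample size?', '✨ How was vaccination status verified?']
```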
utils/prompts.py CHANGED
@@ -116,3 +116,20 @@ evidence_based_prompt = PromptTemplate(
      "If you're unsure about a source, use [?]. "
      "Ensure that EVERY statement from the context is properly cited."
  )
+
+
+ structured_follow_up_prompt = PromptTemplate(
+     "Context information is below.\n"
+     "---------------------\n"
+     "{context_str}\n"
+     "---------------------\n"
+     "Original question: {query_str}\n"
+     "Response: {response_str}\n"
+     "Study type: {study_type}\n"
+     "Based on the above information and the study type, generate 3 follow-up questions that help extract key variables or information from the study. "
+     "Focus on the following aspects:\n"
+     "1. Any missing key variables that are typically reported in this type of study.\n"
+     "2. Clarification on methodology or results that might affect the interpretation of the study.\n"
+     "3. Potential implications or applications of the study findings.\n"
+     "Ensure each question is specific, relevant to the study type, and ends with a question mark."
+ )
+ )