# Code adapted from https://docs.llamaindex.ai/en/stable/examples/customization/prompts/chat_prompts/
import os

import gradio as gr
import openai
from llama_index.core import ChatPromptTemplate, SimpleDirectoryReader, VectorStoreIndex
from llama_index.llms.openai import OpenAI


def define_custom_prompts():
    # {context_str} and {query_str} are placeholders that the query engine
    # fills in at query time.
    qa_prompt_str = (
        "Context information is below.\n"
        "---------------------\n"
        "{context_str}\n"
        "---------------------\n"
        "Given the context information and not prior knowledge, "
        "answer the question: {query_str}\n"
    )
    refine_prompt_str = (
        "We have the opportunity to refine the original answer "
        "(only if needed) with some more context below.\n"
        "------------\n"
        "{context_msg}\n"
        "------------\n"
        "Given the new context, refine the original answer to better "
        "answer the question: {query_str}. "
        "If the context isn't useful, output the original answer again.\n"
        "Original Answer: {existing_answer}"
    )
    # Text QA Prompt
    chat_text_qa_msgs = [
        (
            "system",
            "Always answer the question, even if the context isn't helpful.",
        ),
        ("user", qa_prompt_str),
    ]
    text_qa_template = ChatPromptTemplate.from_messages(chat_text_qa_msgs)

    # Refine Prompt
    chat_refine_msgs = [
        (
            "system",
            "Always answer the question, even if the context isn't helpful.",
        ),
        ("user", refine_prompt_str),
    ]
    refine_template = ChatPromptTemplate.from_messages(chat_refine_msgs)
    return text_qa_template, refine_template
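
# Design note: both system messages deliberately instruct the model to answer
# even when the retrieved context is unhelpful. For a stricter, context-only
# assistant, invert that instruction (e.g. "Only answer from the given
# context; otherwise say you don't know").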


def answer_questions(user_question):
    text_qa_template, refine_template = define_custom_prompts()
    openai.api_key = os.environ["OPENAI_API_KEY"]  # raises KeyError if the key is unset
    documents = SimpleDirectoryReader("./data/").load_data()
    # Query with a chat model, so that the chat prompts above apply
    llm = OpenAI(model="gpt-3.5-turbo", temperature=0.1)
    index = VectorStoreIndex.from_documents(documents)
    response = index.as_query_engine(
        text_qa_template=text_qa_template,
        refine_template=refine_template,
        llm=llm,
    ).query(user_question)
    return str(response)
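
# Note: answer_questions() reloads ./data/ and re-embeds it on every request,
# which is fine for a small demo but wasteful for a larger corpus, where you
# would build the index once and reuse it. A minimal sketch (the get_index
# helper is hypothetical, not part of the original app):
#
#   _index = None
#   def get_index():
#       global _index
#       if _index is None:
#           documents = SimpleDirectoryReader("./data/").load_data()
#           _index = VectorStoreIndex.from_documents(documents)
#       return _index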

# Example usage:
# question = "Which countries were affected?"
# question = "What are the number of injuries in Gaziantep?"
# answer = answer_questions(question)

demo = gr.Interface(fn=answer_questions, inputs="text", outputs="text")
demo.launch()
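
# When running locally, demo.launch(share=True) would also expose a temporary
# public URL; on a hosted Space, launch() alone is enough.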