# (Hugging Face Spaces status badge text — "Spaces: Sleeping" — removed; it was
#  page-extraction residue, not part of the source.)
########################### | |
# UI for Meeting RAG Q&A. # | |
########################### | |
##################### Imports ##################### | |
import uuid | |
import threading | |
import gradio as gr | |
import spaces | |
from utilities.setup import get_files | |
from connections.pinecone import PineconeConnector | |
from connections.model import InferencePipeline | |
from services.embed_service.embed import EmbeddingService | |
from services.qa_service.qna import QAService | |
#################### Functions #################### | |
def process_transcripts(files, context, session_key):
    """Embed uploaded transcript files into the user's session namespace.

    Args:
        files: Uploaded transcript file(s) from the Gradio UploadButton.
        context: Extra user-supplied context (accepted but not used here).
        session_key: Per-user namespace key produced by generate_key().

    Returns:
        A status string shown in the "Progress" textbox.
    """
    print(session_key)  # debug trace of the active session
    # conf and pc_connector are module-level globals created in __main__.
    with EmbeddingService(conf,
                          pinecone=pc_connector,
                          session_key=session_key) as e:
        # Fix: the return value was previously bound to an unused local `f`.
        e.run(files)
    return "Completed Loading Data"
def retrieve_answer(question, goals, session_key):
    """Answer a user question against the indexed meeting transcripts.

    Args:
        question: The user's question text.
        goals: Analysis goals providing extra steering context.
        session_key: Namespace key selecting which documents to query.

    Returns:
        A (answer, retrieved_context) pair for the two output components.
    """
    print(session_key)
    # conf, pc_connector and pipelines are module-level globals from __main__.
    service = QAService(conf,
                        pinecone=pc_connector,
                        model_pipeline=pipelines,
                        question=question,
                        goals=goals,
                        session_key=session_key)
    with service as qa:
        answer, retrieved_context = qa.run()
    return answer, retrieved_context
def drop_namespace(arg):
    """Timer callback fired when a session key expires.

    NOTE(review): despite the message, this only prints — no Pinecone
    namespace is actually deleted here. Confirm whether real cleanup
    should be wired in.
    """
    print("Deleted namespace: " + arg)
def generate_key():
    """Create a unique per-user session key and schedule its expiry.

    Returns:
        A key of the form 'User_<uuid>' used as the Pinecone namespace
        for this user's uploaded transcripts.
    """
    # Fix: uuid4 is random; uuid1 (used previously) embeds the host MAC
    # address and a timestamp, making keys predictable and privacy-leaking.
    unique_key = 'User_' + str(uuid.uuid4())
    # NOTE(review): the UI promises documents stay queryable for 1 hour,
    # but this timer fires after 10 seconds — confirm the intended TTL.
    timer = threading.Timer(10, drop_namespace, [unique_key])
    # Daemon thread so a pending cleanup can never block interpreter exit.
    timer.daemon = True
    timer.start()
    # TODO: generate the new namespace in Pinecone here.
    return unique_key
def b_clicked(o):
    """Click handler: return an interactive Button state (enables upload)."""
    enabled = gr.Button(interactive=True)
    return enabled
##################### Process ##################### | |
def main(conf):
    """Build and launch the Gradio UI: About, User Config, and Q&A tabs.

    Args:
        conf: App configuration dict supplying tab names, the About
            markdown path, and default textbox values
            (conf["layout"], conf["defaults"]).
    """
    with gr.Blocks() as demo:
        # Main page: renders the About markdown file.
        with gr.TabItem(conf["layout"]["page_names"][0]):
            gr.Markdown(get_files.load_markdown_file(conf["layout"]["about"]))
        # User config page: key generation + transcript upload.
        with gr.TabItem(conf["layout"]["page_names"][1]):
            gr.Markdown("# Your User Configurations")
            gr.Markdown("**2 Options:**")
            # NOTE(review): "1 hour" here disagrees with generate_key's
            # 10-second expiry timer — confirm the intended TTL.
            gr.Markdown("""1. Generate a unique key to upload your personal transcripts.
                        Copy this key to use in the next page.
                        Your documents will be queryable for 1 hour after generation.""")
            gr.Markdown("""2. Or, go straight to the next tab to just ask your question to the
                        meetings that are already included!""")
            create_unique_key = gr.Button("Generate unique key")
            output_unique_key = gr.Textbox(label="Your session key",
                                           interactive=True ,
                                           show_copy_button=True,
                                           show_label=True)
            create_unique_key.click(fn=generate_key,
                                    outputs=output_unique_key)
            ### This should not be visible until key is generated.
            # Upload starts disabled; a second click handler below enables it.
            load_file = gr.UploadButton(label="Upload Transcript (.vtt)",
                                      file_types=[".vtt"],
                                      file_count='multiple', interactive=False)
            repository = gr.Textbox(label="Progress", value="Waiting for load...", visible=True)
            gr.Markdown("## Additional context you want to provide?")
            gr.Markdown("Try to keep this portion as concise as possible.")
            # `goals` is also wired into the Q&A tab's click inputs below.
            goals = gr.Textbox(label="Analysis Goals",
                               value=conf["defaults"]["goals"]) # not incorporated yet. Will be with Q&A.
            load_file.upload(process_transcripts, [load_file, goals, output_unique_key], repository)
            # Second handler on the same button: flips the upload button to
            # interactive once a key exists.
            create_unique_key.click(fn=b_clicked,
                                    inputs=create_unique_key,
                                    outputs=load_file)
        # Meeting Question & Answer Page
        with gr.TabItem(conf["layout"]["page_names"][2]):
            session_key = gr.Textbox(label="Paste Session key here.",
                                     value="")
            question = gr.Textbox(label="Ask a Question",
                                  value=conf["defaults"]["question"])
            ask_button = gr.Button("Ask!")
            model_output = gr.Markdown("### Answer")
            context_output = gr.components.Textbox(label="Retrieved Context")
            ask_button.click(fn=retrieve_answer,
                             inputs=[question, goals, session_key],
                             outputs=[model_output,context_output])
    demo.launch()
##################### Execute ##################### | |
if __name__ == "__main__":
    # Load app configuration (layout, defaults, embedding settings).
    conf = get_files.json_cfg()
    # Load API keys; expects "pinecone" and "huggingface" entries.
    keys = get_files.get_keys()
    # Initialize the Pinecone connector.
    # NOTE: conf, pc_connector and pipelines are module-level globals
    # consumed directly by the callback functions above.
    pc_connector = PineconeConnector(
        api_key=keys["pinecone"],
        index_name=conf["embeddings"]["index_name"],
        embedding=conf["embeddings"]["embedding"],
    )
    # Initialize the model inference connector.
    pipelines = InferencePipeline(conf,
                                  api_key=keys["huggingface"]
                                  )
    # Build and launch the UI.
    main(conf)