###########################
# UI for Meeting RAG Q&A. #
###########################
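# Gradio front end: upload .vtt meeting transcripts, embed them into a
# Pinecone index, then answer questions over the indexed transcripts via
# a Hugging Face inference pipeline.
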
##################### Imports #####################
import gradio as gr
from utilities.setup import get_files
from connections.pinecone import PineconeConnector
from connections.model import InferencePipeline
from services.embed_service.embed import EmbeddingService
from services.qa_service.qna import QAService
import spaces
#################### Functions ####################
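
# @spaces.GPU requests a GPU slice on Hugging Face ZeroGPU Spaces for
# the duration of each decorated call.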
@spaces.GPU
def process_transcripts(files, context):
    """Embed uploaded .vtt transcripts into the Pinecone index.

    `context` receives the Project Goals textbox value; it is not used
    during embedding yet.
    """
    print(files)
    with EmbeddingService(conf,
                          pinecone=pinecones) as e:
        e.run(files)
    # TODO: surface embedding progress (wait indicator or progress bar)
    return "Completed Loading Data"

@spaces.GPU
def retrieve_answer(question, goals):
    """Answer a question against the indexed transcripts."""
    with QAService(conf,
                   pinecone=pinecones,
                   model_pipeline=pipeline,
                   question=question,
                   goals=goals) as q:
        # Assumes QAService.run() returns the answer text for display.
        answer = q.run()
    return answer
##################### Process #####################

def main(conf):
    with gr.Blocks() as demo:

        # Main page
        with gr.TabItem(conf["layout"]["page_names"][0]):
            gr.Markdown(get_files.load_markdown_file(conf["layout"]["about"]))
        # User config page
        with gr.TabItem(conf["layout"]["page_names"][1]):
            gr.Markdown("# Upload Transcript and Necessary Context")
            gr.Markdown("Please wait while the transcript is processed.")
            load_file = gr.UploadButton(label="Upload Transcript (.vtt)",
                                        file_types=[".vtt"],
                                        file_count='multiple')
            goals = gr.Textbox(label="Project Goals",
                               value=conf["defaults"]["goals"])  # also fed to the Q&A handler below
            repository = gr.Textbox(label="Progress", value="Waiting for load...", visible=True)
            load_file.upload(process_transcripts, [load_file, goals], repository)
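
        # `goals` is defined in the upload tab but reused below: components
        # created anywhere inside the same gr.Blocks context can be wired
        # into event handlers on other tabs.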
        # Meeting Question & Answer page
        with gr.TabItem(conf["layout"]["page_names"][2]):
            question = gr.Textbox(label="Ask a Question",
                                  value=conf["defaults"]["question"])
            ask_button = gr.Button("Ask!")
            model_output = gr.Textbox(label="Answer")
            ask_button.click(fn=retrieve_answer,
                             inputs=[question, goals],
                             outputs=model_output)

    demo.launch()
##################### Execute #####################
if __name__ == "__main__":
    # Get config
    conf = get_files.json_cfg()
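
    # Illustrative config shape, inferred from the keys read in this file
    # (actual values live in the JSON that get_files.json_cfg() loads):
    # {
    #   "layout":     {"page_names": [...], "about": "<markdown path>"},
    #   "defaults":   {"goals": "...", "question": "..."},
    #   "embeddings": {"index_name": "...", "embedding": "..."}
    # }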

    # Get keys
    keys = get_files.get_keys()

    # Initialize Pinecone connector
    pc_connector = PineconeConnector(
        api_key=keys["pinecone"],
        index_name=conf["embeddings"]["index_name"],
        embedding=conf["embeddings"]["embedding"],
    )
    pinecones = pc_connector.run()
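
    # Note: conf, pinecones, and pipeline are module-level globals that the
    # @spaces.GPU handlers above read at call time.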

    # Initialize model connector
    pipeline = InferencePipeline(conf,
                                 api_key=keys["huggingface"])

    # Run main
    main(conf)