###########################
# UI for Meeting RAG Q&A. #
###########################
##################### Imports #####################
import gradio as gr
from utilities.setup import get_files
from connections.pinecone import PineconeConnector
from connections.model import InferencePipeline
from services.embed_service.embed import EmbeddingService
from services.qa_service.qna import QAService
import spaces
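# `spaces` provides the @spaces.GPU decorator used below; on Hugging Face
# ZeroGPU hardware it allocates a GPU only while the decorated function runs.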
#################### Functions ####################
@spaces.GPU
def process_transcripts(files, context):
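    """Embed uploaded transcript files and load them into Pinecone.

    `context` carries the project-goals textbox value; it is not used during
    embedding yet, but the upload handler passes it through.
    """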
    with EmbeddingService(conf,
                          pinecone=pinecones) as e:
        e.run(files)
    # TODO: surface embedding progress in the UI (e.g., gr.Progress) instead of a static string
    return "Completed Loading Data"
@spaces.GPU
def retrieve_answer(question, goals):
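    """Answer a question about the embedded transcripts.

    Passes the question and project goals to QAService, which retrieves
    relevant chunks from Pinecone and generates an answer with the pipeline.
    """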
    with QAService(conf,
                   pinecone=pinecones,
                   model_pipeline=pipeline,
                   question=question,
                   goals=goals) as q:
        answer = q.run()  # run() is expected to return the generated answer string
    return answer
##################### Process #####################
def main(conf):
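    """Build and launch the three-tab Gradio UI: About, transcript upload, and Q&A."""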
with gr.Blocks() as demo:
# Main page
with gr.TabItem(conf["layout"]["page_names"][0]):
gr.Markdown(get_files.load_markdown_file(conf["layout"]["about"]))
# User config page
with gr.TabItem(conf["layout"]["page_names"][1]):
gr.Markdown("# Upload Transcript and Necessary Context")
gr.Markdown("Please wait as the transcript is being processed.")
load_file = gr.UploadButton(label="Upload Transcript (.vtt)",
file_types=[".vtt"],
file_count='multiple')
            goals = gr.Textbox(label="Project Goals",
                               value=conf["defaults"]["goals"])  # also passed to the Q&A tab as context
repository = gr.Textbox(label="Progress", value="Waiting for load...", visible=True)
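            # Upload event: (files, goals) in -> process_transcripts -> status textbox out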
load_file.upload(process_transcripts, [load_file, goals], repository)
# Meeting Question & Answer Page
with gr.TabItem(conf["layout"]["page_names"][2]):
question = gr.Textbox(label="Ask a Question",
value=conf["defaults"]["question"])
ask_button = gr.Button("Ask!")
            model_output = gr.Textbox(label="Answer")
ask_button.click(fn=retrieve_answer,
inputs=[question, goals],
outputs=model_output)
demo.launch()
##################### Execute #####################
if __name__ == "__main__":
# Get config
conf = get_files.json_cfg()
# Get keys
keys = get_files.get_keys()
    # Initialize Pinecone connector
pc_connector = PineconeConnector(
api_key=keys["pinecone"],
index_name=conf["embeddings"]["index_name"],
embedding=conf["embeddings"]["embedding"],
)
pinecones = pc_connector.run()
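    # run() is assumed to return the live Pinecone index handle(s) consumed by the services above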
    # Initialize model connector
    pipeline = InferencePipeline(conf,
                                 api_key=keys["huggingface"])
# run main
main(conf)