###########################
# UI for Meeting RAG Q&A. #
###########################

##################### Imports #####################
import threading
import uuid

import gradio as gr
import spaces

from utilities.setup import get_files

from connections.pinecone import PineconeConnector
from connections.model import InferencePipeline

from services.embed_service.embed import EmbeddingService
from services.qa_service.qna import QAService

#################### Functions ####################
@spaces.GPU
def process_transcripts(files, goals):
    """Embed the uploaded transcripts and upsert them into Pinecone."""
    # `goals` is collected alongside the upload but only used later, at Q&A time.
    with EmbeddingService(conf,
                          pinecone=pinecones) as e:
        e.run(files)
    # A progress indicator could be surfaced here; see the sketch below.
    return "Completed Loading Data"

@spaces.GPU
def retrieve_answer(question, goals):
    """Query the indexed meetings and return the model's answer plus the retrieved context."""
    with QAService(conf,
                   pinecone=pinecones,
                   model_pipeline=pipelines,
                   question=question,
                   goals=goals) as q:
        answer, context = q.run()

    return answer, context

def drop_namespace(namespace):
    """Placeholder: tear down the session's Pinecone namespace once it expires (see sketch below)."""
    print(f"Session {namespace} expired; its namespace should be dropped here.")

def generate_key():
    unique_key = 'User_' + str(uuid.uuid1())
    # One hour, matching the session length promised in the UI.
    timer = threading.Timer(3600, drop_namespace, args=(unique_key,))
    timer.start()
    # TODO: create the new Pinecone namespace for this key.

    return unique_key + ': Started 1 hour session.'
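
# A sketch of what drop_namespace could grow into, assuming a pinecone-client
# Index handle is available; whether PineconeConnector.run() returns one (and
# under what name) is an assumption, so this helper is illustrative only and
# is not called anywhere yet.
def _drop_namespace_sketch(index, namespace):
    # Deleting every vector in a namespace effectively removes that namespace
    # from the index.
    index.delete(delete_all=True, namespace=namespace)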

##################### Process #####################
def main(conf):
    with gr.Blocks() as demo:
    
        # Main page
        with gr.TabItem(conf["layout"]["page_names"][0]):
            gr.Markdown(get_files.load_markdown_file(conf["layout"]["about"]))

        # User config page
        with gr.TabItem(conf["layout"]["page_names"][1]):
            gr.Markdown("# Your user Configurations")
            gr.Markdown("**2 Options:**")
            gr.Markdown("""1. Generate a unique key to upload your personal transcripts.
                           Your documents will be queryable for 1 hour after generation.""")
            gr.Markdown("""2. Or, go straight to the next tab to just ask your question to the 
                           meetings that are already included!""")
            create_unique_key = gr.Button("Generate unique key")
            output_unique_key = gr.Textbox(label="Your session key & time.")
            create_unique_key.click(fn=generate_key,
                                    #inputs=create_unique_key,
                                    outputs=output_unique_key)

            gr.Markdown("### Upload Transcript and Necessary Context")
            load_file = gr.UploadButton(label="Upload Transcript (.vtt)", 
                                                     file_types=[".vtt"],
                                        file_count='multiple')
            goals = gr.Textbox(label="Analysis Goals",
                                            value=conf["defaults"]["goals"]) # not incorporated yet. Will be with Q&A.
            repository = gr.Textbox(label="Progress", value="Waiting for load...", visible=True)
            load_file.upload(process_transcripts, [load_file, goals], repository)
    
    
    
        # Meeting Question & Answer Page
        with gr.TabItem(conf["layout"]["page_names"][2]):
            question = gr.Textbox(label="Ask a Question",
                                  value=conf["defaults"]["question"])
            ask_button = gr.Button("Ask!")
            model_output = gr.Markdown("### Answer")
            context_output = gr.Textbox(label="Retrieved Context")

            ask_button.click(fn=retrieve_answer,
                             inputs=[question, goals],
                             outputs=[model_output, context_output])

    demo.launch()
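
# Note: if several users hit the GPU-backed handlers at once, Gradio's request
# queue can serialize them. A possible variant (an assumption, not part of the
# original wiring):
#
#     demo.queue().launch()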


##################### Execute #####################
if __name__ == "__main__":
    # Load the JSON config; its expected shape is sketched just below.
    conf = get_files.json_cfg()

    # Load API keys
    keys = get_files.get_keys()
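
    # A sketch of the config shape this script assumes, reconstructed from the
    # keys accessed in this file; the example values are hypothetical
    # placeholders, not the project's real settings:
    #
    #     {
    #         "layout": {
    #             "page_names": ["About", "User Config", "Q&A"],
    #             "about": "about.md"
    #         },
    #         "defaults": {
    #             "goals": "Summarize the team's action items.",
    #             "question": "What decisions were made?"
    #         },
    #         "embeddings": {
    #             "index_name": "meetings",
    #             "embedding": "all-MiniLM-L6-v2"
    #         }
    #     }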

    # Initialize the Pinecone connector. `pinecones` and `pipelines` are left
    # as module-level globals so the Gradio handlers above can reach them.
    pc_connector = PineconeConnector(
        api_key=keys["pinecone"],
        index_name=conf["embeddings"]["index_name"],
        embedding=conf["embeddings"]["embedding"],
    )
    pinecones = pc_connector.run()

    # Initialize the model inference pipeline.
    pipelines = InferencePipeline(conf,
                                  api_key=keys["huggingface"])
    # run main
    main(conf)