Update app.py
app.py CHANGED
@@ -5,29 +5,34 @@
 ##################### Imports #####################
 import gradio as gr
 from utilities.setup import get_files
-#from server import EmbeddingService, QAService
 
 from connections.pinecone import PineconeConnector
-from
+#from connections.model import InferencePipeline
 
+from services.embed_service.embed import EmbeddingService
+#from services.qa_service.qna import QAService
 from server import QAService
 
 #import spaces
 
 #################### Functions ####################
+#@spaces.GPU
 def process_transcripts(files, context):
 
     print(files)
-    with EmbeddingService(conf,
+    with EmbeddingService(conf,
+                          pinecone=pinecones) as e:
         f = e.run(files)
     # some way to wait or a progress bar?
     return "Completed Loading Data"
 
 #@spaces.GPU
-def retrieve_answer(question,
+def retrieve_answer(question, goals):
     #with QAService(conf) as q:
-    # q.
+    # q.infer(question)
 
+    #with QAService(conf, pinecone=pinecones, model_pipeline=pipeline, question=question, goals=goals) as q:
+        #q.run()
 
     return question + context
 
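The with EmbeddingService(conf, pinecone=pinecones) as e: block added in this hunk only works if EmbeddingService implements Python's context-manager protocol. A minimal sketch of what that implies, assuming the class in services.embed_service.embed accepts conf plus a pinecone keyword; the method bodies are illustrative, not the actual implementation:

class EmbeddingService:
    def __init__(self, conf, pinecone=None):
        self.conf = conf
        self.pinecone = pinecone  # handle produced by PineconeConnector.run()

    def __enter__(self):
        # acquire whatever run() needs, e.g. load the embedding model
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # release resources; returning False propagates any exception
        return False

    def run(self, files):
        # embed the uploaded transcripts and upsert the vectors into Pinecone
        ...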
@@ -66,8 +71,6 @@ def main(conf):
             inputs=[question, goals],
             outputs=model_output)
 
-
-
     demo.launch()
 
 
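For orientation, the inputs/outputs lines in the hunk above are Gradio event wiring inside main(conf). A hedged sketch of the surrounding Blocks layout, where only the names question, goals, and model_output are taken from the diff and everything else is assumed:

import gradio as gr

def main(conf):
    with gr.Blocks() as demo:
        question = gr.Textbox(label="Question")
        goals = gr.Textbox(label="Goals")
        model_output = gr.Textbox(label="Answer")
        ask = gr.Button("Ask")
        # same wiring as in the diff: two inputs, one output
        ask.click(fn=retrieve_answer,
                  inputs=[question, goals],
                  outputs=model_output)
    demo.launch()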
@@ -89,6 +92,14 @@ if __name__ == "__main__":
|
|
89 |
pinecones = pc_connector.run()
|
90 |
|
91 |
# initialize model connector
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
92 |
|
|
|
93 |
# run main
|
94 |
main(conf)
|
|
|
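The commented-out additions show where this commit is heading: build an InferencePipeline from the Hugging Face key, then hand it to QAService together with the Pinecone handle. Transcribed from the comments in this diff (the real connections.model and services.qa_service APIs may differ), the enabled version would look roughly like this; until then, retrieve_answer still ends in return question + context, which references the old context parameter rather than the new goals one:

# assumed wiring, read off the commented-out lines in this commit
pipeline = InferencePipeline(conf,
                             api_key=keys["huggingface"])

def retrieve_answer(question, goals):
    # signature taken from the commented-out line in the first hunk; returning
    # the service's result is an assumption, the comment only shows q.run()
    with QAService(conf,
                   pinecone=pinecones,
                   model_pipeline=pipeline,
                   question=question,
                   goals=goals) as q:
        return q.run()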
|