tykiww committed
Commit a65c301
Parent: 0cd2aee

Update app.py

Files changed (1): app.py (+16 -16)
app.py CHANGED
@@ -7,16 +7,16 @@ import gradio as gr
 from utilities.setup import get_files
 
 from connections.pinecone import PineconeConnector
-#from connections.model import InferencePipeline
+from connections.model import InferencePipeline
 
 from services.embed_service.embed import EmbeddingService
-#from services.qa_service.qna import QAService
-from server import QAService
+from services.qa_service.qna import QAService
+#from server import QAService
 
-#import spaces
+import spaces
 
 #################### Functions ####################
-#@spaces.GPU
+@spaces.GPU
 def process_transcripts(files, context):
 
     print(files)
@@ -26,13 +26,17 @@ def process_transcripts(files, context):
     # some way to wait or a progress bar?
     return "Completed Loading Data"
 
-#@spaces.GPU
+@spaces.GPU
 def retrieve_answer(question, goals):
     #with QAService(conf) as q:
     #    q.infer(question)
 
-    #with QAService(conf, pinecone=pinecones, model_pipeline=pipeline, question=question, goals=goals) as q:
-    #q.run()
+    with QAService(conf,
+                   pinecone=pinecones,
+                   model_pipeline=pipeline,
+                   question=question,
+                   goals=goals) as q:
+        q.run()
 
     return question + goals
@@ -92,14 +96,10 @@ if __name__ == "__main__":
     pinecones = pc_connector.run()
 
     # initialize model connector
-    #pipeline = InferencePipeline()
+    pipeline = InferencePipeline()
 
-    #pipeline = InferencePipeline(conf,
-    #                             api_key=keys["huggingface"]
-    #                             )
-    #
-    #pipeline.infer(prompt)
-
-
+    pipeline = InferencePipeline(conf,
+                                 api_key=keys["huggingface"]
+                                 )
     # run main
     main(conf)
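Note on import spaces / @spaces.GPU: this commit switches the app to the Hugging Face ZeroGPU pattern, in which decorated functions are allocated a GPU only while they execute. A minimal, self-contained sketch of the pattern (the function body is illustrative, not from this repo):

import spaces
import torch

@spaces.GPU  # a time budget can also be requested, e.g. @spaces.GPU(duration=120)
def generate(prompt: str) -> str:
    # All CUDA work must happen inside the decorated function,
    # since the GPU is attached only for the duration of this call.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    return f"[{device}] {prompt}"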
 
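Note on QAService: the rewritten retrieve_answer uses it as a context manager, but services/qa_service/qna.py is outside this diff. A hypothetical sketch of the interface the call site implies (names and bodies are assumptions, not the repo's code):

class QAService:
    def __init__(self, conf, pinecone=None, model_pipeline=None,
                 question="", goals=""):
        self.conf = conf
        self.pinecone = pinecone              # vector-store handle(s)
        self.model_pipeline = model_pipeline  # LLM wrapper
        self.question = question
        self.goals = goals

    def __enter__(self):
        # acquire per-request resources here (e.g. an index handle)
        return self

    def __exit__(self, exc_type, exc, tb):
        # release resources; returning False propagates any exception
        return False

    def run(self):
        # hypothetical flow: retrieve context from Pinecone, then pass
        # question + goals + context to the model pipeline
        raise NotImplementedError

As committed, the result of q.run() is discarded and the function still returns question + goals.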
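Note on InferencePipeline: the constructor is called with conf and api_key=keys["huggingface"], but connections/model.py is also outside this diff. If it wraps the hosted Inference API via huggingface_hub, it might look roughly like this (a sketch under that assumption only):

from huggingface_hub import InferenceClient

class InferencePipeline:
    def __init__(self, conf=None, api_key=None):
        self.conf = conf or {}
        # InferenceClient authenticates with a user access token
        self.client = InferenceClient(token=api_key)

    def infer(self, prompt: str) -> str:
        # send the prompt to the Inference API and return generated text
        return self.client.text_generation(prompt, max_new_tokens=256)

Also worth noting: the bare pipeline = InferencePipeline() added first is immediately rebound by the configured call on the next statement, so the first assignment is redundant.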