abdullahT committed on
Commit
d8a4ed2
1 Parent(s): e23e8a5

Fixed pdf file save error

Browse files
Files changed (1) hide show
  1. app.py +14 -15
app.py CHANGED
@@ -5,20 +5,21 @@ from langchain.embeddings import HuggingFaceEmbeddings
5
  from langchain.prompts import PromptTemplate
6
  from langchain.vectorstores import FAISS
7
  from langchain.llms import CTransformers
8
- import os
9
  import torch
10
 
11
 
 
12
  def model_response(query,docs_page_content):
13
 
14
  ### Check if GPU is available run the model on GPU
15
  if torch.cuda.is_available():
16
  config={'max_new_tokens':1024,
17
  'context_length': 4096,
18
- 'temperature':0.5,
19
  'gpu_layers': 10}
20
  else:
21
- config={'max_new_tokens':1028,
22
  'context_length': 4096,
23
  'temperature':0.5}
24
 
@@ -28,7 +29,7 @@ def model_response(query,docs_page_content):
28
  config=config)
29
 
30
  template= """
31
- You are a helpful assistant that that can answer questions about documents using the provided transcripts. Answer the following question: "{query}" by searching the following transcript: "{docs_page_content}"
32
  Only use the factual information from the transcript to answer the question. If you feel like you don't have enough information to answer the question, say "I don't know". Your answers should be verbose and detailed.
33
  """
34
  prompt=PromptTemplate(input_variables=["query","docs_page_content"],
@@ -37,11 +38,6 @@ def model_response(query,docs_page_content):
37
  ## Generate the response from the LLama 2 model
38
  response=llm(prompt.format(query=query,docs_page_content=docs_page_content))
39
 
40
- ## Remove the pdf file after the response is generated
41
- if response:
42
- if doc_type == 'PDF File':
43
- os.remove("files/pdf_file.pdf")
44
-
45
  container = st.container(border=True)
46
  container.write(response)
47
 
@@ -90,16 +86,19 @@ elif doc_type == 'PDF File':
90
  query = st.text_area('What is your question?','', placeholder='Ask question from the uploaded pdf')
91
  with st.sidebar:
92
  pdf_file = st.file_uploader("Choose a PDF file:", accept_multiple_files=False, type=['pdf'])
93
- if pdf_file is not None:
94
- with open('files/pdf_file.pdf', "wb") as f:
95
- f.write(pdf_file.read())
96
  if st.button('Submit'):
97
  if pdf_file is not None:
98
  if query != '':
99
- loader = PyPDFLoader("files/pdf_file.pdf")
100
- data = loader.load()
 
 
 
 
101
  docs_page_content = similarity_finder(data)
102
- print(docs_page_content)
103
  if pdf_file is not None:
104
  if query != '':
105
  model_response(query,docs_page_content)
 
5
  from langchain.prompts import PromptTemplate
6
  from langchain.vectorstores import FAISS
7
  from langchain.llms import CTransformers
8
+ import tempfile
9
  import torch
10
 
11
 
12
+
13
  def model_response(query,docs_page_content):
14
 
15
  ### Check if GPU is available run the model on GPU
16
  if torch.cuda.is_available():
17
  config={'max_new_tokens':1024,
18
  'context_length': 4096,
19
+ 'temperature':0.0,
20
  'gpu_layers': 10}
21
  else:
22
+ config={'max_new_tokens':1024,
23
  'context_length': 4096,
24
  'temperature':0.5}
25
 
 
29
  config=config)
30
 
31
  template= """
32
+ You are a helpful assistant that that can answer questions about documents using the provided transcripts. Answer the following question: "{query}" by searching the following transcript: {docs_page_content}
33
  Only use the factual information from the transcript to answer the question. If you feel like you don't have enough information to answer the question, say "I don't know". Your answers should be verbose and detailed.
34
  """
35
  prompt=PromptTemplate(input_variables=["query","docs_page_content"],
 
38
  ## Generate the response from the LLama 2 model
39
  response=llm(prompt.format(query=query,docs_page_content=docs_page_content))
40
 
 
 
 
 
 
41
  container = st.container(border=True)
42
  container.write(response)
43
 
 
86
  query = st.text_area('What is your question?','', placeholder='Ask question from the uploaded pdf')
87
  with st.sidebar:
88
  pdf_file = st.file_uploader("Choose a PDF file:", accept_multiple_files=False, type=['pdf'])
89
+ # if pdf_file is not None:
90
+ # with open('files/pdf_file.pdf', "wb") as f:
91
+ # f.write(pdf_file.read())
92
  if st.button('Submit'):
93
  if pdf_file is not None:
94
  if query != '':
95
+ temp_file = tempfile.NamedTemporaryFile(delete=False)
96
+ temp_file.write(pdf_file.read())
97
+ temp_file.close()
98
+ pdf_loader = PyPDFLoader(temp_file.name)
99
+ data = pdf_loader.load()
100
+ print(data)
101
  docs_page_content = similarity_finder(data)
 
102
  if pdf_file is not None:
103
  if query != '':
104
  model_response(query,docs_page_content)