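"""Streamlit app for document question answering.

Pipeline sketch: a cross-encoder ranks the document's paragraphs against the query,
an extractive QA model pulls an answer span from each of the top passages, and a
T5 model rewrites that span into a fluent answer alongside its supporting sentences.

Run locally with `streamlit run app.py` (assuming this file is saved as app.py).
"""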
import streamlit as st
import pysbd
from sentence_transformers import CrossEncoder
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, pipeline
def fetch_answers(question, document):
    # Split the document into paragraphs and pair each non-empty paragraph with the question.
    document_paragraphs = document.splitlines()
    query_paragraph_list = [(question, para) for para in document_paragraphs if len(para.strip()) > 0]

    # Score every (question, paragraph) pair with the cross-encoder and keep the five
    # best passages, ordered from most to least relevant.
    scores = passage_retrieval_model.predict(query_paragraph_list)
    top_5_indices = scores.argsort()[-5:]
    top_5_query_paragraph_list = [query_paragraph_list[i] for i in top_5_indices]
    top_5_query_paragraph_list.reverse()

    top_5_query_paragraph_answer_list = ""
    count = 1
    for query, passage in top_5_query_paragraph_list:
        # Extract a short answer span from the passage, then collect the passage
        # sentences that contain it as supporting evidence.
        passage_sentences = sentence_segmenter.segment(passage)
        answer = qa_model(question=query, context=passage)['answer']
        if answer.startswith('.') or answer.startswith(':'):
            answer = answer[1:].strip()
        evidence_sentence = ""
        for sentence in passage_sentences:
            if answer in sentence:
                evidence_sentence = evidence_sentence + " " + sentence

        # Rephrase the answer with the T5 model, conditioned on the evidence sentences.
        model_input = f"question: {query} context: {evidence_sentence}"
        encoded_input = tokenizer([model_input],
                                  return_tensors='pt',
                                  max_length=512,
                                  truncation=True)
        output = model.generate(input_ids=encoded_input.input_ids,
                                attention_mask=encoded_input.attention_mask)
        output_answer = tokenizer.decode(output[0], skip_special_tokens=True)

        result_str = "# ANSWER " + str(count) + ": " + output_answer + "\n"
        result_str = result_str + "REFERENCE: " + evidence_sentence + "\n\n"
        top_5_query_paragraph_answer_list += result_str
        count += 1
    return top_5_query_paragraph_answer_list
st.title('Document Question Answering System')

query = st.text_area("Query", "", height=25)
document = st.text_area("Document Text", "", height=100)

st.text("Progress Bar")
my_bar = st.progress(0)
my_bar.progress(10)

# Generative reader used to rephrase the extracted answer.
# AutoModelForSeq2SeqLM replaces the deprecated AutoModelWithLMHead.
model_name = "MaRiOrOsSi/t5-base-finetuned-question-answering"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
my_bar.progress(25)

# Sentence segmenter for evidence extraction and cross-encoder for passage retrieval.
sentence_segmenter = pysbd.Segmenter(language='en', clean=False)
passage_retrieval_model = CrossEncoder('cross-encoder/ms-marco-MiniLM-L-6-v2')
my_bar.progress(50)

# Extractive QA model that finds the answer span inside each retrieved passage.
qa_model = pipeline("question-answering", 'a-ware/bart-squadv2')
my_bar.progress(75)

if st.button("Get Answers From Document"):
    st.markdown(fetch_answers(query, document))
    my_bar.progress(100)