# Hugging Face Space: document question answering with evidence extraction.
# NOTE(review): the original lines here ("Spaces: / Runtime error / Runtime
# error") were web-page scrape residue, not source code, and were removed.
import gradio as gr
import pysbd
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, pipeline
from sentence_transformers import CrossEncoder

# Seq2seq model that generates the final, free-form answer from the
# question plus the collected evidence sentences.
model_name = "MaRiOrOsSi/t5-base-finetuned-question-answering"
tokenizer = AutoTokenizer.from_pretrained(model_name)
# AutoModelWithLMHead is deprecated (removed in recent transformers);
# T5 is a seq2seq LM, so AutoModelForSeq2SeqLM is the correct auto class.
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

# Sentence segmenter used to pull evidence sentences out of a paragraph.
sentence_segmenter = pysbd.Segmenter(language='en', clean=False)
# Cross-encoder that scores (question, paragraph) relevance for retrieval.
# NOTE: name keeps the original "retreival" spelling — it is referenced below.
passage_retreival_model = CrossEncoder('cross-encoder/ms-marco-MiniLM-L-6-v2')
# Extractive QA model that locates the answer span inside a paragraph.
qa_model = pipeline("question-answering", 'a-ware/bart-squadv2')
def fetch_answers(question, clincal_note):
    """Answer a question against a document, with supporting evidence.

    Pipeline: rank the document's paragraphs against the question with a
    cross-encoder, extract an answer span from each of the 5 best paragraphs,
    collect the sentences containing that span as evidence, then let the T5
    model generate a final answer conditioned on question + evidence.

    Args:
        question: the user's question.
        clincal_note: full document text; paragraphs are split on newlines.
            (Parameter name kept as-is — Gradio may bind inputs by name.)

    Returns:
        A markdown string with one "# ANSWER n" / "REFERENCE" section per
        retrieved paragraph, best match first. Empty string if the document
        has no non-blank paragraphs.
    """
    paragraphs = [p for p in clincal_note.splitlines() if p.strip()]
    query_paragraph_list = [(question, para) for para in paragraphs]

    # Relevance score per (question, paragraph) pair. argsort is ascending,
    # so the last 5 indices are the highest scoring; reverse → best first.
    scores = passage_retreival_model.predict(query_paragraph_list)
    top_indices = scores.argsort()[-5:]
    top_pairs = [query_paragraph_list[i] for i in top_indices]
    top_pairs.reverse()

    sections = []
    for count, (query, passage) in enumerate(top_pairs, start=1):
        passage_sentences = sentence_segmenter.segment(passage)
        answer = qa_model(question=query, context=passage)['answer']
        # The extractive model sometimes returns a span with leading '.' or
        # ':'; strip that once up front (the original re-stripped inside the
        # sentence loop) so the substring test below can match sentences.
        answer = answer.lstrip('.:').strip()

        # Evidence = every sentence of the passage containing the answer span.
        evidence_sentence = "".join(
            " " + sentence for sentence in passage_sentences
            if answer in sentence
        )

        # Generate the final answer conditioned on the evidence sentences
        # rather than the whole passage.
        model_input = f"question: {query} context: {evidence_sentence}"
        encoded_input = tokenizer([model_input],
                                  return_tensors='pt',
                                  max_length=512,
                                  truncation=True)
        output = model.generate(input_ids=encoded_input.input_ids,
                                attention_mask=encoded_input.attention_mask)
        output_answer = tokenizer.decode(output[0], skip_special_tokens=True)

        sections.append("# ANSWER " + str(count) + ": " + output_answer + "\n"
                        "REFERENCE: " + evidence_sentence + "\n\n")

    # join once instead of quadratic string +=
    return "".join(sections)
# Build and launch the Gradio UI: two text inputs (question + document),
# markdown output rendered by fetch_answers.
demo = gr.Interface(
    fn=fetch_answers,
    # TODO: take input as real-time audio and use a Whisper model for S2T;
    # TODO: allow the clinical note to be uploaded as a file (.txt/.doc/.docx).
    inputs=[gr.Textbox(lines=2, label='Question', show_label=True),
            gr.Textbox(lines=10, label='Document Text', show_label=True)],
    outputs="markdown",
    # NOTE(review): the original passed examples='.'; Gradio interprets a
    # string as an examples *directory*, and '.' contains no examples data,
    # which raises at load time — so the argument is omitted here.
    title='Document Question Answering System with Evidence from document',
)
demo.launch()