import gradio as gr

from transformers import pipeline

# Extractive QA model fine-tuned on SQuAD 2.0
model_name = "deepset/roberta-base-squad2"

# The question-answering pipeline bundles tokenization, inference, and answer-span decoding
nlp = pipeline("question-answering", model=model_name, tokenizer=model_name)

def chat(context, question):
    # The QA pipeline expects a dict with "question" and "context" keys
    QA_input = {
        "question": question,
        "context": context,
    }
    res = nlp(QA_input)
    # res is a dict containing 'answer', 'score', 'start', and 'end';
    # return only the extracted answer text
    return res["answer"]
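# Quick sanity check you can run before wiring up the UI (the strings below are
# hypothetical examples, not part of the app):
#   chat("Gradio is a Python library for building ML demos.", "What is Gradio?")
# returns the answer span the model extracts from the context.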

screen = gr.Interface(
    fn=chat,
    inputs=[
        gr.Textbox(lines=8, placeholder="Enter your context here"),
        gr.Textbox(lines=2, placeholder="Enter your question here"),
    ],
    outputs=gr.Textbox(lines=10, placeholder="Your answer will appear here"),
    title="Facilitating QnA with roberta-base-squad2",
    description="This app aims to facilitate simple QnA over the provided context.",
    theme="soft",
    article="""
    ### Disclaimer: This model is used purely for QnA. Paste the text you want the answer drawn from into the **context** section,
    then enter your question in the **question** section.
    The answer will appear in the answer section.""",
)

screen.launch()
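# launch() serves the app locally by default; if a shareable URL is needed,
# Gradio also supports screen.launch(share=True) for a temporary public link.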