|
import gradio as gr |
|
import os |
|
import textwrap |
|
from langchain.embeddings import HuggingFaceEmbeddings |
|
from langchain.llms import OpenAI |
|
from langchain.prompts import PromptTemplate |
|
from langchain.chains import RetrievalQA |
|
from langchain.llms import OpenAI |
|
from langchain.docstore.document import Document |
|
from langchain.vectorstores import FAISS |
|
|
|
|
|
def setup_generator(api_key):
    """Build a RetrievalQA chain over the local FAISS index.

    Parameters
    ----------
    api_key : str
        OpenAI API key; exported to the process environment so the
        ``OpenAI`` LLM client picks it up.

    Returns
    -------
    RetrievalQA
        A "stuff"-type chain that answers in French and returns the
        retrieved source documents alongside the answer.
    """
    os.environ["OPENAI_API_KEY"] = api_key

    # The embedding model must match the one used when "faiss_index" was
    # built, otherwise similarity search is meaningless.
    embeddings = HuggingFaceEmbeddings(
        model_name="dangvantuan/sentence-camembert-base"
    )
    db = FAISS.load_local("faiss_index", embeddings)

    # Fixed wording: "Give many informations as possible" -> proper English.
    prompt_template = """Use the following pieces of context to answer the question at the end.
Give as much information as possible.
If you don't know the answer, just say that you don't know, don't try to make up an answer.

{context}

Question: {question}
Answer in French:"""
    prompt = PromptTemplate(
        template=prompt_template, input_variables=["context", "question"]
    )

    return RetrievalQA.from_chain_type(
        llm=OpenAI(),
        chain_type="stuff",
        retriever=db.as_retriever(),
        chain_type_kwargs={"prompt": prompt},
        return_source_documents=True,
    )
|
|
|
def generate_text(api_key, query):
    """Answer *query* against the FAISS index and report the sources used.

    Parameters
    ----------
    api_key : str
        OpenAI API key, forwarded to :func:`setup_generator`.
    query : str
        The user's question.

    Returns
    -------
    tuple[str, str]
        The answer wrapped to 90 columns, and a newline-separated list
        of source-document titles.
    """
    # NOTE: the chain (embeddings + index load) is rebuilt on every call;
    # acceptable for a demo, but a candidate for caching.
    qa = setup_generator(api_key)
    result = qa({"query": query})

    answer = result["result"]
    # Not every retrieved document is guaranteed to carry a "title" metadata
    # entry; fall back to a placeholder instead of raising KeyError.
    source = "\n".join(
        doc.metadata.get("title", "(untitled)")
        for doc in result["source_documents"]
    )
    formatted_text = "\n".join(textwrap.wrap(answer, width=90))
    return formatted_text, source
|
|
|
# `gr.inputs.*` / `gr.outputs.*` were deprecated in Gradio 3.x and removed
# in 4.x; use the top-level components, with `value=` replacing `default=`.
iface = gr.Interface(
    fn=generate_text,
    inputs=[
        # type="password" keeps the API key masked in the browser.
        gr.Textbox(lines=1, placeholder="Input API Key...",
                   label="API Key", type="password"),
        gr.Textbox(lines=3, placeholder="Input Text...",
                   label="Question",
                   value="comment vois tu le future de l'intelligence artificielle?"),
    ],
    outputs=[
        gr.Textbox(label="Generated Text"),
        gr.Textbox(label="Source"),
    ],
)

iface.launch()
|
|
|
|