"""Gradio RAG chatbot: routes a question to a Milvus collection, retrieves
relevant text chunks, and answers with the FPT.AI SaoLa2M completion API."""
import json
import os
import re

import gradio as gr
import requests
from pymilvus import utility, DataType, FieldSchema, CollectionSchema, Collection
from sentence_transformers import SentenceTransformer

from routing import routing_query
from search import search_chunks
from utils import connect_vector_db, load_collection, convert_query, clean_query
def search(query, path_stop_words, path_define, model_name, topk):
    """Retrieve chunks relevant to *query*.

    Routes the query to a collection, runs the vector search, then scrapes
    each hit's string form for its reference and text fields.

    Returns:
        (refs, relevant_chunks): two parallel lists of strings.
    """
    collection_name = routing_query(query, path_stop_words, path_define)
    hits = search_chunks(query, model_name, collection_name, topk)
    # NOTE(review): parsing str(hit) with a regex is fragile — it breaks if a
    # chunk contains a single quote; consider reading the fields directly.
    chunk_pattern = re.compile(r"'chunk_ref': '([^']*)', 'chunk_text': '([^']*)'")
    refs, relevant_chunks = [], []
    for hit in hits:
        for ref, text in chunk_pattern.findall(str(hit)):
            refs.append(ref)
            relevant_chunks.append(text)
    return refs, relevant_chunks
def response_saola2m(PROMPT, temperature=0.7):
    """Send *PROMPT* to the FPT.AI SaoLa2M completion endpoint.

    Args:
        PROMPT: full prompt string (instruction + context already assembled).
        temperature: sampling temperature forwarded to the API.

    Returns:
        The generated completion text (``choices[0].text``).

    Raises:
        requests.HTTPError: on a non-2xx response.
        requests.Timeout: if the API does not answer within 60 s.
    """
    url = "https://api.fpt.ai/nlp/llm/api/v1/completions"
    # SECURITY: a live API key was hard-coded here. It is kept only as a
    # fallback so existing deployments keep working — set FPT_API_KEY in the
    # environment, then rotate and delete the literal below.
    api_key = os.environ.get("FPT_API_KEY", "sk-8oIY6XLrokZEJMl6aopCuQ")
    headers = {
        'Authorization': f'Bearer {api_key}',
        'Content-Type': 'application/json',
    }
    data = {
        "model": "SaoLa2M-instruct",
        "prompt": PROMPT,
        "temperature": temperature,
        "max_tokens": 512,
    }
    # timeout= keeps a stalled API call from hanging the Gradio worker forever.
    response = requests.post(url, headers=headers, json=data, timeout=60)
    # Fail loudly on HTTP errors instead of a confusing KeyError below.
    response.raise_for_status()
    return response.json()['choices'][0]['text']
# Resources used to route a query to the right collection and embed it.
PATH_STOP_WORDS = 'vi_stopword.txt'  # Vietnamese stop-word list for routing
PATH_DEFINE = 'define.json'  # routing definitions (query -> collection)
MODEL_NAME = 'qminh369/datn-dense_embedding'  # dense-embedding model id
TOPK = 5  # number of chunks retrieved per query
# Open the vector-DB connection once, at import time, before any search runs.
connect_vector_db()
def answer(question):
    """Answer *question* with retrieved context plus an LLM completion.

    Builds an instruction-style prompt from the top retrieved chunks, queries
    the SaoLa2M API at temperature 0, and appends a citation for the first
    retrieved reference when one exists.
    """
    refs, relevant_chunks = search(question, PATH_STOP_WORDS, PATH_DEFINE, MODEL_NAME, TOPK)
    INSTRUCTION = "Hãy trả lời câu hỏi sau dựa trên thông tin được cung cấp. Nếu thông tin được cung cấp không liên quan dến câu hỏi thì trả về câu trả lời 'Không có thông tin'"
    INPUT_TEXT = "\n".join(relevant_chunks)
    PROMPT = f"<s>[INST] Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.\n\n### Instruction:\n{INSTRUCTION}\n\n{question}\n\n### Input:\n{INPUT_TEXT}\n\n[/INST]### Response:\n"
    print(PROMPT)  # debug: log the fully assembled prompt
    response = response_saola2m(PROMPT, temperature=0)
    # Bug fix: refs[0] raised IndexError whenever retrieval found nothing;
    # only append a citation when at least one reference came back.
    if refs:
        response = response + "\n" + "Trích dẫn từ: " + refs[0]
    return response.strip()
def chatbot(question, history=None):
    """Gradio callback: answer *question* and extend the chat *history*.

    Args:
        question: the user's question text.
        history: list of (question, answer) pairs carried in Gradio state;
            ``None`` starts a fresh conversation.

    Returns:
        (history, history): the updated list, duplicated for the chatbot
        display and the state component.
    """
    # Bug fix: the default was a mutable `history=[]`, shared across every
    # call that omitted the argument; use a None sentinel instead.
    history = [] if history is None else history
    output = answer(question)
    history.append((question, output))
    return history, history
# Wire the chatbot callback into a simple Gradio UI: a text box for the
# question plus a hidden "state" input/output that carries the history.
demo = gr.Interface(
    fn=chatbot,
    inputs=["text", "state"],
    outputs=["chatbot", "state"])
# queue() serializes incoming requests; share=True exposes a public link.
demo.queue().launch(share=True)