from model.propmt.prompt_handler import Prompt
from model.llm.llm import LLM_API_Call
from model.rag.rag_handler import *
from config import *
class Chat:
    """A single chat session that answers questions with RAG-augmented LLM responses."""

    def __init__(self, chat_id, rag_handler) -> None:
        self.chat_id = chat_id
        self.message_history = []
        self.response_history = []
        self.prompt_handler = Prompt()
        self.llm = LLM_API_Call("gilas")
        self.rag_handler = rag_handler

    def response(self, message: str) -> str:
        """Answer a user message, appending any retrieved legal cases to the reply."""
        self.message_history.append(message)
        # Retrieve supporting documents for the message, then build the LLM prompt from them.
        info_list = self.rag_handler.get_information(message)
        prompt = self.prompt_handler.get_prompt(message, info_list)
        llm_response = self.llm.get_LLM_response(prompt=prompt)
        final_response = f"**Response**:\n{llm_response}\n\n"
        if info_list:
            final_response += "The following legal cases and information were retrieved and considered:\n"
            for i, info in enumerate(info_list):
                # Remove the "[end]" marker from the retrieved case text before display.
                case_text = info["text"].replace("[end]", "")
                final_response += f"\n**Case {i + 1}:** {info['title']}\n{case_text}\n"
        self.response_history.append(final_response)
        return final_response
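

# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original file). It assumes a stand-in
# RAG handler whose get_information() returns dicts with "title" and "text"
# keys, matching how Chat.response() consumes them. FakeRAGHandler below is
# hypothetical and only illustrates that interface; the call still goes
# through the file's real Prompt and LLM_API_Call dependencies.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    class FakeRAGHandler:
        def get_information(self, message: str) -> list[dict]:
            # Hypothetical stub: a real handler would run retrieval here.
            return [{"title": "Sample v. Example", "text": "Case summary...[end]"}]

    chat = Chat(chat_id=1, rag_handler=FakeRAGHandler())
    print(chat.response("What does the sample case say?"))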