# Chainlit RAG app: upload TXT/PDF/CSV files, index them into a vector store,
# and answer questions over their contents with a streaming LLM response.
import os
from typing import List
from chainlit.types import AskFileResponse
from aimakerspace.text_utils import CharacterTextSplitter, TextFileLoader
from aimakerspace.openai_utils.prompts import (
UserRolePrompt,
SystemRolePrompt,
AssistantRolePrompt,
)
from aimakerspace.openai_utils.embedding import EmbeddingModel
from aimakerspace.vectordatabase import VectorDatabase
from aimakerspace.openai_utils.chatmodel import ChatOpenAI
import chainlit as cl
import tempfile
import pandas as pd
import pdfplumber
# System prompt: constrains the model to answer only from the retrieved
# context, to admit when the answer is not present, and to produce an
# engaging summary when the user asks for one.
system_template = """\
Use the following context to answer the user's question. If you cannot find the answer in the context,
say you don't know the answer. Additionally, if the user requests a summary or context overview,
generate an engaging and concise summary that captures the main ideas with an interesting and appealing tone.
"""
system_role_prompt = SystemRolePrompt(system_template)
# User prompt: interpolates the retrieved context chunks and the user's
# question (filled in by RetrievalAugmentedQAPipeline.arun_pipeline).
user_prompt_template = """\
Context:
{context}
Question:
{question}
"""
user_role_prompt = UserRolePrompt(user_prompt_template)
class RetrievalAugmentedQAPipeline:
    """Retrieval-augmented QA: fetch the most relevant context chunks from the
    vector database, then stream an LLM answer grounded in that context."""

    # BUG FIX: the annotation was `llm: ChatOpenAI()` — a *call*, which
    # instantiated the chat model at class-definition time. The class itself
    # is the correct annotation.
    def __init__(self, llm: ChatOpenAI, vector_db_retriever: VectorDatabase) -> None:
        self.llm = llm
        self.vector_db_retriever = vector_db_retriever

    async def arun_pipeline(self, user_query: str) -> dict:
        """Answer ``user_query`` using retrieved context.

        Returns a dict with:
            response: async generator yielding streamed LLM response chunks
            context: the raw retriever results (each entry's first element
                is the chunk text)
        """
        # Top-4 most similar chunks for the query.
        context_list = self.vector_db_retriever.search_by_text(user_query, k=4)

        # Join chunk texts with newlines (O(n) join instead of `+=` in a loop).
        context_prompt = "".join(entry[0] + "\n" for entry in context_list)

        formatted_system_prompt = system_role_prompt.create_message()
        formatted_user_prompt = user_role_prompt.create_message(
            question=user_query, context=context_prompt
        )

        async def generate_response():
            async for chunk in self.llm.astream([formatted_system_prompt, formatted_user_prompt]):
                yield chunk

        return {"response": generate_response(), "context": context_list}
# Shared splitter used by every process_*_file helper to chunk raw text.
text_splitter = CharacterTextSplitter()
def process_text_file(file: AskFileResponse):
    """Split an uploaded plain-text file into chunks.

    Writes the uploaded bytes to a temporary ``.txt`` file so TextFileLoader
    can read it from disk, then splits the loaded documents.
    """
    with tempfile.NamedTemporaryFile(mode="wb", delete=False, suffix=".txt") as temp_file:
        temp_file_path = temp_file.name
        # BUG FIX: the original wrote `file.read` — a bare attribute
        # reference, not the uploaded bytes — producing a broken temp file.
        # Use `file.content`, consistent with process_csv_file.
        temp_file.write(file.content)

    text_loader = TextFileLoader(temp_file_path)
    documents = text_loader.load_documents()
    return text_splitter.split_texts(documents)
def process_pdf_file(file: AskFileResponse):
    """Extract text from an uploaded PDF and split it into chunks."""
    # Chainlit stores the upload on disk; read it straight from that path.
    temp_file_path = file.path
    extracted_text = ""
    with pdfplumber.open(temp_file_path) as pdf:
        for page in pdf.pages:
            # BUG FIX: extract_text() returns None for pages with no
            # extractable text (e.g. scanned images), which made the `+=`
            # raise TypeError. Fall back to the empty string.
            extracted_text += page.extract_text() or ""
    return text_splitter.split_texts([extracted_text])
def process_csv_file(file: AskFileResponse):
    """Parse an uploaded CSV and split its rows into text chunks.

    Each row is flattened to a single space-joined string before splitting.
    """
    from io import BytesIO  # stdlib; local import keeps the change self-contained

    # FIX: parse directly from the in-memory upload. The original wrote the
    # bytes to a NamedTemporaryFile(delete=False) that was never removed,
    # leaking a temp file per upload; pd.read_csv accepts a file-like buffer.
    df = pd.read_csv(BytesIO(file.content))
    row_texts = df.apply(lambda row: ' '.join(row.astype(str)), axis=1).tolist()
    return text_splitter.split_texts(row_texts)
@cl.on_chat_start
async def on_chat_start():
    """Ask the user for file uploads and index their text into the session."""
    cl.user_session.set("all_texts", [])

    files = await cl.AskFileMessage(
        content="Please upload one or more Text, PDF, or CSV files to begin!",
        accept=["text/plain", "application/pdf", "text/csv"],
        max_size_mb=20,
        timeout=180,
    ).send()

    if not files:
        await cl.Message(content="No files were uploaded. Please upload at least one file to proceed.").send()
        return

    # Dispatch table: file extension -> processing helper.
    processors = {
        "txt": process_text_file,
        "pdf": process_pdf_file,
        "csv": process_csv_file,
    }

    collected = cl.user_session.get("all_texts", [])
    for uploaded in files:
        extension = uploaded.name.split(".")[-1].lower()

        progress = cl.Message(content=f"Processing `{uploaded.name}`...")
        await progress.send()

        processor = processors.get(extension)
        if processor is None:
            await cl.Message(content=f"Unsupported file type: `{uploaded.name}`. Please upload text, PDF, or CSV files.").send()
            continue

        # Accumulate chunks from every successfully processed file.
        collected.extend(processor(uploaded))

    cl.user_session.set("all_texts", collected)
    await cl.Message(content="Files processed! You can now start asking questions.").send()
@cl.on_message
async def main(message):
    """Answer an incoming chat message via the RAG pipeline, building and
    caching the pipeline in the session on first use."""
    pipeline = cl.user_session.get("chain")

    if not pipeline:
        all_texts = cl.user_session.get("all_texts")
        if not all_texts:
            await cl.Message(content="Please upload at least one file before asking questions.").send()
            return

        # Build the in-memory vector store from the session's indexed texts.
        vector_db = await VectorDatabase().abuild_from_list(all_texts)

        # Assemble the QA chain and cache it for subsequent messages.
        pipeline = RetrievalAugmentedQAPipeline(
            vector_db_retriever=vector_db,
            llm=ChatOpenAI(),
        )
        cl.user_session.set("chain", pipeline)

    reply = cl.Message(content="")
    result = await pipeline.arun_pipeline(message.content)
    # Stream each LLM chunk into the reply as it arrives.
    async for token in result["response"]:
        await reply.stream_token(token)
    await reply.send()
|