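"""Gradio app for an MBTI / classic-cocktail Q&A assistant.

Users supply an OpenAI API key, upload PDF reference documents, and chat with a
ConversationalRetrievalChain backed by a Chroma vector store built from those
PDFs.

Dependencies inferred from the imports (exact packages/versions are an
assumption): gradio, openai, langchain, langchain-community, chromadb,
pymupdf, pypdf.
"""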
import logging
import os

import openai
import gradio as gr
from langchain.chains import ConversationalRetrievalChain
from langchain.text_splitter import CharacterTextSplitter
from langchain_community.chat_models import ChatOpenAI
from langchain_community.document_loaders import PyMuPDFLoader, PyPDFLoader
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import Chroma

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

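# Prefer a fixed key from the environment; otherwise fall back to the key the
# user enters in the "設定 API 密鑰" tab.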
api_key_env = os.getenv("OPENAI_API_KEY")
if api_key_env:
    openai.api_key = api_key_env
    logger.info("OpenAI API 密鑰已設置。")
else:
    logger.info("未設置固定的 OpenAI API 密鑰。將使用使用者提供的密鑰。")

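# ./data holds both the uploaded PDFs and the persisted Chroma index.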
VECTORDB_DIR = os.path.abspath("./data")
os.makedirs(VECTORDB_DIR, exist_ok=True)
os.chmod(VECTORDB_DIR, 0o755)
logger.info(f"VECTORDB_DIR set to: {VECTORDB_DIR}")

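# Smoke-test a single PDF with the chosen loader and log what was extracted;
# failures are logged rather than raised.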
def test_pdf_loader(file_path, loader_type='PyMuPDFLoader'):
    logger.info(f"Testing PDF loader ({loader_type}) with file: {file_path}")
    try:
        if loader_type == 'PyMuPDFLoader':
            loader = PyMuPDFLoader(file_path)
        elif loader_type == 'PyPDFLoader':
            loader = PyPDFLoader(file_path)
        else:
            logger.error(f"Unknown loader type: {loader_type}")
            return
        loaded_docs = loader.load()
        if loaded_docs:
            logger.info(f"Successfully loaded {file_path} with {len(loaded_docs)} documents.")
            logger.info(f"Document content (first 500 chars): {loaded_docs[0].page_content[:500]}")
        else:
            logger.error(f"No documents loaded from {file_path}.")
    except Exception as e:
        logger.error(f"Error loading {file_path} with {loader_type}: {e}")

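# Build the retrieval index: load every uploaded PDF, split the pages into
# 1000-character chunks with 50-character overlap, embed them with OpenAI,
# and persist the result in a Chroma vector store.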
def load_and_process_documents(file_paths, loader_type='PyMuPDFLoader', api_key=None):
    if not api_key:
        raise ValueError("未提供 OpenAI API 密鑰。")
    documents = []
    logger.info("開始載入上傳的 PDF 文件。")

    for file_path in file_paths:
        logger.info(f"載入 PDF 文件: {file_path}")
        if not os.path.exists(file_path):
            logger.error(f"文件不存在: {file_path}")
            continue
        try:
            if loader_type == 'PyMuPDFLoader':
                loader = PyMuPDFLoader(file_path)
            elif loader_type == 'PyPDFLoader':
                loader = PyPDFLoader(file_path)
            else:
                logger.error(f"Unknown loader type: {loader_type}")
                continue
            loaded_docs = loader.load()
            if loaded_docs:
                logger.info(f"載入 {file_path} 成功,包含 {len(loaded_docs)} 個文檔。")
                logger.info(f"第一個文檔內容: {loaded_docs[0].page_content[:500]}")
                documents.extend(loaded_docs)
            else:
                logger.error(f"載入 {file_path} 但未找到任何文檔。")
        except Exception as e:
            logger.error(f"載入 {file_path} 時出現錯誤: {e}")

    if not documents:
        raise ValueError("沒有找到任何 PDF 文件或 PDF 文件無法載入。")
    else:
        logger.info(f"總共載入了 {len(documents)} 個文檔。")

    text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=50)
    documents = text_splitter.split_documents(documents)
    logger.info(f"分割後的文檔數量: {len(documents)}")

    if not documents:
        raise ValueError("分割後的文檔列表為空。請檢查 PDF 文件內容。")

    try:
        embeddings = OpenAIEmbeddings(openai_api_key=api_key)
        logger.info("初始化 OpenAIEmbeddings 成功。")
    except Exception as e:
        raise ValueError(f"初始化 OpenAIEmbeddings 時出現錯誤: {e}")

    try:
        vectordb = Chroma.from_documents(
            documents,
            embedding=embeddings,
            persist_directory=VECTORDB_DIR
        )
        logger.info("初始化 Chroma 向量資料庫成功。")
    except Exception as e:
        raise ValueError(f"初始化 Chroma 向量資料庫時出現錯誤: {e}")

    return vectordb

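# Answer a single question. A ConversationalRetrievalChain is rebuilt on every
# call, and a fixed Traditional-Chinese preface restricts answers to the MBTI /
# classic-cocktail scope defined in the prompt.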
def handle_query(user_message, chat_history, vectordb, api_key):
    try:
        if not user_message:
            return chat_history

        preface = """
        指令: 以繁體中文回答問題,200字以內。你是一位專業心理學家與調酒師,專精於 MBTI 人格與經典調酒主題。
        非相關問題,請回應:「目前僅支援 MBTI 分析與經典調酒主題。」。
        """
        query = f"{preface} 查詢內容:{user_message}"

        pdf_qa = ConversationalRetrievalChain.from_llm(
            ChatOpenAI(temperature=0.7, model="gpt-4", openai_api_key=api_key),
            retriever=vectordb.as_retriever(search_kwargs={'k': 6}),
            return_source_documents=True
        )

        # gr.Chatbot passes the history as two-element lists, while the chain
        # expects (human, ai) tuples, so convert before invoking.
        history_tuples = [tuple(turn) for turn in chat_history]
        result = pdf_qa.invoke({"question": query, "chat_history": history_tuples})

        if "answer" in result:
            chat_history = chat_history + [(user_message, result["answer"])]
        else:
            chat_history = chat_history + [(user_message, "抱歉,未能獲得有效回應。")]
        return chat_history

    except Exception as e:
        logger.error(f"Error in handle_query: {e}")
        return chat_history + [("系統", f"出現錯誤: {str(e)}")]

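# Store the user's own OpenAI key in the per-session state after a minimal
# "sk-" prefix check.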
def save_api_key(api_key, state):
    if not api_key.startswith("sk-"):
        return "請輸入有效的 OpenAI API 密鑰。", state
    state['api_key'] = api_key
    logger.info("使用者已保存自己的 OpenAI API 密鑰。")
    return "API 密鑰已成功保存。您現在可以上傳 PDF 文件並開始提問。", state

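# Handle the "上傳並處理" button: write each uploaded PDF to VECTORDB_DIR,
# sanity-check it with the loaders, then build the vector store and cache it
# in the session state.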
def process_files(files, state):
    logger.info("process_files called")
    if files:
        try:
            api_key = state.get('api_key', None)
            if not api_key:
                logger.error("使用者未提供 OpenAI API 密鑰。")
                return "請先在「設定 API 密鑰」標籤中輸入並保存您的 OpenAI API 密鑰。", state

            logger.info(f"Received {len(files)} files")
            saved_file_paths = []
            for idx, file_data in enumerate(files):
                # gr.File(type="binary") delivers raw bytes, so the original
                # filenames are not available here.
                filename = f"uploaded_{idx}.pdf"
                save_path = os.path.join(VECTORDB_DIR, filename)
                with open(save_path, "wb") as f:
                    f.write(file_data)

                if os.path.exists(save_path):
                    file_size = os.path.getsize(save_path)
                    if file_size > 0:
                        logger.info(f"File successfully saved to: {save_path} (Size: {file_size} bytes)")
                    else:
                        logger.error(f"File saved to {save_path} is empty.")
                        raise ValueError(f"上傳的文件 {filename} 為空。")
                else:
                    logger.error(f"Failed to save file to: {save_path}")
                    raise FileNotFoundError(f"無法保存文件到 {save_path}")
                saved_file_paths.append(save_path)

                try:
                    test_pdf_loader(save_path, loader_type='PyMuPDFLoader')
                except Exception as e:
                    logger.error(f"PyMuPDFLoader failed: {e}")
                    logger.info("Attempting to load with PyPDFLoader...")
                    test_pdf_loader(save_path, loader_type='PyPDFLoader')

            saved_files = os.listdir(VECTORDB_DIR)
            logger.info(f"Files in VECTORDB_DIR ({VECTORDB_DIR}): {saved_files}")

            file_sizes = {file: os.path.getsize(os.path.join(VECTORDB_DIR, file)) for file in saved_files}
            logger.info(f"File sizes in VECTORDB_DIR: {file_sizes}")
            vectordb = load_and_process_documents(saved_file_paths, loader_type='PyMuPDFLoader', api_key=api_key)
            state['vectordb'] = vectordb
            return "PDF 文件已成功上傳並處理。您現在可以開始提問。", state
        except Exception as e:
            logger.error(f"Error in process_files: {e}")
            return f"處理文件時出現錯誤: {e}", state
    else:
        return "請上傳至少一個 PDF 文件。", state

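# Glue between the Gradio chat tab and handle_query: refuse to answer until
# both a vector store and an API key are present in the session state.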
def chat_interface(user_message, chat_history, state):
    vectordb = state.get('vectordb', None)
    api_key = state.get('api_key', None)
    if not vectordb:
        return chat_history, state, "請先上傳 PDF 文件以進行處理。"
    if not api_key:
        return chat_history, state, "請先在「設定 API 密鑰」標籤中輸入並保存您的 OpenAI API 密鑰。"

    updated_history = handle_query(user_message, chat_history, vectordb, api_key)
    return updated_history, state, ""

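# UI: three tabs (API key, PDF upload, chatbot) sharing one gr.State dict that
# holds the user's key and the Chroma vector store for the session.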
with gr.Blocks() as demo:
    gr.Markdown("<h1 style='text-align: center;'>MBTI 與經典調酒 AI 助理</h1>")

    state = gr.State({"vectordb": None, "api_key": None})

    with gr.Tab("設定 API 密鑰"):
        with gr.Row():
            with gr.Column(scale=1):
                api_key_input = gr.Textbox(
                    label="輸入您的 OpenAI API 密鑰",
                    placeholder="sk-...",
                    type="password",
                    interactive=True
                )
                save_api_key_btn = gr.Button("保存 API 密鑰")
                api_key_status = gr.Textbox(label="狀態", interactive=False)

    with gr.Tab("上傳 PDF 文件"):
        with gr.Row():
            with gr.Column(scale=1):
                upload = gr.File(
                    file_count="multiple",
                    file_types=[".pdf"],
                    label="上傳 PDF 文件",
                    interactive=True,
                    type="binary"
                )
                upload_btn = gr.Button("上傳並處理")
                upload_status = gr.Textbox(label="上傳狀態", interactive=False)

with gr.Tab("聊天機器人"): |
|
chatbot = gr.Chatbot() |
|
|
|
with gr.Row(): |
|
with gr.Column(scale=0.85): |
|
txt = gr.Textbox(show_label=False, placeholder="請輸入您的問題...") |
|
with gr.Column(scale=0.15, min_width=0): |
|
submit_btn = gr.Button("提問") |
|
|
|
|
|
submit_btn.click( |
|
chat_interface, |
|
inputs=[txt, chatbot, state], |
|
outputs=[chatbot, state, txt] |
|
) |
|
|
|
|
|
txt.submit( |
|
chat_interface, |
|
inputs=[txt, chatbot, state], |
|
outputs=[chatbot, state, txt] |
|
) |
|
|
|
|
|
save_api_key_btn.click( |
|
save_api_key, |
|
inputs=[api_key_input, state], |
|
outputs=[api_key_status, state] |
|
) |
|
|
|
|
|
upload_btn.click( |
|
process_files, |
|
inputs=[upload, state], |
|
outputs=[upload_status, state] |
|
) |
|
|
|
|
|
demo.launch() |