oceansweep committed • Commit 8a348ee • Parent(s): d988bf2
Update App_Function_Libraries/RAG/RAG_Library_2.py
App_Function_Libraries/RAG/RAG_Library_2.py
CHANGED
@@ -1,795 +1,795 @@
```python
# RAG_Library_2.py
# Description: This script contains the main RAG pipeline function and related functions for the RAG pipeline.
#
# Import necessary modules and functions
import configparser
import logging
import os
import time
from typing import Dict, Any, List, Optional

from App_Function_Libraries.DB.Character_Chat_DB import get_character_chats, perform_full_text_search_chat, \
    fetch_keywords_for_chats, search_character_chat, search_character_cards, fetch_character_ids_by_keywords
from App_Function_Libraries.DB.RAG_QA_Chat_DB import search_rag_chat, search_rag_notes
#
# Local Imports
from App_Function_Libraries.RAG.ChromaDB_Library import process_and_store_content, vector_search, chroma_client
from App_Function_Libraries.RAG.RAG_Persona_Chat import perform_vector_search_chat
from App_Function_Libraries.Summarization.Local_Summarization_Lib import summarize_with_custom_openai
from App_Function_Libraries.Web_Scraping.Article_Extractor_Lib import scrape_article
from App_Function_Libraries.DB.DB_Manager import fetch_keywords_for_media, search_media_db, get_notes_by_keywords, \
    search_conversations_by_keywords
from App_Function_Libraries.Utils.Utils import load_comprehensive_config
from App_Function_Libraries.Metrics.metrics_logger import log_counter, log_histogram
#
# 3rd-Party Imports
import openai
from flashrank import Ranker, RerankRequest
#
########################################################################################################################
#
# Functions:

# Initialize OpenAI client (adjust this based on your API key management)
openai.api_key = "your-openai-api-key"

# Get the directory of the current script
current_dir = os.path.dirname(os.path.abspath(__file__))
# Construct the path to the config file
config_path = os.path.join(current_dir, 'Config_Files', 'config.txt')
# Read the config file
config = configparser.ConfigParser()
# Read the configuration file
config.read(config_path)
```
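For reference, a minimal sketch of the configuration this module expects; the section and key names below are exactly the ones this file reads (`Embeddings/provider`, `API/*_api_key`, `Local-API/*`), while every value is a placeholder:

```python
# Hypothetical illustration of the keys this module looks up in
# Config_Files/config.txt. Section/key names come from the lookups in this
# file; all values here are placeholders, not real settings.
import configparser

cfg = configparser.ConfigParser()
cfg.read_string("""
[Embeddings]
provider = openai

[API]
openai_api_key = sk-placeholder
custom_openai_api_key = placeholder

[Local-API]
local_llm_path = placeholder
ollama_api_IP = http://127.0.0.1:11434
ollama_api_key = placeholder
""")
# Matches the fallback behavior used in the pipeline below.
assert cfg.get('Embeddings', 'provider', fallback='openai') == 'openai'
```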
```python

search_functions = {
    "Media DB": search_media_db,
    "RAG Chat": search_rag_chat,
    "RAG Notes": search_rag_notes,
    "Character Chat": search_character_chat,
    "Character Cards": search_character_cards
}

# RAG pipeline function for web scraping
# def rag_web_scraping_pipeline(url: str, query: str, api_choice=None) -> Dict[str, Any]:
#     try:
#         # Extract content
#         try:
#             article_data = scrape_article(url)
#             content = article_data['content']
#             title = article_data['title']
#         except Exception as e:
#             logging.error(f"Error scraping article: {str(e)}")
#             return {"error": "Failed to scrape article", "details": str(e)}
#
#         # Store the article in the database and get the media_id
#         try:
#             media_id = add_media_to_database(url, title, 'article', content)
#         except Exception as e:
#             logging.error(f"Error adding article to database: {str(e)}")
#             return {"error": "Failed to store article in database", "details": str(e)}
#
#         # Process and store content
#         collection_name = f"article_{media_id}"
#         try:
#             # Assuming you have a database object available, let's call it 'db'
#             db = get_database_connection()
#
#             process_and_store_content(
#                 database=db,
#                 content=content,
#                 collection_name=collection_name,
#                 media_id=media_id,
#                 file_name=title,
#                 create_embeddings=True,
#                 create_contextualized=True,
#                 api_name=api_choice
#             )
#         except Exception as e:
#             logging.error(f"Error processing and storing content: {str(e)}")
#             return {"error": "Failed to process and store content", "details": str(e)}
#
#         # Perform searches
#         try:
#             vector_results = vector_search(collection_name, query, k=5)
#             fts_results = search_db(query, ["content"], "", page=1, results_per_page=5)
#         except Exception as e:
#             logging.error(f"Error performing searches: {str(e)}")
#             return {"error": "Failed to perform searches", "details": str(e)}
#
#         # Combine results with error handling for missing 'content' key
#         all_results = []
#         for result in vector_results + fts_results:
#             if isinstance(result, dict) and 'content' in result:
#                 all_results.append(result['content'])
#             else:
#                 logging.warning(f"Unexpected result format: {result}")
#                 all_results.append(str(result))
#
#         context = "\n".join(all_results)
#
#         # Generate answer using the selected API
#         try:
#             answer = generate_answer(api_choice, context, query)
#         except Exception as e:
#             logging.error(f"Error generating answer: {str(e)}")
#             return {"error": "Failed to generate answer", "details": str(e)}
#
#         return {
#             "answer": answer,
#             "context": context
#         }
#
#     except Exception as e:
#         logging.error(f"Unexpected error in rag_pipeline: {str(e)}")
#         return {"error": "An unexpected error occurred", "details": str(e)}


# RAG Search with keyword filtering
# FIXME - Update each called function to support modifiable top-k results
def enhanced_rag_pipeline(
        query: str,
        api_choice: str,
        keywords: Optional[str] = None,
        fts_top_k: int = 10,
        apply_re_ranking: bool = True,
        database_types: List[str] = ["Media DB"]
) -> Dict[str, Any]:
    """
    Run the full RAG pipeline: retrieve from the specified databases, optionally re-rank, and generate an answer.

    Args:
        query: Search query string
        api_choice: API to use for generating the response
        keywords: Optional comma-separated keyword string used to filter results
        fts_top_k: Maximum number of results to return
        apply_re_ranking: Whether to apply re-ranking to results
        database_types: List of database types to search

    Returns:
        Dictionary containing the generated answer and the context used to produce it
    """
    log_counter("enhanced_rag_pipeline_attempt", labels={"api_choice": api_choice})
    start_time = time.time()

    try:
        # Load embedding provider from config, or fallback to 'openai'
        embedding_provider = config.get('Embeddings', 'provider', fallback='openai')
        logging.debug(f"Using embedding provider: {embedding_provider}")

        # Initialize relevant IDs dictionary
        relevant_ids: Dict[str, Optional[List[str]]] = {}

        # Process keywords if provided
        if keywords:
            keyword_list = [k.strip().lower() for k in keywords.split(',')]
            logging.debug(f"enhanced_rag_pipeline - Keywords: {keyword_list}")

            try:
                for db_type in database_types:
                    if db_type == "Media DB":
                        media_ids = fetch_relevant_media_ids(keyword_list)
                        relevant_ids[db_type] = [str(id_) for id_ in media_ids]
                    elif db_type == "RAG Chat":
                        conversations, _, _ = search_conversations_by_keywords(keywords=keyword_list)
                        relevant_ids[db_type] = [str(conv['conversation_id']) for conv in conversations]
                    elif db_type == "RAG Notes":
                        notes, _, _ = get_notes_by_keywords(keyword_list)
                        relevant_ids[db_type] = [str(note_id) for note_id, _, _, _ in notes]
                    elif db_type == "Character Chat":
                        relevant_ids[db_type] = [str(id_) for id_ in fetch_keywords_for_chats(keyword_list)]
                    elif db_type == "Character Cards":
                        relevant_ids[db_type] = [str(id_) for id_ in fetch_character_ids_by_keywords(keyword_list)]
                    else:
                        logging.error(f"Unsupported database type: {db_type}")

                    logging.debug(f"enhanced_rag_pipeline - {db_type} relevant IDs: {relevant_ids[db_type]}")
            except Exception as e:
                logging.error(f"Error fetching relevant IDs: {str(e)}")
                relevant_ids = {db_type: None for db_type in database_types}
        else:
            relevant_ids = {db_type: None for db_type in database_types}

        # Perform vector search
        vector_results = []
        for db_type in database_types:
            try:
                db_relevant_ids = relevant_ids.get(db_type)
                results = perform_vector_search(query, db_relevant_ids, top_k=fts_top_k)
                vector_results.extend(results)
                logging.debug(f"\nenhanced_rag_pipeline - Vector search results for {db_type}: {results}")
            except Exception as e:
                logging.error(f"Error performing vector search on {db_type}: {str(e)}")

        # Perform vector search
        # FIXME
        #vector_results = perform_vector_search(query, relevant_media_ids)
        #logging.debug(f"\n\nenhanced_rag_pipeline - Vector search results: {vector_results}")

        # Perform full-text search
        #v1
        #fts_results = perform_full_text_search(query, database_type, relevant_media_ids, fts_top_k)

        # v2
        # Perform full-text search across specified databases
        fts_results = []
        for db_type in database_types:
            try:
                db_relevant_ids = relevant_ids.get(db_type)
                db_results = perform_full_text_search(query, db_type, db_relevant_ids, fts_top_k)
                fts_results.extend(db_results)
                logging.debug(f"enhanced_rag_pipeline - FTS results for {db_type}: {db_results}")
            except Exception as e:
                logging.error(f"Error performing full-text search on {db_type}: {str(e)}")

        #logging.debug("\n\nenhanced_rag_pipeline - Full-text search results:")
        logging.debug(
            "\n\nenhanced_rag_pipeline - Full-text search results:\n" + "\n".join(
                [str(item) for item in fts_results]) + "\n"
        )

        # Combine results
        all_results = vector_results + fts_results

        # FIXME - specify model + add param to modify at call time
        # You can specify a model if necessary, e.g., model_name="ms-marco-MiniLM-L-12-v2"
        # Apply re-ranking if enabled and results exist
        if apply_re_ranking and all_results:
            logging.debug("\nenhanced_rag_pipeline - Applying Re-Ranking")

            ranker = Ranker()

            # Prepare passages for re-ranking
            passages = [{"id": i, "text": result['content']} for i, result in enumerate(all_results)]
            rerank_request = RerankRequest(query=query, passages=passages)

            # Rerank the results
            reranked_results = ranker.rerank(rerank_request)

            # Sort results based on the re-ranking score
            reranked_results = sorted(reranked_results, key=lambda x: x['score'], reverse=True)

            # Log reranked results
            logging.debug(f"\n\nenhanced_rag_pipeline - Reranked results: {reranked_results}")

            # Update all_results based on reranking
            all_results = [all_results[result['id']] for result in reranked_results]

        # Extract content from results (top fts_top_k by default)
        context = "\n".join([result['content'] for result in all_results[:fts_top_k]])
        #logging.debug(f"Context length: {len(context)}")
        logging.debug(f"Context: {context[:200]}")

        # Generate answer using the selected API
        answer = generate_answer(api_choice, context, query)

        if not all_results:
            logging.info(f"No results found. Query: {query}, Keywords: {keywords}")
            return {
                "answer": "No relevant information based on your query and keywords was found in the database. Your query has been directly passed to the LLM, and here is its answer: \n\n" + answer,
                "context": "No relevant information based on your query and keywords was found in the database. The only context used was your query: \n\n" + query
            }

        # Log metrics
        pipeline_duration = time.time() - start_time
        log_histogram("enhanced_rag_pipeline_duration", pipeline_duration, labels={"api_choice": api_choice})
        log_counter("enhanced_rag_pipeline_success", labels={"api_choice": api_choice})

        return {
            "answer": answer,
            "context": context
        }

    except Exception as e:
        log_counter("enhanced_rag_pipeline_error", labels={"api_choice": api_choice, "error": str(e)})
        logging.error(f"Error in enhanced_rag_pipeline: {str(e)}")
        return {
            "answer": "An error occurred while processing your request.",
            "context": ""
        }
```
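For orientation, a minimal usage sketch of the entry point above; the query, keyword, and API-choice values are illustrative, not taken from the commit:

```python
# Hypothetical usage sketch for enhanced_rag_pipeline — all argument values
# below are illustrative; it assumes the config and databases are populated.
result = enhanced_rag_pipeline(
    query="What did the article say about embeddings?",
    api_choice="OpenAI",
    keywords="rag, embeddings",               # comma-separated keyword filter
    fts_top_k=10,                             # cap on results used for context
    apply_re_ranking=True,                    # re-rank combined hits with flashrank
    database_types=["Media DB", "RAG Notes"],
)
print(result["answer"])   # generated answer
print(result["context"])  # retrieved context the answer was grounded on
```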
```python


# Need to write a test for this function FIXME
def generate_answer(api_choice: str, context: str, query: str) -> str:
    # Metrics
    log_counter("generate_answer_attempt", labels={"api_choice": api_choice})
    start_time = time.time()
    logging.debug("Entering generate_answer function")
    config = load_comprehensive_config()
    logging.debug(f"Config sections: {config.sections()}")
    prompt = f"Context: {context}\n\nQuestion: {query}"
    try:
        if api_choice == "OpenAI":
            from App_Function_Libraries.Summarization.Summarization_General_Lib import summarize_with_openai
            answer_generation_duration = time.time() - start_time
            log_histogram("generate_answer_duration", answer_generation_duration, labels={"api_choice": api_choice})
            log_counter("generate_answer_success", labels={"api_choice": api_choice})
            return summarize_with_openai(config['API']['openai_api_key'], prompt, "")

        elif api_choice == "Anthropic":
            from App_Function_Libraries.Summarization.Summarization_General_Lib import summarize_with_anthropic
            answer_generation_duration = time.time() - start_time
            log_histogram("generate_answer_duration", answer_generation_duration, labels={"api_choice": api_choice})
            log_counter("generate_answer_success", labels={"api_choice": api_choice})
            return summarize_with_anthropic(config['API']['anthropic_api_key'], prompt, "")

        elif api_choice == "Cohere":
            from App_Function_Libraries.Summarization.Summarization_General_Lib import summarize_with_cohere
            answer_generation_duration = time.time() - start_time
            log_histogram("generate_answer_duration", answer_generation_duration, labels={"api_choice": api_choice})
            log_counter("generate_answer_success", labels={"api_choice": api_choice})
            return summarize_with_cohere(config['API']['cohere_api_key'], prompt, "")

        elif api_choice == "Groq":
            from App_Function_Libraries.Summarization.Summarization_General_Lib import summarize_with_groq
            answer_generation_duration = time.time() - start_time
            log_histogram("generate_answer_duration", answer_generation_duration, labels={"api_choice": api_choice})
            log_counter("generate_answer_success", labels={"api_choice": api_choice})
            return summarize_with_groq(config['API']['groq_api_key'], prompt, "")

        elif api_choice == "OpenRouter":
            from App_Function_Libraries.Summarization.Summarization_General_Lib import summarize_with_openrouter
            answer_generation_duration = time.time() - start_time
            log_histogram("generate_answer_duration", answer_generation_duration, labels={"api_choice": api_choice})
            log_counter("generate_answer_success", labels={"api_choice": api_choice})
            return summarize_with_openrouter(config['API']['openrouter_api_key'], prompt, "")

        elif api_choice == "HuggingFace":
            from App_Function_Libraries.Summarization.Summarization_General_Lib import summarize_with_huggingface
            answer_generation_duration = time.time() - start_time
            log_histogram("generate_answer_duration", answer_generation_duration, labels={"api_choice": api_choice})
            log_counter("generate_answer_success", labels={"api_choice": api_choice})
            return summarize_with_huggingface(config['API']['huggingface_api_key'], prompt, "")

        elif api_choice == "DeepSeek":
            from App_Function_Libraries.Summarization.Summarization_General_Lib import summarize_with_deepseek
            answer_generation_duration = time.time() - start_time
            log_histogram("generate_answer_duration", answer_generation_duration, labels={"api_choice": api_choice})
            log_counter("generate_answer_success", labels={"api_choice": api_choice})
            return summarize_with_deepseek(config['API']['deepseek_api_key'], prompt, "")

        elif api_choice == "Mistral":
            from App_Function_Libraries.Summarization.Summarization_General_Lib import summarize_with_mistral
            answer_generation_duration = time.time() - start_time
            log_histogram("generate_answer_duration", answer_generation_duration, labels={"api_choice": api_choice})
            log_counter("generate_answer_success", labels={"api_choice": api_choice})
            return summarize_with_mistral(config['API']['mistral_api_key'], prompt, "")

        # Local LLM APIs
        elif api_choice == "Local-LLM":
            from App_Function_Libraries.Summarization.Local_Summarization_Lib import summarize_with_local_llm
            answer_generation_duration = time.time() - start_time
            log_histogram("generate_answer_duration", answer_generation_duration, labels={"api_choice": api_choice})
            log_counter("generate_answer_success", labels={"api_choice": api_choice})
            # FIXME
            return summarize_with_local_llm(config['Local-API']['local_llm_path'], prompt, "")

        elif api_choice == "Llama.cpp":
            from App_Function_Libraries.Summarization.Local_Summarization_Lib import summarize_with_llama
            answer_generation_duration = time.time() - start_time
            log_histogram("generate_answer_duration", answer_generation_duration, labels={"api_choice": api_choice})
            log_counter("generate_answer_success", labels={"api_choice": api_choice})
            return summarize_with_llama(prompt, "", config['Local-API']['llama_api_key'], None, None)
        elif api_choice == "Kobold":
            from App_Function_Libraries.Summarization.Local_Summarization_Lib import summarize_with_kobold
            answer_generation_duration = time.time() - start_time
            log_histogram("generate_answer_duration", answer_generation_duration, labels={"api_choice": api_choice})
            log_counter("generate_answer_success", labels={"api_choice": api_choice})
            return summarize_with_kobold(prompt, config['Local-API']['kobold_api_key'], "", system_message=None, temp=None)

        elif api_choice == "Ooba":
            from App_Function_Libraries.Summarization.Local_Summarization_Lib import summarize_with_oobabooga
            answer_generation_duration = time.time() - start_time
            log_histogram("generate_answer_duration", answer_generation_duration, labels={"api_choice": api_choice})
            log_counter("generate_answer_success", labels={"api_choice": api_choice})
            return summarize_with_oobabooga(prompt, config['Local-API']['ooba_api_key'], custom_prompt="", system_message=None, temp=None)

        elif api_choice == "TabbyAPI":
            from App_Function_Libraries.Summarization.Local_Summarization_Lib import summarize_with_tabbyapi
            answer_generation_duration = time.time() - start_time
            log_histogram("generate_answer_duration", answer_generation_duration, labels={"api_choice": api_choice})
            log_counter("generate_answer_success", labels={"api_choice": api_choice})
            return summarize_with_tabbyapi(prompt, None, None, None, None)

        elif api_choice == "vLLM":
            from App_Function_Libraries.Summarization.Local_Summarization_Lib import summarize_with_vllm
            answer_generation_duration = time.time() - start_time
            log_histogram("generate_answer_duration", answer_generation_duration, labels={"api_choice": api_choice})
            log_counter("generate_answer_success", labels={"api_choice": api_choice})
            return summarize_with_vllm(prompt, "", config['Local-API']['vllm_api_key'], None, None)

        elif api_choice.lower() == "ollama":
            from App_Function_Libraries.Summarization.Local_Summarization_Lib import summarize_with_ollama
            answer_generation_duration = time.time() - start_time
            log_histogram("generate_answer_duration", answer_generation_duration, labels={"api_choice": api_choice})
            log_counter("generate_answer_success", labels={"api_choice": api_choice})
            return summarize_with_ollama(prompt, "", config['Local-API']['ollama_api_IP'], config['Local-API']['ollama_api_key'], None, None, None)

        elif api_choice.lower() == "custom_openai_api":
            logging.debug("RAG Answer Gen: Trying with Custom_OpenAI API")
            return summarize_with_custom_openai(prompt, "", config['API']['custom_openai_api_key'], None, None)
        else:
            log_counter("generate_answer_error", labels={"api_choice": api_choice, "error": "unsupported API choice"})
            raise ValueError(f"Unsupported API choice: {api_choice}")
    except Exception as e:
        log_counter("generate_answer_error", labels={"api_choice": api_choice, "error": str(e)})
        logging.error(f"Error in generate_answer: {str(e)}")
        return "An error occurred while generating the answer."
```
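One thing worth noting: every provider branch above records `generate_answer_duration` and `generate_answer_success` before the `summarize_with_*` call actually runs, so the histogram measures only setup time and "success" is counted even if the call then fails. A hedged sketch of a wrapper that would time the call itself (an assumption about intent, not part of this commit):

```python
# Sketch only: wrap the provider call so duration/success reflect the call
# itself. `summarize` stands in for any summarize_with_* function above;
# relies on this module's time/log_histogram/log_counter imports.
def _timed_answer(api_choice: str, summarize, *args) -> str:
    start = time.time()
    answer = summarize(*args)  # the actual provider call
    log_histogram("generate_answer_duration", time.time() - start, labels={"api_choice": api_choice})
    log_counter("generate_answer_success", labels={"api_choice": api_choice})
    return answer
```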
```python


def perform_vector_search(query: str, relevant_media_ids: List[str] = None, top_k=10) -> List[Dict[str, Any]]:
    log_counter("perform_vector_search_attempt")
    start_time = time.time()
    all_collections = chroma_client.list_collections()
    vector_results = []
    try:
        for collection in all_collections:
            collection_results = vector_search(collection.name, query, k=top_k)
            if not collection_results:
                continue  # Skip empty results
            filtered_results = [
                result for result in collection_results
                if relevant_media_ids is None or result['metadata'].get('media_id') in relevant_media_ids
            ]
            vector_results.extend(filtered_results)
        search_duration = time.time() - start_time
        log_histogram("perform_vector_search_duration", search_duration)
        log_counter("perform_vector_search_success", labels={"result_count": len(vector_results)})
        return vector_results
    except Exception as e:
        log_counter("perform_vector_search_error", labels={"error": str(e)})
        logging.error(f"Error in perform_vector_search: {str(e)}")
        raise


# V2
def perform_full_text_search(query: str, database_type: str, relevant_ids: List[str] = None, fts_top_k=None) -> List[Dict[str, Any]]:
    """
    Perform full-text search on a specified database type.

    Args:
        query: Search query string
        database_type: Type of database to search ("Media DB", "RAG Chat", "RAG Notes", "Character Chat", "Character Cards")
        relevant_ids: Optional list of media IDs to filter results
        fts_top_k: Maximum number of results to return

    Returns:
        List of search results with content and metadata
    """
    log_counter("perform_full_text_search_attempt", labels={"database_type": database_type})
    start_time = time.time()

    try:
        # Set default for fts_top_k
        if fts_top_k is None:
            fts_top_k = 10

        # Call appropriate search function based on database type
        if database_type not in search_functions:
            raise ValueError(f"Unsupported database type: {database_type}")

        # Call the appropriate search function
        results = search_functions[database_type](query, fts_top_k, relevant_ids)

        search_duration = time.time() - start_time
        log_histogram("perform_full_text_search_duration", search_duration,
                      labels={"database_type": database_type})
        log_counter("perform_full_text_search_success",
                    labels={"database_type": database_type, "result_count": len(results)})

        return results

    except Exception as e:
        log_counter("perform_full_text_search_error",
                    labels={"database_type": database_type, "error": str(e)})
        logging.error(f"Error in perform_full_text_search ({database_type}): {str(e)}")
        raise


# v1
# def perform_full_text_search(query: str, relevant_media_ids: List[str] = None, fts_top_k=None) -> List[Dict[str, Any]]:
#     log_counter("perform_full_text_search_attempt")
#     start_time = time.time()
#     try:
#         fts_results = search_db(query, ["content"], "", page=1, results_per_page=fts_top_k or 10)
#         filtered_fts_results = [
#             {
#                 "content": result['content'],
#                 "metadata": {"media_id": result['id']}
#             }
#             for result in fts_results
#             if relevant_media_ids is None or result['id'] in relevant_media_ids
#         ]
#         search_duration = time.time() - start_time
#         log_histogram("perform_full_text_search_duration", search_duration)
#         log_counter("perform_full_text_search_success", labels={"result_count": len(filtered_fts_results)})
#         return filtered_fts_results
#     except Exception as e:
#         log_counter("perform_full_text_search_error", labels={"error": str(e)})
#         logging.error(f"Error in perform_full_text_search: {str(e)}")
#         raise


def fetch_relevant_media_ids(keywords: List[str], top_k=10) -> List[int]:
    log_counter("fetch_relevant_media_ids_attempt", labels={"keyword_count": len(keywords)})
    start_time = time.time()
    relevant_ids = set()
    for keyword in keywords:
        try:
            media_ids = fetch_keywords_for_media(keyword)
            relevant_ids.update(media_ids)
        except Exception as e:
            log_counter("fetch_relevant_media_ids_error", labels={"error": str(e)})
            logging.error(f"Error fetching relevant media IDs for keyword '{keyword}': {str(e)}")
            # Continue processing other keywords

    fetch_duration = time.time() - start_time
    log_histogram("fetch_relevant_media_ids_duration", fetch_duration)
    log_counter("fetch_relevant_media_ids_success", labels={"result_count": len(relevant_ids)})
    return list(relevant_ids)


def filter_results_by_keywords(results: List[Dict[str, Any]], keywords: List[str]) -> List[Dict[str, Any]]:
    log_counter("filter_results_by_keywords_attempt", labels={"result_count": len(results), "keyword_count": len(keywords)})
    start_time = time.time()
    if not keywords:
        return results

    filtered_results = []
    for result in results:
        try:
            metadata = result.get('metadata', {})
            if metadata is None:
                logging.warning(f"No metadata found for result: {result}")
                continue
            if not isinstance(metadata, dict):
                logging.warning(f"Unexpected metadata type: {type(metadata)}. Expected dict.")
                continue

            media_id = metadata.get('media_id')
            if media_id is None:
                logging.warning(f"No media_id found in metadata: {metadata}")
                continue

            media_keywords = fetch_keywords_for_media(media_id)
            if any(keyword.lower() in [mk.lower() for mk in media_keywords] for keyword in keywords):
                filtered_results.append(result)
        except Exception as e:
            logging.error(f"Error processing result: {result}. Error: {str(e)}")

    filter_duration = time.time() - start_time
    log_histogram("filter_results_by_keywords_duration", filter_duration)
    log_counter("filter_results_by_keywords_success", labels={"filtered_count": len(filtered_results)})
    return filtered_results

# FIXME: to be implemented
def extract_media_id_from_result(result: str) -> Optional[int]:
    # Implement this function based on how you store the media_id in your results
    # For example, if it's stored at the beginning of each result:
    try:
        return int(result.split('_')[0])
    except (IndexError, ValueError):
        logging.error(f"Failed to extract media_id from result: {result}")
        return None

#
#
########################################################################################################################


############################################################################################################
#
# Chat RAG

def enhanced_rag_pipeline_chat(query: str, api_choice: str, character_id: int, keywords: Optional[str] = None) -> Dict[str, Any]:
    """
    Enhanced RAG pipeline tailored for the Character Chat tab.

    Args:
        query (str): The user's input query.
        api_choice (str): The API to use for generating the response.
        character_id (int): The ID of the character being interacted with.
        keywords (Optional[str]): Comma-separated keywords to filter search results.

    Returns:
        Dict[str, Any]: Contains the generated answer and the context used.
    """
    log_counter("enhanced_rag_pipeline_chat_attempt", labels={"api_choice": api_choice, "character_id": character_id})
    start_time = time.time()
    try:
        # Load embedding provider from config, or fallback to 'openai'
        embedding_provider = config.get('Embeddings', 'provider', fallback='openai')
        logging.debug(f"Using embedding provider: {embedding_provider}")

        # Process keywords if provided
        keyword_list = [k.strip().lower() for k in keywords.split(',')] if keywords else []
        logging.debug(f"enhanced_rag_pipeline_chat - Keywords: {keyword_list}")

        # Fetch relevant chat IDs based on character_id and keywords
        if keyword_list:
            relevant_chat_ids = fetch_keywords_for_chats(keyword_list)
        else:
            relevant_chat_ids = fetch_all_chat_ids(character_id)
        logging.debug(f"enhanced_rag_pipeline_chat - Relevant chat IDs: {relevant_chat_ids}")

        if not relevant_chat_ids:
            logging.info(f"No chats found for the given keywords and character ID: {character_id}")
            # Fallback to generating answer without context
            answer = generate_answer(api_choice, "", query)
            # Metrics
            pipeline_duration = time.time() - start_time
            log_histogram("enhanced_rag_pipeline_chat_duration", pipeline_duration, labels={"api_choice": api_choice})
            log_counter("enhanced_rag_pipeline_chat_success",
                        labels={"api_choice": api_choice, "character_id": character_id})
            return {
                "answer": answer,
                "context": ""
            }

        # Perform vector search within the relevant chats
        vector_results = perform_vector_search_chat(query, relevant_chat_ids)
        logging.debug(f"enhanced_rag_pipeline_chat - Vector search results: {vector_results}")

        # Perform full-text search within the relevant chats
        # FIXME - Update for DB Selection
        fts_results = perform_full_text_search_chat(query, relevant_chat_ids)
        logging.debug("enhanced_rag_pipeline_chat - Full-text search results:")
        logging.debug("\n".join([str(item) for item in fts_results]))

        # Combine results
        all_results = vector_results + fts_results

        apply_re_ranking = True
        if apply_re_ranking:
            logging.debug("enhanced_rag_pipeline_chat - Applying Re-Ranking")
            ranker = Ranker()

            # Prepare passages for re-ranking
            passages = [{"id": i, "text": result['content']} for i, result in enumerate(all_results)]
            rerank_request = RerankRequest(query=query, passages=passages)

            # Rerank the results
            reranked_results = ranker.rerank(rerank_request)

            # Sort results based on the re-ranking score
            reranked_results = sorted(reranked_results, key=lambda x: x['score'], reverse=True)

            # Log reranked results
            logging.debug(f"enhanced_rag_pipeline_chat - Reranked results: {reranked_results}")

            # Update all_results based on reranking
            all_results = [all_results[result['id']] for result in reranked_results]

        # Extract context from top results (limit to top 10)
        context = "\n".join([result['content'] for result in all_results[:10]])
        logging.debug(f"Context length: {len(context)}")
        logging.debug(f"Context: {context[:200]}")  # Log only the first 200 characters for brevity

        # Generate answer using the selected API
        answer = generate_answer(api_choice, context, query)

        if not all_results:
            logging.info(f"No results found. Query: {query}, Keywords: {keywords}")
            return {
                "answer": "No relevant information based on your query and keywords was found in the database. Your query has been directly passed to the LLM, and here is its answer: \n\n" + answer,
                "context": "No relevant information based on your query and keywords was found in the database. The only context used was your query: \n\n" + query
            }

        return {
            "answer": answer,
            "context": context
        }

    except Exception as e:
        log_counter("enhanced_rag_pipeline_chat_error", labels={"api_choice": api_choice, "character_id": character_id, "error": str(e)})
        logging.error(f"Error in enhanced_rag_pipeline_chat: {str(e)}")
        return {
            "answer": "An error occurred while processing your request.",
            "context": ""
        }


def fetch_relevant_chat_ids(character_id: int, keywords: List[str]) -> List[int]:
    """
    Fetch chat IDs associated with a character and filtered by keywords.

    Args:
        character_id (int): The ID of the character.
        keywords (List[str]): List of keywords to filter chats.

    Returns:
        List[int]: List of relevant chat IDs.
    """
    log_counter("fetch_relevant_chat_ids_attempt", labels={"character_id": character_id, "keyword_count": len(keywords)})
    start_time = time.time()
    relevant_ids = set()
    try:
        media_ids = fetch_keywords_for_chats(keywords)
        relevant_ids.update(media_ids)
        fetch_duration = time.time() - start_time
        log_histogram("fetch_relevant_chat_ids_duration", fetch_duration)
        log_counter("fetch_relevant_chat_ids_success",
                    labels={"character_id": character_id, "result_count": len(relevant_ids)})
        return list(relevant_ids)
    except Exception as e:
        log_counter("fetch_relevant_chat_ids_error", labels={"character_id": character_id, "error": str(e)})
        logging.error(f"Error fetching relevant chat IDs: {str(e)}")
        return []


def fetch_all_chat_ids(character_id: int) -> List[int]:
    """
    Fetch all chat IDs associated with a specific character.

    Args:
        character_id (int): The ID of the character.

    Returns:
        List[int]: List of all chat IDs for the character.
    """
    log_counter("fetch_all_chat_ids_attempt", labels={"character_id": character_id})
    start_time = time.time()
    try:
        chats = get_character_chats(character_id=character_id)
        chat_ids = [chat['id'] for chat in chats]
        fetch_duration = time.time() - start_time
        log_histogram("fetch_all_chat_ids_duration", fetch_duration)
        log_counter("fetch_all_chat_ids_success", labels={"character_id": character_id, "chat_count": len(chat_ids)})
        return chat_ids
    except Exception as e:
        log_counter("fetch_all_chat_ids_error", labels={"character_id": character_id, "error": str(e)})
        logging.error(f"Error fetching all chat IDs for character {character_id}: {str(e)}")
        return []

#
# End of Chat RAG
############################################################################################################

# Function to preprocess and store all existing content in the database
# def preprocess_all_content(database, create_contextualized=True, api_name="gpt-3.5-turbo"):
#     unprocessed_media = get_unprocessed_media()
#     total_media = len(unprocessed_media)
#
#     for index, row in enumerate(unprocessed_media, 1):
#         media_id, content, media_type, file_name = row
#         collection_name = f"{media_type}_{media_id}"
#
#         logger.info(f"Processing media {index} of {total_media}: ID {media_id}, Type {media_type}")
#
#         try:
#             process_and_store_content(
#                 database=database,
#                 content=content,
#                 collection_name=collection_name,
#                 media_id=media_id,
#                 file_name=file_name or f"{media_type}_{media_id}",
#                 create_embeddings=True,
#                 create_contextualized=create_contextualized,
#                 api_name=api_name
#             )
#
#             # Mark the media as processed in the database
#             mark_media_as_processed(database, media_id)
#
#             logger.info(f"Successfully processed media ID {media_id}")
#         except Exception as e:
#             logger.error(f"Error processing media ID {media_id}: {str(e)}")
#
#     logger.info("Finished preprocessing all unprocessed content")

############################################################################################################
#
# ElasticSearch Retriever

# https://github.com/langchain-ai/langchain/tree/44e3e2391c48bfd0a8e6a20adde0b6567f4f43c3/templates/rag-elasticsearch
#
# https://github.com/langchain-ai/langchain/tree/44e3e2391c48bfd0a8e6a20adde0b6567f4f43c3/templates/rag-self-query

#
# End of RAG_Library_2.py
############################################################################################################
```
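And a matching usage sketch for the chat-side entry point; the character ID and keyword values are hypothetical:

```python
# Hypothetical usage sketch for enhanced_rag_pipeline_chat — values illustrative.
chat_result = enhanced_rag_pipeline_chat(
    query="Remind me what we discussed about the quest.",
    api_choice="OpenAI",
    character_id=42,            # hypothetical character ID
    keywords="quest, lore",     # comma-separated filter; omit to search all chats
)
print(chat_result["answer"])
```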
|
|
1 |
+
# RAG_Library_2.py
|
2 |
+
# Description: This script contains the main RAG pipeline function and related functions for the RAG pipeline.
|
3 |
+
#
|
4 |
+
# Import necessary modules and functions
|
5 |
+
import configparser
|
6 |
+
import logging
|
7 |
+
import os
|
8 |
+
import time
|
9 |
+
from typing import Dict, Any, List, Optional
|
10 |
+
|
11 |
+
from App_Function_Libraries.DB.Character_Chat_DB import get_character_chats, perform_full_text_search_chat, \
|
12 |
+
fetch_keywords_for_chats, search_character_chat, search_character_cards, fetch_character_ids_by_keywords
|
13 |
+
from App_Function_Libraries.DB.RAG_QA_Chat_DB import search_rag_chat, search_rag_notes
|
14 |
+
#
|
15 |
+
# Local Imports
|
16 |
+
from App_Function_Libraries.RAG.ChromaDB_Library import process_and_store_content, vector_search, chroma_client
|
17 |
+
from App_Function_Libraries.RAG.RAG_Persona_Chat import perform_vector_search_chat
|
18 |
+
from App_Function_Libraries.Summarization.Local_Summarization_Lib import summarize_with_custom_openai
|
19 |
+
from App_Function_Libraries.Web_Scraping.Article_Extractor_Lib import scrape_article
|
20 |
+
from App_Function_Libraries.DB.DB_Manager import fetch_keywords_for_media, search_media_db, get_notes_by_keywords, \
|
21 |
+
search_conversations_by_keywords
|
22 |
+
from App_Function_Libraries.Utils.Utils import load_comprehensive_config
|
23 |
+
from App_Function_Libraries.Metrics.metrics_logger import log_counter, log_histogram
|
24 |
+
#
|
25 |
+
# 3rd-Party Imports
|
26 |
+
import openai
|
27 |
+
from flashrank import Ranker, RerankRequest
|
28 |
+
#
|
29 |
+
########################################################################################################################
|
30 |
+
#
|
31 |
+
# Functions:
|
32 |
+
|
33 |
+
# Initialize OpenAI client (adjust this based on your API key management)
|
34 |
+
openai.api_key = "your-openai-api-key"
|
35 |
+
|
36 |
+
# Get the directory of the current script
|
37 |
+
current_dir = os.path.dirname(os.path.abspath(__file__))
|
38 |
+
# Construct the path to the config file
|
39 |
+
config_path = os.path.join(current_dir, 'Config_Files', 'config.txt')
|
40 |
+
# Read the config file
|
41 |
+
config = configparser.ConfigParser()
|
42 |
+
# Read the configuration file
|
43 |
+
config.read('config.txt')
|
44 |
+
|
45 |
+
|
46 |
+
search_functions = {
|
47 |
+
"Media DB": search_media_db,
|
48 |
+
"RAG Chat": search_rag_chat,
|
49 |
+
"RAG Notes": search_rag_notes,
|
50 |
+
"Character Chat": search_character_chat,
|
51 |
+
"Character Cards": search_character_cards
|
52 |
+
}
|
53 |
+
|
54 |
+
# RAG pipeline function for web scraping
|
55 |
+
# def rag_web_scraping_pipeline(url: str, query: str, api_choice=None) -> Dict[str, Any]:
|
56 |
+
# try:
|
57 |
+
# # Extract content
|
58 |
+
# try:
|
59 |
+
# article_data = scrape_article(url)
|
60 |
+
# content = article_data['content']
|
61 |
+
# title = article_data['title']
|
62 |
+
# except Exception as e:
|
63 |
+
# logging.error(f"Error scraping article: {str(e)}")
|
64 |
+
# return {"error": "Failed to scrape article", "details": str(e)}
|
65 |
+
#
|
66 |
+
# # Store the article in the database and get the media_id
|
67 |
+
# try:
|
68 |
+
# media_id = add_media_to_database(url, title, 'article', content)
|
69 |
+
# except Exception as e:
|
70 |
+
# logging.error(f"Error adding article to database: {str(e)}")
|
71 |
+
# return {"error": "Failed to store article in database", "details": str(e)}
|
72 |
+
#
|
73 |
+
# # Process and store content
|
74 |
+
# collection_name = f"article_{media_id}"
|
75 |
+
# try:
|
76 |
+
# # Assuming you have a database object available, let's call it 'db'
|
77 |
+
# db = get_database_connection()
|
78 |
+
#
|
79 |
+
# process_and_store_content(
|
80 |
+
# database=db,
|
81 |
+
# content=content,
|
82 |
+
# collection_name=collection_name,
|
83 |
+
# media_id=media_id,
|
84 |
+
# file_name=title,
|
85 |
+
# create_embeddings=True,
|
86 |
+
# create_contextualized=True,
|
87 |
+
# api_name=api_choice
|
88 |
+
# )
|
89 |
+
# except Exception as e:
|
90 |
+
# logging.error(f"Error processing and storing content: {str(e)}")
|
91 |
+
# return {"error": "Failed to process and store content", "details": str(e)}
|
92 |
+
#
|
93 |
+
# # Perform searches
|
94 |
+
# try:
|
95 |
+
# vector_results = vector_search(collection_name, query, k=5)
|
96 |
+
# fts_results = search_db(query, ["content"], "", page=1, results_per_page=5)
|
97 |
+
# except Exception as e:
|
98 |
+
# logging.error(f"Error performing searches: {str(e)}")
|
99 |
+
# return {"error": "Failed to perform searches", "details": str(e)}
|
100 |
+
#
|
101 |
+
# # Combine results with error handling for missing 'content' key
|
102 |
+
# all_results = []
|
103 |
+
# for result in vector_results + fts_results:
|
104 |
+
# if isinstance(result, dict) and 'content' in result:
|
105 |
+
# all_results.append(result['content'])
|
106 |
+
# else:
|
107 |
+
# logging.warning(f"Unexpected result format: {result}")
|
108 |
+
# all_results.append(str(result))
|
109 |
+
#
|
110 |
+
# context = "\n".join(all_results)
|
111 |
+
#
|
112 |
+
# # Generate answer using the selected API
|
113 |
+
# try:
|
114 |
+
# answer = generate_answer(api_choice, context, query)
|
115 |
+
# except Exception as e:
|
116 |
+
# logging.error(f"Error generating answer: {str(e)}")
|
117 |
+
# return {"error": "Failed to generate answer", "details": str(e)}
|
118 |
+
#
|
119 |
+
# return {
|
120 |
+
# "answer": answer,
|
121 |
+
# "context": context
|
122 |
+
# }
|
123 |
+
#
|
124 |
+
# except Exception as e:
|
125 |
+
# logging.error(f"Unexpected error in rag_pipeline: {str(e)}")
|
126 |
+
# return {"error": "An unexpected error occurred", "details": str(e)}
|
127 |
+
|
128 |
+
|
129 |
+
# RAG Search with keyword filtering
|
130 |
+
# FIXME - Update each called function to support modifiable top-k results
|
131 |
+
def enhanced_rag_pipeline(
|
132 |
+
query: str,
|
133 |
+
api_choice: str,
|
134 |
+
keywords: Optional[str] = None,
|
135 |
+
fts_top_k: int = 10,
|
136 |
+
apply_re_ranking: bool = True,
|
137 |
+
database_types: List[str] = ["Media DB"]
|
138 |
+
) -> Dict[str, Any]:
|
139 |
+
"""
|
140 |
+
Perform full text search across specified database type.
|
141 |
+
|
142 |
+
Args:
|
143 |
+
query: Search query string
|
144 |
+
api_choice: API to use for generating the response
|
145 |
+
keywords: Optional list of media IDs to filter results
|
146 |
+
fts_top_k: Maximum number of results to return
|
147 |
+
apply_re_ranking: Whether to apply re-ranking to results
|
148 |
+
database_types: Type of database to search
|
149 |
+
|
150 |
+
Returns:
|
151 |
+
Dictionary containing search results with content
|
152 |
+
"""
|
153 |
+
log_counter("enhanced_rag_pipeline_attempt", labels={"api_choice": api_choice})
|
154 |
+
start_time = time.time()
|
155 |
+
|
156 |
+
try:
|
157 |
+
# Load embedding provider from config, or fallback to 'openai'
|
158 |
+
embedding_provider = config.get('Embeddings', 'provider', fallback='openai')
|
159 |
+
logging.debug(f"Using embedding provider: {embedding_provider}")
|
160 |
+
|
161 |
+
# Initialize relevant IDs dictionary
|
162 |
+
relevant_ids: Dict[str, Optional[List[str]]] = {}
|
163 |
+
|
164 |
+
# Process keywords if provided
|
165 |
+
if keywords:
|
166 |
+
keyword_list = [k.strip().lower() for k in keywords.split(',')]
|
167 |
+
logging.debug(f"enhanced_rag_pipeline - Keywords: {keyword_list}")
|
168 |
+
|
169 |
+
try:
|
170 |
+
for db_type in database_types:
|
171 |
+
if db_type == "Media DB":
|
172 |
+
media_ids = fetch_relevant_media_ids(keyword_list)
|
173 |
+
relevant_ids[db_type] = [str(id_) for id_ in media_ids]
|
174 |
+
elif db_type == "RAG Chat":
|
175 |
+
conversations, _, _ = search_conversations_by_keywords(keywords=keyword_list)
|
176 |
+
relevant_ids[db_type] = [str(conv['conversation_id']) for conv in conversations]
|
177 |
+
elif db_type == "RAG Notes":
|
178 |
+
notes, _, _ = get_notes_by_keywords(keyword_list)
|
179 |
+
relevant_ids[db_type] = [str(note_id) for note_id, _, _, _ in notes]
|
180 |
+
elif db_type == "Character Chat":
|
181 |
+
relevant_ids[db_type] = [str(id_) for id_ in fetch_keywords_for_chats(keyword_list)]
|
182 |
+
elif db_type == "Character Cards":
|
183 |
+
relevant_ids[db_type] = [str(id_) for id_ in fetch_character_ids_by_keywords(keyword_list)]
|
184 |
+
else:
|
185 |
+
logging.error(f"Unsupported database type: {db_type}")
|
186 |
+
|
187 |
+
logging.debug(f"enhanced_rag_pipeline - {db_type} relevant IDs: {relevant_ids[db_type]}")
|
188 |
+
except Exception as e:
|
189 |
+
logging.error(f"Error fetching relevant IDs: {str(e)}")
|
190 |
+
relevant_ids = {db_type: None for db_type in database_types}
|
191 |
+
else:
|
192 |
+
relevant_ids = {db_type: None for db_type in database_types}
|
193 |
+
|
194 |
+
# Perform vector search
|
195 |
+
vector_results = []
|
196 |
+
for db_type in database_types:
|
197 |
+
try:
|
198 |
+
db_relevant_ids = relevant_ids.get(db_type)
|
199 |
+
results = perform_vector_search(query, db_relevant_ids, top_k=fts_top_k)
|
200 |
+
vector_results.extend(results)
|
201 |
+
logging.debug(f"\nenhanced_rag_pipeline - Vector search results for {db_type}: {results}")
|
202 |
+
except Exception as e:
|
203 |
+
logging.error(f"Error performing vector search on {db_type}: {str(e)}")
|
204 |
+
|
205 |
+
# Perform vector search
|
206 |
+
# FIXME
|
207 |
+
#vector_results = perform_vector_search(query, relevant_media_ids)
|
208 |
+
#ogging.debug(f"\n\nenhanced_rag_pipeline - Vector search results: {vector_results}")
|
209 |
+
|
210 |
+
# Perform full-text search
|
211 |
+
#v1
|
212 |
+
#fts_results = perform_full_text_search(query, database_type, relevant_media_ids, fts_top_k)
|
213 |
+
|
214 |
+
# v2
|
215 |
+
# Perform full-text search across specified databases
|
216 |
+
fts_results = []
|
217 |
+
for db_type in database_types:
|
218 |
+
try:
|
219 |
+
db_relevant_ids = relevant_ids.get(db_type)
|
220 |
+
db_results = perform_full_text_search(query, db_type, db_relevant_ids, fts_top_k)
|
221 |
+
fts_results.extend(db_results)
|
222 |
+
logging.debug(f"enhanced_rag_pipeline - FTS results for {db_type}: {db_results}")
|
223 |
+
except Exception as e:
|
224 |
+
logging.error(f"Error performing full-text search on {db_type}: {str(e)}")
|
225 |
+
|
226 |
+
#logging.debug("\n\nenhanced_rag_pipeline - Full-text search results:")
|
227 |
+
logging.debug(
|
228 |
+
"\n\nenhanced_rag_pipeline - Full-text search results:\n" + "\n".join(
|
229 |
+
[str(item) for item in fts_results]) + "\n"
|
230 |
+
)
|
231 |
+
|
232 |
+
# Combine results
|
233 |
+
all_results = vector_results + fts_results
|
234 |
+
|
235 |
+
# FIXME - specify model + add param to modify at call time
|
236 |
+
# You can specify a model if necessary, e.g., model_name="ms-marco-MiniLM-L-12-v2"
|
237 |
+
# Apply re-ranking if enabled and results exist
|
238 |
+
if apply_re_ranking and all_results:
|
239 |
+
logging.debug(f"\nenhanced_rag_pipeline - Applying Re-Ranking")
|
240 |
+
|
241 |
+
if all_results:
|
242 |
+
ranker = Ranker()
|
243 |
+
|
244 |
+
# Prepare passages for re-ranking
|
245 |
+
passages = [{"id": i, "text": result['content']} for i, result in enumerate(all_results)]
|
246 |
+
rerank_request = RerankRequest(query=query, passages=passages)
|
247 |
+
|
248 |
+
# Rerank the results
|
249 |
+
reranked_results = ranker.rerank(rerank_request)
|
250 |
+
|
251 |
+
# Sort results based on the re-ranking score
|
252 |
+
reranked_results = sorted(reranked_results, key=lambda x: x['score'], reverse=True)
|
253 |
+
|
254 |
+
# Log reranked results
|
255 |
+
logging.debug(f"\n\nenhanced_rag_pipeline - Reranked results: {reranked_results}")
|
256 |
+
|
257 |
+
# Update all_results based on reranking
|
258 |
+
all_results = [all_results[result['id']] for result in reranked_results]
|
259 |
+
|
260 |
+
# Extract content from results (top fts_top_k by default)
|
261 |
+
context = "\n".join([result['content'] for result in all_results[:fts_top_k]])
|
262 |
+
#logging.debug(f"Context length: {len(context)}")
|
263 |
+
logging.debug(f"Context: {context[:200]}")
|
264 |
+
|
265 |
+
# Generate answer using the selected API
|
266 |
+
answer = generate_answer(api_choice, context, query)
|
267 |
+
|
268 |
+
if not all_results:
|
269 |
+
logging.info(f"No results found. Query: {query}, Keywords: {keywords}")
|
270 |
+
return {
|
271 |
+
"answer": "No relevant information based on your query and keywords were found in the database. Your query has been directly passed to the LLM, and here is its answer: \n\n" + answer,
|
272 |
+
"context": "No relevant information based on your query and keywords were found in the database. The only context used was your query: \n\n" + query
|
273 |
+
}
|
274 |
+
|
275 |
+
# Log metrics
|
276 |
+
pipeline_duration = time.time() - start_time
|
277 |
+
log_histogram("enhanced_rag_pipeline_duration", pipeline_duration, labels={"api_choice": api_choice})
|
278 |
+
log_counter("enhanced_rag_pipeline_success", labels={"api_choice": api_choice})
|
279 |
+
|
280 |
+
return {
|
281 |
+
"answer": answer,
|
282 |
+
"context": context
|
283 |
+
}
|
284 |
+
|
285 |
+
except Exception as e:
|
286 |
+
log_counter("enhanced_rag_pipeline_error", labels={"api_choice": api_choice, "error": str(e)})
|
287 |
+
logging.error(f"Error in enhanced_rag_pipeline: {str(e)}")
|
288 |
+
logging.error(f"Error in enhanced_rag_pipeline: {str(e)}")
|
289 |
+
return {
|
290 |
+
"answer": "An error occurred while processing your request.",
|
291 |
+
"context": ""
|
292 |
+
}


# Need to write a test for this function FIXME
def generate_answer(api_choice: str, context: str, query: str) -> str:
    # Metrics
    log_counter("generate_answer_attempt", labels={"api_choice": api_choice})
    start_time = time.time()
    logging.debug("Entering generate_answer function")
    config = load_comprehensive_config()
    logging.debug(f"Config sections: {config.sections()}")
    prompt = f"Context: {context}\n\nQuestion: {query}"
    try:
        # Commercial LLM APIs
        if api_choice == "OpenAI":
            from App_Function_Libraries.Summarization.Summarization_General_Lib import summarize_with_openai
            answer = summarize_with_openai(config['API']['openai_api_key'], prompt, "")

        elif api_choice == "Anthropic":
            from App_Function_Libraries.Summarization.Summarization_General_Lib import summarize_with_anthropic
            answer = summarize_with_anthropic(config['API']['anthropic_api_key'], prompt, "")

        elif api_choice == "Cohere":
            from App_Function_Libraries.Summarization.Summarization_General_Lib import summarize_with_cohere
            answer = summarize_with_cohere(config['API']['cohere_api_key'], prompt, "")

        elif api_choice == "Groq":
            from App_Function_Libraries.Summarization.Summarization_General_Lib import summarize_with_groq
            answer = summarize_with_groq(config['API']['groq_api_key'], prompt, "")

        elif api_choice == "OpenRouter":
            from App_Function_Libraries.Summarization.Summarization_General_Lib import summarize_with_openrouter
            answer = summarize_with_openrouter(config['API']['openrouter_api_key'], prompt, "")

        elif api_choice == "HuggingFace":
            from App_Function_Libraries.Summarization.Summarization_General_Lib import summarize_with_huggingface
            answer = summarize_with_huggingface(config['API']['huggingface_api_key'], prompt, "")

        elif api_choice == "DeepSeek":
            from App_Function_Libraries.Summarization.Summarization_General_Lib import summarize_with_deepseek
            answer = summarize_with_deepseek(config['API']['deepseek_api_key'], prompt, "")

        elif api_choice == "Mistral":
            from App_Function_Libraries.Summarization.Summarization_General_Lib import summarize_with_mistral
            answer = summarize_with_mistral(config['API']['mistral_api_key'], prompt, "")

        # Local LLM APIs
        elif api_choice == "Local-LLM":
            from App_Function_Libraries.Summarization.Local_Summarization_Lib import summarize_with_local_llm
            # FIXME
            answer = summarize_with_local_llm(config['Local-API']['local_llm_path'], prompt, "")

        elif api_choice == "Llama.cpp":
            from App_Function_Libraries.Summarization.Local_Summarization_Lib import summarize_with_llama
            answer = summarize_with_llama(prompt, "", config['Local-API']['llama_api_key'], None, None)

        elif api_choice == "Kobold":
            from App_Function_Libraries.Summarization.Local_Summarization_Lib import summarize_with_kobold
            answer = summarize_with_kobold(prompt, config['Local-API']['kobold_api_key'], "", system_message=None, temp=None)

        elif api_choice == "Ooba":
            from App_Function_Libraries.Summarization.Local_Summarization_Lib import summarize_with_oobabooga
            answer = summarize_with_oobabooga(prompt, config['Local-API']['ooba_api_key'], custom_prompt="", system_message=None, temp=None)

        elif api_choice == "TabbyAPI":
            from App_Function_Libraries.Summarization.Local_Summarization_Lib import summarize_with_tabbyapi
            answer = summarize_with_tabbyapi(prompt, None, None, None, None)

        elif api_choice == "vLLM":
            from App_Function_Libraries.Summarization.Local_Summarization_Lib import summarize_with_vllm
            answer = summarize_with_vllm(prompt, "", config['Local-API']['vllm_api_key'], None, None)

        elif api_choice.lower() == "ollama":
            from App_Function_Libraries.Summarization.Local_Summarization_Lib import summarize_with_ollama
            answer = summarize_with_ollama(prompt, "", config['Local-API']['ollama_api_IP'], config['Local-API']['ollama_api_key'], None, None, None)

        elif api_choice.lower() == "custom_openai_api":
            logging.debug("RAG Answer Gen: Trying with Custom_OpenAI API")
            answer = summarize_with_custom_openai(prompt, "", config['API']['custom_openai_api_key'], None, None)

        else:
            log_counter("generate_answer_error", labels={"api_choice": api_choice, "error": "Unsupported API choice"})
            raise ValueError(f"Unsupported API choice: {api_choice}")

        # Record duration/success only after the provider call has actually
        # completed, so the histogram reflects real generation time and the
        # success counter is not incremented for calls that raise.
        answer_generation_duration = time.time() - start_time
        log_histogram("generate_answer_duration", answer_generation_duration, labels={"api_choice": api_choice})
        log_counter("generate_answer_success", labels={"api_choice": api_choice})
        return answer

    except Exception as e:
        log_counter("generate_answer_error", labels={"api_choice": api_choice, "error": str(e)})
        logging.error(f"Error in generate_answer: {str(e)}")
        return "An error occurred while generating the answer."
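

# A minimal test sketch for generate_answer, per the FIXME above. Illustrative
# only, not part of the library: the pytest-style layout is an assumption, and
# the provider function is monkeypatched at its source module because
# generate_answer imports it lazily inside the branch.
#
# from unittest.mock import MagicMock, patch
#
# def test_generate_answer_openai_success():
#     fake_config = MagicMock()
#     fake_config.__getitem__.return_value = {"openai_api_key": "test-key"}
#     with patch("App_Function_Libraries.RAG.RAG_Library_2.load_comprehensive_config",
#                return_value=fake_config), \
#          patch("App_Function_Libraries.Summarization.Summarization_General_Lib.summarize_with_openai",
#                return_value="mocked answer") as mock_openai:
#         answer = generate_answer("OpenAI", "some context", "some query")
#     assert answer == "mocked answer"
#     mock_openai.assert_called_once()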


def perform_vector_search(query: str, relevant_media_ids: Optional[List[str]] = None, top_k=10) -> List[Dict[str, Any]]:
    log_counter("perform_vector_search_attempt")
    start_time = time.time()
    all_collections = chroma_client.list_collections()
    vector_results = []
    try:
        for collection in all_collections:
            collection_results = vector_search(collection.name, query, k=top_k)
            if not collection_results:
                continue  # Skip empty results
            filtered_results = [
                result for result in collection_results
                if relevant_media_ids is None or result['metadata'].get('media_id') in relevant_media_ids
            ]
            vector_results.extend(filtered_results)
        search_duration = time.time() - start_time
        log_histogram("perform_vector_search_duration", search_duration)
        log_counter("perform_vector_search_success", labels={"result_count": len(vector_results)})
        return vector_results
    except Exception as e:
        log_counter("perform_vector_search_error", labels={"error": str(e)})
        logging.error(f"Error in perform_vector_search: {str(e)}")
        raise
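
# Example usage (illustrative; the query and media IDs are placeholders and
# assume the ChromaDB collections have already been populated with embeddings):
#
# hits = perform_vector_search("what is retrieval-augmented generation?",
#                              relevant_media_ids=["12", "37"], top_k=5)
# for hit in hits:
#     print(hit['metadata'].get('media_id'), hit['content'][:80])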


# V2
def perform_full_text_search(query: str, database_type: str, relevant_ids: Optional[List[str]] = None, fts_top_k=None) -> List[Dict[str, Any]]:
    """
    Perform full-text search on a specified database type.

    Args:
        query: Search query string
        database_type: Type of database to search ("Media DB", "RAG Chat", "RAG Notes", "Character Chat", "Character Cards")
        relevant_ids: Optional list of media IDs to filter results
        fts_top_k: Maximum number of results to return (defaults to 10)

    Returns:
        List of search results with content and metadata
    """
    log_counter("perform_full_text_search_attempt", labels={"database_type": database_type})
    start_time = time.time()

    try:
        # Set default for fts_top_k
        if fts_top_k is None:
            fts_top_k = 10

        # Dispatch to the appropriate search function for the database type
        if database_type not in search_functions:
            raise ValueError(f"Unsupported database type: {database_type}")

        results = search_functions[database_type](query, fts_top_k, relevant_ids)

        search_duration = time.time() - start_time
        log_histogram("perform_full_text_search_duration", search_duration,
                      labels={"database_type": database_type})
        log_counter("perform_full_text_search_success",
                    labels={"database_type": database_type, "result_count": len(results)})

        return results

    except Exception as e:
        log_counter("perform_full_text_search_error",
                    labels={"database_type": database_type, "error": str(e)})
        logging.error(f"Error in perform_full_text_search ({database_type}): {str(e)}")
        raise
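
# Example usage (illustrative; the query, database type, and IDs are
# placeholders -- database_type must be a key of search_functions above):
#
# results = perform_full_text_search("vector databases", "Media DB",
#                                    relevant_ids=["12", "37"], fts_top_k=5)
# for r in results:
#     print(r['content'][:80])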


# v1
# def perform_full_text_search(query: str, relevant_media_ids: List[str] = None, fts_top_k=None) -> List[Dict[str, Any]]:
#     log_counter("perform_full_text_search_attempt")
#     start_time = time.time()
#     try:
#         fts_results = search_db(query, ["content"], "", page=1, results_per_page=fts_top_k or 10)
#         filtered_fts_results = [
#             {
#                 "content": result['content'],
#                 "metadata": {"media_id": result['id']}
#             }
#             for result in fts_results
#             if relevant_media_ids is None or result['id'] in relevant_media_ids
#         ]
#         search_duration = time.time() - start_time
#         log_histogram("perform_full_text_search_duration", search_duration)
#         log_counter("perform_full_text_search_success", labels={"result_count": len(filtered_fts_results)})
#         return filtered_fts_results
#     except Exception as e:
#         log_counter("perform_full_text_search_error", labels={"error": str(e)})
#         logging.error(f"Error in perform_full_text_search: {str(e)}")
#         raise


def fetch_relevant_media_ids(keywords: List[str], top_k=10) -> List[int]:
    log_counter("fetch_relevant_media_ids_attempt", labels={"keyword_count": len(keywords)})
    start_time = time.time()
    relevant_ids = set()
    for keyword in keywords:
        try:
            media_ids = fetch_keywords_for_media(keyword)
            relevant_ids.update(media_ids)
        except Exception as e:
            log_counter("fetch_relevant_media_ids_error", labels={"error": str(e)})
            logging.error(f"Error fetching relevant media IDs for keyword '{keyword}': {str(e)}")
            # Continue processing other keywords

    fetch_duration = time.time() - start_time
    log_histogram("fetch_relevant_media_ids_duration", fetch_duration)
    log_counter("fetch_relevant_media_ids_success", labels={"result_count": len(relevant_ids)})
    return list(relevant_ids)


def filter_results_by_keywords(results: List[Dict[str, Any]], keywords: List[str]) -> List[Dict[str, Any]]:
    log_counter("filter_results_by_keywords_attempt", labels={"result_count": len(results), "keyword_count": len(keywords)})
    start_time = time.time()
    if not keywords:
        return results

    filtered_results = []
    for result in results:
        try:
            metadata = result.get('metadata', {})
            if metadata is None:
                logging.warning(f"No metadata found for result: {result}")
                continue
            if not isinstance(metadata, dict):
                logging.warning(f"Unexpected metadata type: {type(metadata)}. Expected dict.")
                continue

            media_id = metadata.get('media_id')
            if media_id is None:
                logging.warning(f"No media_id found in metadata: {metadata}")
                continue

            media_keywords = fetch_keywords_for_media(media_id)
            if any(keyword.lower() in [mk.lower() for mk in media_keywords] for keyword in keywords):
                filtered_results.append(result)
        except Exception as e:
            logging.error(f"Error processing result: {result}. Error: {str(e)}")

    filter_duration = time.time() - start_time
    log_histogram("filter_results_by_keywords_duration", filter_duration)
    log_counter("filter_results_by_keywords_success", labels={"filtered_count": len(filtered_results)})
    return filtered_results


# FIXME: to be implemented
def extract_media_id_from_result(result: str) -> Optional[int]:
    # Implement this function based on how you store the media_id in your results.
    # For example, if it's stored at the beginning of each result:
    try:
        return int(result.split('_')[0])
    except (IndexError, ValueError):
        logging.error(f"Failed to extract media_id from result: {result}")
        return None
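
# Example behaviour of the placeholder above (illustrative, assuming the
# "<media_id>_<suffix>" naming convention mentioned in the comment):
#
# extract_media_id_from_result("42_chunk_3")  -> 42
# extract_media_id_from_result("no-id-here")  -> None (and logs an error)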

#
#
########################################################################################################################


############################################################################################################
#
# Chat RAG

def enhanced_rag_pipeline_chat(query: str, api_choice: str, character_id: int, keywords: Optional[str] = None) -> Dict[str, Any]:
    """
    Enhanced RAG pipeline tailored for the Character Chat tab.

    Args:
        query (str): The user's input query.
        api_choice (str): The API to use for generating the response.
        character_id (int): The ID of the character being interacted with.
        keywords (Optional[str]): Comma-separated keywords to filter search results.

    Returns:
        Dict[str, Any]: Contains the generated answer and the context used.
    """
    log_counter("enhanced_rag_pipeline_chat_attempt", labels={"api_choice": api_choice, "character_id": character_id})
    start_time = time.time()
    try:
        # Load embedding provider from config, or fall back to 'openai'
        embedding_provider = config.get('Embeddings', 'provider', fallback='openai')
        logging.debug(f"Using embedding provider: {embedding_provider}")

        # Process keywords if provided
        keyword_list = [k.strip().lower() for k in keywords.split(',')] if keywords else []
        logging.debug(f"enhanced_rag_pipeline_chat - Keywords: {keyword_list}")

        # Fetch relevant chat IDs based on character_id and keywords
        if keyword_list:
            relevant_chat_ids = fetch_keywords_for_chats(keyword_list)
        else:
            relevant_chat_ids = fetch_all_chat_ids(character_id)
        logging.debug(f"enhanced_rag_pipeline_chat - Relevant chat IDs: {relevant_chat_ids}")

        if not relevant_chat_ids:
            logging.info(f"No chats found for the given keywords and character ID: {character_id}")
            # Fall back to generating an answer without context
            answer = generate_answer(api_choice, "", query)
            # Metrics
            pipeline_duration = time.time() - start_time
            log_histogram("enhanced_rag_pipeline_chat_duration", pipeline_duration, labels={"api_choice": api_choice})
            log_counter("enhanced_rag_pipeline_chat_success",
                        labels={"api_choice": api_choice, "character_id": character_id})
            return {
                "answer": answer,
                "context": ""
            }

        # Perform vector search within the relevant chats
        vector_results = perform_vector_search_chat(query, relevant_chat_ids)
        logging.debug(f"enhanced_rag_pipeline_chat - Vector search results: {vector_results}")

        # Perform full-text search within the relevant chats
        # FIXME - Update for DB Selection
        fts_results = perform_full_text_search_chat(query, relevant_chat_ids)
        logging.debug("enhanced_rag_pipeline_chat - Full-text search results:")
        logging.debug("\n".join([str(item) for item in fts_results]))

        # Combine results
        all_results = vector_results + fts_results

        apply_re_ranking = True
        if apply_re_ranking:
            logging.debug("enhanced_rag_pipeline_chat - Applying Re-Ranking")
            ranker = Ranker()

            # Prepare passages for re-ranking
            passages = [{"id": i, "text": result['content']} for i, result in enumerate(all_results)]
            rerank_request = RerankRequest(query=query, passages=passages)

            # Rerank the results
            reranked_results = ranker.rerank(rerank_request)

            # Sort results based on the re-ranking score
            reranked_results = sorted(reranked_results, key=lambda x: x['score'], reverse=True)

            # Log reranked results
            logging.debug(f"enhanced_rag_pipeline_chat - Reranked results: {reranked_results}")

            # Update all_results based on the reranking order
            all_results = [all_results[result['id']] for result in reranked_results]

        # Extract context from top results (limit to top 10)
        context = "\n".join([result['content'] for result in all_results[:10]])
        logging.debug(f"Context length: {len(context)}")
        logging.debug(f"Context: {context[:200]}")  # Log only the first 200 characters for brevity

        # Generate answer using the selected API
        answer = generate_answer(api_choice, context, query)

        if not all_results:
            logging.info(f"No results found. Query: {query}, Keywords: {keywords}")
            return {
                "answer": "No relevant information based on your query and keywords was found in the database. Your query has been passed directly to the LLM, and here is its answer: \n\n" + answer,
                "context": "No relevant information based on your query and keywords was found in the database. The only context used was your query: \n\n" + query
            }

        return {
            "answer": answer,
            "context": context
        }

    except Exception as e:
        log_counter("enhanced_rag_pipeline_chat_error", labels={"api_choice": api_choice, "character_id": character_id, "error": str(e)})
        logging.error(f"Error in enhanced_rag_pipeline_chat: {str(e)}")
        return {
            "answer": "An error occurred while processing your request.",
            "context": ""
        }
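
# Example usage (illustrative; the character ID, keywords, and API choice are
# placeholders and assume matching chats exist in the database):
#
# response = enhanced_rag_pipeline_chat(
#     query="What did we decide about the treaty?",
#     api_choice="OpenAI",
#     character_id=7,
#     keywords="treaty,negotiation",
# )
# print(response["answer"])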


def fetch_relevant_chat_ids(character_id: int, keywords: List[str]) -> List[int]:
    """
    Fetch chat IDs associated with a character and filtered by keywords.

    Args:
        character_id (int): The ID of the character.
        keywords (List[str]): List of keywords to filter chats.

    Returns:
        List[int]: List of relevant chat IDs.
    """
    log_counter("fetch_relevant_chat_ids_attempt", labels={"character_id": character_id, "keyword_count": len(keywords)})
    start_time = time.time()
    relevant_ids = set()
    try:
        media_ids = fetch_keywords_for_chats(keywords)
        relevant_ids.update(media_ids)
        # Log metrics after the result set is populated so the reported count is accurate
        fetch_duration = time.time() - start_time
        log_histogram("fetch_relevant_chat_ids_duration", fetch_duration)
        log_counter("fetch_relevant_chat_ids_success",
                    labels={"character_id": character_id, "result_count": len(relevant_ids)})
        return list(relevant_ids)
    except Exception as e:
        log_counter("fetch_relevant_chat_ids_error", labels={"character_id": character_id, "error": str(e)})
        logging.error(f"Error fetching relevant chat IDs: {str(e)}")
        return []


def fetch_all_chat_ids(character_id: int) -> List[int]:
    """
    Fetch all chat IDs associated with a specific character.

    Args:
        character_id (int): The ID of the character.

    Returns:
        List[int]: List of all chat IDs for the character.
    """
    log_counter("fetch_all_chat_ids_attempt", labels={"character_id": character_id})
    start_time = time.time()
    try:
        chats = get_character_chats(character_id=character_id)
        chat_ids = [chat['id'] for chat in chats]
        fetch_duration = time.time() - start_time
        log_histogram("fetch_all_chat_ids_duration", fetch_duration)
        log_counter("fetch_all_chat_ids_success", labels={"character_id": character_id, "chat_count": len(chat_ids)})
        return chat_ids
    except Exception as e:
        log_counter("fetch_all_chat_ids_error", labels={"character_id": character_id, "error": str(e)})
        logging.error(f"Error fetching all chat IDs for character {character_id}: {str(e)}")
        return []

#
# End of Chat RAG
############################################################################################################


# Function to preprocess and store all existing content in the database
# def preprocess_all_content(database, create_contextualized=True, api_name="gpt-3.5-turbo"):
#     unprocessed_media = get_unprocessed_media()
#     total_media = len(unprocessed_media)
#
#     for index, row in enumerate(unprocessed_media, 1):
#         media_id, content, media_type, file_name = row
#         collection_name = f"{media_type}_{media_id}"
#
#         logger.info(f"Processing media {index} of {total_media}: ID {media_id}, Type {media_type}")
#
#         try:
#             process_and_store_content(
#                 database=database,
#                 content=content,
#                 collection_name=collection_name,
#                 media_id=media_id,
#                 file_name=file_name or f"{media_type}_{media_id}",
#                 create_embeddings=True,
#                 create_contextualized=create_contextualized,
#                 api_name=api_name
#             )
#
#             # Mark the media as processed in the database
#             mark_media_as_processed(database, media_id)
#
#             logger.info(f"Successfully processed media ID {media_id}")
#         except Exception as e:
#             logger.error(f"Error processing media ID {media_id}: {str(e)}")
#
#     logger.info("Finished preprocessing all unprocessed content")

############################################################################################################
#
# ElasticSearch Retriever

# https://github.com/langchain-ai/langchain/tree/44e3e2391c48bfd0a8e6a20adde0b6567f4f43c3/templates/rag-elasticsearch
#
# https://github.com/langchain-ai/langchain/tree/44e3e2391c48bfd0a8e6a20adde0b6567f4f43c3/templates/rag-self-query
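
# A minimal sketch of what an Elasticsearch-backed retriever for this module
# could look like. Illustrative only: it assumes the elasticsearch 8.x Python
# client, a local cluster at http://localhost:9200, and an index whose
# documents carry 'content' and 'media_id' fields -- none of which exist in
# this project yet. The result shape mirrors perform_full_text_search.
#
# from elasticsearch import Elasticsearch
#
# def perform_elasticsearch_search(query: str, index: str = "media", top_k: int = 10) -> List[Dict[str, Any]]:
#     es = Elasticsearch("http://localhost:9200")
#     resp = es.search(index=index, query={"match": {"content": query}}, size=top_k)
#     return [
#         {
#             "content": hit["_source"]["content"],
#             "metadata": {"media_id": hit["_source"].get("media_id")},
#         }
#         for hit in resp["hits"]["hits"]
#     ]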

#
# End of RAG_Library_2.py
############################################################################################################