oceansweep committed on
Commit
f71d2e6
1 Parent(s): 59c0160

Upload 13 files

Browse files
App_Function_Libraries/Gradio_UI/Book_Ingestion_tab.py CHANGED
@@ -8,76 +8,17 @@
8
  #
9
  ####################
10
  # Imports
11
- import tempfile
12
- import os
13
- import zipfile
14
  #
15
  # External Imports
16
  import gradio as gr
17
  #
18
  # Local Imports
19
- from App_Function_Libraries.Gradio_UI.Import_Functionality import import_data
20
- from App_Function_Libraries.Books.Book_Ingestion_Lib import epub_to_markdown
21
  #
22
  ########################################################################################################################
23
  #
24
  # Functions:
25
 
26
def import_epub(epub_file, title, author, keywords, system_prompt, user_prompt, auto_summarize, api_name, api_key):
    """
    Convert an uploaded EPUB to Markdown and ingest it via import_data.

    Args:
        epub_file: Path string, os.PathLike, or an upload object exposing
            ``.name`` or ``.path`` (Gradio hands back different shapes).
        title, author, keywords: Metadata forwarded to import_data.
        system_prompt, user_prompt: Prompts forwarded to import_data.
        auto_summarize: Whether import_data should auto-summarize.
        api_name, api_key: Summarization API selection/credentials.

    Returns:
        The result of import_data on success, or an error string on failure
        (errors are reported as strings, never raised, so the UI shows them).
    """
    try:
        # Temporary working directory; cleaned up automatically on exit.
        with tempfile.TemporaryDirectory() as temp_dir:
            # Resolve a filesystem path from the various objects Gradio may pass.
            if isinstance(epub_file, (str, os.PathLike)):
                epub_path = epub_file
            elif hasattr(epub_file, 'name'):
                epub_path = epub_file.name
            elif hasattr(epub_file, 'path'):
                epub_path = epub_file.path
            else:
                raise ValueError("Unsupported file object type")

            # Convert EPUB to Markdown.
            content = epub_to_markdown(epub_path)

            # Keep an on-disk copy for inspection, but ingest the in-memory
            # content directly. The previous implementation wrote the markdown
            # to disk and immediately read it back — a redundant round-trip
            # that could also alter newlines via text-mode translation.
            md_path = os.path.join(temp_dir, "converted.md")
            with open(md_path, "w", encoding="utf-8") as md_file:
                md_file.write(content)

            return import_data(content, title, author, keywords, system_prompt,
                               user_prompt, auto_summarize, api_name, api_key)
    except Exception as e:
        return f"Error processing EPUB: {str(e)}"
58
-
59
-
60
def process_zip_file(zip_file, title, author, keywords, system_prompt, user_prompt, auto_summarize, api_name, api_key):
    """
    Extract a .zip archive and import every contained .epub via import_epub.

    Args:
        zip_file: Path string, os.PathLike, or an upload object exposing
            ``.name`` or ``.path``.
        Remaining arguments are forwarded unchanged to import_epub for each
        extracted EPUB.

    Returns:
        str: One "File: <name> - <result>" line per imported EPUB,
        joined with newlines (empty string when the archive holds no EPUBs).
    """
    results = []
    with tempfile.TemporaryDirectory() as temp_dir:
        # Resolve a filesystem path; mirror import_epub so plain string/PathLike
        # paths are accepted too (previously they raised ValueError here).
        if isinstance(zip_file, (str, os.PathLike)):
            zip_path = zip_file
        elif hasattr(zip_file, 'name'):
            zip_path = zip_file.name
        elif hasattr(zip_file, 'path'):
            zip_path = zip_file.path
        else:
            raise ValueError("Unsupported zip file object type")

        with zipfile.ZipFile(zip_path, 'r') as zip_ref:
            zip_ref.extractall(temp_dir)

        # Only top-level .epub entries are processed; nested directories in the
        # archive are ignored (os.listdir is not recursive).
        for filename in os.listdir(temp_dir):
            if filename.lower().endswith('.epub'):
                file_path = os.path.join(temp_dir, filename)
                result = import_epub(file_path, title, author, keywords, system_prompt,
                                     user_prompt, auto_summarize, api_name, api_key)
                # Report which file produced each result so multi-file
                # imports are traceable in the status box.
                results.append(f"File: {filename} - {result}")

    return "\n".join(results)
81
 
82
 
83
  def create_import_book_tab():
@@ -87,7 +28,7 @@ def create_import_book_tab():
87
  gr.Markdown("# Import .epub files")
88
  gr.Markdown("Upload a single .epub file or a .zip file containing multiple .epub files")
89
  gr.Markdown(
90
- "How to remove DRM from your ebooks: https://www.reddit.com/r/Calibre/comments/1ck4w8e/2024_guide_on_removing_drm_from_kobo_kindle_ebooks/")
91
  import_file = gr.File(label="Upload file for import", file_types=[".epub", ".zip"])
92
  title_input = gr.Textbox(label="Title", placeholder="Enter the title of the content (for single files)")
93
  author_input = gr.Textbox(label="Author", placeholder="Enter the author's name (for single files)")
@@ -121,23 +62,34 @@ def create_import_book_tab():
121
  label="API for Auto-summarization"
122
  )
123
  api_key_input = gr.Textbox(label="API Key", type="password")
 
 
 
 
 
 
 
 
124
  import_button = gr.Button("Import eBook(s)")
125
  with gr.Column():
126
  with gr.Row():
127
- import_output = gr.Textbox(label="Import Status")
128
-
129
def import_file_handler(file, title, author, keywords, system_prompt, user_prompt, auto_summarize, api_name, api_key):
    """
    Dispatch an uploaded file to the matching importer by extension.

    Routes .epub to import_epub and .zip to process_zip_file; anything else
    (including no file at all) yields a user-facing error string.

    Returns:
        str: Import status message for display in the UI.
    """
    # Guard: the import button can be clicked before any file is chosen,
    # in which case Gradio passes None and `file.name` would raise.
    if file is None:
        return "No file uploaded. Please upload an .epub file or a .zip file containing .epub files."
    if file.name.lower().endswith('.epub'):
        return import_epub(file, title, author, keywords, system_prompt, user_prompt, auto_summarize, api_name, api_key)
    elif file.name.lower().endswith('.zip'):
        return process_zip_file(file, title, author, keywords, system_prompt, user_prompt, auto_summarize, api_name, api_key)
    else:
        return "Unsupported file type. Please upload an .epub file or a .zip file containing .epub files."
136
 
137
  import_button.click(
138
  fn=import_file_handler,
139
- inputs=[import_file, title_input, author_input, keywords_input, system_prompt_input,
140
- custom_prompt_input, auto_summarize_checkbox, api_name_input, api_key_input],
 
 
 
 
 
 
 
 
 
 
 
141
  outputs=import_output
142
  )
143
 
 
8
  #
9
  ####################
10
  # Imports
 
 
 
11
  #
12
  # External Imports
13
  import gradio as gr
14
  #
15
  # Local Imports
16
+ from App_Function_Libraries.Books.Book_Ingestion_Lib import process_zip_file, import_epub, import_file_handler
 
17
  #
18
  ########################################################################################################################
19
  #
20
  # Functions:
21
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
22
 
23
 
24
  def create_import_book_tab():
 
28
  gr.Markdown("# Import .epub files")
29
  gr.Markdown("Upload a single .epub file or a .zip file containing multiple .epub files")
30
  gr.Markdown(
31
+ "🔗 **How to remove DRM from your ebooks:** [Reddit Guide](https://www.reddit.com/r/Calibre/comments/1ck4w8e/2024_guide_on_removing_drm_from_kobo_kindle_ebooks/)")
32
  import_file = gr.File(label="Upload file for import", file_types=[".epub", ".zip"])
33
  title_input = gr.Textbox(label="Title", placeholder="Enter the title of the content (for single files)")
34
  author_input = gr.Textbox(label="Author", placeholder="Enter the author's name (for single files)")
 
62
  label="API for Auto-summarization"
63
  )
64
  api_key_input = gr.Textbox(label="API Key", type="password")
65
+
66
+ # Chunking options
67
+ max_chunk_size = gr.Slider(minimum=100, maximum=2000, value=500, step=50, label="Max Chunk Size")
68
+ chunk_overlap = gr.Slider(minimum=0, maximum=500, value=200, step=10, label="Chunk Overlap")
69
+ custom_chapter_pattern = gr.Textbox(label="Custom Chapter Pattern (optional)",
70
+ placeholder="Enter a custom regex pattern for chapter detection")
71
+
72
+
73
  import_button = gr.Button("Import eBook(s)")
74
  with gr.Column():
75
  with gr.Row():
76
+ import_output = gr.Textbox(label="Import Status", lines=10, interactive=False)
 
 
 
 
 
 
 
 
77
 
78
  import_button.click(
79
  fn=import_file_handler,
80
+ inputs=[
81
+ import_file,
82
+ title_input,
83
+ author_input,
84
+ keywords_input,
85
+ custom_prompt_input,
86
+ auto_summarize_checkbox,
87
+ api_name_input,
88
+ api_key_input,
89
+ max_chunk_size,
90
+ chunk_overlap,
91
+ custom_chapter_pattern
92
+ ],
93
  outputs=import_output
94
  )
95
 
App_Function_Libraries/Gradio_UI/Character_Chat_tab.py CHANGED
@@ -1,8 +1,10 @@
1
- # Character_Interaction_Library_3.py
2
  # Description: Library for character card import functions
3
  #
4
  # Imports
 
5
  import re
 
6
  import uuid
7
  from datetime import datetime
8
  import json
@@ -13,9 +15,13 @@ from typing import Dict, Any, Optional, List, Tuple, Union, cast
13
  #
14
  # External Imports
15
  from PIL import Image
 
16
  import gradio as gr
17
  #
18
  # Local Imports
 
 
 
19
  from App_Function_Libraries.Chat import chat
20
  from App_Function_Libraries.DB.Character_Chat_DB import (
21
  add_character_card,
@@ -35,62 +41,9 @@ from App_Function_Libraries.Utils.Utils import sanitize_user_input
35
  #
36
  # Functions:
37
 
38
-
39
  #################################################################################
40
  #
41
- # Placeholder functions:
42
-
43
def replace_placeholders(text: str, char_name: str, user_name: str) -> str:
    """
    Substitute character-card template placeholders with concrete names.

    Args:
        text (str): Text that may contain ``{{char}}``, ``{{user}}`` or
            ``{{random_user}}`` tokens.
        char_name (str): Value substituted for ``{{char}}``.
        user_name (str): Value substituted for ``{{user}}`` and
            ``{{random_user}}``.

    Returns:
        str: The text with every placeholder occurrence replaced.
    """
    substitutions = (
        ('{{char}}', char_name),
        ('{{user}}', user_name),
        # random_user is treated the same as the user for simplicity.
        ('{{random_user}}', user_name),
    )
    result = text
    for token, value in substitutions:
        result = result.replace(token, value)
    return result
65
-
66
def replace_user_placeholder(history, user_name):
    """
    Replace every '{{user}}' token in a chat history with the user's name.

    Args:
        history (list): Chat history as (user_message, bot_message) tuples;
            either element may be None.
        user_name (str): Name to substitute; falls back to "User" when empty.

    Returns:
        list: A new history list with placeholders replaced.
    """
    # Default name when the user left the name field blank.
    name = user_name if user_name else "User"
    return [
        (
            user_msg.replace("{{user}}", name) if user_msg else user_msg,
            bot_msg.replace("{{user}}", name) if bot_msg else bot_msg,
        )
        for user_msg, bot_msg in history
    ]
90
-
91
- #
92
- # End of Placeholder functions
93
- #################################################################################
94
 
95
  def import_character_card(file):
96
  if file is None:
@@ -100,7 +53,7 @@ def import_character_card(file):
100
  if file.name.lower().endswith(('.png', '.webp')):
101
  json_data = extract_json_from_image(file)
102
  if not json_data:
103
- return None, gr.update(), "No JSON data found in the image."
104
  elif file.name.lower().endswith('.json'):
105
  with open(file.name, 'r', encoding='utf-8') as f:
106
  json_data = f.read()
@@ -109,7 +62,7 @@ def import_character_card(file):
109
 
110
  card_data = import_character_card_json(json_data)
111
  if not card_data:
112
- return None, gr.update(), "Failed to parse character card JSON."
113
 
114
  # Save image data for PNG/WebP files
115
  if file.name.lower().endswith(('.png', '.webp')):
@@ -131,6 +84,7 @@ def import_character_card(file):
131
  logging.error(f"Error importing character card: {e}")
132
  return None, gr.update(), f"Error importing character card: {e}"
133
 
 
134
  def import_character_card_json(json_content: str) -> Optional[Dict[str, Any]]:
135
  try:
136
  json_content = json_content.strip()
@@ -148,68 +102,7 @@ def import_character_card_json(json_content: str) -> Optional[Dict[str, Any]]:
148
  logging.error(f"Unexpected error parsing JSON: {e}")
149
  return None
150
 
151
def extract_json_from_image(image_file):
    """
    Extract embedded character-card JSON from an image file.

    Tries two strategies in order:
      1. Read the 'chara' key from the image metadata (``img.info``) and
         base64-decode it — the standard character-card embedding.
      2. Fall back to scanning the re-encoded PNG bytes for the outermost
         '{' ... '}' span and validating it as JSON.

    Args:
        image_file: File-like object with a ``.name`` attribute (e.g. a
            Gradio upload) that PIL can open.

    Returns:
        str or None: The JSON text if found, otherwise None. All failures
        are logged and swallowed; this function never raises.
    """
    logging.debug(f"Attempting to extract JSON from image: {image_file.name}")
    try:
        with Image.open(image_file) as img:
            logging.debug("Image opened successfully")
            metadata = img.info
            if 'chara' in metadata:
                logging.debug("Found 'chara' in image metadata")
                chara_content = metadata['chara']
                logging.debug(f"Content of 'chara' metadata (first 100 chars): {chara_content[:100]}...")
                try:
                    decoded_content = base64.b64decode(chara_content).decode('utf-8')
                    logging.debug(f"Decoded content (first 100 chars): {decoded_content[:100]}...")
                    return decoded_content
                except Exception as e:
                    # Decode failure falls through to the byte-scan fallback below.
                    logging.error(f"Error decoding base64 content: {e}")

            logging.warning("'chara' not found in metadata, attempting to find JSON data in image bytes")
            # Alternative method to extract embedded JSON from image bytes if metadata is not available
            img_byte_arr = io.BytesIO()
            img.save(img_byte_arr, format='PNG')
            img_bytes = img_byte_arr.getvalue()
            img_str = img_bytes.decode('latin1')  # Use 'latin1' to preserve byte values

            # Search for JSON-like structures in the image bytes: take the
            # span from the first '{' to the last '}' and validate it.
            json_start = img_str.find('{')
            json_end = img_str.rfind('}')
            if json_start != -1 and json_end != -1 and json_end > json_start:
                possible_json = img_str[json_start:json_end+1]
                try:
                    json.loads(possible_json)
                    logging.debug("Found JSON data in image bytes")
                    return possible_json
                except json.JSONDecodeError:
                    logging.debug("No valid JSON found in image bytes")

            logging.warning("No JSON data found in the image")
    except Exception as e:
        logging.error(f"Error extracting JSON from image: {e}")
    # Reached when neither strategy produced JSON or an error occurred.
    return None
191
-
192
-
193
def process_chat_history(chat_history: List[Tuple[str, str]], char_name: str, user_name: str) -> List[Tuple[str, str]]:
    """
    Apply placeholder substitution to both sides of every chat exchange.

    Args:
        chat_history (List[Tuple[str, str]]): (user_message, char_message)
            pairs; either element may be falsy and is then left untouched.
        char_name (str): Character name for ``{{char}}`` substitution.
        user_name (str): User name for ``{{user}}`` substitution.

    Returns:
        List[Tuple[str, str]]: A new history list with placeholders replaced.
    """
    return [
        (
            replace_placeholders(user_msg, char_name, user_name) if user_msg else user_msg,
            replace_placeholders(char_msg, char_name, user_name) if char_msg else char_msg,
        )
        for user_msg, char_msg in chat_history
    ]
213
 
214
  def parse_v2_card(card_data: Dict[str, Any]) -> Optional[Dict[str, Any]]:
215
  try:
@@ -289,166 +182,10 @@ def parse_v1_card(card_data: Dict[str, Any]) -> Dict[str, Any]:
289
 
290
  return v2_data
291
 
292
def extract_character_id(choice: str) -> int:
    """Parse the numeric ID out of a dropdown label like 'Name (ID: 42)'."""
    after_marker = choice.split('(ID: ')[1]
    return int(after_marker.rstrip(')'))
295
-
296
def load_character_wrapper(character_id: int, user_name: str) -> Tuple[Dict[str, Any], List[Tuple[Optional[str], str]], Optional[Image.Image]]:
    """Wrapper: load character data, chat history, and image by numeric ID."""
    # Delegates directly; exists so UI callbacks can bind a stable callable.
    char_data, chat_history, img = load_character_and_image(character_id, user_name)
    return char_data, chat_history, img
300
-
301
def parse_character_book(book_data: Dict[str, Any]) -> Dict[str, Any]:
    """
    Parse the character book data from a V2 character card.

    Required entry fields ('keys', 'content', 'enabled', 'insertion_order')
    raise KeyError if missing; all other fields fall back to defaults.

    Args:
        book_data (Dict[str, Any]): Raw character_book section of the card.

    Returns:
        Dict[str, Any]: Normalized book dict with an 'entries' list.
    """
    entries = []
    for raw_entry in book_data.get('entries', []):
        entries.append({
            # Required fields — intentionally direct-indexed.
            'keys': raw_entry['keys'],
            'content': raw_entry['content'],
            'enabled': raw_entry['enabled'],
            'insertion_order': raw_entry['insertion_order'],
            # Optional fields with spec defaults.
            'extensions': raw_entry.get('extensions', {}),
            'case_sensitive': raw_entry.get('case_sensitive', False),
            'name': raw_entry.get('name', ''),
            'priority': raw_entry.get('priority'),
            'id': raw_entry.get('id'),
            'comment': raw_entry.get('comment', ''),
            'selective': raw_entry.get('selective', False),
            'secondary_keys': raw_entry.get('secondary_keys', []),
            'constant': raw_entry.get('constant', False),
            'position': raw_entry.get('position'),
        })

    return {
        'name': book_data.get('name', ''),
        'description': book_data.get('description', ''),
        'scan_depth': book_data.get('scan_depth'),
        'token_budget': book_data.get('token_budget'),
        'recursive_scanning': book_data.get('recursive_scanning', False),
        'extensions': book_data.get('extensions', {}),
        'entries': entries,
    }
341
-
342
def load_character_and_image(character_id: int, user_name: str) -> Tuple[Optional[Dict[str, Any]], List[Tuple[Optional[str], str]], Optional[Image.Image]]:
    """
    Load a character and its associated image based on the character ID.

    Args:
        character_id (int): The ID of the character to load.
        user_name (str): The name of the user, used for placeholder replacement.

    Returns:
        Tuple[Optional[Dict[str, Any]], List[Tuple[Optional[str], str]], Optional[Image.Image]]:
            A tuple of (character data, initial chat history seeded with the
            character's first message, character image or None). On any
            failure returns (None, [], None) — errors are logged, not raised.
    """
    try:
        char_data = get_character_card_by_id(character_id)
        if not char_data:
            logging.warning(f"No character data found for ID: {character_id}")
            return None, [], None

        # Replace placeholders in character data.
        # NOTE: mutates char_data in place; callers receive the substituted dict.
        for field in ['first_mes', 'mes_example', 'scenario', 'description', 'personality']:
            if field in char_data:
                char_data[field] = replace_placeholders(char_data[field], char_data['name'], user_name)

        # Replace placeholders in first_mes (with a default greeting fallback).
        first_mes = char_data.get('first_mes', "Hello! I'm ready to chat.")
        first_mes = replace_placeholders(first_mes, char_data['name'], user_name)

        # Seed the chat with the character's opening message (no user turn).
        chat_history = [(None, first_mes)] if first_mes else []

        img = None
        if char_data.get('image'):
            # 'image' is stored as a base64-encoded string in the DB record.
            try:
                image_data = base64.b64decode(char_data['image'])
                img = Image.open(io.BytesIO(image_data)).convert("RGBA")
            except Exception as e:
                # A broken image should not prevent loading the character itself.
                logging.error(f"Error processing image for character '{char_data['name']}': {e}")

        return char_data, chat_history, img

    except Exception as e:
        logging.error(f"Error in load_character_and_image: {e}")
        return None, [], None
384
-
385
def load_chat_and_character(chat_id: int, user_name: str) -> Tuple[Optional[Dict[str, Any]], List[Tuple[str, str]], Optional[Image.Image]]:
    """
    Load a chat and its associated character, including the character image
    and placeholder-processed templates.

    Args:
        chat_id (int): The ID of the chat to load.
        user_name (str): The name of the user, used for placeholder replacement.

    Returns:
        Tuple[Optional[Dict[str, Any]], List[Tuple[str, str]], Optional[Image.Image]]:
            (character data, processed chat history, character image or None).
            Returns (None, [], None) on failure; if the chat exists but its
            character is missing, returns (None, raw chat history, None).
    """
    try:
        # Load the chat record.
        # Renamed from `chat` — that name shadowed the imported chat() function
        # from App_Function_Libraries.Chat within this scope.
        chat_record = get_character_chat_by_id(chat_id)
        if not chat_record:
            logging.warning(f"No chat found with ID: {chat_id}")
            return None, [], None

        # Load the associated character.
        character_id = chat_record['character_id']
        char_data = get_character_card_by_id(character_id)
        if not char_data:
            logging.warning(f"No character found for chat ID: {chat_id}")
            return None, chat_record['chat_history'], None

        # Replace placeholders throughout the stored conversation.
        processed_history = process_chat_history(chat_record['chat_history'], char_data['name'], user_name)

        # Load the character image (stored base64-encoded); image failures
        # are logged but do not abort the load.
        img = None
        if char_data.get('image'):
            try:
                image_data = base64.b64decode(char_data['image'])
                img = Image.open(io.BytesIO(image_data)).convert("RGBA")
            except Exception as e:
                logging.error(f"Error processing image for character '{char_data['name']}': {e}")

        # Process character data templates (mutates char_data in place).
        for field in ['first_mes', 'mes_example', 'scenario', 'description', 'personality']:
            if field in char_data:
                char_data[field] = replace_placeholders(char_data[field], char_data['name'], user_name)

        return char_data, processed_history, img

    except Exception as e:
        logging.error(f"Error in load_chat_and_character: {e}")
        return None, [], None
433
-
434
-
435
def load_chat_history(file):
    """
    Parse an exported chat-history JSON file.

    Accepts either {'history': ..., 'character': ...} or the alternative
    {'messages': ..., 'character_name': ...} key naming.

    Args:
        file: Binary file-like object whose contents are UTF-8 JSON.

    Returns:
        tuple: (history, character_name) on success, (None, None) on any
        failure or when either required field is missing (errors are logged).
    """
    try:
        chat_data = json.loads(file.read().decode('utf-8'))

        # Accept either naming convention for the two required fields.
        history = chat_data.get('history') or chat_data.get('messages')
        character_name = chat_data.get('character') or chat_data.get('character_name')

        if history and character_name:
            return history, character_name

        logging.error("Chat history or character name missing in the imported file.")
    except Exception as e:
        logging.error(f"Error loading chat history: {e}")
    return None, None
452
 
453
  ####################################################
454
  #
@@ -507,6 +244,8 @@ def create_character_card_interaction_tab():
507
  chat_history = gr.Chatbot(label="Conversation", height=800)
508
  user_input = gr.Textbox(label="Your message")
509
  send_message_button = gr.Button("Send Message")
 
 
510
  regenerate_button = gr.Button("Regenerate Last Message")
511
  clear_chat_button = gr.Button("Clear Chat")
512
  save_snapshot_button = gr.Button("Save Chat Snapshot")
@@ -893,12 +632,201 @@ def create_character_card_interaction_tab():
893
  else:
894
  return "Failed to update chat."
895
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
896
  # Define States for conversation_id and media_content, which are required for saving chat history
897
  conversation_id = gr.State(str(uuid.uuid4()))
898
  media_content = gr.State({})
899
 
900
  # Button Callbacks
901
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
902
  import_card_button.click(
903
  fn=import_character_card,
904
  inputs=[character_card_upload],
@@ -1035,28 +963,40 @@ def create_character_card_interaction_tab():
1035
 
1036
 
1037
  def create_character_chat_mgmt_tab():
1038
- with gr.TabItem("Chat Management"):
1039
- gr.Markdown("# Chat Management")
1040
 
1041
  with gr.Row():
1042
- # Search Section
1043
  with gr.Column(scale=1):
1044
- gr.Markdown("## Search Conversations or Characters")
1045
- search_query = gr.Textbox(label="Search Conversations or Characters", placeholder="Enter search keywords")
1046
- search_button = gr.Button("Search")
1047
- search_results = gr.Dropdown(label="Search Results", choices=[], visible=False)
1048
- search_status = gr.Markdown("", visible=True)
 
 
 
1049
 
1050
- # Select Character and Chat Section
1051
- with gr.Column(scale=1):
1052
- gr.Markdown("## Select Character and Associated Chats")
1053
  characters = get_character_cards()
1054
  character_choices = [f"{char['name']} (ID: {char['id']})" for char in characters]
 
1055
  select_character = gr.Dropdown(label="Select Character", choices=character_choices, interactive=True)
1056
- select_chat = gr.Dropdown(label="Select Chat", choices=[], visible=False, interactive=True)
1057
- load_chat_button = gr.Button("Load Selected Chat", visible=False)
 
 
 
 
 
1058
 
1059
  with gr.Row():
 
 
 
1060
  conversation_list = gr.Dropdown(label="Select Conversation or Character", choices=[])
1061
  conversation_mapping = gr.State({})
1062
 
@@ -1072,36 +1012,58 @@ def create_character_chat_mgmt_tab():
1072
 
1073
  # Callback Functions
1074
 
1075
- def search_conversations_or_characters(query):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1076
  if not query.strip():
1077
  return gr.update(choices=[], visible=False), "Please enter a search query."
1078
 
1079
  try:
1080
- # Search Chats using FTS5
1081
- chat_results, chat_message = search_character_chats(query)
 
 
 
 
 
1082
 
1083
  # Format chat results
1084
  formatted_chat_results = [
1085
  f"Chat: {chat['conversation_name']} (ID: {chat['id']})" for chat in chat_results
1086
  ]
1087
 
1088
- # Search Characters using substring match
1089
- characters = get_character_cards()
1090
- filtered_characters = [
1091
- char for char in characters
1092
- if query.lower() in char['name'].lower()
1093
- ]
1094
- formatted_character_results = [
1095
- f"Character: {char['name']} (ID: {char['id']})" for char in filtered_characters
1096
- ]
 
 
 
1097
 
1098
  # Combine results
1099
  all_choices = formatted_chat_results + formatted_character_results
1100
- mapping = {choice: conv['id'] for choice, conv in zip(formatted_chat_results, chat_results)}
1101
- mapping.update({choice: char['id'] for choice, char in zip(formatted_character_results, filtered_characters)})
1102
 
1103
  if all_choices:
1104
- return gr.update(choices=all_choices, visible=True), f"Found {len(all_choices)} result(s) matching '{query}'."
1105
  else:
1106
  return gr.update(choices=[], visible=False), f"No results found for '{query}'."
1107
 
@@ -1265,11 +1227,39 @@ def create_character_chat_mgmt_tab():
1265
  <p><strong>Version:</strong> {character.get('character_version', 'N/A')}</p>
1266
  </div>
1267
  """
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1268
 
1269
  # Register Callback Functions with Gradio Components
1270
  search_button.click(
1271
  fn=search_conversations_or_characters,
1272
- inputs=[search_query],
1273
  outputs=[search_results, search_status]
1274
  )
1275
 
@@ -1292,6 +1282,10 @@ def create_character_chat_mgmt_tab():
1292
  )
1293
 
1294
  select_character.change(
 
 
 
 
1295
  fn=populate_chats,
1296
  inputs=[select_character],
1297
  outputs=[select_chat, search_status]
@@ -1309,10 +1303,875 @@ def create_character_chat_mgmt_tab():
1309
  outputs=[chat_content, chat_preview]
1310
  )
1311
 
 
 
 
 
 
1312
  return (
 
1313
  search_query, search_button, search_results, search_status,
1314
  select_character, select_chat, load_chat_button,
1315
  conversation_list, conversation_mapping,
1316
  chat_content, save_button, delete_button,
1317
- chat_preview, result_message
1318
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Character_Interaction_Library.py
2
  # Description: Library for character card import functions
3
  #
4
  # Imports
5
+ import os
6
  import re
7
+ import tempfile
8
  import uuid
9
  from datetime import datetime
10
  import json
 
15
  #
16
  # External Imports
17
  from PIL import Image
18
+ from PIL.PngImagePlugin import PngInfo
19
  import gradio as gr
20
  #
21
  # Local Imports
22
+ from App_Function_Libraries.Character_Chat.Character_Chat_Lib import validate_character_book, validate_v2_card, \
23
+ replace_placeholders, replace_user_placeholder, extract_json_from_image, parse_character_book, \
24
+ load_chat_and_character, load_chat_history, load_character_and_image, extract_character_id, load_character_wrapper
25
  from App_Function_Libraries.Chat import chat
26
  from App_Function_Libraries.DB.Character_Chat_DB import (
27
  add_character_card,
 
41
  #
42
  # Functions:
43
 
 
44
  #################################################################################
45
  #
46
+ # Character card import functions:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
47
 
48
  def import_character_card(file):
49
  if file is None:
 
53
  if file.name.lower().endswith(('.png', '.webp')):
54
  json_data = extract_json_from_image(file)
55
  if not json_data:
56
+ return None, gr.update(), "No character card data found in the image. This might not be a valid character card image."
57
  elif file.name.lower().endswith('.json'):
58
  with open(file.name, 'r', encoding='utf-8') as f:
59
  json_data = f.read()
 
62
 
63
  card_data = import_character_card_json(json_data)
64
  if not card_data:
65
+ return None, gr.update(), "Failed to parse character card data. The file might not contain valid character information."
66
 
67
  # Save image data for PNG/WebP files
68
  if file.name.lower().endswith(('.png', '.webp')):
 
84
  logging.error(f"Error importing character card: {e}")
85
  return None, gr.update(), f"Error importing character card: {e}"
86
 
87
+
88
  def import_character_card_json(json_content: str) -> Optional[Dict[str, Any]]:
89
  try:
90
  json_content = json_content.strip()
 
102
  logging.error(f"Unexpected error parsing JSON: {e}")
103
  return None
104
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
105
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
106
 
107
  def parse_v2_card(card_data: Dict[str, Any]) -> Optional[Dict[str, Any]]:
108
  try:
 
182
 
183
  return v2_data
184
 
185
+ #
186
+ # End of Character card import functions
187
+ ####################################################
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
188
 
 
 
 
 
189
 
190
  ####################################################
191
  #
 
244
  chat_history = gr.Chatbot(label="Conversation", height=800)
245
  user_input = gr.Textbox(label="Your message")
246
  send_message_button = gr.Button("Send Message")
247
+ answer_for_me_button = gr.Button("Answer for Me")
248
+ continue_talking_button = gr.Button("Continue Talking")
249
  regenerate_button = gr.Button("Regenerate Last Message")
250
  clear_chat_button = gr.Button("Clear Chat")
251
  save_snapshot_button = gr.Button("Save Chat Snapshot")
 
632
  else:
633
  return "Failed to update chat."
634
 
635
def continue_talking(
    history, char_data, api_endpoint, api_key,
    temperature, user_name_val, auto_save
):
    """
    Causes the character to continue the conversation or think out loud.

    Sends an empty user message to the chat backend with a system prompt
    instructing the character to keep talking; appends the generated reply
    to the history as a bot-only turn.

    Args:
        history: Chat history list of (user_message, bot_message) tuples.
        char_data: Character card dict (or falsy if none selected).
        api_endpoint, api_key: Backend selection/credentials for chat().
        temperature: Sampling temperature forwarded to chat().
        user_name_val: User's display name; defaults to "User" when falsy.
        auto_save: When truthy, persists the updated chat via add_character_chat.

    Returns:
        tuple: (updated history, status message string).
    """
    if not char_data:
        return history, "Please select a character first."

    user_name_val = user_name_val or "User"
    char_name = char_data.get('name', 'AI Assistant')

    # Prepare the character's background information
    char_background = f"""
    Name: {char_name}
    Description: {char_data.get('description', 'N/A')}
    Personality: {char_data.get('personality', 'N/A')}
    Scenario: {char_data.get('scenario', 'N/A')}
    """

    # Prepare the system prompt
    system_message = f"""You are roleplaying as {char_name}. {char_data.get('system_prompt', '')}
    If the user does not respond, continue expressing your thoughts or continue the conversation by thinking out loud. If thinking out loud, prefix the message with "Thinking: "."""

    # Prepare chat context
    media_content = {
        'id': char_name,
        'title': char_name,
        'content': char_background,
        'description': char_data.get('description', ''),
        'personality': char_data.get('personality', ''),
        'scenario': char_data.get('scenario', '')
    }
    selected_parts = ['description', 'personality', 'scenario']

    prompt = char_data.get('post_history_instructions', '')

    # Simulate empty user input so the model continues unprompted.
    user_message = ""

    # Generate bot response
    bot_message = chat(
        user_message,
        history,
        media_content,
        selected_parts,
        api_endpoint,
        api_key,
        prompt,
        temperature,
        system_message
    )

    # Replace placeholders in bot message
    bot_message = replace_placeholders(bot_message, char_name, user_name_val)

    # Update history: None user slot marks a bot-only turn.
    history.append((None, bot_message))

    # Auto-save if enabled
    save_status = ""
    if auto_save:
        character_id = char_data.get('id')
        if character_id:
            conversation_name = f"Auto-saved chat {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
            add_character_chat(character_id, conversation_name, history)
            save_status = "Chat auto-saved."
        else:
            save_status = "Character ID not found; chat not saved."

    return history, save_status
707
+
708
def answer_for_me(
    history, char_data, api_endpoint, api_key,
    temperature, user_name_val, auto_save
):
    """
    Generates a likely user response and continues the conversation.

    Two LLM calls: first simulate the user's next message, then generate
    the character's reply to it. Returns (history, save_status).
    """
    if not char_data:
        return history, "Please select a character first."

    speaker = user_name_val or "User"
    char_name = char_data.get('name', 'AI Assistant')

    # Character background used as retrieval context for both chat calls.
    char_background = f"""
    Name: {char_name}
    Description: {char_data.get('description', 'N/A')}
    Personality: {char_data.get('personality', 'N/A')}
    Scenario: {char_data.get('scenario', 'N/A')}
    """

    # First call: the model speaks AS the user, not the character.
    system_message_user = f"""You are simulating the user {speaker}. Based on the conversation so far, generate a natural and appropriate response that {speaker} might say next. The response should fit the context and flow of the conversation. ONLY SPEAK FOR {speaker}."""

    media_content = {
        'id': char_name,
        'title': char_name,
        'content': char_background,
        'description': char_data.get('description', ''),
        'personality': char_data.get('personality', ''),
        'scenario': char_data.get('scenario', ''),
    }
    selected_parts = ['description', 'personality', 'scenario']

    user_response = chat(
        "",  # no new message; infer the user's turn from history
        history,
        media_content,
        selected_parts,
        api_endpoint,
        api_key,
        prompt="",
        temperature=temperature,
        system_message=system_message_user,
    )

    # Provisional turn: user half filled in, bot half pending.
    history.append((user_response, None))

    # Second call: the character answers the simulated user message.
    system_message_bot = f"""You are roleplaying as {char_name}. {char_data.get('system_prompt', '')}"""

    bot_message = chat(
        f"{speaker}: {user_response}",
        history[:-1],  # exclude the provisional turn; it is passed as the new message
        media_content,
        selected_parts,
        api_endpoint,
        api_key,
        prompt=char_data.get('post_history_instructions', ''),
        temperature=temperature,
        system_message=system_message_bot,
    )
    bot_message = replace_placeholders(bot_message, char_name, speaker)

    # Complete the provisional turn with the character's reply.
    history[-1] = (user_response, bot_message)

    save_status = ""
    if auto_save:
        character_id = char_data.get('id')
        if character_id:
            conversation_name = f"Auto-saved chat {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
            add_character_chat(character_id, conversation_name, history)
            save_status = "Chat auto-saved."
        else:
            save_status = "Character ID not found; chat not saved."

    return history, save_status
793
+
794
+
795
  # Define States for conversation_id and media_content, which are required for saving chat history
796
  conversation_id = gr.State(str(uuid.uuid4()))
797
  media_content = gr.State({})
798
 
799
  # Button Callbacks
800
 
801
# Wire the "Answer for Me" and "Continue Talking" buttons to their
# handlers. Both take the same component inputs and update the chat
# history plus the save-status display.
_chat_action_inputs = [
    chat_history,
    character_data,
    api_name_input,
    api_key_input,
    temperature_slider,
    user_name_input,
    auto_save_checkbox,
]

answer_for_me_button.click(
    fn=answer_for_me,
    inputs=_chat_action_inputs,
    outputs=[chat_history, save_status],
)

continue_talking_button.click(
    fn=continue_talking,
    inputs=_chat_action_inputs,
    outputs=[chat_history, save_status],
)
829
+
830
  import_card_button.click(
831
  fn=import_character_card,
832
  inputs=[character_card_upload],
 
963
 
964
 
965
  def create_character_chat_mgmt_tab():
966
+ with gr.TabItem("Character and Chat Management"):
967
+ gr.Markdown("# Character and Chat Management")
968
 
969
  with gr.Row():
970
+ # Left Column: Character Import and Chat Management
971
  with gr.Column(scale=1):
972
+ gr.Markdown("## Import Characters")
973
+ character_files = gr.File(
974
+ label="Upload Character Files (PNG, WEBP, JSON)",
975
+ file_types=[".png", ".webp", ".json"],
976
+ file_count="multiple"
977
+ )
978
+ import_characters_button = gr.Button("Import Characters")
979
+ import_status = gr.Markdown("")
980
 
981
+ # Right Column: Character Selection and Image Display
982
+ with gr.Column(scale=2):
983
+ gr.Markdown("## Select Character")
984
  characters = get_character_cards()
985
  character_choices = [f"{char['name']} (ID: {char['id']})" for char in characters]
986
+ load_characters_button = gr.Button("Load Existing Characters")
987
  select_character = gr.Dropdown(label="Select Character", choices=character_choices, interactive=True)
988
+ character_image = gr.Image(label="Character Image", type="pil", interactive=False)
989
+
990
+ gr.Markdown("## Search Conversations")
991
+ search_query = gr.Textbox(label="Search Conversations", placeholder="Enter search keywords")
992
+ search_button = gr.Button("Search")
993
+ search_results = gr.Dropdown(label="Search Results", choices=[], visible=False)
994
+ search_status = gr.Markdown("", visible=True)
995
 
996
  with gr.Row():
997
+ gr.Markdown("## Chat Management")
998
+ select_chat = gr.Dropdown(label="Select Chat", choices=[], visible=False, interactive=True)
999
+ load_chat_button = gr.Button("Load Selected Chat", visible=False)
1000
  conversation_list = gr.Dropdown(label="Select Conversation or Character", choices=[])
1001
  conversation_mapping = gr.State({})
1002
 
 
1012
 
1013
  # Callback Functions
1014
 
1015
def load_character_image(character_selection):
    """
    Return the PIL image for the selected character, or None.

    `character_selection` is the dropdown string "Name (ID: <int>)".
    Any parsing, lookup, or decoding failure is logged and yields None.
    """
    if not character_selection:
        return None

    try:
        character_id = int(character_selection.split('(ID: ')[1].rstrip(')'))
        character = get_character_card_by_id(character_id)
        if character and 'image' in character:
            # Stored image is base64-encoded; decode into a PIL image.
            raw = base64.b64decode(character['image'])
            return Image.open(io.BytesIO(raw))
    except Exception as e:
        logging.error(f"Error loading character image: {e}")

    return None
1030
+
1031
+ def search_conversations_or_characters(query, selected_character):
1032
  if not query.strip():
1033
  return gr.update(choices=[], visible=False), "Please enter a search query."
1034
 
1035
  try:
1036
+ # Extract character ID from the selected character
1037
+ character_id = None
1038
+ if selected_character:
1039
+ character_id = int(selected_character.split('(ID: ')[1].rstrip(')'))
1040
+
1041
+ # Search Chats using FTS5, filtered by character_id if provided
1042
+ chat_results, chat_message = search_character_chats(query, character_id)
1043
 
1044
  # Format chat results
1045
  formatted_chat_results = [
1046
  f"Chat: {chat['conversation_name']} (ID: {chat['id']})" for chat in chat_results
1047
  ]
1048
 
1049
+ # If no character is selected, also search for characters
1050
+ if not character_id:
1051
+ characters = get_character_cards()
1052
+ filtered_characters = [
1053
+ char for char in characters
1054
+ if query.lower() in char['name'].lower()
1055
+ ]
1056
+ formatted_character_results = [
1057
+ f"Character: {char['name']} (ID: {char['id']})" for char in filtered_characters
1058
+ ]
1059
+ else:
1060
+ formatted_character_results = []
1061
 
1062
  # Combine results
1063
  all_choices = formatted_chat_results + formatted_character_results
 
 
1064
 
1065
  if all_choices:
1066
+ return gr.update(choices=all_choices, visible=True), chat_message
1067
  else:
1068
  return gr.update(choices=[], visible=False), f"No results found for '{query}'."
1069
 
 
1227
  <p><strong>Version:</strong> {character.get('character_version', 'N/A')}</p>
1228
  </div>
1229
  """
1230
def import_multiple_characters(files):
    """
    Import a batch of character-card files and report per-file results.

    Parameters:
        files: list of uploaded file objects (PNG/WEBP/JSON), or None.

    Returns:
        A human-readable multi-line status string ("Import results:" plus
        one "Imported:"/"Failed:" line per file).

    Note: the character dropdown is refreshed by the .then() chain
    registered on the import button. The previous in-function assignment
    to `select_character.choices` was removed: mutating a component
    attribute after render has no effect in Gradio, so it was dead code
    (and performed a redundant database read).
    """
    if not files:
        return "No files provided for character import."

    results = []
    for file in files:
        card, _, message = import_character_card(file)
        if card:
            results.append(f"Imported: {card['name']}")
        else:
            results.append(f"Failed: {file.name} - {message}")

    return "Import results:\n" + "\n".join(results)
1248
+
1249
+ # Register new callback for character import
1250
+ import_characters_button.click(
1251
+ fn=import_multiple_characters,
1252
+ inputs=[character_files],
1253
+ outputs=[import_status]
1254
+ ).then(
1255
+ fn=lambda: gr.update(choices=[f"{char['name']} (ID: {char['id']})" for char in get_character_cards()]),
1256
+ outputs=select_character
1257
+ )
1258
 
1259
  # Register Callback Functions with Gradio Components
1260
  search_button.click(
1261
  fn=search_conversations_or_characters,
1262
+ inputs=[search_query, select_character],
1263
  outputs=[search_results, search_status]
1264
  )
1265
 
 
1282
  )
1283
 
1284
  select_character.change(
1285
+ fn=load_character_image,
1286
+ inputs=[select_character],
1287
+ outputs=[character_image]
1288
+ ).then(
1289
  fn=populate_chats,
1290
  inputs=[select_character],
1291
  outputs=[select_chat, search_status]
 
1303
  outputs=[chat_content, chat_preview]
1304
  )
1305
 
1306
+ load_characters_button.click(
1307
+ fn=lambda: gr.update(choices=[f"{char['name']} (ID: {char['id']})" for char in get_character_cards()]),
1308
+ outputs=select_character
1309
+ )
1310
+
1311
  return (
1312
+ character_files, import_characters_button, import_status,
1313
  search_query, search_button, search_results, search_status,
1314
  select_character, select_chat, load_chat_button,
1315
  conversation_list, conversation_mapping,
1316
  chat_content, save_button, delete_button,
1317
+ chat_preview, result_message, character_image
1318
  )
1319
+
1320
+ def create_custom_character_card_tab():
1321
+ with gr.TabItem("Create a New Character Card"):
1322
+ gr.Markdown("# Create a New Character Card (v2)")
1323
+
1324
+ with gr.Row():
1325
+ with gr.Column():
1326
+ # Input fields for character card data
1327
+ name_input = gr.Textbox(label="Name", placeholder="Enter character name")
1328
+ description_input = gr.TextArea(label="Description", placeholder="Enter character description")
1329
+ personality_input = gr.TextArea(label="Personality", placeholder="Enter character personality")
1330
+ scenario_input = gr.TextArea(label="Scenario", placeholder="Enter character scenario")
1331
+ first_mes_input = gr.TextArea(label="First Message", placeholder="Enter the first message")
1332
+ mes_example_input = gr.TextArea(label="Example Messages", placeholder="Enter example messages")
1333
+ creator_notes_input = gr.TextArea(label="Creator Notes", placeholder="Enter notes for the creator")
1334
+ system_prompt_input = gr.TextArea(label="System Prompt", placeholder="Enter system prompt")
1335
+ post_history_instructions_input = gr.TextArea(label="Post History Instructions", placeholder="Enter post history instructions")
1336
+ alternate_greetings_input = gr.TextArea(
1337
+ label="Alternate Greetings (one per line)",
1338
+ placeholder="Enter alternate greetings, one per line"
1339
+ )
1340
+ tags_input = gr.Textbox(label="Tags", placeholder="Enter tags, separated by commas")
1341
+ creator_input = gr.Textbox(label="Creator", placeholder="Enter creator name")
1342
+ character_version_input = gr.Textbox(label="Character Version", placeholder="Enter character version")
1343
+ extensions_input = gr.TextArea(
1344
+ label="Extensions (JSON)",
1345
+ placeholder="Enter extensions as JSON (optional)"
1346
+ )
1347
+ image_input = gr.Image(label="Character Image", type="pil")
1348
+
1349
+ # Buttons
1350
+ save_button = gr.Button("Save Character Card")
1351
+ download_button = gr.Button("Download Character Card")
1352
+ download_image_button = gr.Button("Download Character Card as Image")
1353
+
1354
+ # Output status and outputs
1355
+ save_status = gr.Markdown("")
1356
+ download_output = gr.File(label="Download Character Card", interactive=False)
1357
+ download_image_output = gr.File(label="Download Character Card as Image", interactive=False)
1358
+
1359
+ # Import PngInfo
1360
+ from PIL.PngImagePlugin import PngInfo
1361
+
1362
+ # Callback Functions
1363
def build_character_card(
    name, description, personality, scenario, first_mes, mes_example,
    creator_notes, system_prompt, post_history_instructions,
    alternate_greetings_str, tags_str, creator, character_version,
    extensions_str
):
    """
    Assemble a Character Card V2 dict from raw form-field strings.

    Multiline / CSV / JSON fields are parsed leniently: blank greeting
    lines and empty tags are dropped, and invalid extensions JSON falls
    back to an empty dict (with the error logged).
    """
    # One greeting per non-blank line.
    greetings = [
        line.strip()
        for line in alternate_greetings_str.strip().split('\n')
        if line.strip()
    ]

    # Comma-separated tags; empties discarded.
    tags = [tag.strip() for tag in tags_str.strip().split(',') if tag.strip()]

    # Extensions are free-form JSON; tolerate bad input.
    if extensions_str.strip():
        try:
            extensions = json.loads(extensions_str)
        except json.JSONDecodeError as e:
            logging.error(f"Error parsing extensions JSON: {e}")
            extensions = {}
    else:
        extensions = {}

    # Shape mandated by the Character Card V2 spec.
    return {
        'spec': 'chara_card_v2',
        'spec_version': '2.0',
        'data': {
            'name': name,
            'description': description,
            'personality': personality,
            'scenario': scenario,
            'first_mes': first_mes,
            'mes_example': mes_example,
            'creator_notes': creator_notes,
            'system_prompt': system_prompt,
            'post_history_instructions': post_history_instructions,
            'alternate_greetings': greetings,
            'tags': tags,
            'creator': creator,
            'character_version': character_version,
            'extensions': extensions,
        },
    }
1404
+
1405
def validate_character_card_data(character_card):
    """
    Validate a card dict against the V2 spec.

    Thin wrapper over validate_v2_card; returns (is_valid, messages).
    """
    return validate_v2_card(character_card)
1411
+
1412
def save_character_card(
    name, description, personality, scenario, first_mes, mes_example,
    creator_notes, system_prompt, post_history_instructions,
    alternate_greetings_str, tags_str, creator, character_version,
    extensions_str, image
):
    """
    Build, validate, and persist a character card.

    Returns a status string; on validation failure the card is not
    saved and the failure messages are returned instead.
    """
    character_card = build_character_card(
        name, description, personality, scenario, first_mes, mes_example,
        creator_notes, system_prompt, post_history_instructions,
        alternate_greetings_str, tags_str, creator, character_version,
        extensions_str
    )

    is_valid, validation_messages = validate_character_card_data(character_card)
    if not is_valid:
        return "Character card validation failed:\n" + "\n".join(validation_messages)

    # Embed the portrait as base64-encoded PNG inside the card data.
    if image:
        buf = io.BytesIO()
        image.save(buf, format='PNG')
        character_card['data']['image'] = base64.b64encode(buf.getvalue()).decode('utf-8')

    character_id = add_character_card(character_card['data'])
    if character_id:
        return f"Character card '{name}' saved successfully."
    return f"Failed to save character card '{name}'. It may already exist."
1446
+
1447
def download_character_card(
    name, description, personality, scenario, first_mes, mes_example,
    creator_notes, system_prompt, post_history_instructions,
    alternate_greetings_str, tags_str, creator, character_version,
    extensions_str, image
):
    """
    Serialize the card (with optional embedded image) to a temp JSON file.

    Returns (file_path, "") on success, or (gr.update(value=None),
    error_text) when validation fails.
    """
    character_card = build_character_card(
        name, description, personality, scenario, first_mes, mes_example,
        creator_notes, system_prompt, post_history_instructions,
        alternate_greetings_str, tags_str, creator, character_version,
        extensions_str
    )

    is_valid, validation_messages = validate_character_card_data(character_card)
    if not is_valid:
        errors = "Character card validation failed:\n" + "\n".join(validation_messages)
        return gr.update(value=None), errors

    # Embed the portrait as base64-encoded PNG, if supplied.
    if image:
        buf = io.BytesIO()
        image.save(buf, format='PNG')
        character_card['data']['image'] = base64.b64encode(buf.getvalue()).decode('utf-8')

    json_str = json.dumps(character_card, indent=2)

    # delete=False so Gradio can serve the file after this handler returns.
    with tempfile.NamedTemporaryFile(mode='w', delete=False, suffix='.json', encoding='utf-8') as tmp:
        tmp.write(json_str)
        temp_file_path = tmp.name

    return temp_file_path, ""
1485
+
1486
def download_character_card_as_image(
    name, description, personality, scenario, first_mes, mes_example,
    creator_notes, system_prompt, post_history_instructions,
    alternate_greetings_str, tags_str, creator, character_version,
    extensions_str, image
):
    """
    Export the card as a PNG with the JSON embedded in a 'chara' text chunk.

    Returns (file_path, "") on success, or (gr.update(value=None),
    error_text) when validation fails. When no portrait is supplied a
    blank 512x512 white image is used as the carrier.
    """
    character_card = build_character_card(
        name, description, personality, scenario, first_mes, mes_example,
        creator_notes, system_prompt, post_history_instructions,
        alternate_greetings_str, tags_str, creator, character_version,
        extensions_str
    )

    is_valid, validation_messages = validate_character_card_data(character_card)
    if not is_valid:
        errors = "Character card validation failed:\n" + "\n".join(validation_messages)
        return gr.update(value=None), errors

    # Conventional character-card embedding: base64 JSON under 'chara'.
    chara_content = base64.b64encode(
        json.dumps(character_card, indent=2).encode('utf-8')
    ).decode('utf-8')
    png_info = PngInfo()
    png_info.add_text('chara', chara_content)

    img = image.copy() if image else Image.new('RGB', (512, 512), color='white')

    # delete=False so Gradio can serve the file after this handler returns.
    with tempfile.NamedTemporaryFile(mode='wb', delete=False, suffix='.png') as tmp:
        img.save(tmp, format='PNG', pnginfo=png_info)
        temp_file_path = tmp.name

    return temp_file_path, ""
1532
+
1533
+ # Include the validate_v2_card function here (from previous code)
1534
+
1535
# Button Callbacks — all three actions consume the same form fields.
_card_inputs = [
    name_input, description_input, personality_input, scenario_input,
    first_mes_input, mes_example_input, creator_notes_input, system_prompt_input,
    post_history_instructions_input, alternate_greetings_input, tags_input,
    creator_input, character_version_input, extensions_input, image_input,
]

save_button.click(
    fn=save_character_card,
    inputs=_card_inputs,
    outputs=[save_status],
)

download_button.click(
    fn=download_character_card,
    inputs=_card_inputs,
    outputs=[download_output, save_status],
)

download_image_button.click(
    fn=download_character_card_as_image,
    inputs=_card_inputs,
    outputs=[download_image_output, save_status],
)
1568
+
1569
+ #v1
1570
+ def create_character_card_validation_tab():
1571
+ with gr.TabItem("Validate Character Card"):
1572
+ gr.Markdown("# Validate Character Card (v2)")
1573
+ gr.Markdown("Upload a character card (PNG, WEBP, or JSON) to validate whether it conforms to the Character Card V2 specification.")
1574
+
1575
+ with gr.Row():
1576
+ with gr.Column():
1577
+ # File uploader
1578
+ file_upload = gr.File(
1579
+ label="Upload Character Card (PNG, WEBP, JSON)",
1580
+ file_types=[".png", ".webp", ".json"]
1581
+ )
1582
+ # Validation button
1583
+ validate_button = gr.Button("Validate Character Card")
1584
+ # Output area for validation results
1585
+ validation_output = gr.Markdown("")
1586
+
1587
+ # Callback Functions
1588
def validate_character_card(file):
    """
    Validate an uploaded card file (PNG/WEBP with embedded JSON, or raw JSON).

    Returns a human-readable result string: success, per-field validation
    failures, or an error description.
    """
    if file is None:
        return "No file provided for validation."

    try:
        lower_name = file.name.lower()
        if lower_name.endswith(('.png', '.webp')):
            # Card JSON is embedded in the image metadata.
            json_data = extract_json_from_image(file)
            if not json_data:
                return "Failed to extract JSON data from the image. The image might not contain embedded character card data."
        elif lower_name.endswith('.json'):
            with open(file.name, 'r', encoding='utf-8') as f:
                json_data = f.read()
        else:
            return "Unsupported file type. Please upload a PNG, WEBP, or JSON file."

        try:
            card_data = json.loads(json_data)
        except json.JSONDecodeError as e:
            return f"JSON decoding error: {e}"

        is_valid, validation_messages = validate_v2_card(card_data)
        if is_valid:
            return "Character card is valid according to the V2 specification."
        return "Character card validation failed:\n" + "\n".join(validation_messages)

    except Exception as e:
        logging.error(f"Error validating character card: {e}")
        return f"An unexpected error occurred during validation: {e}"
1623
+ return f"An unexpected error occurred during validation: {e}"
1624
+
1625
def validate_v2_card(card_data):
    """
    Validate a character card against the Character Card V2 specification.

    Args:
        card_data (dict): The parsed character card data.

    Returns:
        Tuple[bool, List[str]]: (is_valid, list of validation error messages).

    Fix: the original emitted TWO differently-worded errors for a non-dict
    'extensions' field ("must be of type 'dict'" from the optional-field
    loop, plus a redundant standalone "must be a dictionary" check). The
    redundant standalone check was removed so each problem is reported once.
    """
    validation_messages = []

    # --- Top-level envelope ---
    if 'spec' not in card_data:
        validation_messages.append("Missing 'spec' field.")
    elif card_data['spec'] != 'chara_card_v2':
        validation_messages.append(f"Invalid 'spec' value: {card_data['spec']}. Expected 'chara_card_v2'.")

    if 'spec_version' not in card_data:
        validation_messages.append("Missing 'spec_version' field.")
    else:
        # 'spec_version' is a number carried as a string; must be >= 2.0.
        try:
            spec_version = float(card_data['spec_version'])
            if spec_version < 2.0:
                validation_messages.append(f"'spec_version' must be '2.0' or higher. Found '{card_data['spec_version']}'.")
        except ValueError:
            validation_messages.append(f"Invalid 'spec_version' format: {card_data['spec_version']}. Must be a number as a string.")

    if 'data' not in card_data:
        validation_messages.append("Missing 'data' field.")
        return False, validation_messages  # Cannot validate further without 'data'.

    data = card_data['data']

    # --- Required non-empty string fields ---
    required_fields = ['name', 'description', 'personality', 'scenario', 'first_mes', 'mes_example']
    for field in required_fields:
        if field not in data:
            validation_messages.append(f"Missing required field in 'data': '{field}'.")
        elif not isinstance(data[field], str):
            validation_messages.append(f"Field '{field}' must be a string.")
        elif not data[field].strip():
            validation_messages.append(f"Field '{field}' cannot be empty.")

    # --- Optional fields with expected types ---
    optional_fields = {
        'creator_notes': str,
        'system_prompt': str,
        'post_history_instructions': str,
        'alternate_greetings': list,
        'tags': list,
        'creator': str,
        'character_version': str,
        'extensions': dict,
        'character_book': dict,  # if present, must be a dict
    }

    for field, expected_type in optional_fields.items():
        if field in data:
            if not isinstance(data[field], expected_type):
                validation_messages.append(f"Field '{field}' must be of type '{expected_type.__name__}'.")
            elif field == 'extensions':
                # Spec asks for namespaced extension keys to avoid collisions.
                for key in data[field].keys():
                    if '/' not in key and '_' not in key:
                        validation_messages.append(f"Extension key '{key}' in 'extensions' should be namespaced to prevent conflicts.")

    # Element-level checks on list fields (only when the type check passed).
    if 'alternate_greetings' in data and isinstance(data['alternate_greetings'], list):
        for idx, greeting in enumerate(data['alternate_greetings']):
            if not isinstance(greeting, str) or not greeting.strip():
                validation_messages.append(f"Element {idx} in 'alternate_greetings' must be a non-empty string.")

    if 'tags' in data and isinstance(data['tags'], list):
        for idx, tag in enumerate(data['tags']):
            if not isinstance(tag, str) or not tag.strip():
                validation_messages.append(f"Element {idx} in 'tags' must be a non-empty string.")

    # Delegate lorebook validation when present.
    if 'character_book' in data:
        is_valid_book, book_messages = validate_character_book(data['character_book'])
        if not is_valid_book:
            validation_messages.extend(book_messages)

    is_valid = len(validation_messages) == 0
    return is_valid, validation_messages
1717
+
1718
# Button Callback — run validation on the uploaded file and show the result.
validate_button.click(
    fn=validate_character_card,
    inputs=[file_upload],
    outputs=[validation_output],
)
1724
+ # v2-not-working-on-export-def create_character_card_validation_tab():
1725
+ # with gr.TabItem("Validate and Edit Character Card"):
1726
+ # gr.Markdown("# Validate and Edit Character Card (v2)")
1727
+ # gr.Markdown("Upload a character card (PNG, WEBP, or JSON) to validate and modify it.")
1728
+ #
1729
+ # with gr.Row():
1730
+ # with gr.Column():
1731
+ # # File uploader
1732
+ # file_upload = gr.File(
1733
+ # label="Upload Character Card (PNG, WEBP, JSON)",
1734
+ # file_types=[".png", ".webp", ".json"]
1735
+ # )
1736
+ # # Validation button
1737
+ # validate_button = gr.Button("Validate and Load Character Card")
1738
+ # # Output area for validation results
1739
+ # validation_output = gr.Markdown("")
1740
+ #
1741
+ # # Input fields for character card data (duplicated from the create tab)
1742
+ # with gr.Row():
1743
+ # with gr.Column():
1744
+ # name_input = gr.Textbox(label="Name", placeholder="Enter character name")
1745
+ # description_input = gr.TextArea(label="Description", placeholder="Enter character description")
1746
+ # personality_input = gr.TextArea(label="Personality", placeholder="Enter character personality")
1747
+ # scenario_input = gr.TextArea(label="Scenario", placeholder="Enter character scenario")
1748
+ # first_mes_input = gr.TextArea(label="First Message", placeholder="Enter the first message")
1749
+ # mes_example_input = gr.TextArea(label="Example Messages", placeholder="Enter example messages")
1750
+ # creator_notes_input = gr.TextArea(label="Creator Notes", placeholder="Enter notes for the creator")
1751
+ # system_prompt_input = gr.TextArea(label="System Prompt", placeholder="Enter system prompt")
1752
+ # post_history_instructions_input = gr.TextArea(label="Post History Instructions", placeholder="Enter post history instructions")
1753
+ # alternate_greetings_input = gr.TextArea(
1754
+ # label="Alternate Greetings (one per line)",
1755
+ # placeholder="Enter alternate greetings, one per line"
1756
+ # )
1757
+ # tags_input = gr.Textbox(label="Tags", placeholder="Enter tags, separated by commas")
1758
+ # creator_input = gr.Textbox(label="Creator", placeholder="Enter creator name")
1759
+ # character_version_input = gr.Textbox(label="Character Version", placeholder="Enter character version")
1760
+ # extensions_input = gr.TextArea(
1761
+ # label="Extensions (JSON)",
1762
+ # placeholder="Enter extensions as JSON (optional)"
1763
+ # )
1764
+ # image_input = gr.Image(label="Character Image", type="pil")
1765
+ #
1766
+ # # Buttons
1767
+ # save_button = gr.Button("Save Character Card")
1768
+ # download_button = gr.Button("Download Character Card")
1769
+ # download_image_button = gr.Button("Download Character Card as Image")
1770
+ #
1771
+ # # Output status and outputs
1772
+ # save_status = gr.Markdown("")
1773
+ # download_output = gr.File(label="Download Character Card", interactive=False)
1774
+ # download_image_output = gr.File(label="Download Character Card as Image", interactive=False)
1775
+ #
1776
+ # # Callback Functions
1777
+ # def extract_json_from_image(file):
1778
+ # try:
1779
+ # image = Image.open(file.name)
1780
+ # if "chara" in image.info:
1781
+ # json_data = image.info["chara"]
1782
+ # # Decode base64 if necessary
1783
+ # try:
1784
+ # json_data = base64.b64decode(json_data).decode('utf-8')
1785
+ # except Exception:
1786
+ # pass # Assume it's already in plain text
1787
+ # return json_data
1788
+ # else:
1789
+ # return None
1790
+ # except Exception as e:
1791
+ # logging.error(f"Error extracting JSON from image: {e}")
1792
+ # return None
1793
+ #
1794
+ # def validate_v2_card(card_data):
1795
+ # """
1796
+ # Validate a character card according to the V2 specification.
1797
+ #
1798
+ # Args:
1799
+ # card_data (dict): The parsed character card data.
1800
+ #
1801
+ # Returns:
1802
+ # Tuple[bool, List[str]]: A tuple containing a boolean indicating validity and a list of validation messages.
1803
+ # """
1804
+ # validation_messages = []
1805
+ #
1806
+ # # Check top-level fields
1807
+ # if 'spec' not in card_data:
1808
+ # validation_messages.append("Missing 'spec' field.")
1809
+ # elif card_data['spec'] != 'chara_card_v2':
1810
+ # validation_messages.append(f"Invalid 'spec' value: {card_data['spec']}. Expected 'chara_card_v2'.")
1811
+ #
1812
+ # if 'spec_version' not in card_data:
1813
+ # validation_messages.append("Missing 'spec_version' field.")
1814
+ # else:
1815
+ # # Ensure 'spec_version' is '2.0' or higher
1816
+ # try:
1817
+ # spec_version = float(card_data['spec_version'])
1818
+ # if spec_version < 2.0:
1819
+ # validation_messages.append(
1820
+ # f"'spec_version' must be '2.0' or higher. Found '{card_data['spec_version']}'.")
1821
+ # except ValueError:
1822
+ # validation_messages.append(
1823
+ # f"Invalid 'spec_version' format: {card_data['spec_version']}. Must be a number as a string.")
1824
+ #
1825
+ # if 'data' not in card_data:
1826
+ # validation_messages.append("Missing 'data' field.")
1827
+ # return False, validation_messages # Cannot proceed without 'data' field
1828
+ #
1829
+ # data = card_data['data']
1830
+ #
1831
+ # # Required fields in 'data'
1832
+ # required_fields = ['name', 'description', 'personality', 'scenario', 'first_mes', 'mes_example']
1833
+ # for field in required_fields:
1834
+ # if field not in data:
1835
+ # validation_messages.append(f"Missing required field in 'data': '{field}'.")
1836
+ # elif not isinstance(data[field], str):
1837
+ # validation_messages.append(f"Field '{field}' must be a string.")
1838
+ # elif not data[field].strip():
1839
+ # validation_messages.append(f"Field '{field}' cannot be empty.")
1840
+ #
1841
+ # # Optional fields with expected types
1842
+ # optional_fields = {
1843
+ # 'creator_notes': str,
1844
+ # 'system_prompt': str,
1845
+ # 'post_history_instructions': str,
1846
+ # 'alternate_greetings': list,
1847
+ # 'tags': list,
1848
+ # 'creator': str,
1849
+ # 'character_version': str,
1850
+ # 'extensions': dict,
1851
+ # 'character_book': dict # If present, should be a dict
1852
+ # }
1853
+ #
1854
+ # for field, expected_type in optional_fields.items():
1855
+ # if field in data:
1856
+ # if not isinstance(data[field], expected_type):
1857
+ # validation_messages.append(f"Field '{field}' must be of type '{expected_type.__name__}'.")
1858
+ # elif field == 'extensions':
1859
+ # # Validate that extensions keys are properly namespaced
1860
+ # for key in data[field].keys():
1861
+ # if '/' not in key and '_' not in key:
1862
+ # validation_messages.append(
1863
+ # f"Extension key '{key}' in 'extensions' should be namespaced to prevent conflicts.")
1864
+ #
1865
+ # # If 'alternate_greetings' is present, check that it's a list of non-empty strings
1866
+ # if 'alternate_greetings' in data and isinstance(data['alternate_greetings'], list):
1867
+ # for idx, greeting in enumerate(data['alternate_greetings']):
1868
+ # if not isinstance(greeting, str) or not greeting.strip():
1869
+ # validation_messages.append(
1870
+ # f"Element {idx} in 'alternate_greetings' must be a non-empty string.")
1871
+ #
1872
+ # # If 'tags' is present, check that it's a list of non-empty strings
1873
+ # if 'tags' in data and isinstance(data['tags'], list):
1874
+ # for idx, tag in enumerate(data['tags']):
1875
+ # if not isinstance(tag, str) or not tag.strip():
1876
+ # validation_messages.append(f"Element {idx} in 'tags' must be a non-empty string.")
1877
+ #
1878
+ # # Validate 'extensions' field
1879
+ # if 'extensions' in data and not isinstance(data['extensions'], dict):
1880
+ # validation_messages.append("Field 'extensions' must be a dictionary.")
1881
+ #
1882
+ # # Validate 'character_book' if present
1883
+ # # (Assuming you have a validate_character_book function)
1884
+ # # if 'character_book' in data:
1885
+ # # is_valid_book, book_messages = validate_character_book(data['character_book'])
1886
+ # # if not is_valid_book:
1887
+ # # validation_messages.extend(book_messages)
1888
+ #
1889
+ # is_valid = len(validation_messages) == 0
1890
+ # return is_valid, validation_messages
1891
+ #
1892
+ # # Include the save_character_card, download_character_card, and download_character_card_as_image functions
1893
+ # def save_character_card(
1894
+ # name, description, personality, scenario, first_mes, mes_example,
1895
+ # creator_notes, system_prompt, post_history_instructions,
1896
+ # alternate_greetings_str, tags_str, creator, character_version,
1897
+ # extensions_str, image
1898
+ # ):
1899
+ # # Build the character card
1900
+ # character_card = build_character_card(
1901
+ # name, description, personality, scenario, first_mes, mes_example,
1902
+ # creator_notes, system_prompt, post_history_instructions,
1903
+ # alternate_greetings_str, tags_str, creator, character_version,
1904
+ # extensions_str
1905
+ # )
1906
+ #
1907
+ # # Validate the character card
1908
+ # is_valid, validation_messages = validate_v2_card(character_card)
1909
+ # if not is_valid:
1910
+ # # Return validation errors
1911
+ # validation_output = "Character card validation failed:\n"
1912
+ # validation_output += "\n".join(validation_messages)
1913
+ # return validation_output
1914
+ #
1915
+ # # If image is provided, encode it to base64
1916
+ # if image:
1917
+ # img_byte_arr = io.BytesIO()
1918
+ # image.save(img_byte_arr, format='PNG')
1919
+ # character_card['data']['image'] = base64.b64encode(img_byte_arr.getvalue()).decode('utf-8')
1920
+ #
1921
+ # # Save character card to database
1922
+ # character_id = add_character_card(character_card['data'])
1923
+ # if character_id:
1924
+ # return f"Character card '{name}' saved successfully."
1925
+ # else:
1926
+ # return f"Failed to save character card '{name}'. It may already exist."
1927
+ #
1928
+ # def download_character_card(
1929
+ # name, description, personality, scenario, first_mes, mes_example,
1930
+ # creator_notes, system_prompt, post_history_instructions,
1931
+ # alternate_greetings_str, tags_str, creator, character_version,
1932
+ # extensions_str, image
1933
+ # ):
1934
+ # # Build the character card
1935
+ # character_card = build_character_card(
1936
+ # name, description, personality, scenario, first_mes, mes_example,
1937
+ # creator_notes, system_prompt, post_history_instructions,
1938
+ # alternate_greetings_str, tags_str, creator, character_version,
1939
+ # extensions_str
1940
+ # )
1941
+ #
1942
+ # # Validate the character card
1943
+ # is_valid, validation_messages = validate_v2_card(character_card)
1944
+ # if not is_valid:
1945
+ # # Return validation errors
1946
+ # validation_output = "Character card validation failed:\n"
1947
+ # validation_output += "\n".join(validation_messages)
1948
+ # return gr.update(value=None), validation_output # Return None for the file output
1949
+ #
1950
+ # # If image is provided, include it as base64
1951
+ # if image:
1952
+ # img_byte_arr = io.BytesIO()
1953
+ # image.save(img_byte_arr, format='PNG')
1954
+ # character_card['data']['image'] = base64.b64encode(img_byte_arr.getvalue()).decode('utf-8')
1955
+ #
1956
+ # # Convert to JSON string
1957
+ # json_str = json.dumps(character_card, indent=2)
1958
+ #
1959
+ # # Write the JSON to a temporary file
1960
+ # with tempfile.NamedTemporaryFile(mode='w', delete=False, suffix='.json', encoding='utf-8') as temp_file:
1961
+ # temp_file.write(json_str)
1962
+ # temp_file_path = temp_file.name
1963
+ #
1964
+ # # Return the file path and clear validation output
1965
+ # return temp_file_path, ""
1966
+ #
1967
+ # def download_character_card_as_image(
1968
+ # name, description, personality, scenario, first_mes, mes_example,
1969
+ # creator_notes, system_prompt, post_history_instructions,
1970
+ # alternate_greetings_str, tags_str, creator, character_version,
1971
+ # extensions_str, image
1972
+ # ):
1973
+ # # Build the character card
1974
+ # character_card = build_character_card(
1975
+ # name, description, personality, scenario, first_mes, mes_example,
1976
+ # creator_notes, system_prompt, post_history_instructions,
1977
+ # alternate_greetings_str, tags_str, creator, character_version,
1978
+ # extensions_str
1979
+ # )
1980
+ #
1981
+ # # Validate the character card
1982
+ # is_valid, validation_messages = validate_v2_card(character_card)
1983
+ # if not is_valid:
1984
+ # # Return validation errors
1985
+ # validation_output = "Character card validation failed:\n"
1986
+ # validation_output += "\n".join(validation_messages)
1987
+ # return gr.update(value=None), validation_output # Return None for the file output
1988
+ #
1989
+ # # Convert the character card JSON to a string
1990
+ # json_str = json.dumps(character_card, indent=2)
1991
+ #
1992
+ # # Encode the JSON string to base64
1993
+ # chara_content = base64.b64encode(json_str.encode('utf-8')).decode('utf-8')
1994
+ #
1995
+ # # Create PNGInfo object to hold metadata
1996
+ # png_info = PngInfo()
1997
+ # png_info.add_text('chara', chara_content)
1998
+ #
1999
+ # # If image is provided, use it; otherwise, create a blank image
2000
+ # if image:
2001
+ # img = image.copy()
2002
+ # else:
2003
+ # # Create a default blank image
2004
+ # img = Image.new('RGB', (512, 512), color='white')
2005
+ #
2006
+ # # Save the image to a temporary file with metadata
2007
+ # with tempfile.NamedTemporaryFile(mode='wb', delete=False, suffix='.png') as temp_file:
2008
+ # img.save(temp_file, format='PNG', pnginfo=png_info)
2009
+ # temp_file_path = temp_file.name
2010
+ #
2011
+ # # Return the file path and clear validation output
2012
+ # return temp_file_path, ""
2013
+ #
2014
+ # def build_character_card(
2015
+ # name, description, personality, scenario, first_mes, mes_example,
2016
+ # creator_notes, system_prompt, post_history_instructions,
2017
+ # alternate_greetings_str, tags_str, creator, character_version,
2018
+ # extensions_str
2019
+ # ):
2020
+ # # Parse alternate_greetings from multiline string
2021
+ # alternate_greetings = [line.strip() for line in alternate_greetings_str.strip().split('\n') if line.strip()]
2022
+ #
2023
+ # # Parse tags from comma-separated string
2024
+ # tags = [tag.strip() for tag in tags_str.strip().split(',') if tag.strip()]
2025
+ #
2026
+ # # Parse extensions from JSON string
2027
+ # try:
2028
+ # extensions = json.loads(extensions_str) if extensions_str.strip() else {}
2029
+ # except json.JSONDecodeError as e:
2030
+ # extensions = {}
2031
+ # logging.error(f"Error parsing extensions JSON: {e}")
2032
+ #
2033
+ # # Build the character card dictionary according to V2 spec
2034
+ # character_card = {
2035
+ # 'spec': 'chara_card_v2',
2036
+ # 'spec_version': '2.0',
2037
+ # 'data': {
2038
+ # 'name': name,
2039
+ # 'description': description,
2040
+ # 'personality': personality,
2041
+ # 'scenario': scenario,
2042
+ # 'first_mes': first_mes,
2043
+ # 'mes_example': mes_example,
2044
+ # 'creator_notes': creator_notes,
2045
+ # 'system_prompt': system_prompt,
2046
+ # 'post_history_instructions': post_history_instructions,
2047
+ # 'alternate_greetings': alternate_greetings,
2048
+ # 'tags': tags,
2049
+ # 'creator': creator,
2050
+ # 'character_version': character_version,
2051
+ # 'extensions': extensions,
2052
+ # }
2053
+ # }
2054
+ # return character_card
2055
+ #
2056
+ # def validate_and_load_character_card(file):
2057
+ # if file is None:
2058
+ # return ["No file provided for validation."] + [gr.update() for _ in range(15)]
2059
+ #
2060
+ # try:
2061
+ # if file.name.lower().endswith(('.png', '.webp')):
2062
+ # json_data = extract_json_from_image(file)
2063
+ # if not json_data:
2064
+ # return ["Failed to extract JSON data from the image."] + [gr.update() for _ in range(15)]
2065
+ # elif file.name.lower().endswith('.json'):
2066
+ # with open(file.name, 'r', encoding='utf-8') as f:
2067
+ # json_data = f.read()
2068
+ # else:
2069
+ # return ["Unsupported file type."] + [gr.update() for _ in range(15)]
2070
+ #
2071
+ # # Parse the JSON content
2072
+ # try:
2073
+ # card_data = json.loads(json_data)
2074
+ # except json.JSONDecodeError as e:
2075
+ # return [f"JSON decoding error: {e}"] + [gr.update() for _ in range(15)]
2076
+ #
2077
+ # # Validate the character card
2078
+ # is_valid, validation_messages = validate_v2_card(card_data)
2079
+ #
2080
+ # # Prepare the validation output
2081
+ # if is_valid:
2082
+ # validation_output_msg = "Character card is valid according to the V2 specification."
2083
+ # else:
2084
+ # validation_output_msg = "Character card validation failed:\n" + "\n".join(validation_messages)
2085
+ #
2086
+ # # Extract data to populate input fields
2087
+ # data = card_data.get('data', {})
2088
+ #
2089
+ # # Handle image data
2090
+ # if 'image' in data:
2091
+ # # Decode base64 image
2092
+ # image_data = base64.b64decode(data['image'])
2093
+ # image = Image.open(io.BytesIO(image_data))
2094
+ # else:
2095
+ # image = None
2096
+ #
2097
+ # # Prepare values for input fields
2098
+ # alternate_greetings_str = "\n".join(data.get('alternate_greetings', []))
2099
+ # tags_str = ", ".join(data.get('tags', []))
2100
+ # extensions_str = json.dumps(data.get('extensions', {}), indent=2) if data.get('extensions', {}) else ""
2101
+ #
2102
+ # outputs = [
2103
+ # validation_output_msg,
2104
+ # data.get('name', ''),
2105
+ # data.get('description', ''),
2106
+ # data.get('personality', ''),
2107
+ # data.get('scenario', ''),
2108
+ # data.get('first_mes', ''),
2109
+ # data.get('mes_example', ''),
2110
+ # data.get('creator_notes', ''),
2111
+ # data.get('system_prompt', ''),
2112
+ # data.get('post_history_instructions', ''),
2113
+ # alternate_greetings_str,
2114
+ # tags_str,
2115
+ # data.get('creator', ''),
2116
+ # data.get('character_version', ''),
2117
+ # extensions_str,
2118
+ # image
2119
+ # ]
2120
+ #
2121
+ # return outputs
2122
+ #
2123
+ # except Exception as e:
2124
+ # logging.error(f"Error validating character card: {e}")
2125
+ # return [f"An unexpected error occurred: {e}"] + [gr.update() for _ in range(15)]
2126
+ #
2127
+ # # Button Callback for validation
2128
+ # validate_button.click(
2129
+ # fn=validate_and_load_character_card,
2130
+ # inputs=[file_upload],
2131
+ # outputs=[
2132
+ # validation_output,
2133
+ # name_input, description_input, personality_input, scenario_input,
2134
+ # first_mes_input, mes_example_input, creator_notes_input, system_prompt_input,
2135
+ # post_history_instructions_input, alternate_greetings_input, tags_input,
2136
+ # creator_input, character_version_input, extensions_input, image_input
2137
+ # ]
2138
+ # )
2139
+ #
2140
+ # # Button Callbacks for save, download, etc.
2141
+ # save_button.click(
2142
+ # fn=save_character_card,
2143
+ # inputs=[
2144
+ # name_input, description_input, personality_input, scenario_input,
2145
+ # first_mes_input, mes_example_input, creator_notes_input, system_prompt_input,
2146
+ # post_history_instructions_input, alternate_greetings_input, tags_input,
2147
+ # creator_input, character_version_input, extensions_input, image_input
2148
+ # ],
2149
+ # outputs=[save_status]
2150
+ # )
2151
+ #
2152
+ # download_button.click(
2153
+ # fn=download_character_card,
2154
+ # inputs=[
2155
+ # name_input, description_input, personality_input, scenario_input,
2156
+ # first_mes_input, mes_example_input, creator_notes_input, system_prompt_input,
2157
+ # post_history_instructions_input, alternate_greetings_input, tags_input,
2158
+ # creator_input, character_version_input, extensions_input, image_input
2159
+ # ],
2160
+ # outputs=[download_output, save_status]
2161
+ # )
2162
+ #
2163
+ # download_image_button.click(
2164
+ # fn=download_character_card_as_image,
2165
+ # inputs=[
2166
+ # name_input, description_input, personality_input, scenario_input,
2167
+ # first_mes_input, mes_example_input, creator_notes_input, system_prompt_input,
2168
+ # post_history_instructions_input, alternate_greetings_input, tags_input,
2169
+ # creator_input, character_version_input, extensions_input, image_input
2170
+ # ],
2171
+ # outputs=[download_image_output, save_status]
2172
+ # )
2173
+
2174
+
2175
+ #
2176
+ # End of Character_Chat_tab.py
2177
+ #######################################################################################################################
App_Function_Libraries/Gradio_UI/Character_interaction_tab.py CHANGED
@@ -24,7 +24,7 @@ from App_Function_Libraries.Gradio_UI.Writing_tab import generate_writing_feedba
24
  ########################################################################################################################
25
  #
26
  # Single-Character chat Functions:
27
-
28
 
29
  def chat_with_character(user_message, history, char_data, api_name_input, api_key):
30
  if char_data is None:
@@ -134,340 +134,13 @@ def load_chat_history(file):
134
  return None, None
135
 
136
 
137
- # FIXME - deprecated keeping until sure no longer needed
138
- # def create_character_card_interaction_tab():
139
- # with gr.TabItem("Chat with a Character Card"):
140
- # gr.Markdown("# Chat with a Character Card")
141
- # with gr.Row():
142
- # with gr.Column(scale=1):
143
- # character_image = gr.Image(label="Character Image", type="filepath")
144
- # character_card_upload = gr.File(label="Upload Character Card")
145
- # import_card_button = gr.Button("Import Character Card")
146
- # load_characters_button = gr.Button("Load Existing Characters")
147
- # from App_Function_Libraries.Chat import get_character_names
148
- # character_dropdown = gr.Dropdown(label="Select Character", choices=get_character_names())
149
- # user_name_input = gr.Textbox(label="Your Name", placeholder="Enter your name here")
150
- # api_name_input = gr.Dropdown(
151
- # choices=["Local-LLM", "OpenAI", "Anthropic", "Cohere", "Groq", "DeepSeek", "Mistral",
152
- # "OpenRouter", "Llama.cpp", "Kobold", "Ooba", "Tabbyapi", "VLLM", "ollama", "HuggingFace",
153
- # "Custom-OpenAI-API"],
154
- # value="HuggingFace",
155
- # # FIXME - make it so the user cant' click `Send Message` without first setting an API + Chatbot
156
- # label="API for Interaction(Mandatory)"
157
- # )
158
- # api_key_input = gr.Textbox(label="API Key (if not set in Config_Files/config.txt)",
159
- # placeholder="Enter your API key here", type="password")
160
- # temperature_slider = gr.Slider(minimum=0.0, maximum=2.0, value=0.7, step=0.05, label="Temperature")
161
- # import_chat_button = gr.Button("Import Chat History")
162
- # chat_file_upload = gr.File(label="Upload Chat History JSON", visible=False)
163
- #
164
- # with gr.Column(scale=2):
165
- # chat_history = gr.Chatbot(label="Conversation", height=800)
166
- # user_input = gr.Textbox(label="Your message")
167
- # send_message_button = gr.Button("Send Message")
168
- # regenerate_button = gr.Button("Regenerate Last Message")
169
- # clear_chat_button = gr.Button("Clear Chat")
170
- # chat_media_name = gr.Textbox(label="Custom Chat Name(optional)", visible=True)
171
- # save_chat_history_to_db = gr.Button("Save Chat History to DataBase")
172
- # save_status = gr.Textbox(label="Save Status", interactive=False)
173
- #
174
- # character_data = gr.State(None)
175
- # user_name = gr.State("")
176
- #
177
- # def import_chat_history(file, current_history, char_data):
178
- # loaded_history, char_name = load_chat_history(file)
179
- # if loaded_history is None:
180
- # return current_history, char_data, "Failed to load chat history."
181
- #
182
- # # Check if the loaded chat is for the current character
183
- # if char_data and char_data.get('name') != char_name:
184
- # return current_history, char_data, f"Warning: Loaded chat is for character '{char_name}', but current character is '{char_data.get('name')}'. Chat not imported."
185
- #
186
- # # If no character is selected, try to load the character from the chat
187
- # if not char_data:
188
- # new_char_data = load_character(char_name)[0]
189
- # if new_char_data:
190
- # char_data = new_char_data
191
- # else:
192
- # return current_history, char_data, f"Warning: Character '{char_name}' not found. Please select the character manually."
193
- #
194
- # return loaded_history, char_data, f"Chat history for '{char_name}' imported successfully."
195
- #
196
- # def import_character(file):
197
- # card_data = import_character_card(file)
198
- # if card_data:
199
- # from App_Function_Libraries.Chat import save_character
200
- # save_character(card_data)
201
- # return card_data, gr.update(choices=get_character_names())
202
- # else:
203
- # return None, gr.update()
204
- #
205
- # def load_character(name):
206
- # from App_Function_Libraries.Chat import load_characters
207
- # characters = load_characters()
208
- # char_data = characters.get(name)
209
- # if char_data:
210
- # first_message = char_data.get('first_mes', "Hello! I'm ready to chat.")
211
- # return char_data, [(None, first_message)] if first_message else [], None
212
- # return None, [], None
213
- #
214
- # def load_character_image(name):
215
- # from App_Function_Libraries.Chat import load_characters
216
- # characters = load_characters()
217
- # char_data = characters.get(name)
218
- # if char_data and 'image_path' in char_data:
219
- # image_path = char_data['image_path']
220
- # if os.path.exists(image_path):
221
- # return image_path
222
- # else:
223
- # logging.warning(f"Image file not found: {image_path}")
224
- # return None
225
- #
226
- # def load_character_and_image(name):
227
- # char_data, chat_history, _ = load_character(name)
228
- # image_path = load_character_image(name)
229
- # logging.debug(f"Character: {name}")
230
- # logging.debug(f"Character data: {char_data}")
231
- # logging.debug(f"Image path: {image_path}")
232
- # return char_data, chat_history, image_path
233
- #
234
- # def character_chat_wrapper(message, history, char_data, api_endpoint, api_key, temperature, user_name):
235
- # logging.debug("Entered character_chat_wrapper")
236
- # if char_data is None:
237
- # return "Please select a character first.", history
238
- #
239
- # if not user_name:
240
- # user_name = "User"
241
- #
242
- # char_name = char_data.get('name', 'AI Assistant')
243
- #
244
- # # Prepare the character's background information
245
- # char_background = f"""
246
- # Name: {char_name}
247
- # Description: {char_data.get('description', 'N/A')}
248
- # Personality: {char_data.get('personality', 'N/A')}
249
- # Scenario: {char_data.get('scenario', 'N/A')}
250
- # """
251
- #
252
- # # Prepare the system prompt for character impersonation
253
- # system_message = f"""You are roleplaying as {char_name}, the character described below. Respond to the user's messages in character, maintaining the personality and background provided. Do not break character or refer to yourself as an AI. Always refer to yourself as "{char_name}" and refer to the user as "{user_name}".
254
- #
255
- # {char_background}
256
- #
257
- # Additional instructions: {char_data.get('post_history_instructions', '')}
258
- # """
259
- #
260
- # # Prepare media_content and selected_parts
261
- # media_content = {
262
- # 'id': char_name,
263
- # 'title': char_name,
264
- # 'content': char_background,
265
- # 'description': char_data.get('description', ''),
266
- # 'personality': char_data.get('personality', ''),
267
- # 'scenario': char_data.get('scenario', '')
268
- # }
269
- # selected_parts = ['description', 'personality', 'scenario']
270
- #
271
- # prompt = char_data.get('post_history_instructions', '')
272
- #
273
- # # Prepare the input for the chat function
274
- # if not history:
275
- # full_message = f"{prompt}\n\n{user_name}: {message}" if prompt else f"{user_name}: {message}"
276
- # else:
277
- # full_message = f"{user_name}: {message}"
278
- #
279
- # # Call the chat function
280
- # bot_message = chat(
281
- # full_message,
282
- # history,
283
- # media_content,
284
- # selected_parts,
285
- # api_endpoint,
286
- # api_key,
287
- # prompt,
288
- # temperature,
289
- # system_message
290
- # )
291
- #
292
- # # Update history
293
- # history.append((message, bot_message))
294
- # return history
295
- #
296
- # def save_chat_history(history, character_name):
297
- # # Create the Saved_Chats folder if it doesn't exist
298
- # save_directory = "Saved_Chats"
299
- # os.makedirs(save_directory, exist_ok=True)
300
- #
301
- # timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
302
- # filename = f"chat_history_{character_name}_{timestamp}.json"
303
- # filepath = os.path.join(save_directory, filename)
304
- #
305
- # chat_data = {
306
- # "character": character_name,
307
- # "timestamp": timestamp,
308
- # "history": history
309
- # }
310
- #
311
- # try:
312
- # with open(filepath, 'w', encoding='utf-8') as f:
313
- # json.dump(chat_data, f, ensure_ascii=False, indent=2)
314
- # return filepath
315
- # except Exception as e:
316
- # return f"Error saving chat: {str(e)}"
317
- #
318
- # def save_current_chat(history, char_data):
319
- # if not char_data or not history:
320
- # return "No chat to save or character not selected."
321
- #
322
- # character_name = char_data.get('name', 'Unknown')
323
- # result = save_chat_history(history, character_name)
324
- # if result.startswith("Error"):
325
- # return result
326
- # return f"Chat saved successfully as {result}"
327
- #
328
- # def regenerate_last_message(history, char_data, api_name, api_key, temperature, user_name):
329
- # if not history:
330
- # return history
331
- #
332
- # last_user_message = history[-1][0]
333
- # new_history = history[:-1]
334
- #
335
- # return character_chat_wrapper(last_user_message, new_history, char_data, api_name, api_key, temperature,
336
- # user_name)
337
- #
338
- # import_chat_button.click(
339
- # fn=lambda: gr.update(visible=True),
340
- # outputs=chat_file_upload
341
- # )
342
- #
343
- # chat_file_upload.change(
344
- # fn=import_chat_history,
345
- # inputs=[chat_file_upload, chat_history, character_data],
346
- # outputs=[chat_history, character_data, save_status]
347
- # )
348
- #
349
- # def update_character_info(name):
350
- # from App_Function_Libraries.Chat import load_characters
351
- # characters = load_characters()
352
- # char_data = characters.get(name)
353
- #
354
- # image_path = char_data.get('image_path') if char_data else None
355
- #
356
- # logging.debug(f"Character: {name}")
357
- # logging.debug(f"Character data: {char_data}")
358
- # logging.debug(f"Image path: {image_path}")
359
- #
360
- # if image_path:
361
- # if os.path.exists(image_path):
362
- # logging.debug(f"Image file exists at {image_path}")
363
- # if os.access(image_path, os.R_OK):
364
- # logging.debug(f"Image file is readable")
365
- # else:
366
- # logging.warning(f"Image file is not readable: {image_path}")
367
- # image_path = None
368
- # else:
369
- # logging.warning(f"Image file does not exist: {image_path}")
370
- # image_path = None
371
- # else:
372
- # logging.warning("No image path provided for the character")
373
- #
374
- # return char_data, None, image_path # Return None for chat_history
375
- #
376
- # def on_character_select(name):
377
- # logging.debug(f"Character selected: {name}")
378
- # return update_character_info_with_error_handling(name)
379
- #
380
- # def clear_chat_history():
381
- # return [], None # Return empty list for chat_history and None for character_data
382
- #
383
- # def update_character_info_with_error_handling(name):
384
- # logging.debug(f"Entering update_character_info_with_error_handling for character: {name}")
385
- # try:
386
- # char_data, _, image_path = update_character_info(name)
387
- # logging.debug(f"Retrieved data: char_data={bool(char_data)}, image_path={image_path}")
388
- #
389
- # if char_data:
390
- # first_message = char_data.get('first_mes', "Hello! I'm ready to chat.")
391
- # chat_history = [(None, first_message)] if first_message else []
392
- # else:
393
- # chat_history = []
394
- #
395
- # logging.debug(f"Created chat_history with length: {len(chat_history)}")
396
- #
397
- # if image_path and os.path.exists(image_path):
398
- # logging.debug(f"Image file exists at {image_path}")
399
- # return char_data, chat_history, image_path
400
- # else:
401
- # logging.warning(f"Image not found or invalid path: {image_path}")
402
- # return char_data, chat_history, None
403
- # except Exception as e:
404
- # logging.error(f"Error updating character info: {str(e)}", exc_info=True)
405
- # return None, [], None
406
- # finally:
407
- # logging.debug("Exiting update_character_info_with_error_handling")
408
- #
409
- # # Define States for conversation_id and media_content, which are required for saving chat history
410
- # conversation_id = gr.State(str(uuid.uuid4()))
411
- # media_content = gr.State({})
412
- #
413
- # import_card_button.click(
414
- # fn=import_character,
415
- # inputs=[character_card_upload],
416
- # outputs=[character_data, character_dropdown]
417
- # )
418
- #
419
- # load_characters_button.click(
420
- # fn=lambda: gr.update(choices=get_character_names()),
421
- # outputs=character_dropdown
422
- # )
423
- #
424
- # clear_chat_button.click(
425
- # fn=clear_chat_history,
426
- # inputs=[],
427
- # outputs=[chat_history, character_data]
428
- # )
429
- #
430
- # character_dropdown.change(
431
- # fn=on_character_select,
432
- # inputs=[character_dropdown],
433
- # outputs=[character_data, chat_history, character_image]
434
- # )
435
- #
436
- # send_message_button.click(
437
- # fn=character_chat_wrapper,
438
- # inputs=[user_input, chat_history, character_data, api_name_input, api_key_input, temperature_slider,
439
- # user_name_input],
440
- # outputs=[chat_history]
441
- # ).then(lambda: "", outputs=user_input)
442
- #
443
- # regenerate_button.click(
444
- # fn=regenerate_last_message,
445
- # inputs=[chat_history, character_data, api_name_input, api_key_input, temperature_slider, user_name_input],
446
- # outputs=[chat_history]
447
- # )
448
- #
449
- # user_name_input.change(
450
- # fn=lambda name: name,
451
- # inputs=[user_name_input],
452
- # outputs=[user_name]
453
- # )
454
- #
455
- # # FIXME - Implement saving chat history to database; look at Chat_UI.py for reference
456
- # save_chat_history_to_db.click(
457
- # save_chat_history_to_db_wrapper,
458
- # inputs=[chat_history, conversation_id, media_content, chat_media_name],
459
- # outputs=[conversation_id, gr.Textbox(label="Save Status")]
460
- # )
461
- #
462
- # return character_data, chat_history, user_input, user_name, character_image
463
-
464
-
465
  #
466
- # End of Character chat tab
467
  ######################################################################################################################
468
  #
469
  # Multi-Character Chat Interface
470
 
 
471
  def character_interaction_setup():
472
  characters = load_characters()
473
  return characters, [], None, None
 
24
  ########################################################################################################################
25
  #
26
  # Single-Character chat Functions:
27
+ # FIXME - add these functions to the Personas library
28
 
29
  def chat_with_character(user_message, history, char_data, api_name_input, api_key):
30
  if char_data is None:
 
134
  return None, None
135
 
136
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
137
  #
138
+ # End of X
139
  ######################################################################################################################
140
  #
141
  # Multi-Character Chat Interface
142
 
143
+ # FIXME - refactor and move these functions to the Character_Chat library so that it uses the same functions
144
  def character_interaction_setup():
145
  characters = load_characters()
146
  return characters, [], None, None
App_Function_Libraries/Gradio_UI/Chat_ui.py CHANGED
@@ -167,6 +167,39 @@ def delete_message_from_chat(message_id, history):
167
  return updated_history
168
 
169
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
170
  def create_chat_interface():
171
  custom_css = """
172
  .chatbot-container .message-wrap .message {
@@ -231,6 +264,7 @@ def create_chat_interface():
231
  chatbot = gr.Chatbot(height=600, elem_classes="chatbot-container")
232
  msg = gr.Textbox(label="Enter your message")
233
  submit = gr.Button("Submit")
 
234
  clear_chat_button = gr.Button("Clear Chat")
235
 
236
  edit_message_id = gr.Number(label="Message ID to Edit", visible=False)
@@ -367,6 +401,12 @@ def create_chat_interface():
367
  outputs=[conversation_id, gr.Textbox(label="Save Status")]
368
  )
369
 
 
 
 
 
 
 
370
  chatbot.select(show_edit_message, None, [edit_message_text, edit_message_id, update_message_button])
371
  chatbot.select(show_delete_message, None, [delete_message_id, delete_message_button])
372
 
@@ -430,6 +470,7 @@ def create_chat_interface_stacked():
430
  with gr.Row():
431
  with gr.Column():
432
  submit = gr.Button("Submit")
 
433
  clear_chat_button = gr.Button("Clear Chat")
434
  chat_media_name = gr.Textbox(label="Custom Chat Name(optional)", visible=True)
435
  save_chat_history_to_db = gr.Button("Save Chat History to DataBase")
@@ -521,6 +562,12 @@ def create_chat_interface_stacked():
521
  outputs=[conversation_id, gr.Textbox(label="Save Status")]
522
  )
523
 
 
 
 
 
 
 
524
 
525
  # FIXME - System prompts
526
  def create_chat_interface_multi_api():
@@ -534,7 +581,7 @@ def create_chat_interface_multi_api():
534
  }
535
  """
536
  with gr.TabItem("One Prompt - Multiple APIs"):
537
- gr.Markdown("# One Prompt but Multiple API Chat Interface")
538
 
539
  with gr.Row():
540
  with gr.Column(scale=1):
@@ -551,40 +598,36 @@ def create_chat_interface_multi_api():
551
  with gr.Column():
552
  preset_prompt = gr.Dropdown(label="Select Preset Prompt", choices=load_preset_prompts(), visible=True)
553
  system_prompt = gr.Textbox(label="System Prompt", value="You are a helpful AI assistant.", lines=5)
554
- user_prompt = gr.Textbox(label="Modify Prompt", lines=5, value=".")
555
 
556
  with gr.Row():
557
  chatbots = []
558
  api_endpoints = []
559
  api_keys = []
560
  temperatures = []
 
561
  for i in range(3):
562
  with gr.Column():
563
  gr.Markdown(f"### Chat Window {i + 1}")
564
  api_endpoint = gr.Dropdown(label=f"API Endpoint {i + 1}",
565
  choices=["Local-LLM", "OpenAI", "Anthropic", "Cohere", "Groq",
566
  "DeepSeek", "Mistral", "OpenRouter", "Llama.cpp", "Kobold",
567
- "Ooba",
568
- "Tabbyapi", "VLLM", "ollama", "HuggingFace"])
569
  api_key = gr.Textbox(label=f"API Key {i + 1} (if required)", type="password")
570
  temperature = gr.Slider(label=f"Temperature {i + 1}", minimum=0.0, maximum=1.0, step=0.05,
571
  value=0.7)
572
  chatbot = gr.Chatbot(height=800, elem_classes="chat-window")
 
573
  chatbots.append(chatbot)
574
  api_endpoints.append(api_endpoint)
575
  api_keys.append(api_key)
576
  temperatures.append(temperature)
 
577
 
578
  with gr.Row():
579
  msg = gr.Textbox(label="Enter your message", scale=4)
580
  submit = gr.Button("Submit", scale=1)
581
- # FIXME - clear chat
582
- # clear_chat_button = gr.Button("Clear Chat")
583
- #
584
- # clear_chat_button.click(
585
- # clear_chat,
586
- # outputs=[chatbot]
587
- # )
588
 
589
  # State variables
590
  chat_history = [gr.State([]) for _ in range(3)]
@@ -601,6 +644,14 @@ def create_chat_interface_multi_api():
601
 
602
  preset_prompt.change(update_user_prompt, inputs=preset_prompt, outputs=user_prompt)
603
 
 
 
 
 
 
 
 
 
604
  def chat_wrapper_multi(message, custom_prompt, system_prompt, *args):
605
  chat_histories = args[:3]
606
  chatbots = args[3:6]
@@ -630,6 +681,46 @@ def create_chat_interface_multi_api():
630
 
631
  return [gr.update(value="")] + new_chatbots + new_chat_histories
632
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
633
  # In the create_chat_interface_multi_api function:
634
  submit.click(
635
  chat_wrapper_multi,
@@ -656,6 +747,7 @@ def create_chat_interface_multi_api():
656
  )
657
 
658
 
 
659
  def create_chat_interface_four():
660
  custom_css = """
661
  .chatbot-container .message-wrap .message {
@@ -687,16 +779,6 @@ def create_chat_interface_four():
687
  chat_interfaces = []
688
 
689
  def create_single_chat_interface(index, user_prompt_component):
690
- """
691
- Creates a single chat interface with its own set of components and event bindings.
692
-
693
- Parameters:
694
- index (int): The index of the chat interface.
695
- user_prompt_component (gr.Textbox): The user prompt textbox component.
696
-
697
- Returns:
698
- dict: A dictionary containing all components of the chat interface.
699
- """
700
  with gr.Column():
701
  gr.Markdown(f"### Chat Window {index + 1}")
702
  api_endpoint = gr.Dropdown(
@@ -721,6 +803,7 @@ def create_chat_interface_four():
721
  chatbot = gr.Chatbot(height=400, elem_classes="chat-window")
722
  msg = gr.Textbox(label=f"Enter your message for Chat {index + 1}")
723
  submit = gr.Button(f"Submit to Chat {index + 1}")
 
724
  clear_chat_button = gr.Button(f"Clear Chat {index + 1}")
725
 
726
  # State to maintain chat history
@@ -734,14 +817,11 @@ def create_chat_interface_four():
734
  'chatbot': chatbot,
735
  'msg': msg,
736
  'submit': submit,
 
737
  'clear_chat_button': clear_chat_button,
738
  'chat_history': chat_history
739
  })
740
 
741
- # # Create four chat interfaces
742
- # for i in range(4):
743
- # create_single_chat_interface(i, user_prompt)
744
-
745
  # Create four chat interfaces arranged in a 2x2 grid
746
  with gr.Row():
747
  for i in range(2):
@@ -749,7 +829,6 @@ def create_chat_interface_four():
749
  for j in range(2):
750
  create_single_chat_interface(i * 2 + j, user_prompt)
751
 
752
-
753
  # Update user_prompt based on preset_prompt selection
754
  preset_prompt.change(
755
  fn=update_user_prompt,
@@ -760,7 +839,6 @@ def create_chat_interface_four():
760
  def chat_wrapper_single(message, chat_history, api_endpoint, api_key, temperature, user_prompt):
761
  logging.debug(f"Chat Wrapper Single - Message: {message}, Chat History: {chat_history}")
762
 
763
- # Call chat_wrapper with the new signature and the additional parameters
764
  new_msg, new_history, _ = chat_wrapper(
765
  message,
766
  chat_history,
@@ -773,13 +851,12 @@ def create_chat_interface_four():
773
  False, # save_conversation
774
  temperature, # temperature
775
  system_prompt="", # system_prompt
776
- max_tokens=None, # Additional parameters with default None values
777
  top_p=None,
778
  frequency_penalty=None,
779
  presence_penalty=None,
780
  stop_sequence=None
781
  )
782
- # Only append to history if the new message was successful (i.e., no error in API response)
783
  if "API request failed" not in new_msg:
784
  chat_history.append((message, new_msg))
785
  else:
@@ -787,9 +864,40 @@ def create_chat_interface_four():
787
 
788
  return "", chat_history, chat_history
789
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
790
  # Attach click events for each chat interface
791
  for interface in chat_interfaces:
792
- logging.debug(f"Chat Interface - Clicked Submit for Chat {interface['chatbot']}"),
793
  interface['submit'].click(
794
  chat_wrapper_single,
795
  inputs=[
@@ -807,7 +915,22 @@ def create_chat_interface_four():
807
  ]
808
  )
809
 
810
- # Bind the clear chat button
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
811
  interface['clear_chat_button'].click(
812
  clear_chat_single,
813
  inputs=[],
 
167
  return updated_history
168
 
169
 
170
+ def regenerate_last_message(history, media_content, selected_parts, api_endpoint, api_key, custom_prompt, temperature, system_prompt):
171
+ if not history:
172
+ return history, "No messages to regenerate."
173
+
174
+ last_entry = history[-1]
175
+ last_user_message, last_bot_message = last_entry
176
+
177
+ if last_bot_message is None:
178
+ return history, "The last message is not from the bot."
179
+
180
+ new_history = history[:-1]
181
+
182
+ if not last_user_message:
183
+ return new_history, "No user message to regenerate the bot response."
184
+
185
+ full_message = last_user_message
186
+
187
+ bot_message = chat(
188
+ full_message,
189
+ new_history,
190
+ media_content,
191
+ selected_parts,
192
+ api_endpoint,
193
+ api_key,
194
+ custom_prompt,
195
+ temperature,
196
+ system_prompt
197
+ )
198
+
199
+ new_history.append((last_user_message, bot_message))
200
+
201
+ return new_history, "Last message regenerated successfully."
202
+
203
  def create_chat_interface():
204
  custom_css = """
205
  .chatbot-container .message-wrap .message {
 
264
  chatbot = gr.Chatbot(height=600, elem_classes="chatbot-container")
265
  msg = gr.Textbox(label="Enter your message")
266
  submit = gr.Button("Submit")
267
+ regenerate_button = gr.Button("Regenerate Last Message")
268
  clear_chat_button = gr.Button("Clear Chat")
269
 
270
  edit_message_id = gr.Number(label="Message ID to Edit", visible=False)
 
401
  outputs=[conversation_id, gr.Textbox(label="Save Status")]
402
  )
403
 
404
+ regenerate_button.click(
405
+ regenerate_last_message,
406
+ inputs=[chatbot, media_content, selected_parts, api_endpoint, api_key, user_prompt, temperature, system_prompt_input],
407
+ outputs=[chatbot, save_status]
408
+ )
409
+
410
  chatbot.select(show_edit_message, None, [edit_message_text, edit_message_id, update_message_button])
411
  chatbot.select(show_delete_message, None, [delete_message_id, delete_message_button])
412
 
 
470
  with gr.Row():
471
  with gr.Column():
472
  submit = gr.Button("Submit")
473
+ regenerate_button = gr.Button("Regenerate Last Message")
474
  clear_chat_button = gr.Button("Clear Chat")
475
  chat_media_name = gr.Textbox(label="Custom Chat Name(optional)", visible=True)
476
  save_chat_history_to_db = gr.Button("Save Chat History to DataBase")
 
562
  outputs=[conversation_id, gr.Textbox(label="Save Status")]
563
  )
564
 
565
+ regenerate_button.click(
566
+ regenerate_last_message,
567
+ inputs=[chatbot, media_content, selected_parts, api_endpoint, api_key, user_prompt, temp, system_prompt],
568
+ outputs=[chatbot, gr.Textbox(label="Regenerate Status")]
569
+ )
570
+
571
 
572
  # FIXME - System prompts
573
  def create_chat_interface_multi_api():
 
581
  }
582
  """
583
  with gr.TabItem("One Prompt - Multiple APIs"):
584
+ gr.Markdown("# One Prompt but Multiple APIs Chat Interface")
585
 
586
  with gr.Row():
587
  with gr.Column(scale=1):
 
598
  with gr.Column():
599
  preset_prompt = gr.Dropdown(label="Select Preset Prompt", choices=load_preset_prompts(), visible=True)
600
  system_prompt = gr.Textbox(label="System Prompt", value="You are a helpful AI assistant.", lines=5)
601
+ user_prompt = gr.Textbox(label="Modify Prompt (Prefixed to your message every time)", lines=5, value="", visible=True)
602
 
603
  with gr.Row():
604
  chatbots = []
605
  api_endpoints = []
606
  api_keys = []
607
  temperatures = []
608
+ regenerate_buttons = []
609
  for i in range(3):
610
  with gr.Column():
611
  gr.Markdown(f"### Chat Window {i + 1}")
612
  api_endpoint = gr.Dropdown(label=f"API Endpoint {i + 1}",
613
  choices=["Local-LLM", "OpenAI", "Anthropic", "Cohere", "Groq",
614
  "DeepSeek", "Mistral", "OpenRouter", "Llama.cpp", "Kobold",
615
+ "Ooba", "Tabbyapi", "VLLM", "ollama", "HuggingFace"])
 
616
  api_key = gr.Textbox(label=f"API Key {i + 1} (if required)", type="password")
617
  temperature = gr.Slider(label=f"Temperature {i + 1}", minimum=0.0, maximum=1.0, step=0.05,
618
  value=0.7)
619
  chatbot = gr.Chatbot(height=800, elem_classes="chat-window")
620
+ regenerate_button = gr.Button(f"Regenerate Last Message {i + 1}")
621
  chatbots.append(chatbot)
622
  api_endpoints.append(api_endpoint)
623
  api_keys.append(api_key)
624
  temperatures.append(temperature)
625
+ regenerate_buttons.append(regenerate_button)
626
 
627
  with gr.Row():
628
  msg = gr.Textbox(label="Enter your message", scale=4)
629
  submit = gr.Button("Submit", scale=1)
630
+ clear_chat_button = gr.Button("Clear All Chats")
 
 
 
 
 
 
631
 
632
  # State variables
633
  chat_history = [gr.State([]) for _ in range(3)]
 
644
 
645
  preset_prompt.change(update_user_prompt, inputs=preset_prompt, outputs=user_prompt)
646
 
647
+
648
+ def clear_all_chats():
649
+ return [[]] * 3 + [[]] * 3
650
+
651
+ clear_chat_button.click(
652
+ clear_all_chats,
653
+ outputs=chatbots + chat_history
654
+ )
655
  def chat_wrapper_multi(message, custom_prompt, system_prompt, *args):
656
  chat_histories = args[:3]
657
  chatbots = args[3:6]
 
681
 
682
  return [gr.update(value="")] + new_chatbots + new_chat_histories
683
 
684
+
685
+ def regenerate_last_message(chat_history, chatbot, media_content, selected_parts, api_endpoint, api_key, custom_prompt, temperature, system_prompt):
686
+ if not chat_history:
687
+ return chatbot, chat_history, "No messages to regenerate."
688
+
689
+ last_entry = chat_history[-1]
690
+ last_user_message, last_bot_message = last_entry
691
+
692
+ if last_bot_message is None:
693
+ return chatbot, chat_history, "The last message is not from the bot."
694
+
695
+ new_history = chat_history[:-1]
696
+
697
+ if not last_user_message:
698
+ return chatbot[:-1], new_history, "No user message to regenerate the bot response."
699
+
700
+ bot_message = chat(
701
+ last_user_message,
702
+ new_history,
703
+ media_content,
704
+ selected_parts,
705
+ api_endpoint,
706
+ api_key,
707
+ custom_prompt,
708
+ temperature,
709
+ system_prompt
710
+ )
711
+
712
+ new_history.append((last_user_message, bot_message))
713
+ new_chatbot = chatbot[:-1] + [(last_user_message, bot_message)]
714
+
715
+ return new_chatbot, new_history, "Last message regenerated successfully."
716
+
717
+ for i in range(3):
718
+ regenerate_buttons[i].click(
719
+ regenerate_last_message,
720
+ inputs=[chat_history[i], chatbots[i], media_content, selected_parts, api_endpoints[i], api_keys[i], user_prompt, temperatures[i], system_prompt],
721
+ outputs=[chatbots[i], chat_history[i], gr.Textbox(label=f"Regenerate Status {i + 1}")]
722
+ )
723
+
724
  # In the create_chat_interface_multi_api function:
725
  submit.click(
726
  chat_wrapper_multi,
 
747
  )
748
 
749
 
750
+
751
  def create_chat_interface_four():
752
  custom_css = """
753
  .chatbot-container .message-wrap .message {
 
779
  chat_interfaces = []
780
 
781
  def create_single_chat_interface(index, user_prompt_component):
 
 
 
 
 
 
 
 
 
 
782
  with gr.Column():
783
  gr.Markdown(f"### Chat Window {index + 1}")
784
  api_endpoint = gr.Dropdown(
 
803
  chatbot = gr.Chatbot(height=400, elem_classes="chat-window")
804
  msg = gr.Textbox(label=f"Enter your message for Chat {index + 1}")
805
  submit = gr.Button(f"Submit to Chat {index + 1}")
806
+ regenerate_button = gr.Button(f"Regenerate Last Message {index + 1}")
807
  clear_chat_button = gr.Button(f"Clear Chat {index + 1}")
808
 
809
  # State to maintain chat history
 
817
  'chatbot': chatbot,
818
  'msg': msg,
819
  'submit': submit,
820
+ 'regenerate_button': regenerate_button,
821
  'clear_chat_button': clear_chat_button,
822
  'chat_history': chat_history
823
  })
824
 
 
 
 
 
825
  # Create four chat interfaces arranged in a 2x2 grid
826
  with gr.Row():
827
  for i in range(2):
 
829
  for j in range(2):
830
  create_single_chat_interface(i * 2 + j, user_prompt)
831
 
 
832
  # Update user_prompt based on preset_prompt selection
833
  preset_prompt.change(
834
  fn=update_user_prompt,
 
839
  def chat_wrapper_single(message, chat_history, api_endpoint, api_key, temperature, user_prompt):
840
  logging.debug(f"Chat Wrapper Single - Message: {message}, Chat History: {chat_history}")
841
 
 
842
  new_msg, new_history, _ = chat_wrapper(
843
  message,
844
  chat_history,
 
851
  False, # save_conversation
852
  temperature, # temperature
853
  system_prompt="", # system_prompt
854
+ max_tokens=None,
855
  top_p=None,
856
  frequency_penalty=None,
857
  presence_penalty=None,
858
  stop_sequence=None
859
  )
 
860
  if "API request failed" not in new_msg:
861
  chat_history.append((message, new_msg))
862
  else:
 
864
 
865
  return "", chat_history, chat_history
866
 
867
+ def regenerate_last_message(chat_history, api_endpoint, api_key, temperature, user_prompt):
868
+ if not chat_history:
869
+ return chat_history, chat_history, "No messages to regenerate."
870
+
871
+ last_user_message, _ = chat_history[-1]
872
+
873
+ new_msg, new_history, _ = chat_wrapper(
874
+ last_user_message,
875
+ chat_history[:-1],
876
+ {}, # Empty media_content
877
+ [], # Empty selected_parts
878
+ api_endpoint,
879
+ api_key,
880
+ user_prompt, # custom_prompt
881
+ None, # conversation_id
882
+ False, # save_conversation
883
+ temperature, # temperature
884
+ system_prompt="", # system_prompt
885
+ max_tokens=None,
886
+ top_p=None,
887
+ frequency_penalty=None,
888
+ presence_penalty=None,
889
+ stop_sequence=None
890
+ )
891
+
892
+ if "API request failed" not in new_msg:
893
+ new_history.append((last_user_message, new_msg))
894
+ return new_history, new_history, "Last message regenerated successfully."
895
+ else:
896
+ logging.error(f"API request failed during regeneration: {new_msg}")
897
+ return chat_history, chat_history, f"Failed to regenerate: {new_msg}"
898
+
899
  # Attach click events for each chat interface
900
  for interface in chat_interfaces:
 
901
  interface['submit'].click(
902
  chat_wrapper_single,
903
  inputs=[
 
915
  ]
916
  )
917
 
918
+ interface['regenerate_button'].click(
919
+ regenerate_last_message,
920
+ inputs=[
921
+ interface['chat_history'],
922
+ interface['api_endpoint'],
923
+ interface['api_key'],
924
+ interface['temperature'],
925
+ user_prompt
926
+ ],
927
+ outputs=[
928
+ interface['chatbot'],
929
+ interface['chat_history'],
930
+ gr.Textbox(label="Regenerate Status")
931
+ ]
932
+ )
933
+
934
  interface['clear_chat_button'].click(
935
  clear_chat_single,
936
  inputs=[],
App_Function_Libraries/Gradio_UI/Embeddings_tab.py CHANGED
@@ -7,23 +7,20 @@ import logging
7
  #
8
  # External Imports
9
  import gradio as gr
 
10
  from tqdm import tqdm
11
-
12
- from App_Function_Libraries.Chunk_Lib import improved_chunking_process, chunk_for_embedding
13
  #
14
  # Local Imports
15
  from App_Function_Libraries.DB.DB_Manager import get_all_content_from_database
16
  from App_Function_Libraries.RAG.ChromaDB_Library import chroma_client, \
17
  store_in_chroma, situate_context
18
  from App_Function_Libraries.RAG.Embeddings_Create import create_embedding, create_embeddings_batch
19
-
20
-
21
  #
22
  ########################################################################################################################
23
  #
24
  # Functions:
25
 
26
- # FIXME - under construction
27
  def create_embeddings_tab():
28
  with gr.TabItem("Create Embeddings"):
29
  gr.Markdown("# Create Embeddings for All Content")
@@ -36,13 +33,36 @@ def create_embeddings_tab():
36
  value="huggingface"
37
  )
38
  gr.Markdown("Note: Local provider requires a running Llama.cpp/llamafile server.")
39
- gr.Markdown("OpenAI provider requires a valid API key. ")
40
- gr.Markdown("OpenAI Embeddings models: `text-embedding-3-small`, `text-embedding-3-large`")
41
- gr.Markdown("HuggingFace provider requires a valid model name, i.e. `dunzhang/stella_en_400M_v5`")
42
- embedding_model = gr.Textbox(
43
- label="Embedding Model",
44
- value="Enter your embedding model name here", lines=3
 
 
 
 
 
 
45
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
46
  embedding_api_url = gr.Textbox(
47
  label="API URL (for local provider)",
48
  value="http://localhost:8080/embedding",
@@ -74,15 +94,32 @@ def create_embeddings_tab():
74
  status_output = gr.Textbox(label="Status", lines=10)
75
 
76
  def update_provider_options(provider):
77
- return gr.update(visible=provider == "local")
 
 
 
 
 
 
 
 
 
 
 
78
 
79
  embedding_provider.change(
80
  fn=update_provider_options,
81
  inputs=[embedding_provider],
82
- outputs=[embedding_api_url]
 
 
 
 
 
 
83
  )
84
 
85
- def create_all_embeddings(provider, model, api_url, method, max_size, overlap, adaptive):
86
  try:
87
  all_content = get_all_content_from_database()
88
  if not all_content:
@@ -98,6 +135,14 @@ def create_embeddings_tab():
98
  collection_name = "all_content_embeddings"
99
  collection = chroma_client.get_or_create_collection(name=collection_name)
100
 
 
 
 
 
 
 
 
 
101
  for item in all_content:
102
  media_id = item['id']
103
  text = item['content']
@@ -133,7 +178,7 @@ def create_embeddings_tab():
133
 
134
  create_button.click(
135
  fn=create_all_embeddings,
136
- inputs=[embedding_provider, embedding_model, embedding_api_url,
137
  chunking_method, max_chunk_size, chunk_overlap, adaptive_chunking],
138
  outputs=status_output
139
  )
@@ -159,13 +204,36 @@ def create_view_embeddings_tab():
159
  value="huggingface"
160
  )
161
  gr.Markdown("Note: Local provider requires a running Llama.cpp/llamafile server.")
162
- gr.Markdown("OpenAI provider requires a valid API key. ")
163
- gr.Markdown("OpenAI Embeddings models: `text-embedding-3-small`, `text-embedding-3-large`")
164
- gr.Markdown("HuggingFace provider requires a valid model name, i.e. `dunzhang/stella_en_400M_v5`")
165
- embedding_model = gr.Textbox(
166
- label="Embedding Model",
167
- value="Enter your embedding model name here", lines=3
 
 
 
 
 
 
168
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
169
  embedding_api_url = gr.Textbox(
170
  label="API URL (for local provider)",
171
  value="http://localhost:8080/embedding",
@@ -222,7 +290,18 @@ def create_view_embeddings_tab():
222
  return gr.update(choices=["Error: Unable to fetch items"]), {}
223
 
224
  def update_provider_options(provider):
225
- return gr.update(visible=provider == "local")
 
 
 
 
 
 
 
 
 
 
 
226
 
227
  def check_embedding_status(selected_item, item_mapping):
228
  if not selected_item:
@@ -255,7 +334,8 @@ def create_view_embeddings_tab():
255
  logging.error(f"Error in check_embedding_status: {str(e)}")
256
  return f"Error processing item: {selected_item}. Details: {str(e)}", "", ""
257
 
258
- def create_new_embedding_for_item(selected_item, provider, model, api_url, method, max_size, overlap, adaptive,
 
259
  item_mapping, use_contextual, contextual_api_choice=None):
260
  if not selected_item:
261
  return "Please select an item", "", ""
@@ -290,13 +370,10 @@ def create_view_embeddings_tab():
290
  texts, ids, metadatas = [], [], []
291
  chunk_count = 0
292
  logging.info("Generating contextual summaries and preparing chunks for embedding")
293
- for i, chunk in tqdm(enumerate(chunks), total=len(chunks), desc="Processing chunks"):
294
  chunk_text = chunk['text']
295
  chunk_metadata = chunk['metadata']
296
- if chunk_count == 0:
297
- chunk_count = 1
298
  if use_contextual:
299
- # Generate contextual summary
300
  logging.debug(f"Generating contextual summary for chunk {chunk_count}")
301
  context = situate_context(contextual_api_choice, item['content'], chunk_text)
302
  contextualized_text = f"{chunk_text}\n\nContextual Summary: {context}"
@@ -305,6 +382,15 @@ def create_view_embeddings_tab():
305
  context = None
306
 
307
  chunk_id = f"doc_{item_id}_chunk_{i}"
 
 
 
 
 
 
 
 
 
308
  metadata = {
309
  "media_id": str(item_id),
310
  "chunk_index": i,
@@ -324,7 +410,7 @@ def create_view_embeddings_tab():
324
  texts.append(contextualized_text)
325
  ids.append(chunk_id)
326
  metadatas.append(metadata)
327
- chunk_count = chunk_count+1
328
 
329
  # Create embeddings in batch
330
  logging.info(f"Creating embeddings for {len(texts)} chunks")
@@ -334,7 +420,12 @@ def create_view_embeddings_tab():
334
  store_in_chroma(collection_name, texts, embeddings, ids, metadatas)
335
 
336
  # Create a preview of the first embedding
337
- embedding_preview = str(embeddings[0][:50]) if embeddings else "No embeddings created"
 
 
 
 
 
338
 
339
  # Return status message
340
  status = f"New embeddings created and stored for item: {item['title']} (ID: {item_id})"
@@ -344,9 +435,10 @@ def create_view_embeddings_tab():
344
  status += " (with contextual summaries)"
345
 
346
  # Return status message, embedding preview, and metadata
347
- return status, f"First 50 elements of new embedding:\n{embedding_preview}", json.dumps(metadatas[0], indent=2)
 
348
  except Exception as e:
349
- logging.error(f"Error in create_new_embedding_for_item: {str(e)}")
350
  return f"Error creating embedding: {str(e)}", "", ""
351
 
352
  refresh_button.click(
@@ -360,7 +452,7 @@ def create_view_embeddings_tab():
360
  )
361
  create_new_embedding_button.click(
362
  create_new_embedding_for_item,
363
- inputs=[item_dropdown, embedding_provider, embedding_model, embedding_api_url,
364
  chunking_method, max_chunk_size, chunk_overlap, adaptive_chunking, item_mapping,
365
  use_contextual_embeddings, contextual_api_choice],
366
  outputs=[embedding_status, embedding_preview, embedding_metadata]
@@ -368,11 +460,16 @@ def create_view_embeddings_tab():
368
  embedding_provider.change(
369
  update_provider_options,
370
  inputs=[embedding_provider],
371
- outputs=[embedding_api_url]
 
 
 
 
 
372
  )
373
 
374
  return (item_dropdown, refresh_button, embedding_status, embedding_preview, embedding_metadata,
375
- create_new_embedding_button, embedding_provider, embedding_model, embedding_api_url,
376
  chunking_method, max_chunk_size, chunk_overlap, adaptive_chunking,
377
  use_contextual_embeddings, contextual_api_choice, contextual_api_key)
378
 
@@ -389,9 +486,11 @@ def create_purge_embeddings_tab():
389
 
390
  def purge_all_embeddings():
391
  try:
 
392
  collection_name = "all_content_embeddings"
393
  chroma_client.delete_collection(collection_name)
394
  chroma_client.create_collection(collection_name)
 
395
  return "All embeddings have been purged successfully."
396
  except Exception as e:
397
  logging.error(f"Error during embedding purge: {str(e)}")
 
7
  #
8
  # External Imports
9
  import gradio as gr
10
+ import numpy as np
11
  from tqdm import tqdm
 
 
12
  #
13
  # Local Imports
14
  from App_Function_Libraries.DB.DB_Manager import get_all_content_from_database
15
  from App_Function_Libraries.RAG.ChromaDB_Library import chroma_client, \
16
  store_in_chroma, situate_context
17
  from App_Function_Libraries.RAG.Embeddings_Create import create_embedding, create_embeddings_batch
18
+ from App_Function_Libraries.Chunk_Lib import improved_chunking_process, chunk_for_embedding
 
19
  #
20
  ########################################################################################################################
21
  #
22
  # Functions:
23
 
 
24
  def create_embeddings_tab():
25
  with gr.TabItem("Create Embeddings"):
26
  gr.Markdown("# Create Embeddings for All Content")
 
33
  value="huggingface"
34
  )
35
  gr.Markdown("Note: Local provider requires a running Llama.cpp/llamafile server.")
36
+ gr.Markdown("OpenAI provider requires a valid API key.")
37
+
38
+ huggingface_model = gr.Dropdown(
39
+ choices=[
40
+ "jinaai/jina-embeddings-v3",
41
+ "Alibaba-NLP/gte-large-en-v1.5",
42
+ "dunzhang/setll_en_400M_v5",
43
+ "custom"
44
+ ],
45
+ label="Hugging Face Model",
46
+ value="jinaai/jina-embeddings-v3",
47
+ visible=True
48
  )
49
+
50
+ openai_model = gr.Dropdown(
51
+ choices=[
52
+ "text-embedding-3-small",
53
+ "text-embedding-3-large"
54
+ ],
55
+ label="OpenAI Embedding Model",
56
+ value="text-embedding-3-small",
57
+ visible=False
58
+ )
59
+
60
+ custom_embedding_model = gr.Textbox(
61
+ label="Custom Embedding Model",
62
+ placeholder="Enter your custom embedding model name here",
63
+ visible=False
64
+ )
65
+
66
  embedding_api_url = gr.Textbox(
67
  label="API URL (for local provider)",
68
  value="http://localhost:8080/embedding",
 
94
  status_output = gr.Textbox(label="Status", lines=10)
95
 
96
  def update_provider_options(provider):
97
+ if provider == "huggingface":
98
+ return gr.update(visible=True), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False)
99
+ elif provider == "local":
100
+ return gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=True)
101
+ else: # OpenAI
102
+ return gr.update(visible=False), gr.update(visible=True), gr.update(visible=False), gr.update(visible=False)
103
+
104
+ def update_huggingface_options(model):
105
+ if model == "custom":
106
+ return gr.update(visible=True)
107
+ else:
108
+ return gr.update(visible=False)
109
 
110
  embedding_provider.change(
111
  fn=update_provider_options,
112
  inputs=[embedding_provider],
113
+ outputs=[huggingface_model, openai_model, custom_embedding_model, embedding_api_url]
114
+ )
115
+
116
+ huggingface_model.change(
117
+ fn=update_huggingface_options,
118
+ inputs=[huggingface_model],
119
+ outputs=[custom_embedding_model]
120
  )
121
 
122
+ def create_all_embeddings(provider, hf_model, openai_model, custom_model, api_url, method, max_size, overlap, adaptive):
123
  try:
124
  all_content = get_all_content_from_database()
125
  if not all_content:
 
135
  collection_name = "all_content_embeddings"
136
  collection = chroma_client.get_or_create_collection(name=collection_name)
137
 
138
+ # Determine the model to use
139
+ if provider == "huggingface":
140
+ model = custom_model if hf_model == "custom" else hf_model
141
+ elif provider == "openai":
142
+ model = openai_model
143
+ else:
144
+ model = custom_model
145
+
146
  for item in all_content:
147
  media_id = item['id']
148
  text = item['content']
 
178
 
179
  create_button.click(
180
  fn=create_all_embeddings,
181
+ inputs=[embedding_provider, huggingface_model, openai_model, custom_embedding_model, embedding_api_url,
182
  chunking_method, max_chunk_size, chunk_overlap, adaptive_chunking],
183
  outputs=status_output
184
  )
 
204
  value="huggingface"
205
  )
206
  gr.Markdown("Note: Local provider requires a running Llama.cpp/llamafile server.")
207
+ gr.Markdown("OpenAI provider requires a valid API key.")
208
+
209
+ huggingface_model = gr.Dropdown(
210
+ choices=[
211
+ "jinaai/jina-embeddings-v3",
212
+ "Alibaba-NLP/gte-large-en-v1.5",
213
+ "dunzhang/stella_en_400M_v5",
214
+ "custom"
215
+ ],
216
+ label="Hugging Face Model",
217
+ value="jinaai/jina-embeddings-v3",
218
+ visible=True
219
  )
220
+
221
+ openai_model = gr.Dropdown(
222
+ choices=[
223
+ "text-embedding-3-small",
224
+ "text-embedding-3-large"
225
+ ],
226
+ label="OpenAI Embedding Model",
227
+ value="text-embedding-3-small",
228
+ visible=False
229
+ )
230
+
231
+ custom_embedding_model = gr.Textbox(
232
+ label="Custom Embedding Model",
233
+ placeholder="Enter your custom embedding model name here",
234
+ visible=False
235
+ )
236
+
237
  embedding_api_url = gr.Textbox(
238
  label="API URL (for local provider)",
239
  value="http://localhost:8080/embedding",
 
290
  return gr.update(choices=["Error: Unable to fetch items"]), {}
291
 
292
  def update_provider_options(provider):
293
+ if provider == "huggingface":
294
+ return gr.update(visible=True), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False)
295
+ elif provider == "local":
296
+ return gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=True)
297
+ else: # OpenAI
298
+ return gr.update(visible=False), gr.update(visible=True), gr.update(visible=False), gr.update(visible=False)
299
+
300
+ def update_huggingface_options(model):
301
+ if model == "custom":
302
+ return gr.update(visible=True)
303
+ else:
304
+ return gr.update(visible=False)
305
 
306
  def check_embedding_status(selected_item, item_mapping):
307
  if not selected_item:
 
334
  logging.error(f"Error in check_embedding_status: {str(e)}")
335
  return f"Error processing item: {selected_item}. Details: {str(e)}", "", ""
336
 
337
+ def create_new_embedding_for_item(selected_item, provider, hf_model, openai_model, custom_model, api_url,
338
+ method, max_size, overlap, adaptive,
339
  item_mapping, use_contextual, contextual_api_choice=None):
340
  if not selected_item:
341
  return "Please select an item", "", ""
 
370
  texts, ids, metadatas = [], [], []
371
  chunk_count = 0
372
  logging.info("Generating contextual summaries and preparing chunks for embedding")
373
+ for i, chunk in enumerate(chunks):
374
  chunk_text = chunk['text']
375
  chunk_metadata = chunk['metadata']
 
 
376
  if use_contextual:
 
377
  logging.debug(f"Generating contextual summary for chunk {chunk_count}")
378
  context = situate_context(contextual_api_choice, item['content'], chunk_text)
379
  contextualized_text = f"{chunk_text}\n\nContextual Summary: {context}"
 
382
  context = None
383
 
384
  chunk_id = f"doc_{item_id}_chunk_{i}"
385
+
386
+ # Determine the model to use
387
+ if provider == "huggingface":
388
+ model = custom_model if hf_model == "custom" else hf_model
389
+ elif provider == "openai":
390
+ model = openai_model
391
+ else:
392
+ model = custom_model
393
+
394
  metadata = {
395
  "media_id": str(item_id),
396
  "chunk_index": i,
 
410
  texts.append(contextualized_text)
411
  ids.append(chunk_id)
412
  metadatas.append(metadata)
413
+ chunk_count += 1
414
 
415
  # Create embeddings in batch
416
  logging.info(f"Creating embeddings for {len(texts)} chunks")
 
420
  store_in_chroma(collection_name, texts, embeddings, ids, metadatas)
421
 
422
  # Create a preview of the first embedding
423
+ if isinstance(embeddings, np.ndarray) and embeddings.size > 0:
424
+ embedding_preview = str(embeddings[0][:50])
425
+ elif isinstance(embeddings, list) and len(embeddings) > 0:
426
+ embedding_preview = str(embeddings[0][:50])
427
+ else:
428
+ embedding_preview = "No embeddings created"
429
 
430
  # Return status message
431
  status = f"New embeddings created and stored for item: {item['title']} (ID: {item_id})"
 
435
  status += " (with contextual summaries)"
436
 
437
  # Return status message, embedding preview, and metadata
438
+ return status, f"First 50 elements of new embedding:\n{embedding_preview}", json.dumps(metadatas[0],
439
+ indent=2)
440
  except Exception as e:
441
+ logging.error(f"Error in create_new_embedding_for_item: {str(e)}", exc_info=True)
442
  return f"Error creating embedding: {str(e)}", "", ""
443
 
444
  refresh_button.click(
 
452
  )
453
  create_new_embedding_button.click(
454
  create_new_embedding_for_item,
455
+ inputs=[item_dropdown, embedding_provider, huggingface_model, openai_model, custom_embedding_model, embedding_api_url,
456
  chunking_method, max_chunk_size, chunk_overlap, adaptive_chunking, item_mapping,
457
  use_contextual_embeddings, contextual_api_choice],
458
  outputs=[embedding_status, embedding_preview, embedding_metadata]
 
460
  embedding_provider.change(
461
  update_provider_options,
462
  inputs=[embedding_provider],
463
+ outputs=[huggingface_model, openai_model, custom_embedding_model, embedding_api_url]
464
+ )
465
+ huggingface_model.change(
466
+ update_huggingface_options,
467
+ inputs=[huggingface_model],
468
+ outputs=[custom_embedding_model]
469
  )
470
 
471
  return (item_dropdown, refresh_button, embedding_status, embedding_preview, embedding_metadata,
472
+ create_new_embedding_button, embedding_provider, huggingface_model, openai_model, custom_embedding_model, embedding_api_url,
473
  chunking_method, max_chunk_size, chunk_overlap, adaptive_chunking,
474
  use_contextual_embeddings, contextual_api_choice, contextual_api_key)
475
 
 
486
 
487
  def purge_all_embeddings():
488
  try:
489
+ # It came to me in a dream....I literally don't remember how the fuck this works, cant find documentation...
490
  collection_name = "all_content_embeddings"
491
  chroma_client.delete_collection(collection_name)
492
  chroma_client.create_collection(collection_name)
493
+ logging.info(f"All embeddings have been purged successfully.")
494
  return "All embeddings have been purged successfully."
495
  except Exception as e:
496
  logging.error(f"Error during embedding purge: {str(e)}")
App_Function_Libraries/Gradio_UI/Explain_summarize_tab.py CHANGED
@@ -177,7 +177,7 @@ def summarize_explain_text(message, api_endpoint, api_key, summarization, explan
177
  summarization_response = summarize_with_deepseek(api_key, input_data, user_prompt, temp,
178
  system_prompt)
179
  elif api_endpoint.lower() == "llama.cpp":
180
- summarization_response = summarize_with_llama(input_data, user_prompt, temp, system_prompt)
181
  elif api_endpoint.lower() == "kobold":
182
  summarization_response = summarize_with_kobold(input_data, api_key, user_prompt, temp,
183
  system_prompt)
@@ -194,7 +194,7 @@ def summarize_explain_text(message, api_endpoint, api_key, summarization, explan
194
  summarization_response = summarize_with_huggingface(api_key, input_data, user_prompt,
195
  temp) # , system_prompt)
196
  elif api_endpoint.lower() == "ollama":
197
- summarization_response = summarize_with_ollama(input_data, user_prompt, temp, system_prompt)
198
  else:
199
  raise ValueError(f"Unsupported API endpoint: {api_endpoint}")
200
  except Exception as e:
 
177
  summarization_response = summarize_with_deepseek(api_key, input_data, user_prompt, temp,
178
  system_prompt)
179
  elif api_endpoint.lower() == "llama.cpp":
180
+ summarization_response = summarize_with_llama(input_data, user_prompt, api_key, temp, system_prompt)
181
  elif api_endpoint.lower() == "kobold":
182
  summarization_response = summarize_with_kobold(input_data, api_key, user_prompt, temp,
183
  system_prompt)
 
194
  summarization_response = summarize_with_huggingface(api_key, input_data, user_prompt,
195
  temp) # , system_prompt)
196
  elif api_endpoint.lower() == "ollama":
197
+ summarization_response = summarize_with_ollama(input_data, user_prompt, None, api_key, temp, system_prompt)
198
  else:
199
  raise ValueError(f"Unsupported API endpoint: {api_endpoint}")
200
  except Exception as e:
App_Function_Libraries/Gradio_UI/Import_Functionality.py CHANGED
@@ -13,7 +13,6 @@ import zipfile
13
  #
14
  # External Imports
15
  import gradio as gr
16
- import pypandoc
17
  #
18
  # Local Imports
19
  from App_Function_Libraries.DB.DB_Manager import insert_prompt_to_db, load_preset_prompts, import_obsidian_note_to_db, \
@@ -29,6 +28,7 @@ logger = logging.getLogger()
29
 
30
 
31
  def import_data(file, title, author, keywords, custom_prompt, summary, auto_summarize, api_name, api_key):
 
32
  if file is None:
33
  return "No file uploaded. Please upload a file."
34
 
@@ -73,7 +73,7 @@ def import_data(file, title, author, keywords, custom_prompt, summary, auto_summ
73
  segments = [{'Text': file_content}]
74
 
75
  # Process keywords
76
- keyword_list = [kw.strip() for kw in keywords.split(',') if kw.strip()]
77
 
78
  # Handle summarization
79
  if auto_summarize and api_name and api_key:
@@ -81,30 +81,28 @@ def import_data(file, title, author, keywords, custom_prompt, summary, auto_summ
81
  elif not summary:
82
  summary = "No summary provided"
83
 
84
- # Add to database
85
- add_media_to_database(
86
- url=file_name, # Using filename as URL
87
- info_dict=info_dict,
88
- segments=segments,
89
- summary=summary,
90
- keywords=keyword_list,
91
- custom_prompt_input=custom_prompt,
92
- whisper_model="Imported", # Indicating this was an imported file
93
- media_type="document"
94
- )
95
-
96
- # Clean up the temporary file
97
- os.unlink(temp_file.name)
98
-
99
- return f"File '{file_name}' successfully imported with title '{title}' and author '{author}'."
 
100
  except Exception as e:
101
- logging.error(f"Error importing file: {str(e)}")
102
  return f"Error importing file: {str(e)}"
103
 
104
 
105
-
106
-
107
-
108
  def process_obsidian_zip(zip_file):
109
  with tempfile.TemporaryDirectory() as temp_dir:
110
  try:
@@ -357,13 +355,11 @@ def create_import_obsidian_vault_tab():
357
  fn=import_vault,
358
  inputs=[vault_path_input, vault_zip_input],
359
  outputs=[import_status],
360
- show_progress=True
361
  )
362
 
363
 
364
  def import_obsidian_vault(vault_path, progress=gr.Progress()):
365
  try:
366
- from App_Function_Libraries.Gradio_UI.Export_Functionality import scan_obsidian_vault
367
  markdown_files = scan_obsidian_vault(vault_path)
368
  total_files = len(markdown_files)
369
  imported_files = 0
 
13
  #
14
  # External Imports
15
  import gradio as gr
 
16
  #
17
  # Local Imports
18
  from App_Function_Libraries.DB.DB_Manager import insert_prompt_to_db, load_preset_prompts, import_obsidian_note_to_db, \
 
28
 
29
 
30
  def import_data(file, title, author, keywords, custom_prompt, summary, auto_summarize, api_name, api_key):
31
+ logging.debug(f"Starting import_data with file: {file} / Title: {title} / Author: {author} / Keywords: {keywords}")
32
  if file is None:
33
  return "No file uploaded. Please upload a file."
34
 
 
73
  segments = [{'Text': file_content}]
74
 
75
  # Process keywords
76
+ keyword_list = [kw.strip() for kw in keywords.split(',') if kw.strip()] if keywords else []
77
 
78
  # Handle summarization
79
  if auto_summarize and api_name and api_key:
 
81
  elif not summary:
82
  summary = "No summary provided"
83
 
84
+ # Add to database
85
+ result = add_media_to_database(
86
+ url=file_name, # Using filename as URL
87
+ info_dict=info_dict,
88
+ segments=segments,
89
+ summary=summary,
90
+ keywords=keyword_list,
91
+ custom_prompt_input=custom_prompt,
92
+ whisper_model="Imported", # Indicating this was an imported file
93
+ media_type="document",
94
+ overwrite=False # Set this to True if you want to overwrite existing entries
95
+ )
96
+
97
+ # Clean up the temporary file
98
+ os.unlink(temp_file.name)
99
+
100
+ return f"File '{file_name}' import attempt complete. Database result: {result}"
101
  except Exception as e:
102
+ logging.exception(f"Error importing file: {str(e)}")
103
  return f"Error importing file: {str(e)}"
104
 
105
 
 
 
 
106
  def process_obsidian_zip(zip_file):
107
  with tempfile.TemporaryDirectory() as temp_dir:
108
  try:
 
355
  fn=import_vault,
356
  inputs=[vault_path_input, vault_zip_input],
357
  outputs=[import_status],
 
358
  )
359
 
360
 
361
  def import_obsidian_vault(vault_path, progress=gr.Progress()):
362
  try:
 
363
  markdown_files = scan_obsidian_vault(vault_path)
364
  total_files = len(markdown_files)
365
  imported_files = 0
App_Function_Libraries/Gradio_UI/Search_Tab.py CHANGED
@@ -73,7 +73,7 @@ def format_as_html(content, title):
73
  return f"""
74
  <div style="border: 1px solid #ddd; padding: 10px; margin-bottom: 10px;">
75
  <h3>{title}</h3>
76
- <div style="max-height: 300px; overflow-y: auto;">
77
  {formatted_content}
78
  </div>
79
  </div>
 
73
  return f"""
74
  <div style="border: 1px solid #ddd; padding: 10px; margin-bottom: 10px;">
75
  <h3>{title}</h3>
76
+ <div style="max-height: 700px; overflow-y: auto;">
77
  {formatted_content}
78
  </div>
79
  </div>
App_Function_Libraries/Gradio_UI/Video_transcription_tab.py CHANGED
@@ -43,6 +43,7 @@ def create_video_transcription_tab():
43
  lines=5)
44
  video_file_input = gr.File(label="Upload Video File (Optional)", file_types=["video/*"])
45
  diarize_input = gr.Checkbox(label="Enable Speaker Diarization", value=False)
 
46
  whisper_model_input = gr.Dropdown(choices=whisper_models, value="medium", label="Whisper Model")
47
 
48
  with gr.Row():
@@ -185,7 +186,7 @@ def create_video_transcription_tab():
185
  download_summary = gr.File(label="Download All Summaries as Text")
186
 
187
  @error_handler
188
- def process_videos_with_error_handling(inputs, start_time, end_time, diarize, whisper_model,
189
  custom_prompt_checkbox, custom_prompt, chunking_options_checkbox,
190
  chunk_method, max_chunk_size, chunk_overlap, use_adaptive_chunking,
191
  use_multi_level_chunking, chunk_language, api_name,
@@ -301,7 +302,7 @@ def create_video_transcription_tab():
301
  input_item, 2, whisper_model,
302
  custom_prompt,
303
  start_seconds, api_name, api_key,
304
- False, False, False, False, 0.01, None, keywords, None, diarize,
305
  end_time=end_seconds,
306
  include_timestamps=timestamp_option,
307
  metadata=video_metadata,
@@ -425,7 +426,7 @@ def create_video_transcription_tab():
425
  None
426
  )
427
 
428
- def process_videos_wrapper(url_input, video_file, start_time, end_time, diarize, whisper_model,
429
  custom_prompt_checkbox, custom_prompt, chunking_options_checkbox,
430
  chunk_method, max_chunk_size, chunk_overlap, use_adaptive_chunking,
431
  use_multi_level_chunking, chunk_language, summarize_recursively, api_name,
@@ -460,7 +461,7 @@ def create_video_transcription_tab():
460
  raise ValueError("No input provided. Please enter URLs or upload a video file.")
461
 
462
  result = process_videos_with_error_handling(
463
- inputs, start_time, end_time, diarize, whisper_model,
464
  custom_prompt_checkbox, custom_prompt, chunking_options_checkbox,
465
  chunk_method, max_chunk_size, chunk_overlap, use_adaptive_chunking,
466
  use_multi_level_chunking, chunk_language, api_name,
@@ -507,6 +508,7 @@ def create_video_transcription_tab():
507
  try:
508
  logging.info(f"Starting process_url_metadata for URL: {input_item}")
509
  # Create download path
 
510
  download_path = create_download_directory("Video_Downloads")
511
  logging.info(f"Download path created at: {download_path}")
512
 
@@ -743,15 +745,37 @@ def create_video_transcription_tab():
743
  inputs=[confab_checkbox],
744
  outputs=[confabulation_output]
745
  )
 
746
  process_button.click(
747
  fn=process_videos_wrapper,
748
  inputs=[
749
- url_input, video_file_input, start_time_input, end_time_input, diarize_input, whisper_model_input,
750
- custom_prompt_checkbox, custom_prompt_input, chunking_options_checkbox,
751
- chunk_method, max_chunk_size, chunk_overlap, use_adaptive_chunking,
752
- use_multi_level_chunking, chunk_language, summarize_recursively, api_name_input, api_key_input,
753
- keywords_input, use_cookies_input, cookies_input, batch_size_input,
754
- timestamp_option, keep_original_video, confab_checkbox, overwrite_checkbox
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
755
  ],
756
  outputs=[progress_output, error_output, results_output, download_transcription, download_summary, confabulation_output]
757
  )
 
43
  lines=5)
44
  video_file_input = gr.File(label="Upload Video File (Optional)", file_types=["video/*"])
45
  diarize_input = gr.Checkbox(label="Enable Speaker Diarization", value=False)
46
+ vad_checkbox = gr.Checkbox(label="Enable Voice-Audio-Detection(VAD)", value=True)
47
  whisper_model_input = gr.Dropdown(choices=whisper_models, value="medium", label="Whisper Model")
48
 
49
  with gr.Row():
 
186
  download_summary = gr.File(label="Download All Summaries as Text")
187
 
188
  @error_handler
189
+ def process_videos_with_error_handling(inputs, start_time, end_time, diarize, vad_use, whisper_model,
190
  custom_prompt_checkbox, custom_prompt, chunking_options_checkbox,
191
  chunk_method, max_chunk_size, chunk_overlap, use_adaptive_chunking,
192
  use_multi_level_chunking, chunk_language, api_name,
 
302
  input_item, 2, whisper_model,
303
  custom_prompt,
304
  start_seconds, api_name, api_key,
305
+ vad_use, False, False, False, 0.01, None, keywords, None, diarize,
306
  end_time=end_seconds,
307
  include_timestamps=timestamp_option,
308
  metadata=video_metadata,
 
426
  None
427
  )
428
 
429
+ def process_videos_wrapper(url_input, video_file, start_time, end_time, diarize, vad_use, whisper_model,
430
  custom_prompt_checkbox, custom_prompt, chunking_options_checkbox,
431
  chunk_method, max_chunk_size, chunk_overlap, use_adaptive_chunking,
432
  use_multi_level_chunking, chunk_language, summarize_recursively, api_name,
 
461
  raise ValueError("No input provided. Please enter URLs or upload a video file.")
462
 
463
  result = process_videos_with_error_handling(
464
+ inputs, start_time, end_time, diarize, vad_use, whisper_model,
465
  custom_prompt_checkbox, custom_prompt, chunking_options_checkbox,
466
  chunk_method, max_chunk_size, chunk_overlap, use_adaptive_chunking,
467
  use_multi_level_chunking, chunk_language, api_name,
 
508
  try:
509
  logging.info(f"Starting process_url_metadata for URL: {input_item}")
510
  # Create download path
511
+
512
  download_path = create_download_directory("Video_Downloads")
513
  logging.info(f"Download path created at: {download_path}")
514
 
 
745
  inputs=[confab_checkbox],
746
  outputs=[confabulation_output]
747
  )
748
+
749
  process_button.click(
750
  fn=process_videos_wrapper,
751
  inputs=[
752
+ url_input,
753
+ video_file_input,
754
+ start_time_input,
755
+ end_time_input,
756
+ diarize_input,
757
+ vad_checkbox,
758
+ whisper_model_input,
759
+ custom_prompt_checkbox,
760
+ custom_prompt_input,
761
+ chunking_options_checkbox,
762
+ chunk_method,
763
+ max_chunk_size,
764
+ chunk_overlap,
765
+ use_adaptive_chunking,
766
+ use_multi_level_chunking,
767
+ chunk_language,
768
+ summarize_recursively,
769
+ api_name_input,
770
+ api_key_input,
771
+ keywords_input,
772
+ use_cookies_input,
773
+ cookies_input,
774
+ batch_size_input,
775
+ timestamp_option,
776
+ keep_original_video,
777
+ confab_checkbox,
778
+ overwrite_checkbox
779
  ],
780
  outputs=[progress_output, error_output, results_output, download_transcription, download_summary, confabulation_output]
781
  )
App_Function_Libraries/Gradio_UI/View_DB_Items_tab.py CHANGED
@@ -134,7 +134,7 @@ def format_as_html(content, title):
134
  return f"""
135
  <div style="border: 1px solid #ddd; padding: 10px; margin-bottom: 10px;">
136
  <h3>{title}</h3>
137
- <div style="max-height: 300px; overflow-y: auto;">
138
  {formatted_content}
139
  </div>
140
  </div>
@@ -200,7 +200,7 @@ def create_view_all_with_versions_tab():
200
  return f"""
201
  <div style="border: 1px solid #ddd; padding: 10px; margin-bottom: 10px;">
202
  <h3>{title}</h3>
203
- <div style="max-height: 300px; overflow-y: auto;">
204
  {formatted_content}
205
  </div>
206
  </div>
 
134
  return f"""
135
  <div style="border: 1px solid #ddd; padding: 10px; margin-bottom: 10px;">
136
  <h3>{title}</h3>
137
+ <div style="max-height: 700px; overflow-y: auto;">
138
  {formatted_content}
139
  </div>
140
  </div>
 
200
  return f"""
201
  <div style="border: 1px solid #ddd; padding: 10px; margin-bottom: 10px;">
202
  <h3>{title}</h3>
203
+ <div style="max-height: 700px; overflow-y: auto;">
204
  {formatted_content}
205
  </div>
206
  </div>