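"""
SillyTavern Character Generator (Gradio app).

Uses an Anthropic or OpenAI model to generate a character-card JSON, can produce an
SDXL or Midjourney image prompt from the generated card, and can embed the card JSON
into a PNG as a base64-encoded 'chara' tEXt chunk for import into SillyTavern.
"""
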
import gradio as gr
from anthropic import Anthropic
from openai import OpenAI
import json
import uuid
import os
import base64
from PIL import Image
from PIL.PngImagePlugin import PngInfo
default_urls = ["https://api.anthropic.com", "https://api.openai.com/v1"]

# List of available Claude models
claude_models = ["claude-3-5-sonnet-20240620", "claude-3-opus-20240229", "claude-3-sonnet-20240229", "claude-3-haiku-20240307"]

# List of available OpenAI models
openai_models = ["gpt-4o", "gpt-4o-mini", "gpt-4", "gpt-4-32k", "gpt-3.5-turbo", "gpt-4-0125-preview", "gpt-4-turbo-preview", "gpt-4-1106-preview", "gpt-4-0613"]

# Image-prompt styles the second generation pass can target
image_prompter = ["SDXL", "midjourney"]

both_models = claude_models + openai_models
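
# First pass: send the user prompt plus the system prompt from json.txt to the chosen
# model and return the raw response, the extracted card JSON, and a saved .json path.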
def generate_response(endpoint, api_key, model, user_prompt):
    print(endpoint)
    if endpoint in default_urls:
        # Official endpoints: infer the provider from the API key prefix
        if api_key.startswith("sk-ant-"):
            client = Anthropic(api_key=api_key, base_url=endpoint)
            system_prompt_path = __file__.replace("app.py", "json.txt")
        elif api_key.startswith("sk-"):
            client = OpenAI(api_key=api_key, base_url=endpoint)
            system_prompt_path = __file__.replace("app.py", "json.txt")
        else:
            print(f"Invalid API key: {api_key}")
            return "Invalid API key", "Invalid API key", None
    else:
        # Custom endpoint (proxy): infer the provider from the selected model
        if model in claude_models:
            client = Anthropic(api_key=api_key, base_url=endpoint)
            system_prompt_path = __file__.replace("app.py", "json.txt")
        else:
            client = OpenAI(api_key=api_key, base_url=endpoint)
            system_prompt_path = __file__.replace("app.py", "json.txt")

    # Read the system prompt from a text file
    with open(system_prompt_path, "r") as file:
        system_prompt = file.read()

    if model in claude_models:
        # Generate a response using the selected Anthropic model
        try:
            response = client.messages.create(
                system=system_prompt,
                messages=[{"role": "user", "content": user_prompt}],
                model=model,
                max_tokens=4096
            )
            response_text = response.content[0].text
        except Exception as e:
            print(e)
            response_text = f"An error occurred while generating the response. Check that your API key is correct! More info: {e}"
    else:
        # Generate a response using the selected OpenAI model
        try:
            response = client.chat.completions.create(
                model=model,
                messages=[
                    {"role": "system", "content": system_prompt},
                    {"role": "user", "content": user_prompt}
                ],
                max_tokens=4096
            )
            response_text = response.choices[0].message.content
        except Exception as e:
            print(e)
            response_text = f"An error occurred while generating the response. Check that your API key is correct! More info: {e}"

    json_string, json_json = extract_json(response_text)
    json_file = json_string if json_string else None

    create_unique_id = str(uuid.uuid4())
    json_folder = __file__.replace("app.py", "outputs/")
    if not os.path.exists(json_folder):
        os.makedirs(json_folder)

    path = None
    if json_string:
        path = f"{json_folder}{json_json['name']}_{create_unique_id}.json"
        with open(path, "w") as file:
            file.write(json_file)
    else:
        json_string = "No JSON data was found, or the JSON data was incomplete."

    return response_text, json_string or "", path
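
# Pull the first {...} block out of the model output, parse it, and duplicate the
# char_* fields under the name/personality/scenario/first_mes card keys as well.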
def extract_json(generated_output):
    try:
        generated_output = generated_output.replace("```json", "").replace("```", "").strip()
        # Find the JSON string in the generated output
        json_start = generated_output.find("{")
        json_end = generated_output.rfind("}") + 1
        json_string = generated_output[json_start:json_end]
        print(json_string)
        # Parse the JSON string
        json_data = json.loads(json_string)
        # Copy the generated fields to the alternate card key names
        json_data['name'] = json_data['char_name']
        json_data['personality'] = json_data['char_persona']
        json_data['scenario'] = json_data['world_scenario']
        json_data['first_mes'] = json_data['char_greeting']
        # Check if all the required keys are present
        required_keys = ["char_name", "char_persona", "world_scenario", "char_greeting", "example_dialogue", "description"]
        if all(key in json_data for key in required_keys):
            return json.dumps(json_data), json_data
        else:
            return None, None
    except Exception as e:
        print(e)
        return None, None
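
# Second pass: feed the generated card text back to the model with the SDXL.txt or
# midjourney.txt system prompt to produce an image-generation prompt for the character.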
def generate_second_response(endpoint, api_key, model, generated_output, image_model):
    if endpoint in default_urls:
        # Official endpoints: infer the provider from the API key prefix
        if api_key.startswith("sk-ant-"):
            client = Anthropic(api_key=api_key, base_url=endpoint)
            system_prompt_path = __file__.replace("app.py", f"{image_model}.txt")
        elif api_key.startswith("sk-"):
            client = OpenAI(api_key=api_key, base_url=endpoint)
            system_prompt_path = __file__.replace("app.py", f"{image_model}.txt")
        else:
            print("Invalid API key")
            # This handler feeds a single Gradio output, so return a single value
            return "Invalid API key"
    else:
        # Custom endpoint (proxy): infer the provider from the selected model
        if model in claude_models:
            client = Anthropic(api_key=api_key, base_url=endpoint)
            system_prompt_path = __file__.replace("app.py", f"{image_model}.txt")
        else:
            client = OpenAI(api_key=api_key, base_url=endpoint)
            system_prompt_path = __file__.replace("app.py", f"{image_model}.txt")

    # Read the system prompt from a text file
    with open(system_prompt_path, "r") as file:
        system_prompt = file.read()

    if model in claude_models:
        try:
            # Generate a second response using the selected Anthropic model and the previously generated output
            response = client.messages.create(
                system=system_prompt,
                messages=[{"role": "user", "content": generated_output}],
                model=model,
                max_tokens=4096
            )
            response_text = response.content[0].text
        except Exception as e:
            print(e)
            response_text = f"An error occurred while generating the response. Check that your API key is correct! More info: {e}"
    else:
        try:
            # Generate a response using the selected OpenAI model
            response = client.chat.completions.create(
                model=model,
                messages=[
                    {"role": "system", "content": system_prompt},
                    {"role": "user", "content": generated_output}
                ],
                max_tokens=4096
            )
            response_text = response.choices[0].message.content
        except Exception as e:
            print(e)
            response_text = f"An error occurred while generating the response. Check that your API key is correct! More info: {e}"

    return response_text
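
# Center-crop and resize the uploaded PNG to 400x600, then write the card JSON into a
# 'chara' tEXt chunk as base64 so the PNG can be imported as a character card.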
def inject_json_to_png(image, json_data):
    if isinstance(json_data, str):
        json_data = json.loads(json_data)

    img = Image.open(image)

    # Calculate the aspect ratio of the original image
    width, height = img.size
    aspect_ratio = width / height

    # Calculate the cropping dimensions based on the target 400x600 aspect ratio
    if aspect_ratio > 400 / 600:
        # Image is wider than 400x600, crop the sides
        new_width = int(height * 400 / 600)
        left = (width - new_width) // 2
        right = left + new_width
        top = 0
        bottom = height
    else:
        # Image is taller than 400x600, crop the top and bottom
        new_height = int(width * 600 / 400)
        left = 0
        right = width
        top = (height - new_height) // 2
        bottom = top + new_height

    # Perform the center crop, then resize to 400x600 pixels
    img = img.crop((left, top, right, bottom))
    img = img.resize((400, 600), Image.LANCZOS)

    # Encode the JSON as base64 and add it as a tEXt chunk with the key 'chara'
    json_bytes = json.dumps(json_data).encode('utf-8')
    metadata = PngInfo()
    metadata.add_text("chara", base64.b64encode(json_bytes).decode('utf-8'))

    # Save the modified PNG to the outputs folder
    create_unique_id = str(uuid.uuid4())
    if json_data.get('name'):
        filename = f"{json_data['name']}_{create_unique_id}.png"
    else:
        # Fall back to a generic filename if the card has no name
        filename = f"character_{create_unique_id}.png"
    img_folder = __file__.replace("app.py", "outputs/")
    if not os.path.exists(img_folder):
        os.makedirs(img_folder)
    output_path = os.path.join(img_folder, filename)
    img.save(output_path, format='PNG', pnginfo=metadata)
    return output_path
# Set up the Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# SillyTavern Character Generator")
    # Explain which API keys are accepted
    gr.Markdown("You can use an API key from either the Anthropic API or the OpenAI API. The API key should start with 'sk-ant-' for Anthropic or 'sk-' for OpenAI.")
    gr.Markdown("Please note: if you use a proxy, it must support the standard OpenAI or Anthropic API calls! khanon does; OpenRouter-based ones usually do not.")
    gr.Markdown("Generating images locally and want to use the prompts from here in your workflow? https://github.com/AppleBotzz/ComfyUI_LLMVISION")

    with gr.Tab("JSON Generate"):
        with gr.Row():
            with gr.Column():
                endpoint = gr.Textbox(label="Endpoint", value="https://api.anthropic.com")
                api_key = gr.Textbox(label="API Key", type="password", placeholder="sk-ant-api03-... or sk-...")
                model_dropdown = gr.Dropdown(choices=[], label="Select a model")
                user_prompt = gr.Textbox(label="User Prompt", value="Make me a card for a panther made of translucent pastel colored goo. Its color never changes once it exists but each 'copy' has a different color. The creature comes out of a small jar, seemingly defying physics with its size. It is the size of a real panther, and as strong as one too. By default its female but is able to change gender. It can even split into multiple copies of itself if needed with no change in its own size or mass. Its outside is normally lightly squishy but solid, but on command it can become viscous like non-newtonian fluids. Be descriptive when describing this character, and make sure to describe all of its features in char_persona just like you do in description. Make sure to describe commonly used features in detail (visual, smell, taste, touch, etc).")
                generate_button = gr.Button("Generate JSON")
            with gr.Column():
                generated_output = gr.Textbox(label="Generated Output")
                json_output = gr.Textbox(label="JSON Output")
                json_download = gr.File(label="Download JSON")
        with gr.Row():
            with gr.Column():
                image_model = gr.Dropdown(choices=image_prompter, label="Image Model to prompt for", value="SDXL")
                generate_button_2 = gr.Button("Generate SDXL Prompt")
            with gr.Column():
                generated_output_2 = gr.Textbox(label="Generated SDXL Prompt")

        def update_models(api_key):
            # Switch the model list and default endpoint to match the detected provider
            if api_key.startswith("sk-ant-"):
                return gr.Dropdown(choices=claude_models), gr.Textbox(label="Endpoint", value="https://api.anthropic.com")
            elif api_key.startswith("sk-"):
                return gr.Dropdown(choices=openai_models), gr.Textbox(label="Endpoint", value="https://api.openai.com/v1")
            else:
                return gr.Dropdown(choices=both_models), gr.Textbox(label="Endpoint", value="https://api.anthropic.com")

        api_key.change(update_models, inputs=api_key, outputs=[model_dropdown, endpoint])
        generate_button.click(generate_response, inputs=[endpoint, api_key, model_dropdown, user_prompt], outputs=[generated_output, json_output, json_download])
        generate_button_2.click(generate_second_response, inputs=[endpoint, api_key, model_dropdown, generated_output, image_model], outputs=generated_output_2)

    with gr.Tab("PNG Inject"):
        gr.Markdown("# PNG Inject")
        gr.Markdown("Upload a PNG image and inject JSON content into the PNG. The PNG is resized to a 400x600 center crop.")
        with gr.Row():
            with gr.Column():
                image_input = gr.Image(type="filepath", label="Upload PNG Image")
                json_input = gr.Textbox(label="JSON Data")
                json_file_input = gr.File(label="Or Upload JSON File", file_types=[".json"])
                inject_button = gr.Button("Inject JSON and Download PNG")
            with gr.Column():
                injected_image_output = gr.File(label="Download Injected PNG")

        def inject_json(image, json_data, json_file):
            # An uploaded JSON file takes precedence over the pasted JSON text
            if json_file:
                with open(json_file) as jsonc:
                    json_data = json.load(jsonc)
            if image is None or not json_data:
                return None
            injected_image = inject_json_to_png(image, json_data)
            return injected_image

        inject_button.click(inject_json, inputs=[image_input, json_input, json_file_input], outputs=injected_image_output)
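
# When run outside Hugging Face Spaces, demo.launch() serves the app locally
# (http://127.0.0.1:7860 by default); pass share=True to expose a temporary public link.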
demo.launch()