# testing / app.py
import gradio as gr
from gradio_client import Client
from huggingface_hub import InferenceClient
import random
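# Client for a companion Space that renders a chat transcript to an image;
# used by get_screenshot() below.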
ss_client = Client("https://xilixmeaty40-html-image-current-tabx.hf.space/")
# models.txt holds one model id per line.
with open("models.txt", "r") as file:
    models = file.read().splitlines()

try:
    # InferenceClient expects a single model id, so use the first entry;
    # joining every line into one string is not a valid id. The request
    # timeout is configured on the client, not on text_generation().
    client = InferenceClient(models[0], timeout=10)
except Exception as e:
    raise Exception(f"Failed to load models: {e}")
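# Return an update that relabels a component with the model name at index
# `inp`; presumably meant for a model-picker event, though nothing in this
# file wires it up.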
def load_models(inp):
    return gr.update(label=models[inp])
def format_prompt(message, history, cust_p):
    prompt = ""
    if history:
        for user_prompt, bot_response in history:
            prompt += f"<start_of_turn>user{user_prompt}<end_of_turn>"
            prompt += f"<start_of_turn>model{bot_response}<end_of_turn>"
    prompt += cust_p.replace("USER_INPUT", message)
    return prompt
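# Example: with history=[("hi", "hello")] and the default template used below,
# format_prompt("how are you?", history, cust_p) builds the single string
#   <start_of_turn>userhi<end_of_turn><start_of_turn>modelhello<end_of_turn>
#   <start_of_turn>userhow are you?<end_of_turn><start_of_turn>model
# (wrapped here for readability).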
def chat_inf(system_prompt, prompt, history, memory, seed, temp, tokens, top_p, rep_p, chat_mem, cust_p):
    hist_len = 0
    if not history:
        history = []
    if not memory:
        memory = []
    # chat_mem arrives from a gr.Number as a float; cast before slicing.
    chat_mem = int(chat_mem)
    if memory:
        for ea in memory[-chat_mem:]:
            hist_len += len(str(ea))
    # Rough budget check on character counts (not true token counts).
    in_len = len(system_prompt + prompt) + hist_len
    if (in_len + tokens) > 8000:
        history.append((prompt, "Wait, that's too many tokens; please reduce the 'Chat Memory' value or the 'Max new tokens' value."))
        yield history, memory
    else:
        generate_kwargs = dict(
            temperature=temp,
            max_new_tokens=int(tokens),
            top_p=top_p,
            repetition_penalty=rep_p,
            do_sample=True,
            seed=int(seed),
        )
        if system_prompt:
            formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", memory[-chat_mem:], cust_p)
        else:
            formatted_prompt = format_prompt(prompt, memory[-chat_mem:], cust_p)
        try:
            stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=True)
            output = ""
            for response in stream:
                output += response.token.text
                # Yield the full history plus the in-progress turn so earlier
                # turns stay visible while the reply streams.
                yield history + [(prompt, output)], memory
            history.append((prompt, output))
            memory.append((prompt, output))
            yield history, memory
        except Exception as e:
            print(f"Error during model inference: {e}")
            yield history + [("Error", "The model failed to respond, possibly due to a timeout. Please try again.")], memory
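# chat_inf is a generator: Gradio re-renders the (Chatbot, State) outputs on
# every yield, which is what makes the reply stream token by token.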
def get_screenshot(chat, height=5000, width=600, chatblock=[], theme="light", wait=3000, header=True):
    tog = 0
    if chatblock:
        tog = 3
    result = ss_client.predict(str(chat), height, width, chatblock, header, theme, wait, api_name="/run_script")
    out = f'https://xilixmeaty40-html-image-current-tabx.hf.space/file={result[tog]}'
    return out
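# ss_client.predict returns a sequence of file paths; index 3 is used when
# specific chat blocks were requested, index 0 for the full-page render.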
def clear_fn():
    # One None per cleared component: prompt, system prompt, chatbot, memory.
    return None, None, None, None
rand_val = random.randint(1, 1111111111111111)
def check_rand(inp, val):
    if inp:
        return gr.Slider(label="Seed", minimum=1, maximum=1111111111111111, value=random.randint(1, 1111111111111111))
    else:
        return gr.Slider(label="Seed", minimum=1, maximum=1111111111111111, value=int(val))
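# check_rand returns a replacement Seed slider: a fresh random seed when `inp`
# is truthy, otherwise the fixed value `val`. No event triggers it in this UI,
# so the slider simply keeps its initial rand_val.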
with gr.Blocks() as app:
    memory = gr.State()
    chat_b = gr.Chatbot(height=500)
    with gr.Group():
        with gr.Row():
            with gr.Column(scale=3):
                inp = gr.Textbox(label="Prompt")
                sys_inp = gr.Textbox(label="System Prompt (optional)")
                custom_prompt = gr.Textbox(label="Modify Prompt Format", lines=3, value="<start_of_turn>userUSER_INPUT<end_of_turn><start_of_turn>model")
                with gr.Row():
                    with gr.Column(scale=2):
                        btn = gr.Button("Chat")
                    with gr.Column(scale=1):
                        stop_btn = gr.Button("Stop")
                        clear_btn = gr.Button("Clear")
                seed = gr.Slider(label="Seed", minimum=1, maximum=1111111111111111, step=1, value=rand_val)
                # chat_inf rejects a request once prompt length plus max new
                # tokens exceeds 8000, so keep this slider within that budget.
                tokens = gr.Slider(label="Max new tokens", value=1024, minimum=0, maximum=8000, step=64)
                temp = gr.Slider(label="Temperature", step=0.01, minimum=0.01, maximum=1.0, value=0.49)
                top_p = gr.Slider(label="Top-P", step=0.01, minimum=0.01, maximum=1.0, value=0.49)
                rep_p = gr.Slider(label="Repetition Penalty", step=0.01, minimum=0.1, maximum=2.0, value=0.99)
                chat_mem = gr.Number(label="Chat Memory", value=4)
    with gr.Accordion(label="Screenshot", open=False):
        im_btn = gr.Button("Screenshot")
        img = gr.Image(type='filepath')
        im_height = gr.Number(label="Height", value=5000)
        im_width = gr.Number(label="Width", value=500)
        wait_time = gr.Number(label="Wait Time", value=3000)
        theme = gr.Radio(label="Theme", choices=["light", "dark"], value="light")
        chatblock = gr.Dropdown(label="Chatblocks", choices=list(range(0, 21)), value=0, type="index")
        header = gr.Checkbox(label="Include header?", value=True)
    go = btn.click(chat_inf, inputs=[sys_inp, inp, chat_b, memory, seed, temp, tokens, top_p, rep_p, chat_mem, custom_prompt], outputs=[chat_b, memory])
    # Stop cancels the in-flight generation; Clear resets the four components
    # that clear_fn returns None for.
    stop_btn.click(None, None, None, cancels=[go])
    clear_btn.click(clear_fn, None, [inp, sys_inp, chat_b, memory])
    # Render the current chat to an image via the screenshot Space.
    im_btn.click(get_screenshot, [chat_b, im_height, im_width, chatblock, theme, wait_time, header], img)
app.launch(share=True)
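# share=True requests a temporary public gradio.live link in addition to the
# local server; it can be dropped when the app already runs on a hosted Space.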