import subprocess
import gradio as gr
# Compile the llama2.c inference binary ("run") with the optimized "make runfast" target.
try:
    subprocess.run(["make", "runfast"], capture_output=True, text=True, check=True)
    print("Model compilation successful.")
except subprocess.CalledProcessError as e:
    print("Error:", e)
    print(e.stderr)
def chatbot(prompt, temperature, topt, maxtoken, model_file):
    # Assemble the llama2.c command: -t temperature, -p top-p, -n max tokens, -i input prompt.
    command = ["./run", model_file, "-t", str(temperature), "-p", str(topt), "-n", str(maxtoken), "-i", f"{prompt}"]
    try:
        result = subprocess.run(command, capture_output=True, text=True, check=True)
        response = result.stdout
    except subprocess.CalledProcessError:
        response = "Error occurred while processing the request."
    return response
with gr.Blocks() as demo:
    gr.Markdown("HF Spaces for TinyStories")
    with gr.Row():
        with gr.Column():
            inp = gr.Textbox(placeholder="Type the beginning of the story")
    with gr.Row():
        with gr.Column():
            temperature_slider = gr.Slider(minimum=0.1, maximum=2.0, value=0.8, label="Temperature")
            topt_slider = gr.Slider(minimum=0.1, maximum=1.0, value=0.9, label="Top-p")
            maxtoken_slider = gr.Slider(minimum=16, maximum=512, value=256, label="Max Tokens")
            model_file_dropdown = gr.Dropdown(choices=["stories15M.bin", "stories42M.bin", "stories110M.bin"], label="Model File", value="stories15M.bin")
    out = gr.Textbox()
    btn = gr.Button("Run")
    btn.click(fn=chatbot, inputs=[inp, temperature_slider, topt_slider, maxtoken_slider, model_file_dropdown], outputs=out)
demo.launch()
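
For reference, the chatbot function simply shells out to llama2.c's run binary. With the default slider values it assembles a command equivalent to the sketch below (a hypothetical local test with an example prompt, assuming the run binary has been built with "make runfast" and stories15M.bin sits in the working directory):

import subprocess

# Equivalent of: ./run stories15M.bin -t 0.8 -p 0.9 -n 256 -i "Once upon a time"
result = subprocess.run(
    ["./run", "stories15M.bin",
     "-t", "0.8",                 # sampling temperature
     "-p", "0.9",                 # top-p (nucleus sampling) cutoff
     "-n", "256",                 # number of tokens to generate
     "-i", "Once upon a time"],   # example input prompt
    capture_output=True, text=True, check=True,
)
print(result.stdout)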