import logging
import os
import re
import warnings
from pathlib import Path

import gradio as gr
import requests
import torch
from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline, Pipeline

from templates import starting_app_code, update_iframe_js, copy_snippet_js, download_code_js, load_js, DemoType, \
    copy_share_link_js


warnings.filterwarnings("ignore", message='Trying to convert audio automatically from int32 to 16-bit int format')

logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
)

logger = logging.getLogger("my_logger")

HF_TOKEN = os.getenv("HF_TOKEN")

if not HF_TOKEN:
    raise Exception("HF_TOKEN environment variable is required to call remote API.")

API_URL = "https://api-inference.huggingface.co/models/HuggingFaceH4/zephyr-7b-beta"
headers = {"Authorization": f"Bearer {HF_TOKEN}"}
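# query() POSTs a JSON payload of the form {"inputs": ..., "parameters": ...} with these headers.
# On success the hosted Inference API returns a list like [{"generated_text": "..."}]; on failure
# (for example while the model is still loading) it returns a dict with an "error" key, and
# generate_text() below checks for both shapes before parsing the reply.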


def init_speech_to_text_model() -> Pipeline:
    device = "cuda:0" if torch.cuda.is_available() else "cpu"
    torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32

    model_id = "distil-whisper/distil-medium.en"
    model = AutoModelForSpeechSeq2Seq.from_pretrained(
        model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True
    )
    model.to(device)
    processor = AutoProcessor.from_pretrained(model_id)
    return pipeline(
        "automatic-speech-recognition",
        model=model,
        tokenizer=processor.tokenizer,
        feature_extractor=processor.feature_extractor,
        max_new_tokens=128,
        torch_dtype=torch_dtype,
        device=device,
    )
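

# Loaded once at startup. transcribe() passes a recorded audio file path to this pipeline and reads
# the transcription from the "text" key of the returned dict, e.g. (assumed output shape):
#   whisper_pipe("recording.wav")  # -> {"text": "Add a button to reverse the name"}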
whisper_pipe = init_speech_to_text_model()
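
# Captures the first ```python fenced block in the model's reply (re.DOTALL lets the block span
# multiple lines); generate_text() uses it to pull runnable code out of the chat response.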
code_pattern = re.compile(r'```python\n(.*?)```', re.DOTALL)


def query(payload: dict):
    response = requests.post(API_URL, headers=headers, json=payload)
    return response.json()
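

# Builds a Zephyr-style prompt: the current app code in a ```python fence, the user's request, and
# a trailing <|assistant|> marker. The reply is split on <|assistant|> and the first fenced code
# block (if any) becomes the new app code; the raw reply is always shown in the chat response box.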
def generate_text(code: str, prompt: str) -> tuple[str, str, str]:
    logger.info(f"Calling API with prompt:\n{prompt}")
    prompt = f"```python\n{code}```\nGiven the code above return only updated code for the following request:\n{prompt}\n<|assistant|>"
    params = {"max_new_tokens": 512}
    output = query({"inputs": prompt, "parameters": params})
    if 'error' in output:
        logger.warning(f'Language model call failed: {output["error"]}')
        raise gr.Error(f'Language model call failed: {output["error"]}')
    logger.info(f'API RESPONSE\n{output[0]["generated_text"]}')
    assistant_reply = output[0]["generated_text"].split('<|assistant|>')[1]
    match = re.search(code_pattern, assistant_reply)
    if not match:
        return assistant_reply, code, None
    new_code = match.group(1)
    logger.info(f'NEW CODE:\n{new_code}')
    return assistant_reply, new_code, None
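

# Returns the transcribed text plus None, which clears the audio input so the next recording starts
# fresh (the event wiring below maps these two values to [in_prompt, in_audio]).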
def transcribe(audio: str) -> tuple[str, str]:
    result = whisper_pipe(audio)
    return result["text"], None


def link_copy_notify(code: str, requirements: str):
    gr.Info("Share link copied!")


def copy_notify(code: str, requirements: str):
    gr.Info("App code snippet copied!")


def add_hotkeys() -> str:
    return Path("hotkeys.js").read_text()
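

# Share links (built client-side by copy_share_link_js) appear to encode the app in ?code=... and
# ?requirements=... query parameters; on page load this restores them, falling back to the starting
# demo code when the parameters are absent.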
def apply_query_params(code: str, request: gr.Request) -> tuple[str, str]:
    params = dict(request.query_params)
    return params.get('code') or code, params.get('requirements') or ''
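

# Called through the update_iframe_js hook, which appears to pass back the list of modules the app
# needs plus any runtime error from the embedded demo; the list is shown sorted in the requirements
# box and the error is stored in the last_error state.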
def update_state(requirements: list[str], error: str):
    return '\n'.join(sorted(requirements)), error


with gr.Blocks(title="KiteWind") as demo:
    gr.Markdown("<h1 id=\"TEST\" align=\"center\"><a href=\"?\">KiteWind</a> 🪁🍃</h1>")
    gr.Markdown(
        "<h4 align=\"center\">Chat-assisted web app creator by <a href=\"https://huggingface.co/gstaff\">@gstaff</a></h4>")
    selectedTab = gr.State(value='gradio-lite')
    with gr.Tab('Gradio (gradio-lite)') as gradio_lite_tab:
        with gr.Row():
            with gr.Column():
                gr.Markdown("## 1. Run your app in the browser!")
                gr.HTML(value='<div id="gradioDemoDiv"></div>')
        gr.Markdown("## 2. Customize using voice requests!")
        with gr.Row():
            with gr.Column():
                with gr.Group():
                    in_audio = gr.Audio(label="Record a voice request (click or press ctrl + ` to start/stop)",
                                        source='microphone', type='filepath', elem_classes=["record-btn"])
                    in_prompt = gr.Textbox(label="Or type a text request and press Enter",
                                           placeholder="Need an idea? Try one of these:\n- Add a button to reverse the name\n- Change the greeting to Spanish\n- Put the reversed name output into a separate textbox")
                out_text = gr.TextArea(label="🤖 Chat Assistant Response")
                clear = gr.ClearButton([in_prompt, in_audio, out_text])
            with gr.Column():
                gradio_code_area = gr.Code(
                    label="App Code - You can also edit directly and then click Update App or ctrl + space",
                    language='python', value=starting_app_code(DemoType.GRADIO))
                gradio_requirements_area = gr.Code(label="App Requirements (additional modules pip installed for pyodide)")
                update_btn = gr.Button("Update App (Ctrl + Space)", variant="primary", elem_classes=["update-btn"])
        last_error = gr.State()
        code_update_params = {'fn': update_state, 'inputs': [gradio_code_area, gradio_requirements_area],
                              'outputs': [gradio_requirements_area, last_error],
                              '_js': update_iframe_js(DemoType.GRADIO)}
        gen_text_params = {'fn': generate_text, 'inputs': [gradio_code_area, in_prompt],
                           'outputs': [out_text, gradio_code_area]}
        transcribe_params = {'fn': transcribe, 'inputs': [in_audio], 'outputs': [in_prompt, in_audio]}
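        # Three entry points share the same handlers: the Update button re-renders the embedded app,
        # submitting a typed request calls the language model and then re-renders, and stopping a
        # voice recording first transcribes, then generates, then re-renders.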
        update_btn.click(**code_update_params)
        in_prompt.submit(**gen_text_params).then(**code_update_params)
        in_audio.stop_recording(**transcribe_params).then(**gen_text_params).then(**code_update_params)
        with gr.Row():
            with gr.Column():
                gr.Markdown("## 3. Export your app to share!")
                share_link_btn = gr.Button("🔗 Copy share link to clipboard")
                share_link_btn.click(link_copy_notify, [gradio_code_area, gradio_requirements_area], None, _js=copy_share_link_js(DemoType.GRADIO))
                copy_snippet_btn = gr.Button("✂️ Copy app snippet to paste into another page")
                copy_snippet_btn.click(copy_notify, [gradio_code_area, gradio_requirements_area], None, _js=copy_snippet_js(DemoType.GRADIO))
                download_btn = gr.Button("📄 Download app as a standalone file")
                download_btn.click(None, [gradio_code_area, gradio_requirements_area], None, _js=download_code_js(DemoType.GRADIO))
        with gr.Row():
            with gr.Column():
                gr.Markdown("## Current limitations")
                with gr.Accordion("Click to view", open=False):
                    gr.Markdown(
                        "- Only gradio-lite apps using the libraries available in pyodide are supported\n- The chat hasn't been tuned on gradio library data; it may make mistakes")
    with gr.Tab('Streamlit (stlite)') as stlite_tab:
        with gr.Row():
            with gr.Column():
                gr.Markdown("## 1. Run your app in the browser!")
                gr.HTML(value='<div id="stliteDemoDiv"></div>')
        gr.Markdown("## 2. Customize using voice requests!")
        with gr.Row():
            with gr.Column():
                with gr.Group():
                    in_audio = gr.Audio(label="Record a voice request (click or press ctrl + ` to start/stop)",
                                        source='microphone', type='filepath', elem_classes=["record-btn"])
                    in_prompt = gr.Textbox(label="Or type a text request and press Enter",
                                           placeholder="Need an idea? Try one of these:\n- Add a button to reverse the name\n- Change the greeting to Spanish\n- Change the theme to soft")
                out_text = gr.TextArea(label="🤖 Chat Assistant Response")
                clear_btn = gr.ClearButton([in_prompt, in_audio, out_text])
            with gr.Column():
                stlite_code_area = gr.Code(
                    label="App Code - You can also edit directly and then click Update App or ctrl + space",
                    language='python', value=starting_app_code(DemoType.STREAMLIT))
                requirements_area = gr.Code(label="App Requirements (additional modules pip installed for pyodide)")
                update_btn = gr.Button("Update App (Ctrl + Space)", variant="primary", elem_classes=["update-btn"])
        code_update_params = {'fn': None, 'inputs': [stlite_code_area, requirements_area], 'outputs': None,
                              '_js': update_iframe_js(DemoType.STREAMLIT)}
        gen_text_params = {'fn': generate_text, 'inputs': [stlite_code_area, in_prompt],
                           'outputs': [out_text, stlite_code_area]}
        transcribe_params = {'fn': transcribe, 'inputs': [in_audio], 'outputs': [in_prompt, in_audio]}
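        # With 'fn': None and no outputs, the stlite Update button appears to be handled entirely
        # client-side by the update_iframe_js hook; generate_text and transcribe are wired the same
        # way as on the gradio-lite tab.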
        update_btn.click(**code_update_params)
        in_prompt.submit(**gen_text_params).then(**code_update_params)
        in_audio.stop_recording(**transcribe_params).then(**gen_text_params).then(**code_update_params)
        with gr.Row():
            with gr.Column():
                gr.Markdown("## 3. Export your app to share!")
                copy_snippet_btn = gr.Button("✂️ Copy app snippet to paste into another page")
                copy_snippet_btn.click(copy_notify, [stlite_code_area, requirements_area], None, _js=copy_snippet_js(DemoType.STREAMLIT))
                download_btn = gr.Button("📄 Download app as a standalone file")
                download_btn.click(None, stlite_code_area, None, _js=download_code_js(DemoType.STREAMLIT))
        with gr.Row():
            with gr.Column():
                gr.Markdown("## Current limitations")
                with gr.Accordion("Click to view", open=False):
                    gr.Markdown(
                        "- Only Streamlit apps using libraries available in pyodide are supported\n- The chat hasn't been tuned on Streamlit library data; it may make mistakes")
    gradio_lite_tab.select(lambda: "gradio-lite", None, selectedTab).then(None, None, None,
                                                                          _js=load_js(DemoType.GRADIO))
    stlite_tab.select(lambda: "stlite", None, selectedTab).then(None, None, None, _js=load_js(DemoType.STREAMLIT))
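    # On page load: inject the default gradio-lite demo (via load_js), attach the keyboard shortcuts
    # from hotkeys.js, and apply any code/requirements passed in the URL (e.g. from a share link).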
    demo.load(None, None, None, _js=load_js(DemoType.GRADIO))
    demo.load(None, None, None, _js=add_hotkeys())

    demo.load(apply_query_params, gradio_code_area, [gradio_code_area, gradio_requirements_area])
    demo.css = "footer {visibility: hidden}"

if __name__ == "__main__":
    demo.queue().launch(favicon_path='favicon-96x96.png')