# Gradio app: generates a short "working on it" status message via the
# Together AI inference API. (Hugging Face Space status lines removed.)
import os

import gradio as gr
import requests
def generate_message(prompt, model):
    """Call the Together AI inference API and return the generated text.

    Parameters
    ----------
    prompt : str
        The prompt text to send to the model.
    model : str
        Together AI model identifier (e.g. 'meta-llama/Llama-2-70b-chat-hf').

    Returns
    -------
    str
        The model's completion with surrounding whitespace stripped.

    Raises
    ------
    RuntimeError
        If the TOGETHER_API_KEY environment variable is not set.
    requests.HTTPError
        If the API responds with a non-2xx status code.
    """
    # SECURITY: a live bearer token was previously hard-coded here (a leaked
    # credential). Read the key from the environment instead; the old token
    # should be revoked.
    api_key = os.environ.get('TOGETHER_API_KEY')
    if not api_key:
        raise RuntimeError('Set the TOGETHER_API_KEY environment variable.')
    url = 'https://api.together.xyz/inference'
    headers = {
        'Authorization': f'Bearer {api_key}',
        'accept': 'application/json',
        'content-type': 'application/json',
    }
    data = {
        "model": model,
        "prompt": prompt,
        "max_tokens": 512,
        # Stop sequences for Llama-2-style chat prompt templates.
        "stop": [
            "</s>",
            "[INST]",
            "<<SYS>>",
            "<</SYS>>",
            "[/INST]",
            "<s>",
        ],
        "temperature": 0.75,
        "top_p": 0.2,
        "top_k": 10,
    }
    # A timeout keeps the Gradio UI from hanging forever on a stalled request.
    response = requests.post(url, headers=headers, json=data, timeout=60)
    # Surface API errors explicitly instead of an opaque KeyError below.
    response.raise_for_status()
    result = response.json()
    return result['output']['choices'][0]['text'].strip()
def generate_message_with_submit(input_text, model):
    """Gradio callback: forward the prompt and model choice to the API helper."""
    message = generate_message(input_text, model)
    return message
# Wire up the Gradio UI: a large prompt box, a model selector, and an
# output box showing the generated assistant message.
iface = gr.Interface(
    fn=generate_message_with_submit,
    inputs=[
        gr.TextArea(lines=20, label="Enter Prompt"),
        gr.Radio(
            ['meta-llama/Llama-2-70b-chat-hf', 'iamplus/model1-70b'],
            label="Model",
            value='meta-llama/Llama-2-70b-chat-hf',
        ),
    ],
    outputs=gr.TextArea(label="AI Assistant Message"),
    title="AI Assistant Message Generator",
    description="Generate an appropriate message for an AI assistant to let the user know that it's working on a task and will get back soon.",
    # allow_flagging takes the string "never" (or "auto"/"manual"),
    # not a boolean — False is rejected/deprecated by Gradio.
    allow_flagging="never",
)

if __name__ == "__main__":
    # share=True requests a temporary public URL via Gradio's tunnel service.
    iface.launch(share=True)