import gradio as gr
import spaces
from transformers import AutoModelForCausalLM, AutoTokenizer
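# Load Bonito and its tokenizer once at startup so every request reuses the
# same weights.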
model = AutoModelForCausalLM.from_pretrained("BatsResearch/bonito-v1")
tokenizer = AutoTokenizer.from_pretrained("BatsResearch/bonito-v1")
model.to("cuda")
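# This Space runs on ZeroGPU hardware: the @spaces.GPU decorator requests a
# GPU for the duration of each respond() call.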
@spaces.GPU
def respond(
    message,
    task_type,
    max_tokens,
    temperature,
    top_p,
):
    # Bonito expects a tagged prompt: the task type, the unannotated context,
    # and a <|task|> marker after which the model writes the task itself.
    task_type = task_type.lower()
    input_text = "<|tasktype|>\n" + task_type.strip()
    input_text += "\n<|context|>\n" + message.strip() + "\n<|task|>\n"
    input_ids = tokenizer.encode(input_text, return_tensors="pt").to("cuda")

    # First pass: sample an instruction, stopping at the <|pipe|> separator
    # that Bonito emits between the instruction and its response.
    output = model.generate(
        input_ids,
        max_new_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
        do_sample=True,
        stop_strings=["<|pipe|>"],
        tokenizer=tokenizer,
    )
    # Second pass: greedily decode the response conditioned on the sampled
    # instruction (the sampling arguments are dropped here because
    # do_sample=False ignores them).
    output_with_greedy_response = model.generate(
        output,
        max_new_tokens=max_tokens,
        do_sample=False,
    )

    # Keep only the newly generated tokens, skipping the prompt.
    pred_start = int(input_ids.shape[-1])
    response = tokenizer.decode(
        output_with_greedy_response[0][pred_start:], skip_special_tokens=True
    )
    # Split the generation into (instruction, response) at the <|pipe|> separator.
    if "<|pipe|>" in response:
        pair = response.split("<|pipe|>")
        instruction = pair[0].strip().replace("{{context}}", message)
        response = pair[1].strip()
    else:
        # Fallback: no separator was generated, so treat the whole generation
        # as the instruction and ask the user to regenerate.
        instruction = response.strip().replace("{{context}}", message)
        response = "Unable to generate response. Please regenerate."
    return instruction, response
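# For illustration, the prompt that respond() builds for the "natural language
# inference" task type looks like this; the model fills in the text after
# <|task|>, with the instruction and response separated by <|pipe|>:
#
#   <|tasktype|>
#   natural language inference
#   <|context|>
#   <your passage>
#   <|task|>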
task_types = [
    "extractive question answering",
    "multiple-choice question answering",
    "question generation",
    "question answering without choices",
    "yes-no question answering",
    "coreference resolution",
    "paraphrase generation",
    "paraphrase identification",
    "sentence completion",
    "sentiment",
    "summarization",
    "text generation",
    "topic classification",
    "word sense disambiguation",
    "textual entailment",
    "natural language inference",
]
# capitalize for better readability
task_types = [task_type.title() for task_type in task_types]
description = """
This is a demo for Bonito, an open-source model for conditional task generation: the task of converting unannotated text into task-specific synthetic instruction tuning data.
### More details on Bonito
- Model: https://huggingface.co/BatsResearch/bonito-v1
- Paper: https://arxiv.org/abs/2402.18334
- GitHub: https://github.com/BatsResearch/bonito
### Instructions
Try out the model by entering a context and selecting a task type from the dropdown. The model will generate an instruction and a corresponding response based on the context and task type you provide.
"""
examples = [
    (
        """In 2013, American singer-songwriter Taylor Swift purchased High Watch for US$17.75 million. From 2013 to 2016, she received widespread press coverage for hosting annual American Independence Day parties on the estate, featuring numerous celebrity guests and lavish decorations often depicted on Instagram.""",
        "Extractive Question Answering",
    ),
    (
        """Providence was one of the first cities in the country to industrialize and became noted for its textile manufacturing and subsequent machine tool, jewelry, and silverware industries. Today, the city of Providence is home to eight hospitals and eight institutions of higher learning which have shifted the city's economy into service industries, though it still retains some manufacturing activity.""",
        "Yes-No Question Answering",
    ),
    (
        """John Wick (Keanu Reeves) uncovers a path to defeating The High Table. But before he can earn his freedom, Wick must face off against a new enemy with powerful alliances across the globe and forces that turn old friends into foes.""",
        "Natural Language Inference",
    ),
]
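# Examples only prefill the primary inputs; the sliders in additional_inputs
# keep their default values.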
examples_with_additional = [[x[0], x[1]] for x in examples]
demo = gr.Interface(
    fn=respond,
    inputs=[
        gr.Textbox(label="Passage", lines=5, placeholder="Enter context here..."),
        gr.Dropdown(
            task_types,
            value="Natural Language Inference",  # must match a title-cased entry in task_types
            label="Task Type",
        ),
    ],
    outputs=[
        gr.Textbox(
            label="Instruction",
            lines=5,
        ),
        gr.Textbox(label="Response"),
    ],
    additional_inputs=[
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.5, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
    title="Bonito",
    description=description,
    examples=examples_with_additional,
)
if __name__ == "__main__":
    demo.launch()
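# A minimal sketch of querying the running demo programmatically with
# gradio_client; the Space id "BatsResearch/bonito" is an assumption, not
# taken from this file:
#
#   from gradio_client import Client
#
#   client = Client("BatsResearch/bonito")  # hypothetical Space id
#   instruction, response = client.predict(
#       "Some unannotated passage.",   # message
#       "Yes-No Question Answering",   # task_type
#       512, 0.5, 0.95,                # max new tokens, temperature, top-p
#       api_name="/predict",
#   )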