File size: 1,469 Bytes
26b4ff9
 
 
383edc3
 
 
 
 
197520e
383edc3
650bd03
a1fb2a8
b0cc410
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a1fb2a8
b0cc410
 
 
a1fb2a8
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
import gradio as gr
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer

# Hugging Face repo that hosts both the fine-tuned weights and the tokenizer.
_MODEL_ID = 'vitaliy-sharandin/wiseai'

# Load the causal LM in 8-bit on GPU 0 to cut the memory footprint.
model = AutoModelForCausalLM.from_pretrained(
    _MODEL_ID,
    load_in_8bit=True,
    device_map={"": 0},
)
tokenizer = AutoTokenizer.from_pretrained(_MODEL_ID)

# Shared text-generation pipeline used by the Gradio handler below.
pipe = pipeline('text-generation', model=model, tokenizer=tokenizer)

def generate_text(instruction, input):
  """Generate a model response for an instruction, optionally with extra context.

  Args:
    instruction: Task description typed by the user. Required; a blank or
      whitespace-only value yields an error message instead of a generation.
    input: Optional additional context. May be empty or None.

  Returns:
    The generated text truncated at the first "###" marker (the model's
    section delimiter), or an error message when the instruction is missing.
  """
  if not instruction.strip():
    return 'The instruction field is required.'

  # Gradio may hand over None for an untouched textbox; normalize it.
  extra = (input or '').strip()

  # Build the Alpaca-style prompt; the "### Input" section is only
  # included when the user actually supplied additional context.
  prompt = ("Below is an instruction that describes a task. "
            "Write a response that appropriately completes the request.\n\n"
            "### Instruction:\n"
            f"{instruction}\n\n")
  if extra:
    prompt += f"### Input:\n{extra}\n\n"
  prompt += "### Response: \n"

  result = pipe(prompt, max_length=200, top_p=0.9, temperature=0.9,
                num_return_sequences=1, return_full_text=False)[0]['generated_text']
  # partition() returns the whole string when "###" is absent, unlike
  # str.find(), whose -1 would silently slice off the last character.
  return result.partition("###")[0]

# Wire the handler into a simple two-input, one-output web UI.
instruction_box = gr.Textbox(label="Instruction")
extra_input_box = gr.Textbox(label="Additional Input")
response_box = gr.Textbox(label="Response")

iface = gr.Interface(
    fn=generate_text,
    inputs=[instruction_box, extra_input_box],
    outputs=response_box,
)
iface.launch()