mooncellar committed on
Commit
93c1ef4
1 Parent(s): 71dcbeb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -6
app.py CHANGED
@@ -1,13 +1,19 @@
1
  import gradio as gr
2
- from transformers import AutoModelForCausalLM, AutoTokenizer
3
 
4
# Legacy local-inference setup: load the abliterated Llama-3.1-8B-Instruct
# tokenizer and model weights directly via transformers (replaced by the
# hosted Inference API in the newer revision).
tokenizer = AutoTokenizer.from_pretrained(
    "mlabonne/Meta-Llama-3.1-8B-Instruct-abliterated"
)
model = AutoModelForCausalLM.from_pretrained(
    "mlabonne/Meta-Llama-3.1-8B-Instruct-abliterated",
    device_map="auto",
)
 
 
 
 
6
 
7
def generate_text(prompt):
    """Generate a completion for *prompt* with the locally loaded model.

    Encodes the prompt, generates up to 200 tokens total (prompt included),
    and decodes the first returned sequence without special tokens.
    """
    encoded = tokenizer.encode(prompt, return_tensors="pt")
    generated = model.generate(encoded, max_length=200)
    decoded = tokenizer.decode(generated[0], skip_special_tokens=True)
    return decoded
 
 
11
 
12
# Wire the generator into a minimal text-in/text-out Gradio UI and serve it.
iface = gr.Interface(
    fn=generate_text,
    inputs="text",
    outputs="text",
)
iface.launch()
 
1
  import gradio as gr
2
+ import requests
3
 
4
import os

# Hosted Inference API endpoint for the abliterated Llama-3.1-8B-Instruct model.
API_URL = "https://api-inference.huggingface.co/models/mlabonne/Meta-Llama-3.1-8B-Instruct-abliterated"
# SECURITY: read the token from the environment instead of hard-coding a
# secret into source control. The original placeholder is kept as the
# fallback, so behavior is unchanged when HF_TOKEN is not set.
headers = {"Authorization": f"Bearer {os.environ.get('HF_TOKEN', 'hf_...')}"}
6
+
7
def query(payload, timeout=60):
    """POST *payload* to the hosted Inference API and return the parsed JSON.

    Args:
        payload: JSON-serializable request body for the inference endpoint.
        timeout: Seconds to wait for the API before giving up. New, defaulted
            parameter — existing callers passing only *payload* are unaffected.
            Without it, requests would wait forever on a stalled endpoint and
            hang the whole Gradio app.

    Returns:
        The decoded JSON response: on success a list of generation dicts,
        on failure a dict such as {"error": "..."} (e.g. while the model
        is loading).

    Raises:
        requests.Timeout: if no response arrives within *timeout* seconds.
        requests.RequestException: on connection-level failures.
    """
    response = requests.post(API_URL, headers=headers, json=payload, timeout=timeout)
    return response.json()
10
 
11
def generate_text(prompt):
    """Generate up to 200 new tokens for *prompt* via the hosted Inference API.

    Returns the generated text on success. The Inference API returns a dict
    like {"error": "..."} (HTTP 503 while the model loads, bad token, etc.)
    instead of the usual list payload; the original code crashed with a
    KeyError/TypeError on that shape, so both shapes are handled explicitly
    and a readable message is shown in the UI instead.
    """
    output = query({
        "inputs": prompt,
        "parameters": {"max_new_tokens": 200},
    })
    # Success: a list of {"generated_text": ...} entries.
    if isinstance(output, list) and output and "generated_text" in output[0]:
        return output[0]["generated_text"]
    # Failure: an error dict from the API (model loading, auth, rate limit).
    if isinstance(output, dict) and "error" in output:
        return f"API error: {output['error']}"
    return f"Unexpected API response: {output!r}"
17
 
18
# Expose the API-backed generator through a simple text-to-text Gradio UI.
iface = gr.Interface(
    fn=generate_text,
    inputs="text",
    outputs="text",
)
iface.launch()