import os

import gradio as gr
from huggingface_hub import login
from peft import PeftConfig, PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

# Log in to the Hugging Face Hub with a token stored in the Space secrets
# (exposed here as the environment variable "token")
token = os.environ.get("token")
login(token)
####
model_name = "Ikeofai/gemma-2b-for-python-v2"
max_length = 200

# Load the tokenizer and the PEFT adapter config; the config records the base
# model, which is loaded first and then wrapped with the adapter weights
tokenizer = AutoTokenizer.from_pretrained(model_name)
config = PeftConfig.from_pretrained(model_name, token=token)
model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path)
model = PeftModel.from_pretrained(model, model_name)
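# Inference only: putting the adapted model in eval mode is a reasonable
# default here (assumption: no further training happens in this Space)
model.eval()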
#testing
#tokenizer = AutoTokenizer.from_pretrained("Orcawise/eu-ai-act-align", use_fast=True,max_length=200)
#pipe = pipeline("text2text-generation", model=model,tokenizer=tokenizer)
#pipe = pipeline("conversational", model="google/vit-base-patch16-224")
#gr.Interface.from_pipeline(pipe).launch()
def generate_text(prompt):
    """Generates text using the PEFT-adapted model.

    Args:
        prompt (str): The user-provided prompt to start the generation.

    Returns:
        str: The generated text.
    """
    # Tokenize the prompt
    input_ids = tokenizer(prompt, return_tensors="pt")["input_ids"]
    # Generate a continuation (greedy decoding; raise num_beams for beam search
    # at the cost of extra processing time)
    output = model.generate(
        input_ids=input_ids,
        max_length=max_length,
        num_beams=1,
    )
    # Decode the generated tokens back into a string
    generated_text = tokenizer.batch_decode(output, skip_special_tokens=True)[0]
    return generated_text
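# Quick smoke test for generate_text (hedged example: the prompt is arbitrary
# and running it loads the model at startup, so it stays commented out)
# print(generate_text("Write a Python function that reverses a string."))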
#############
### chat UI: bot() streams its reply and greets the user on the first interaction
with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox(placeholder="Ask your question...")  # Add placeholder text
    submit_button = gr.Button("Submit")
    clear = gr.Button("Clear")

    def user(user_message, history):
        # Clear the textbox and append the user's turn with an empty bot slot
        return "", history + [[user_message, None]]
    def bot(history):
        history[-1][1] = ""  # Start the bot message of the last turn as an empty string
        if len(history) == 1:  # First interaction: greet the user with a welcome message
            bot_message = "Hi there! How can I help you today?"
            for character in bot_message:
                history[-1][1] += character
                yield history  # Yield the updated history character by character
        else:
            previous_message = history[-1][0]  # The just-submitted user message
            bot_message = generate_text(previous_message)  # Generate a response to it
            for character in bot_message:
                history[-1][1] += character
                yield history  # Yield the updated history character by character
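    # Because bot() is a generator, Gradio re-renders the Chatbot after every
    # yield, which produces the character-by-character typing effect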
    # Connect the submit button to the user function and then the bot function
    submit_button.click(user, [msg, chatbot], [msg, chatbot], queue=False).then(
        bot, chatbot, chatbot
    )
    # Trigger the same chain on Enter key press in the textbox
    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
        bot, chatbot, chatbot
    )
    # The clear button wipes the chat history
    clear.click(lambda: None, None, chatbot, queue=False)
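# Enable the request queue before launching (hedged: generator event handlers
# require the queue in some Gradio versions; calling queue() is harmless otherwise)
demo.queue()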
demo.launch()