# NOTE: extraction artifacts (file size, commit hashes, line-number gutter) removed.
import gradio as gr
import os
from huggingface_hub import login
from transformers import AutoModelForSeq2SeqLM, T5Tokenizer
from peft import PeftModel, PeftConfig
# --- Hub authentication and model loading (runs once at import time) ---
# HF access token is expected in the "token" environment variable
# (e.g. a Space secret); login() raises if it is missing/invalid.
token = os.environ.get("token")
login(token)
print("login is successful")  # typo fix: was "succesful"

max_length = 512  # generation cap (in tokens) used by generate_text below
MODEL_NAME = "google/flan-t5-base"

tokenizer = T5Tokenizer.from_pretrained(MODEL_NAME, token=token)
# PEFT adapter fine-tuned on the EU AI Act, applied on top of the base model.
config = PeftConfig.from_pretrained("Orcawise/eu_ai_act_orcawise_july12")
base_model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME)  # consistency: reuse MODEL_NAME
model = PeftModel.from_pretrained(base_model, "Orcawise/eu_ai_act_orcawise_july12")
def generate_text(prompt):
    """Generate a response for *prompt* with the PEFT-adapted FLAN-T5 model.

    Args:
        prompt (str): The user-provided prompt to start the generation.

    Returns:
        str: The decoded model output, special tokens stripped.
    """
    # Tokenize the prompt (CPU tensors; the model is loaded without a device map).
    encoded = tokenizer(prompt, return_tensors="pt")

    # Beam search (3 beams) with a repetition penalty to curb degenerate loops.
    generated_ids = model.generate(
        input_ids=encoded["input_ids"],
        max_length=max_length,
        num_beams=3,
        repetition_penalty=2.2,
    )
    print("show the output", generated_ids)

    # Decode the first (best) beam back into text.
    decoded = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
    print("show the generated text", decoded)
    return decoded
#############
custom_css="""
.message.pending {
background: #A8C4D6;
}
/* Response message */
.message.bot.svelte-1s78gfg.message-bubble-border {
/* background: white; */
border-color: #266B99
}
/* User message */
.message.user.svelte-1s78gfg.message-bubble-border{
background: #9DDDF9;
border-color: #9DDDF9
}
/* For both user and response message as per the document */
span.md.svelte-8tpqd2.chatbot.prose p {
color: #266B99;
}
/* Chatbot comtainer */
.gradio-container{
/* background: #84D5F7 */
}
/* RED (Hex: #DB1616) for action buttons and links only */
.clear-btn {
background: #DB1616;
color: white;
}
/* #84D5F7 - Primary colours are set to be used for all sorts */
.submit-btn {
background: #266B99;
color: white;
}
"""
# Chat UI. Bug fix: `bot` previously gated the welcome message on
# `len(history) < 0`, which is never true, so the greeting never rendered
# (and the unconditional `history[-1][1] = ""` would IndexError on an
# empty history). The branch now triggers on an actually-empty history.
with gr.Blocks(css=custom_css) as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox(placeholder="Ask your question...")
    submit_button = gr.Button("Submit", elem_classes="submit-btn")
    clear = gr.Button("Clear", elem_classes="clear-btn")

    def user(user_message, history):
        """Append the user's message as a new turn and clear the textbox."""
        return "", history + [[user_message, None]]

    def bot(history):
        """Stream the bot reply character by character into the last turn."""
        if not history:  # first interaction: no user turn yet, greet instead
            bot_message = "Hi there! How can I help you today?"
            history.append([None, ""])  # empty slot to stream the greeting into
        else:
            history[-1][1] = ""  # reset the reply slot before streaming
            bot_message = generate_text(history[-1][0])  # respond to last user message
        for character in bot_message:
            history[-1][1] += character
            yield history  # yield the updated history character by character

    # Submit button: record the user turn, then stream the bot reply.
    submit_button.click(user, [msg, chatbot], [msg, chatbot], queue=False).then(
        bot, chatbot, chatbot
    )
    # Enter key in the textbox triggers the same chain as the button.
    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
        bot, chatbot, chatbot
    )
    # Clear button wipes the chat history.
    clear.click(lambda: None, None, chatbot, queue=False)

demo.launch()