Komal-patra committed · Commit c3599b6 · Parent(s): dea4866

Update app.py

app.py CHANGED
@@ -4,122 +4,99 @@ from huggingface_hub import login
 from transformers import AutoModelForSeq2SeqLM, T5Tokenizer
 from peft import PeftModel, PeftConfig
 
+# Hugging Face login
 token = os.environ.get("token")
 login(token)
-print("
-max_length=150
+print("Login is successful")
 
+# Model and tokenizer setup
 MODEL_NAME = "google/flan-t5-base"
 tokenizer = T5Tokenizer.from_pretrained(MODEL_NAME, token=token)
 config = PeftConfig.from_pretrained("Komal-patra/results")
 base_model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-base")
 model = PeftModel.from_pretrained(base_model, "Komal-patra/results")
 
-#
-
+# Text generation function
 def generate_text(prompt, max_length=150):
-    """Generates text using the PEFT model.
-    Args:
-        prompt (str): The user-provided prompt to start the generation.
-    Returns:
-        str: The generated text.
-    """
-
-
-    # Preprocess the prompt
-    # inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
     inputs = tokenizer(prompt, return_tensors="pt")
-
-    # Generate text using beam search
     outputs = model.generate(
-    [... 6 deleted lines not shown in the page ...]
+        input_ids=inputs["input_ids"],
+        max_length=max_length,
+        num_beams=1,
+        repetition_penalty=2.2
+    )
-    print(outputs)
-    # Decode the generated tokens
     generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
-    print("show the generated text", generated_text)
     return generated_text
 
-
-custom_css="""
+# Custom CSS for the UI
+custom_css = """
 .message.pending {
     background: #A8C4D6;
 }
 /* Response message */
 .message.bot.svelte-1s78gfg.message-bubble-border {
-
-    border-color: #266B99
+    border-color: #266B99;
 }
 /* User message */
-.message.user.svelte-1s78gfg.message-bubble-border{
+.message.user.svelte-1s78gfg.message-bubble-border {
     background: #9DDDF9;
-    border-color: #9DDDF9
-
+    border-color: #9DDDF9;
 }
 /* For both user and response message as per the document */
 span.md.svelte-8tpqd2.chatbot.prose p {
     color: #266B99;
 }
-/* Chatbot
-.gradio-container{
-    [... 1 deleted line not shown in the page ...]
+/* Chatbot container */
+.gradio-container {
+    background: #1c1c1c; /* Dark background */
+    color: white; /* Light text color */
 }
 /* RED (Hex: #DB1616) for action buttons and links only */
 .clear-btn {
-    [... 2 deleted lines not shown in the page ...]
+    background: #DB1616;
+    color: white;
 }
-/*
+/* Primary colors are set to be used for all sorts */
 .submit-btn {
-    [... 2 deleted lines not shown in the page ...]
+    background: #266B99;
+    color: white;
 }
 """
 
-
+# Gradio interface setup
 with gr.Blocks(css=custom_css) as demo:
-    [... 35 deleted lines not shown in the page ...]
-        bot, chatbot, chatbot
-    )
-
-    clear.click(lambda: None, None, chatbot, queue=False)
-
-demo.launch()
+    with gr.Row():
+        with gr.Column(scale=1):
+            gr.Markdown("<h2>My chats</h2>")
+            chat_topics = gr.Markdown("<!-- Dynamic content -->")
+
+        with gr.Column(scale=3):
+            gr.Markdown("<h1>Ask a question about the EU AI Act</h1>")
+            chatbot = gr.Chatbot()
+            msg = gr.Textbox(placeholder="Ask your question...", show_label=False)  # Add placeholder text
+            submit_button = gr.Button("Submit", elem_classes="submit-btn")
+            clear = gr.Button("Clear", elem_classes="clear-btn")
+
+    def user(user_message, history):
+        return "", history + [[user_message, None]]
+
+    def bot(history):
+        if len(history) == 1:  # Check if it's the first interaction
+            bot_message = "Hi there! How can I help you today?"
+            history[-1][1] = bot_message  # Add welcome message to history
+        else:
+            history[-1][1] = ""  # Clear the last bot message
+            previous_message = history[-1][0]  # Access the previous user message
+            bot_message = generate_text(previous_message)  # Generate response based on previous message
+            history[-1][1] = bot_message  # Update the last bot message
+        return history
+
+    submit_button.click(user, [msg, chatbot], [msg, chatbot], queue=False).then(
+        bot, chatbot, chatbot
+    )
+    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
+        bot, chatbot, chatbot
+    )
+    clear.click(lambda: None, None, chatbot, queue=False)
+
+demo.launch()
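Note: the committed generate_text() passes only input_ids to model.generate(). Below is a minimal standalone sketch of the same pipeline, not part of the commit; the attention_mask argument and the example prompt are additions here, and it assumes the google/flan-t5-base weights and the Komal-patra/results adapter are reachable with the token set in the environment.

# Minimal sketch of the committed pipeline (not part of the commit).
# Assumes: HF token in the "token" env var, and access to
# google/flan-t5-base plus the Komal-patra/results PEFT adapter.
import os

from huggingface_hub import login
from peft import PeftModel
from transformers import AutoModelForSeq2SeqLM, T5Tokenizer

login(os.environ.get("token"))

tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-base")
base_model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-base")
model = PeftModel.from_pretrained(base_model, "Komal-patra/results")

inputs = tokenizer("What does the EU AI Act regulate?", return_tensors="pt")
outputs = model.generate(
    input_ids=inputs["input_ids"],
    attention_mask=inputs["attention_mask"],  # addition: silences the generate() warning; not in the commit
    max_length=150,
    num_beams=1,
    repetition_penalty=2.2,
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))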
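The event wiring added in this commit follows Gradio's chained-listener pattern: a fast, unqueued user step pushes the message into the chat history so the UI updates immediately, and .then() runs the slower bot step afterwards. A stripped-down, self-contained sketch of that pattern (the echo reply stands in for the model call):

# Sketch of the click/submit + .then() chaining used in the commit.
import gradio as gr

def user(user_message, history):
    # Fast step: show the user's turn at once and clear the textbox.
    return "", history + [[user_message, None]]

def bot(history):
    # Slow step: fill in the reply (generate_text() would go here).
    history[-1][1] = "Echo: " + history[-1][0]
    return history

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox(placeholder="Type a message...", show_label=False)
    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
        bot, chatbot, chatbot
    )

demo.launch()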