Update app.py
app.py CHANGED
@@ -1,36 +1,48 @@
 import gradio as gr
-from transformers import pipeline
+from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
 import re
+import logging
+
+logging.basicConfig(level=logging.INFO)
 
 def initialize_pipeline(token):
     """Initialize the Hugging Face pipeline with authentication token."""
-
-
+    try:
+        tokenizer = AutoTokenizer.from_pretrained("riotu-lab/ArabianGPT-0.8B-IslamicQA", use_auth_token=token)
+        model = AutoModelForCausalLM.from_pretrained("riotu-lab/ArabianGPT-0.8B-IslamicQA", use_auth_token=token)
+        return pipeline("text-generation", model=model, tokenizer=tokenizer)
+    except Exception as e:
+        logging.error(f"Failed to load model with token. Error: {str(e)}")
+        raise
 
 def generate_response(token, question, penalty_alpha, do_sample, top_k, temperature, repetition_penalty, max_new_tokens):
     """Generate the response using the configured parameters and provided question."""
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    try:
+        # Initialize the pipeline with the token
+        pipe = initialize_pipeline(token)
+
+        # Apply the formatted prompt
+        formatted_prompt_text = f"user\n{question}\nassistant:"
+
+        # Generate the response using the model pipeline with specified configuration
+        result = pipe(formatted_prompt_text,
+                      num_return_sequences=1,
+                      do_sample=do_sample,
+                      top_k=top_k,
+                      temperature=temperature,
+                      repetition_penalty=repetition_penalty,
+                      max_new_tokens=max_new_tokens)
 
-
-
-
-
-
+        # Assuming the result is in the correct format, extract the generated text
+        response_text = result[0]['generated_text'] if result else ""
+
+        # Extract and clean the response
+        cleaned_response = extract_and_clean_response(response_text)
 
-
+        return cleaned_response
+    except Exception as e:
+        logging.error(f"An error occurred during generation: {str(e)}")
+        return "An error occurred. Please check your input and try again."
 
 def extract_and_clean_response(text: str) -> str:
     """Extracts the part after 'assistant:' and cleans unwanted patterns from it."""
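Note: the new `generate_response` calls `initialize_pipeline(token)` on every request, so the tokenizer and model are reloaded from the Hub each time the user presses submit, which is slow and a likely source of timeouts on a Space. Also, `penalty_alpha` is accepted by `generate_response` but never forwarded to the `pipe(...)` call, so contrastive search is silently ignored; if it is intended, `penalty_alpha=penalty_alpha` would need to be added to that call. A minimal sketch of caching the pipeline instead, assuming the same model id; `get_pipeline` is a hypothetical helper, not code from app.py, and `token=` replaces the deprecated `use_auth_token=` in recent transformers releases:

from functools import lru_cache

from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

MODEL_ID = "riotu-lab/ArabianGPT-0.8B-IslamicQA"

@lru_cache(maxsize=1)
def get_pipeline(token: str):
    # Hypothetical helper: load the model once per token and reuse it across requests.
    tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, token=token)
    model = AutoModelForCausalLM.from_pretrained(MODEL_ID, token=token)
    return pipeline("text-generation", model=model, tokenizer=tokenizer)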
@@ -64,6 +76,5 @@ iface = gr.Interface(
     live=False # Requires user to press "submit" to run
 )
 
-
 # Launch the app
 iface.launch()
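The second hunk only touches the tail of the `iface = gr.Interface(` call, so the input components themselves stay outside the diff. A hedged sketch of how the eight parameters of `generate_response` might be wired up; every label, range, and default below is an assumption rather than the actual values in app.py:

import gradio as gr

iface = gr.Interface(
    fn=generate_response,
    inputs=[
        gr.Textbox(label="HF token", type="password"),  # assumed components and defaults
        gr.Textbox(label="Question"),
        gr.Slider(0.0, 1.0, value=0.6, label="penalty_alpha"),
        gr.Checkbox(value=True, label="do_sample"),
        gr.Slider(1, 100, value=50, step=1, label="top_k"),
        gr.Slider(0.1, 2.0, value=0.7, label="temperature"),
        gr.Slider(1.0, 2.0, value=1.2, label="repetition_penalty"),
        gr.Slider(16, 512, value=128, step=1, label="max_new_tokens"),
    ],
    outputs=gr.Textbox(label="Answer"),
    live=False,  # Requires user to press "submit" to run
)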
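`extract_and_clean_response` itself is unchanged by this commit, so only its signature and docstring appear as diff context. An illustrative sketch consistent with that docstring; the real cleanup patterns in app.py are not shown, and the regex below is purely an assumption:

import re

def extract_and_clean_response(text: str) -> str:
    """Extracts the part after 'assistant:' and cleans unwanted patterns from it."""
    # Keep only the text after the 'assistant:' marker produced by the prompt format.
    _, _, answer = text.partition("assistant:")
    # Assumed cleanup: collapse whitespace runs; the actual patterns are not in the diff.
    answer = re.sub(r"\s+", " ", answer)
    return answer.strip()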