riotu-lab commited on
Commit
74d3b70
1 Parent(s): 636397f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +35 -24
app.py CHANGED
@@ -1,36 +1,48 @@
1
  import gradio as gr
2
- from transformers import pipeline
3
  import re
 
 
 
4
 
5
  def initialize_pipeline(token):
6
  """Initialize the Hugging Face pipeline with authentication token."""
7
- return pipeline("text-generation", model="riotu-lab/ArabianGPT-0.8B-IslamicQA", use_auth_token=token)
8
-
 
 
 
 
 
9
 
10
  def generate_response(token, question, penalty_alpha, do_sample, top_k, temperature, repetition_penalty, max_new_tokens):
11
  """Generate the response using the configured parameters and provided question."""
12
- # Initialize the pipeline with the token
13
- pipe = initialize_pipeline(token)
14
-
15
- # Apply the formatted prompt
16
- formatted_prompt_text = f"user\n{question}\nassistant:"
17
-
18
- # Generate the response using the model pipeline with specified configuration
19
- result = pipe(formatted_prompt_text,
20
- num_return_sequences=1,
21
- do_sample=do_sample,
22
- top_k=top_k,
23
- temperature=temperature,
24
- repetition_penalty=repetition_penalty,
25
- max_new_tokens=max_new_tokens)
 
26
 
27
- # Assuming the result is in the correct format, extract the generated text
28
- response_text = result[0]['generated_text'] if result else ""
29
-
30
- # Extract and clean the response
31
- cleaned_response = extract_and_clean_response(response_text)
32
 
33
- return cleaned_response
 
 
 
34
 
35
  def extract_and_clean_response(text: str) -> str:
36
  """Extracts the part after 'assistant:' and cleans unwanted patterns from it."""
@@ -64,6 +76,5 @@ iface = gr.Interface(
64
  live=False # Requires user to press "submit" to run
65
  )
66
 
67
-
68
  # Launch the app
69
  iface.launch()
 
1
  import gradio as gr
2
+ from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
3
  import re
4
+ import logging
5
+
6
+ logging.basicConfig(level=logging.INFO)
7
 
8
def initialize_pipeline(token):
    """Initialize the Hugging Face text-generation pipeline.

    Args:
        token: Hugging Face access token used to authenticate the download
            of the model and tokenizer (the repo appears to be gated —
            the original code logs a token-related failure).

    Returns:
        A ``transformers`` text-generation pipeline for
        ``riotu-lab/ArabianGPT-0.8B-IslamicQA``.

    Raises:
        Exception: re-raised after logging when loading the model or
            tokenizer fails (bad token, network error, ...).
    """
    # Reuse the previously built pipeline when the same token is supplied
    # again: reloading a 0.8B-parameter model on every request is very slow.
    # The cache lives on the function object, so no module-level state or
    # extra imports are needed.
    cached = getattr(initialize_pipeline, "_cache", None)
    if cached is not None and cached[0] == token:
        return cached[1]
    try:
        # NOTE(review): `use_auth_token` is deprecated in recent transformers
        # releases in favour of `token=`; kept as-is for compatibility with
        # the installed version — confirm before switching.
        tokenizer = AutoTokenizer.from_pretrained(
            "riotu-lab/ArabianGPT-0.8B-IslamicQA", use_auth_token=token
        )
        model = AutoModelForCausalLM.from_pretrained(
            "riotu-lab/ArabianGPT-0.8B-IslamicQA", use_auth_token=token
        )
        pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
        initialize_pipeline._cache = (token, pipe)
        return pipe
    except Exception as e:
        # Lazy %-formatting: the message is only rendered if actually emitted.
        logging.error("Failed to load model with token. Error: %s", e)
        raise
17
 
18
def generate_response(token, question, penalty_alpha, do_sample, top_k, temperature, repetition_penalty, max_new_tokens):
    """Generate the response using the configured parameters and provided question.

    Args:
        token: Hugging Face access token, forwarded to ``initialize_pipeline``.
        question: The user's question, inserted into the chat-style prompt.
        penalty_alpha: Contrastive-search penalty alpha passed to generation.
        do_sample: Whether to sample instead of greedy decoding.
        top_k: Top-k filtering value for sampling.
        temperature: Sampling temperature.
        repetition_penalty: Penalty applied to repeated tokens.
        max_new_tokens: Maximum number of tokens to generate.

    Returns:
        The cleaned model answer as a string, or a fixed error message
        string if any step fails (UI-boundary behaviour: never raises).
    """
    try:
        # Build (or fetch) the authenticated pipeline.
        pipe = initialize_pipeline(token)

        # Chat-style prompt the model was tuned on.
        formatted_prompt_text = f"user\n{question}\nassistant:"

        # BUG FIX: `penalty_alpha` was accepted by this function (and exposed
        # in the UI) but never forwarded to the pipeline, so the control
        # silently had no effect. It is now passed through.
        result = pipe(
            formatted_prompt_text,
            num_return_sequences=1,
            penalty_alpha=penalty_alpha,
            do_sample=do_sample,
            top_k=top_k,
            temperature=temperature,
            repetition_penalty=repetition_penalty,
            max_new_tokens=max_new_tokens,
        )

        # Pipeline returns a list of dicts; guard against an empty result.
        response_text = result[0]['generated_text'] if result else ""

        # Strip the prompt echo and unwanted patterns before returning.
        return extract_and_clean_response(response_text)
    except Exception as e:
        # UI boundary: log the failure (lazy %-args) and return a friendly
        # message instead of propagating the exception into Gradio.
        logging.error("An error occurred during generation: %s", e)
        return "An error occurred. Please check your input and try again."
46
 
47
  def extract_and_clean_response(text: str) -> str:
48
  """Extracts the part after 'assistant:' and cleans unwanted patterns from it."""
 
76
  live=False # Requires user to press "submit" to run
77
  )
78
 
 
79
  # Launch the app
80
  iface.launch()