Spaces:
Sleeping
Sleeping
ajaynagotha
committed on
Commit
•
b7473ec
1
Parent(s):
a09cbc2
Update app.py
Browse files
app.py
CHANGED
@@ -35,6 +35,12 @@ except Exception as e:
|
|
35 |
logger.error(f"Error loading model or tokenizer: {str(e)}")
|
36 |
sys.exit(1)
|
37 |
|
|
|
|
|
|
|
|
|
|
|
|
|
38 |
def answer_question(question, system_prompt, temperature, max_new_tokens, top_p, frequency_penalty, presence_penalty, top_k, echo, best_of):
|
39 |
logger.info(f"Received question: {question}")
|
40 |
logger.info(f"Parameters: temp={temperature}, max_tokens={max_new_tokens}, top_p={top_p}, freq_penalty={frequency_penalty}, pres_penalty={presence_penalty}, top_k={top_k}, echo={echo}, best_of={best_of}")
|
@@ -55,9 +61,14 @@ def answer_question(question, system_prompt, temperature, max_new_tokens, top_p,
|
|
55 |
logger.info("Processing output to get answer")
|
56 |
answer_start = torch.argmax(outputs.start_logits)
|
57 |
answer_end = torch.argmax(outputs.end_logits) + 1
|
58 |
-
|
|
|
59 |
logger.info(f"Generated answer: {answer}")
|
60 |
|
|
|
|
|
|
|
|
|
61 |
disclaimer = "\n\nPlease note: This response is generated by an AI model based on the Bhagavad Gita. For authoritative information, please consult the original text or scholarly sources."
|
62 |
full_response = answer + disclaimer
|
63 |
logger.info("Answer generated successfully")
|
|
|
35 |
logger.error(f"Error loading model or tokenizer: {str(e)}")
|
36 |
sys.exit(1)
|
37 |
|
38 |
+
def clean_answer(answer, special_tokens=None):
    """Strip tokenizer special tokens and surrounding whitespace from *answer*.

    Parameters
    ----------
    answer : str
        Raw decoded model output, possibly containing special tokens
        (e.g. ``[CLS]``, ``[SEP]``) as whitespace-separated words.
    special_tokens : iterable of str, optional
        Tokens to drop. Defaults to the module-level ``tokenizer``'s
        ``all_special_tokens``, matching the original behavior.

    Returns
    -------
    str
        The answer with special tokens removed and leading/trailing
        whitespace trimmed. Note: internal whitespace runs are collapsed
        to single spaces as a side effect of split/join.
    """
    if special_tokens is None:
        # Preserve original behavior: pull the specials from the
        # globally loaded tokenizer.
        special_tokens = tokenizer.all_special_tokens
    # Materialize as a set once so membership tests are O(1).
    drop = set(special_tokens)
    # Rebuild the string from whitespace-split tokens, skipping specials.
    cleaned_answer = ' '.join(tok for tok in answer.split() if tok not in drop)
    return cleaned_answer.strip()
|
43 |
+
|
44 |
def answer_question(question, system_prompt, temperature, max_new_tokens, top_p, frequency_penalty, presence_penalty, top_k, echo, best_of):
|
45 |
logger.info(f"Received question: {question}")
|
46 |
logger.info(f"Parameters: temp={temperature}, max_tokens={max_new_tokens}, top_p={top_p}, freq_penalty={frequency_penalty}, pres_penalty={presence_penalty}, top_k={top_k}, echo={echo}, best_of={best_of}")
|
|
|
61 |
logger.info("Processing output to get answer")
|
62 |
answer_start = torch.argmax(outputs.start_logits)
|
63 |
answer_end = torch.argmax(outputs.end_logits) + 1
|
64 |
+
raw_answer = tokenizer.convert_tokens_to_string(tokenizer.convert_ids_to_tokens(inputs["input_ids"][0][answer_start:answer_end]))
|
65 |
+
answer = clean_answer(raw_answer)
|
66 |
logger.info(f"Generated answer: {answer}")
|
67 |
|
68 |
+
if not answer:
|
69 |
+
logger.warning("Generated answer was empty after cleaning")
|
70 |
+
answer = "I'm sorry, but I couldn't find a specific answer to that question based on the Bhagavad Gita. Could you please rephrase your question or ask about one of the core concepts like dharma, karma, bhakti, or the different types of yoga discussed in the Gita?"
|
71 |
+
|
72 |
disclaimer = "\n\nPlease note: This response is generated by an AI model based on the Bhagavad Gita. For authoritative information, please consult the original text or scholarly sources."
|
73 |
full_response = answer + disclaimer
|
74 |
logger.info("Answer generated successfully")
|