Spaces: Runtime error
robertselvam committed • Commit 10de3c4 • 1 Parent(s): 74ffdc6
Update app.py

app.py CHANGED
@@ -23,7 +23,6 @@ import re
 nltk.download('punkt')
 nltk.download('stopwords')

-
 class VideoAnalytics:
     """
     Class for performing analytics on videos including transcription, summarization, topic generation,
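The first hunk leaves the NLTK setup untouched: nltk.download('punkt') fetches the Punkt tokenizer models and nltk.download('stopwords') fetches the stopword lists used when transcripts are processed. A minimal, self-contained sketch of how these two resources are typically consumed (the sample sentence is made up for illustration):

# A minimal sketch (not from the commit): download the same NLTK resources and
# use them to tokenize a made-up sentence and drop English stopwords.
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize

nltk.download('punkt')
nltk.download('stopwords')

text = "This video explains how the summarization pipeline works."
tokens = word_tokenize(text)                  # backed by the 'punkt' models
stop_words = set(stopwords.words('english'))  # backed by the 'stopwords' corpus
content_words = [t for t in tokens if t.lower() not in stop_words]
print(content_words)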
@@ -425,8 +424,7 @@ class VideoAnalytics:
         return prompt+prompt1


-    def generate(self,
-                 repetition_penalty=1.0) -> str:
+    def generate(self, question: str) -> str:
         """
         Generates text based on the prompt and transcribed text.
         Args:
@@ -439,31 +437,36 @@ class VideoAnalytics:
         Returns:
             str: Generated text.
         """
-        try:
-        … (24 deleted lines not captured in this page extract)
+        # try:
+        temperature=0.9
+        max_new_tokens=5000
+        top_p=0.95
+        repetition_penalty=1.0
+
+        temperature = float(temperature)
+        if temperature < 1e-2:
+            temperature = 1e-2
+        top_p = float(top_p)
+
+        generate_kwargs = dict(
+            temperature=temperature,
+            max_new_tokens=max_new_tokens,
+            top_p=top_p,
+            repetition_penalty=repetition_penalty,
+            do_sample=True,
+            seed=42,
+        )
+        prompt = self.format_prompt(question, self.english_text)
+        # Generate text using the mistral client
+        stream = self.mistral_client.text_generation(prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
+        output = ""
+        # Concatenate generated text
+        for response in stream:
+            output += response.token.text
+        return output.replace("</s>","")
+        # except Exception as e:
+        #     logging.error(f"Error in text generation: {e}")
+        #     return "An error occurred during text generation."

     def video_qa(self, question: str, model: str) -> str:
         """
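The rewritten generate() replaces the old multi-argument signature with a single question parameter, hard-codes the sampling settings inside the body, and streams tokens from a Mixtral endpoint via self.mistral_client.text_generation. The standalone sketch below reproduces that call pattern on the assumption that the client is a huggingface_hub.InferenceClient, which exposes exactly this text_generation signature; the model id and the prompt are placeholders, since the client and format_prompt are defined elsewhere in app.py and are not part of this diff:

# A standalone sketch of the streaming call used by the new generate(); the model
# id and prompt are placeholders, and the client is assumed to be a
# huggingface_hub.InferenceClient (the commit's self.mistral_client is built
# elsewhere in app.py).
from huggingface_hub import InferenceClient

client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")  # assumed model id

generate_kwargs = dict(
    temperature=0.9,
    max_new_tokens=512,
    top_p=0.95,
    repetition_penalty=1.0,
    do_sample=True,
    seed=42,
)

# stream=True yields token events one by one; details=True attaches token
# metadata, which is why the loop reads response.token.text.
stream = client.text_generation(
    "Answer briefly: what does this Space do?",   # placeholder prompt
    **generate_kwargs,
    stream=True,
    details=True,
    return_full_text=False,
)

output = ""
for response in stream:
    output += response.token.text
print(output.replace("</s>", ""))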
@@ -474,26 +477,26 @@ class VideoAnalytics:
         Returns:
             str: Answer to the user's question.
         """
-        try:
-        … (10 deleted lines not captured in this page extract)
+        # try:
+        if model == "OpenAI":
+            template = """you are the universal language expert .your task is analyze the given text and user ask any question about given text answer to the user question.otherwise reply i don't know.
+            english_text:{text}
+            user_question:{question}"""
+
+            prompt = PromptTemplate(template=template, input_variables=["text","question"])
+            llm_chain = LLMChain(prompt=prompt, verbose=True, llm=self.openai_llm)
+
+            # Run the language model chain
+            result = llm_chain.run({"text":self.english_text,"question":question})
+            return result
+
+        elif model == "Mixtral":
+            # Generate answer using Mixtral model
+            result = self.generate(question)
             return result
-
-
-
-            result = self.generate(question,self.english_text)
-            return result
-        except Exception as e:
-            logging.error(f"Error in video question answering: {e}")
-            return "An error occurred during video question answering."
+        # except Exception as e:
+        #     logging.error(f"Error in video question answering: {e}")
+        #     return "An error occurred during video question answering."


     def write_text_files(self, text: str, filename: str) -> None:
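In video_qa(), the try/except wrapper is commented out and the method now branches on the selected model: the "OpenAI" branch runs a LangChain LLMChain over a prompt template filled with the transcript and the question, while the "Mixtral" branch delegates to self.generate(question). Below is a minimal sketch of the OpenAI path using the same legacy LangChain API; the OpenAI() constructor, the cleaned-up template wording, and the sample inputs are stand-ins, since self.openai_llm is built elsewhere in app.py:

# A minimal sketch of the OpenAI branch using the same legacy LangChain API as
# the commit (PromptTemplate + LLMChain.run). OpenAI() and the sample inputs are
# stand-ins; the commit uses self.openai_llm, which is created elsewhere in app.py.
from langchain.chains import LLMChain
from langchain.llms import OpenAI          # needs OPENAI_API_KEY in the environment
from langchain.prompts import PromptTemplate

template = """You are a language expert. Analyze the given text and answer the
user's question from it; otherwise reply "I don't know".
english_text:{text}
user_question:{question}"""

prompt = PromptTemplate(template=template, input_variables=["text", "question"])
llm_chain = LLMChain(prompt=prompt, verbose=True, llm=OpenAI())

# run() takes a dict keyed by the template's input variables and returns a string.
answer = llm_chain.run({
    "text": "The video walks through transcribing a talk and summarizing it.",
    "question": "What is the video about?",
})
print(answer)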
@@ -658,7 +661,7 @@ class VideoAnalytics:
             result = gr.Textbox(label='Answer',lines=10)
             submit_btn.click(self.main,[video,yt_link,model_selection],[summary,Important_Sentences,Topics,summary_audio,important_sentence_audio,topics_audio])
             question.submit(self.video_qa,[question,model],result)
-        demo.launch()
+        demo.launch(debug=True)

 if __name__ == "__main__":
     video_analytics = VideoAnalytics()
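The final hunk changes demo.launch() to demo.launch(debug=True), which blocks the main thread and surfaces errors in the console (mainly useful in notebook or Colab sessions, and when debugging a Space that shows a runtime error). A stripped-down sketch of the same Gradio wiring, with a placeholder handler standing in for VideoAnalytics.video_qa:

# A stripped-down sketch of the Gradio wiring used above; video_qa here is a
# placeholder standing in for VideoAnalytics.video_qa.
import gradio as gr

def video_qa(question: str, model: str) -> str:
    return f"[{model}] You asked: {question}"

with gr.Blocks() as demo:
    model = gr.Dropdown(["OpenAI", "Mixtral"], value="OpenAI", label="Model")
    question = gr.Textbox(label="Question")
    result = gr.Textbox(label="Answer", lines=10)
    question.submit(video_qa, [question, model], result)

if __name__ == "__main__":
    # debug=True blocks the main thread and streams errors to the console.
    demo.launch(debug=True)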
|