from crewai import Task, Agent, Crew, Process
from langchain.tools import tool, Tool
import re
import os
from langchain_groq import ChatGroq

# llm = ChatGroq(model='mixtral-8x7b-32768', temperature=0.6, max_tokens=2048)
# API key is read from the environment (GROQ_API_KEY) rather than hard-coded
llm = ChatGroq(model='llama3-70b-8192', temperature=0.6, max_tokens=1024, api_key=os.environ.get('GROQ_API_KEY'))
from langchain_community.tools import WikipediaQueryRun
from langchain_community.utilities import WikipediaAPIWrapper
from langchain_core.pydantic_v1 import BaseModel, Field
import requests
# import pyttsx3
import io
import tempfile
from gtts import gTTS
from pydub import AudioSegment
from groq import Groq
import cv2
import numpy as np
from PIL import Image, ImageDraw, ImageFont
from moviepy.editor import VideoFileClip, AudioFileClip, concatenate_videoclips, ImageClip

def process_script(script):
    """Parses the script into a dict of image prompts and narration segments."""
    result = {}
    # The closing tags are matched leniently (</image> or <image>) to tolerate slightly malformed LLM output.
    text_for_image_generation = re.findall(r'<image>(.*?)</?image>', script, re.DOTALL)
    text_for_speech_generation = re.findall(r'<narration>(.*?)</?narration>', script, re.DOTALL)
    result['text_for_image_generation'] = text_for_image_generation
    result['text_for_speech_generation'] = text_for_speech_generation
    return result
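
# Example of the tag format process_script expects (illustrative only; the real
# script is produced elsewhere in the pipeline):
#
#   <image>A watercolor painting of a lighthouse at dawn</image>
#   <narration>Every morning, the keeper climbed the ninety steps to the lamp.</narration>
#
# For that input, each list in the returned dict would hold one entry.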

def image_generator(script):
    """Generates one image per <image> description in the script via the Stability AI API.

    Args:
        script: a complete script containing narrations and image descriptions

    Returns:
        A list of generated images in bytes format.
    """
    # images_dir = './outputs/images'
    # for filename in os.listdir(images_dir):
    #     file_path = os.path.join(images_dir, filename)
    #     if os.path.isfile(file_path):
    #         os.remove(file_path)
    parsed = process_script(script)
    images_list = []
    for i, text in enumerate(parsed['text_for_image_generation']):
        response = requests.post(
            "https://api.stability.ai/v2beta/stable-image/generate/core",
            headers={
                # API key is read from the environment rather than hard-coded
                "authorization": os.environ.get('STABILITY_API_KEY', ''),
                "accept": "image/*"
            },
            files={"none": ''},
            data={
                "prompt": text,
                "output_format": "png",
                "aspect_ratio": "9:16",
            },
        )
        if response.status_code == 200:
            print('image generated')
            images_list.append(response.content)
        else:
            raise Exception(str(response.json()))
    return images_list

def generate_speech(script, lang='en', speed=1.2, max_segments=2):
    """
    Generates speech for the given script using gTTS and adjusts the speed.

    Args:
        script (str): The script containing narration segments.
        lang (str, optional): The language code (default is 'en' for English).
        speed (float, optional): The speed factor of speech generation (default is 1.2).
        max_segments (int, optional): Maximum number of speech segments to generate (default is 2).

    Returns:
        list: List of generated speech segments as bytes.
    """
    parsed = process_script(script)
    speeches_list = []

    # Ensure we limit the number of segments processed
    segments_to_process = min(max_segments, len(parsed['text_for_speech_generation']))

    for text in parsed['text_for_speech_generation'][:segments_to_process]:
        # Generate speech
        tts = gTTS(text=text, lang=lang)

        # Save speech to BytesIO
        speech_data = io.BytesIO()
        tts.write_to_fp(speech_data)
        speech_data.seek(0)

        # Adjust speed if necessary
        if speed != 1.0:
            audio_segment = AudioSegment.from_file(speech_data, format="mp3")
            audio_segment = audio_segment.speedup(playback_speed=speed)
            speech_data = io.BytesIO()
            audio_segment.export(speech_data, format="mp3")
            speech_data.seek(0)

        speeches_list.append(speech_data.read())

    return speeches_list

def split_text_into_chunks(text, chunk_size):
    words = text.split()
    return [' '.join(words[i:i + chunk_size]) for i in range(0, len(words), chunk_size)]

def add_text_to_video(input_video, output_video, text, duration=1, fontsize=40, fontcolor=(255, 255, 255),
                      outline_thickness=2, outline_color=(0, 0, 0), delay_between_chunks=0.1,
                      font_path=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'Montserrat-Bold.ttf')):
    """Overlays the caption text on the video in timed chunks, with an outline for readability."""
    chunks = split_text_into_chunks(text, 3)  # Adjust chunk size as needed

    cap = cv2.VideoCapture(input_video)
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    fps = int(cap.get(cv2.CAP_PROP_FPS))
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    out = cv2.VideoWriter(output_video, fourcc, fps, (width, height))

    frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    chunk_duration_frames = duration * fps
    delay_frames = int(delay_between_chunks * fps)

    font = ImageFont.truetype(font_path, fontsize)

    current_frame = 0
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break

        frame_pil = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
        draw = ImageDraw.Draw(frame_pil)

        chunk_index = current_frame // (chunk_duration_frames + delay_frames)

        if current_frame % (chunk_duration_frames + delay_frames) < chunk_duration_frames and chunk_index < len(chunks):
            chunk = chunks[chunk_index]
            text_bbox = draw.textbbox((0, 0), chunk, font=font)
            text_width, text_height = text_bbox[2] - text_bbox[0], text_bbox[3] - text_bbox[1]
            text_x = (width - text_width) // 2
            text_y = height - 400  # Position text near the bottom

            if text_width > width:
                # Split the chunk across two lines if it is wider than the frame
                words = chunk.split()
                half = len(words) // 2
                line1 = ' '.join(words[:half])
                line2 = ' '.join(words[half:])

                bbox_line1 = draw.textbbox((0, 0), line1, font=font)
                bbox_line2 = draw.textbbox((0, 0), line2, font=font)
                text_size_line1 = (bbox_line1[2] - bbox_line1[0], bbox_line1[3] - bbox_line1[1])
                text_size_line2 = (bbox_line2[2] - bbox_line2[0], bbox_line2[3] - bbox_line2[1])

                text_x_line1 = (width - text_size_line1[0]) // 2
                text_x_line2 = (width - text_size_line2[0]) // 2
                text_y = height - 250 - text_size_line1[1]  # Adjust vertical position for two lines

                # Draw the outline first, then the text itself
                for dx in range(-outline_thickness, outline_thickness + 1):
                    for dy in range(-outline_thickness, outline_thickness + 1):
                        if dx != 0 or dy != 0:
                            draw.text((text_x_line1 + dx, text_y + dy), line1, font=font, fill=outline_color)
                            draw.text((text_x_line2 + dx, text_y + text_size_line1[1] + dy), line2, font=font, fill=outline_color)

                draw.text((text_x_line1, text_y), line1, font=font, fill=fontcolor)
                draw.text((text_x_line2, text_y + text_size_line1[1]), line2, font=font, fill=fontcolor)
            else:
                # Draw the outline first, then the text itself
                for dx in range(-outline_thickness, outline_thickness + 1):
                    for dy in range(-outline_thickness, outline_thickness + 1):
                        if dx != 0 or dy != 0:
                            draw.text((text_x + dx, text_y + dy), chunk, font=font, fill=outline_color)

                draw.text((text_x, text_y), chunk, font=font, fill=fontcolor)

        frame = cv2.cvtColor(np.array(frame_pil), cv2.COLOR_RGB2BGR)
        out.write(frame)
        current_frame += 1

    cap.release()
    out.release()
    cv2.destroyAllWindows()

def apply_zoom_in_effect(clip, zoom_factor=1.2):
    width, height = clip.size
    duration = clip.duration

    def zoom_in_effect(get_frame, t):
        # Zoom grows linearly from 1.0 to zoom_factor over the clip duration
        frame = get_frame(t)
        zoom = 1 + (zoom_factor - 1) * (t / duration)
        new_width, new_height = int(width * zoom), int(height * zoom)
        resized_frame = cv2.resize(frame, (new_width, new_height))

        # Crop the enlarged frame back to the original size, centered
        x_start = (new_width - width) // 2
        y_start = (new_height - height) // 2
        cropped_frame = resized_frame[y_start:y_start + height, x_start:x_start + width]
        return cropped_frame

    return clip.fl(zoom_in_effect, apply_to=['mask'])

def create_video_from_images_and_audio(images, speeches, script, zoom_factor=1.2):
    """Creates the final video from the generated images and speech segments.

    Args:
        images: list of images in bytes format
        speeches: list of speeches in bytes format
        script: the original tagged script, used to derive the on-screen captions
    """
    captions = process_script(script)['text_for_speech_generation']
    clips = []
    temp_files = []

    for i in range(min(len(images), len(speeches))):
        # Save image to a temporary file
        img_path = f"./temp_image_{i}.png"
        with open(img_path, 'wb') as img_file:
            img_file.write(images[i])

        # Create an ImageClip
        img_clip = ImageClip(img_path)

        # Save audio to a temporary file
        audio_path = f"./temp_audio_{i}.mp3"
        with open(audio_path, 'wb') as audio_file:
            audio_file.write(speeches[i])

        # Create an AudioClip
        audioclip = AudioFileClip(audio_path)

        # Set the duration of the video clip to match the audio duration
        videoclip = img_clip.set_duration(audioclip.duration)
        zoomed_clip = apply_zoom_in_effect(videoclip, zoom_factor)

        # Caption each clip with its narration text
        caption = captions[i]

        temp_video_path = f"./temp_zoomed_{i}.mp4"
        zoomed_clip.write_videofile(temp_video_path, codec='libx264', fps=24)
        temp_files.append(temp_video_path)

        final_video_path = f"./temp_captioned_{i}.mp4"
        add_text_to_video(temp_video_path, final_video_path, caption, duration=1, fontsize=60)
        temp_files.append(final_video_path)

        final_clip = VideoFileClip(final_video_path)
        final_clip = final_clip.set_audio(audioclip)
        clips.append(final_clip)

    final_clip = concatenate_videoclips(clips)
    final_clip.write_videofile("./final_video.mp4", codec='libx264', fps=24)

    # Close all video files properly
    for clip in clips:
        clip.close()

    # Remove all temporary files
    # for temp_file in temp_files:
    #     try:
    #         os.remove(temp_file)
    #     except Exception as e:
    #         print(f"Error removing file {temp_file}: {e}")

    return "./final_video.mp4"

class WikiInputs(BaseModel):
    """Inputs to the Wikipedia tool."""
    query: str = Field(description="query to look up in Wikipedia, should be 3 or fewer words")

api_wrapper = WikipediaAPIWrapper(top_k_results=3)  # , doc_content_chars_max=100)

wiki_tool = WikipediaQueryRun(
    name="wiki-tool",
    description="{query:'input here'}",
    args_schema=WikiInputs,
    api_wrapper=api_wrapper,
    return_direct=True,
)

wiki = Tool(
    name='wikipedia',
    func=wiki_tool.run,
    description="{query:'input here'}"
)