imseldrith committed on
Commit
b556ac0
1 Parent(s): 3ff7f2c

Upload folder using huggingface_hub

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full change set.
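For reference, a commit with this default message is typically produced by the huggingface_hub client's upload_folder helper. The sketch below illustrates that flow only; the local path, repo id and repo type are placeholder assumptions, not values taken from this commit.

from huggingface_hub import HfApi

# Minimal sketch: push a local folder to a Hub repository.
# folder_path, repo_id and repo_type are placeholders for illustration.
api = HfApi()
api.upload_folder(
    folder_path = './DeepFakeAI',                  # local directory to upload (assumed)
    repo_id = 'imseldrith/DeepFakeAI',             # assumed target repository
    repo_type = 'space',                           # assumption: could also be 'model'
    commit_message = 'Upload folder using huggingface_hub'
)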
Files changed (50)
  1. DeepFakeAI/__pycache__/__init__.cpython-310.pyc +0 -0
  2. DeepFakeAI/__pycache__/capturer.cpython-310.pyc +0 -0
  3. DeepFakeAI/__pycache__/choices.cpython-310.pyc +0 -0
  4. DeepFakeAI/__pycache__/core.cpython-310.pyc +0 -0
  5. DeepFakeAI/__pycache__/face_analyser.cpython-310.pyc +0 -0
  6. DeepFakeAI/__pycache__/face_reference.cpython-310.pyc +0 -0
  7. DeepFakeAI/__pycache__/globals.cpython-310.pyc +0 -0
  8. DeepFakeAI/__pycache__/metadata.cpython-310.pyc +0 -0
  9. DeepFakeAI/__pycache__/predictor.cpython-310.pyc +0 -0
  10. DeepFakeAI/__pycache__/typing.cpython-310.pyc +0 -0
  11. DeepFakeAI/__pycache__/utilities.cpython-310.pyc +0 -0
  12. DeepFakeAI/__pycache__/wording.cpython-310.pyc +0 -0
  13. DeepFakeAI/capturer.py +1 -1
  14. DeepFakeAI/choices.py +1 -1
  15. DeepFakeAI/core.py +69 -69
  16. DeepFakeAI/face_analyser.py +9 -9
  17. DeepFakeAI/face_reference.py +1 -1
  18. DeepFakeAI/globals.py +1 -1
  19. DeepFakeAI/metadata.py +1 -1
  20. DeepFakeAI/predictor.py +1 -1
  21. DeepFakeAI/processors/__pycache__/__init__.cpython-310.pyc +0 -0
  22. DeepFakeAI/processors/frame/__pycache__/__init__.cpython-310.pyc +0 -0
  23. DeepFakeAI/processors/frame/__pycache__/core.cpython-310.pyc +0 -0
  24. DeepFakeAI/processors/frame/core.py +11 -11
  25. DeepFakeAI/processors/frame/modules/__pycache__/__init__.cpython-310.pyc +0 -0
  26. DeepFakeAI/processors/frame/modules/__pycache__/face_swapper.cpython-310.pyc +0 -0
  27. DeepFakeAI/processors/frame/modules/face_enhancer.py +10 -10
  28. DeepFakeAI/processors/frame/modules/face_swapper.py +21 -21
  29. DeepFakeAI/processors/frame/modules/frame_enhancer.py +4 -4
  30. DeepFakeAI/uis/__pycache__/__init__.cpython-310.pyc +0 -0
  31. DeepFakeAI/uis/__pycache__/core.cpython-310.pyc +0 -0
  32. DeepFakeAI/uis/__pycache__/typing.cpython-310.pyc +0 -0
  33. DeepFakeAI/uis/components/__pycache__/__init__.cpython-310.pyc +0 -0
  34. DeepFakeAI/uis/components/__pycache__/about.cpython-310.pyc +0 -0
  35. DeepFakeAI/uis/components/__pycache__/execution.cpython-310.pyc +0 -0
  36. DeepFakeAI/uis/components/__pycache__/face_analyser.cpython-310.pyc +0 -0
  37. DeepFakeAI/uis/components/__pycache__/face_selector.cpython-310.pyc +0 -0
  38. DeepFakeAI/uis/components/__pycache__/output.cpython-310.pyc +0 -0
  39. DeepFakeAI/uis/components/__pycache__/output_settings.cpython-310.pyc +0 -0
  40. DeepFakeAI/uis/components/__pycache__/preview.cpython-310.pyc +0 -0
  41. DeepFakeAI/uis/components/__pycache__/processors.cpython-310.pyc +0 -0
  42. DeepFakeAI/uis/components/__pycache__/settings.cpython-310.pyc +0 -0
  43. DeepFakeAI/uis/components/__pycache__/source.cpython-310.pyc +0 -0
  44. DeepFakeAI/uis/components/__pycache__/target.cpython-310.pyc +0 -0
  45. DeepFakeAI/uis/components/__pycache__/temp_frame.cpython-310.pyc +0 -0
  46. DeepFakeAI/uis/components/__pycache__/trim_frame.cpython-310.pyc +0 -0
  47. DeepFakeAI/uis/components/about.py +1 -1
  48. DeepFakeAI/uis/components/benchmark.py +13 -13
  49. DeepFakeAI/uis/components/execution.py +12 -12
  50. DeepFakeAI/uis/components/face_analyser.py +13 -13
DeepFakeAI/__pycache__/__init__.cpython-310.pyc CHANGED
Binary files a/DeepFakeAI/__pycache__/__init__.cpython-310.pyc and b/DeepFakeAI/__pycache__/__init__.cpython-310.pyc differ
 
DeepFakeAI/__pycache__/capturer.cpython-310.pyc CHANGED
Binary files a/DeepFakeAI/__pycache__/capturer.cpython-310.pyc and b/DeepFakeAI/__pycache__/capturer.cpython-310.pyc differ
 
DeepFakeAI/__pycache__/choices.cpython-310.pyc CHANGED
Binary files a/DeepFakeAI/__pycache__/choices.cpython-310.pyc and b/DeepFakeAI/__pycache__/choices.cpython-310.pyc differ
 
DeepFakeAI/__pycache__/core.cpython-310.pyc CHANGED
Binary files a/DeepFakeAI/__pycache__/core.cpython-310.pyc and b/DeepFakeAI/__pycache__/core.cpython-310.pyc differ
 
DeepFakeAI/__pycache__/face_analyser.cpython-310.pyc CHANGED
Binary files a/DeepFakeAI/__pycache__/face_analyser.cpython-310.pyc and b/DeepFakeAI/__pycache__/face_analyser.cpython-310.pyc differ
 
DeepFakeAI/__pycache__/face_reference.cpython-310.pyc CHANGED
Binary files a/DeepFakeAI/__pycache__/face_reference.cpython-310.pyc and b/DeepFakeAI/__pycache__/face_reference.cpython-310.pyc differ
 
DeepFakeAI/__pycache__/globals.cpython-310.pyc CHANGED
Binary files a/DeepFakeAI/__pycache__/globals.cpython-310.pyc and b/DeepFakeAI/__pycache__/globals.cpython-310.pyc differ
 
DeepFakeAI/__pycache__/metadata.cpython-310.pyc CHANGED
Binary files a/DeepFakeAI/__pycache__/metadata.cpython-310.pyc and b/DeepFakeAI/__pycache__/metadata.cpython-310.pyc differ
 
DeepFakeAI/__pycache__/predictor.cpython-310.pyc CHANGED
Binary files a/DeepFakeAI/__pycache__/predictor.cpython-310.pyc and b/DeepFakeAI/__pycache__/predictor.cpython-310.pyc differ
 
DeepFakeAI/__pycache__/typing.cpython-310.pyc CHANGED
Binary files a/DeepFakeAI/__pycache__/typing.cpython-310.pyc and b/DeepFakeAI/__pycache__/typing.cpython-310.pyc differ
 
DeepFakeAI/__pycache__/utilities.cpython-310.pyc CHANGED
Binary files a/DeepFakeAI/__pycache__/utilities.cpython-310.pyc and b/DeepFakeAI/__pycache__/utilities.cpython-310.pyc differ
 
DeepFakeAI/__pycache__/wording.cpython-310.pyc CHANGED
Binary files a/DeepFakeAI/__pycache__/wording.cpython-310.pyc and b/DeepFakeAI/__pycache__/wording.cpython-310.pyc differ
 
DeepFakeAI/capturer.py CHANGED
@@ -1,7 +1,7 @@
  from typing import Optional
  import cv2

- from facefusion.typing import Frame
+ from DeepFakeAI.typing import Frame


  def get_video_frame(video_path : str, frame_number : int = 0) -> Optional[Frame]:
DeepFakeAI/choices.py CHANGED
@@ -1,6 +1,6 @@
  from typing import List

- from facefusion.typing import FaceRecognition, FaceAnalyserDirection, FaceAnalyserAge, FaceAnalyserGender, TempFrameFormat, OutputVideoEncoder
+ from DeepFakeAI.typing import FaceRecognition, FaceAnalyserDirection, FaceAnalyserAge, FaceAnalyserGender, TempFrameFormat, OutputVideoEncoder

  face_recognition : List[FaceRecognition] = [ 'reference', 'many' ]
  face_analyser_direction : List[FaceAnalyserDirection] = [ 'left-right', 'right-left', 'top-bottom', 'bottom-top', 'small-large', 'large-small']
DeepFakeAI/core.py CHANGED
@@ -15,12 +15,12 @@ import argparse
  import onnxruntime
  import tensorflow

- import facefusion.choices
- import facefusion.globals
- from facefusion import wording, metadata
- from facefusion.predictor import predict_image, predict_video
- from facefusion.processors.frame.core import get_frame_processors_modules
- from facefusion.utilities import is_image, is_video, detect_fps, create_video, extract_frames, get_temp_frame_paths, restore_audio, create_temp, move_temp, clear_temp, normalize_output_path, list_module_names, decode_execution_providers, encode_execution_providers
+ import DeepFakeAI.choices
+ import DeepFakeAI.globals
+ from DeepFakeAI import wording, metadata
+ from DeepFakeAI.predictor import predict_image, predict_video
+ from DeepFakeAI.processors.frame.core import get_frame_processors_modules
+ from DeepFakeAI.utilities import is_image, is_video, detect_fps, create_video, extract_frames, get_temp_frame_paths, restore_audio, create_temp, move_temp, clear_temp, normalize_output_path, list_module_names, decode_execution_providers, encode_execution_providers

  warnings.filterwarnings('ignore', category = FutureWarning, module = 'insightface')
  warnings.filterwarnings('ignore', category = UserWarning, module = 'torchvision')
@@ -32,23 +32,23 @@ def parse_args() -> None:
  program.add_argument('-s', '--source', help = wording.get('source_help'), dest = 'source_path')
  program.add_argument('-t', '--target', help = wording.get('target_help'), dest = 'target_path')
  program.add_argument('-o', '--output', help = wording.get('output_help'), dest = 'output_path')
- program.add_argument('--frame-processors', help = wording.get('frame_processors_help').format(choices = ', '.join(list_module_names('facefusion/processors/frame/modules'))), dest = 'frame_processors', default = ['face_swapper'], nargs='+')
- program.add_argument('--ui-layouts', help = wording.get('ui_layouts_help').format(choices = ', '.join(list_module_names('facefusion/uis/layouts'))), dest = 'ui_layouts', default = ['default'], nargs='+')
+ program.add_argument('--frame-processors', help = wording.get('frame_processors_help').format(choices = ', '.join(list_module_names('DeepFakeAI/processors/frame/modules'))), dest = 'frame_processors', default = ['face_swapper'], nargs='+')
+ program.add_argument('--ui-layouts', help = wording.get('ui_layouts_help').format(choices = ', '.join(list_module_names('DeepFakeAI/uis/layouts'))), dest = 'ui_layouts', default = ['default'], nargs='+')
  program.add_argument('--keep-fps', help = wording.get('keep_fps_help'), dest = 'keep_fps', action='store_true')
  program.add_argument('--keep-temp', help = wording.get('keep_temp_help'), dest = 'keep_temp', action='store_true')
  program.add_argument('--skip-audio', help = wording.get('skip_audio_help'), dest = 'skip_audio', action='store_true')
- program.add_argument('--face-recognition', help = wording.get('face_recognition_help'), dest = 'face_recognition', default = 'reference', choices = facefusion.choices.face_recognition)
- program.add_argument('--face-analyser-direction', help = wording.get('face_analyser_direction_help'), dest = 'face_analyser_direction', default = 'left-right', choices = facefusion.choices.face_analyser_direction)
- program.add_argument('--face-analyser-age', help = wording.get('face_analyser_age_help'), dest = 'face_analyser_age', choices = facefusion.choices.face_analyser_age)
- program.add_argument('--face-analyser-gender', help = wording.get('face_analyser_gender_help'), dest = 'face_analyser_gender', choices = facefusion.choices.face_analyser_gender)
+ program.add_argument('--face-recognition', help = wording.get('face_recognition_help'), dest = 'face_recognition', default = 'reference', choices = DeepFakeAI.choices.face_recognition)
+ program.add_argument('--face-analyser-direction', help = wording.get('face_analyser_direction_help'), dest = 'face_analyser_direction', default = 'left-right', choices = DeepFakeAI.choices.face_analyser_direction)
+ program.add_argument('--face-analyser-age', help = wording.get('face_analyser_age_help'), dest = 'face_analyser_age', choices = DeepFakeAI.choices.face_analyser_age)
+ program.add_argument('--face-analyser-gender', help = wording.get('face_analyser_gender_help'), dest = 'face_analyser_gender', choices = DeepFakeAI.choices.face_analyser_gender)
  program.add_argument('--reference-face-position', help = wording.get('reference_face_position_help'), dest = 'reference_face_position', type = int, default = 0)
  program.add_argument('--reference-face-distance', help = wording.get('reference_face_distance_help'), dest = 'reference_face_distance', type = float, default = 1.5)
  program.add_argument('--reference-frame-number', help = wording.get('reference_frame_number_help'), dest = 'reference_frame_number', type = int, default = 0)
  program.add_argument('--trim-frame-start', help = wording.get('trim_frame_start_help'), dest = 'trim_frame_start', type = int)
  program.add_argument('--trim-frame-end', help = wording.get('trim_frame_end_help'), dest = 'trim_frame_end', type = int)
- program.add_argument('--temp-frame-format', help = wording.get('temp_frame_format_help'), dest = 'temp_frame_format', default = 'jpg', choices = facefusion.choices.temp_frame_format)
+ program.add_argument('--temp-frame-format', help = wording.get('temp_frame_format_help'), dest = 'temp_frame_format', default = 'jpg', choices = DeepFakeAI.choices.temp_frame_format)
  program.add_argument('--temp-frame-quality', help = wording.get('temp_frame_quality_help'), dest = 'temp_frame_quality', type = int, default = 100, choices = range(101), metavar = '[0-100]')
- program.add_argument('--output-video-encoder', help = wording.get('output_video_encoder_help'), dest = 'output_video_encoder', default = 'libx264', choices = facefusion.choices.output_video_encoder)
+ program.add_argument('--output-video-encoder', help = wording.get('output_video_encoder_help'), dest = 'output_video_encoder', default = 'libx264', choices = DeepFakeAI.choices.output_video_encoder)
  program.add_argument('--output-video-quality', help = wording.get('output_video_quality_help'), dest = 'output_video_quality', type = int, default = 90, choices = range(101), metavar = '[0-100]')
  program.add_argument('--max-memory', help = wording.get('max_memory_help'), dest = 'max_memory', type = int)
  program.add_argument('--execution-providers', help = wording.get('execution_providers_help').format(choices = 'cpu'), dest = 'execution_providers', default = ['cpu'], choices = suggest_execution_providers_choices(), nargs='+')
@@ -58,32 +58,32 @@ def parse_args() -> None:

  args = program.parse_args()

- facefusion.globals.source_path = args.source_path
- facefusion.globals.target_path = args.target_path
- facefusion.globals.output_path = normalize_output_path(facefusion.globals.source_path, facefusion.globals.target_path, args.output_path)
- facefusion.globals.headless = facefusion.globals.source_path is not None and facefusion.globals.target_path is not None and facefusion.globals.output_path is not None
- facefusion.globals.frame_processors = args.frame_processors
- facefusion.globals.ui_layouts = args.ui_layouts
- facefusion.globals.keep_fps = args.keep_fps
- facefusion.globals.keep_temp = args.keep_temp
- facefusion.globals.skip_audio = args.skip_audio
- facefusion.globals.face_recognition = args.face_recognition
- facefusion.globals.face_analyser_direction = args.face_analyser_direction
- facefusion.globals.face_analyser_age = args.face_analyser_age
- facefusion.globals.face_analyser_gender = args.face_analyser_gender
- facefusion.globals.reference_face_position = args.reference_face_position
- facefusion.globals.reference_frame_number = args.reference_frame_number
- facefusion.globals.reference_face_distance = args.reference_face_distance
- facefusion.globals.trim_frame_start = args.trim_frame_start
- facefusion.globals.trim_frame_end = args.trim_frame_end
- facefusion.globals.temp_frame_format = args.temp_frame_format
- facefusion.globals.temp_frame_quality = args.temp_frame_quality
- facefusion.globals.output_video_encoder = args.output_video_encoder
- facefusion.globals.output_video_quality = args.output_video_quality
- facefusion.globals.max_memory = args.max_memory
- facefusion.globals.execution_providers = decode_execution_providers(args.execution_providers)
- facefusion.globals.execution_thread_count = args.execution_thread_count
- facefusion.globals.execution_queue_count = args.execution_queue_count
+ DeepFakeAI.globals.source_path = args.source_path
+ DeepFakeAI.globals.target_path = args.target_path
+ DeepFakeAI.globals.output_path = normalize_output_path(DeepFakeAI.globals.source_path, DeepFakeAI.globals.target_path, args.output_path)
+ DeepFakeAI.globals.headless = DeepFakeAI.globals.source_path is not None and DeepFakeAI.globals.target_path is not None and DeepFakeAI.globals.output_path is not None
+ DeepFakeAI.globals.frame_processors = args.frame_processors
+ DeepFakeAI.globals.ui_layouts = args.ui_layouts
+ DeepFakeAI.globals.keep_fps = args.keep_fps
+ DeepFakeAI.globals.keep_temp = args.keep_temp
+ DeepFakeAI.globals.skip_audio = args.skip_audio
+ DeepFakeAI.globals.face_recognition = args.face_recognition
+ DeepFakeAI.globals.face_analyser_direction = args.face_analyser_direction
+ DeepFakeAI.globals.face_analyser_age = args.face_analyser_age
+ DeepFakeAI.globals.face_analyser_gender = args.face_analyser_gender
+ DeepFakeAI.globals.reference_face_position = args.reference_face_position
+ DeepFakeAI.globals.reference_frame_number = args.reference_frame_number
+ DeepFakeAI.globals.reference_face_distance = args.reference_face_distance
+ DeepFakeAI.globals.trim_frame_start = args.trim_frame_start
+ DeepFakeAI.globals.trim_frame_end = args.trim_frame_end
+ DeepFakeAI.globals.temp_frame_format = args.temp_frame_format
+ DeepFakeAI.globals.temp_frame_quality = args.temp_frame_quality
+ DeepFakeAI.globals.output_video_encoder = args.output_video_encoder
+ DeepFakeAI.globals.output_video_quality = args.output_video_quality
+ DeepFakeAI.globals.max_memory = args.max_memory
+ DeepFakeAI.globals.execution_providers = decode_execution_providers(args.execution_providers)
+ DeepFakeAI.globals.execution_thread_count = args.execution_thread_count
+ DeepFakeAI.globals.execution_queue_count = args.execution_queue_count


  def suggest_execution_providers_choices() -> List[str]:
@@ -104,10 +104,10 @@ def limit_resources() -> None:
  tensorflow.config.experimental.VirtualDeviceConfiguration(memory_limit = 1024)
  ])
  # limit memory usage
- if facefusion.globals.max_memory:
- memory = facefusion.globals.max_memory * 1024 ** 3
+ if DeepFakeAI.globals.max_memory:
+ memory = DeepFakeAI.globals.max_memory * 1024 ** 3
  if platform.system().lower() == 'darwin':
- memory = facefusion.globals.max_memory * 1024 ** 6
+ memory = DeepFakeAI.globals.max_memory * 1024 ** 6
  if platform.system().lower() == 'windows':
  import ctypes
  kernel32 = ctypes.windll.kernel32 # type: ignore[attr-defined]
@@ -132,69 +132,69 @@ def pre_check() -> bool:


  def process_image() -> None:
- if predict_image(facefusion.globals.target_path):
+ if predict_image(DeepFakeAI.globals.target_path):
  return
- shutil.copy2(facefusion.globals.target_path, facefusion.globals.output_path)
+ shutil.copy2(DeepFakeAI.globals.target_path, DeepFakeAI.globals.output_path)
  # process frame
- for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors):
+ for frame_processor_module in get_frame_processors_modules(DeepFakeAI.globals.frame_processors):
  update_status(wording.get('processing'), frame_processor_module.NAME)
- frame_processor_module.process_image(facefusion.globals.source_path, facefusion.globals.output_path, facefusion.globals.output_path)
+ frame_processor_module.process_image(DeepFakeAI.globals.source_path, DeepFakeAI.globals.output_path, DeepFakeAI.globals.output_path)
  frame_processor_module.post_process()
  # validate image
- if is_image(facefusion.globals.target_path):
+ if is_image(DeepFakeAI.globals.target_path):
  update_status(wording.get('processing_image_succeed'))
  else:
  update_status(wording.get('processing_image_failed'))


  def process_video() -> None:
- if predict_video(facefusion.globals.target_path):
+ if predict_video(DeepFakeAI.globals.target_path):
  return
- fps = detect_fps(facefusion.globals.target_path) if facefusion.globals.keep_fps else 25.0
+ fps = detect_fps(DeepFakeAI.globals.target_path) if DeepFakeAI.globals.keep_fps else 25.0
  update_status(wording.get('creating_temp'))
- create_temp(facefusion.globals.target_path)
+ create_temp(DeepFakeAI.globals.target_path)
  # extract frames
  update_status(wording.get('extracting_frames_fps').format(fps = fps))
- extract_frames(facefusion.globals.target_path, fps)
+ extract_frames(DeepFakeAI.globals.target_path, fps)
  # process frame
- temp_frame_paths = get_temp_frame_paths(facefusion.globals.target_path)
+ temp_frame_paths = get_temp_frame_paths(DeepFakeAI.globals.target_path)
  if temp_frame_paths:
- for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors):
+ for frame_processor_module in get_frame_processors_modules(DeepFakeAI.globals.frame_processors):
  update_status(wording.get('processing'), frame_processor_module.NAME)
- frame_processor_module.process_video(facefusion.globals.source_path, temp_frame_paths)
+ frame_processor_module.process_video(DeepFakeAI.globals.source_path, temp_frame_paths)
  frame_processor_module.post_process()
  else:
  update_status(wording.get('temp_frames_not_found'))
  return
  # create video
  update_status(wording.get('creating_video_fps').format(fps = fps))
- if not create_video(facefusion.globals.target_path, fps):
+ if not create_video(DeepFakeAI.globals.target_path, fps):
  update_status(wording.get('creating_video_failed'))
  return
  # handle audio
- if facefusion.globals.skip_audio:
+ if DeepFakeAI.globals.skip_audio:
  update_status(wording.get('skipping_audio'))
- move_temp(facefusion.globals.target_path, facefusion.globals.output_path)
+ move_temp(DeepFakeAI.globals.target_path, DeepFakeAI.globals.output_path)
  else:
  update_status(wording.get('restoring_audio'))
- restore_audio(facefusion.globals.target_path, facefusion.globals.output_path)
+ restore_audio(DeepFakeAI.globals.target_path, DeepFakeAI.globals.output_path)
  # clear temp
  update_status(wording.get('clearing_temp'))
- clear_temp(facefusion.globals.target_path)
+ clear_temp(DeepFakeAI.globals.target_path)
  # validate video
- if is_video(facefusion.globals.target_path):
+ if is_video(DeepFakeAI.globals.target_path):
  update_status(wording.get('processing_video_succeed'))
  else:
  update_status(wording.get('processing_video_failed'))


  def conditional_process() -> None:
- for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors):
+ for frame_processor_module in get_frame_processors_modules(DeepFakeAI.globals.frame_processors):
  if not frame_processor_module.pre_process():
  return
- if is_image(facefusion.globals.target_path):
+ if is_image(DeepFakeAI.globals.target_path):
  process_image()
- if is_video(facefusion.globals.target_path):
+ if is_video(DeepFakeAI.globals.target_path):
  process_video()


@@ -204,19 +204,19 @@ def run() -> None:
  # pre check
  if not pre_check():
  return
- for frame_processor in get_frame_processors_modules(facefusion.globals.frame_processors):
+ for frame_processor in get_frame_processors_modules(DeepFakeAI.globals.frame_processors):
  if not frame_processor.pre_check():
  return
  # process or launch
- if facefusion.globals.headless:
+ if DeepFakeAI.globals.headless:
  conditional_process()
  else:
- import facefusion.uis.core as ui
+ import DeepFakeAI.uis.core as ui

  ui.launch()


  def destroy() -> None:
- if facefusion.globals.target_path:
- clear_temp(facefusion.globals.target_path)
+ if DeepFakeAI.globals.target_path:
+ clear_temp(DeepFakeAI.globals.target_path)
  sys.exit()
DeepFakeAI/face_analyser.py CHANGED
@@ -3,8 +3,8 @@ from typing import Any, Optional, List
  import insightface
  import numpy

- import facefusion.globals
- from facefusion.typing import Frame, Face, FaceAnalyserDirection, FaceAnalyserAge, FaceAnalyserGender
+ import DeepFakeAI.globals
+ from DeepFakeAI.typing import Frame, Face, FaceAnalyserDirection, FaceAnalyserAge, FaceAnalyserGender

  FACE_ANALYSER = None
  THREAD_LOCK = threading.Lock()
@@ -15,7 +15,7 @@ def get_face_analyser() -> Any:

  with THREAD_LOCK:
  if FACE_ANALYSER is None:
- FACE_ANALYSER = insightface.app.FaceAnalysis(name = 'buffalo_l', providers = facefusion.globals.execution_providers)
+ FACE_ANALYSER = insightface.app.FaceAnalysis(name = 'buffalo_l', providers = DeepFakeAI.globals.execution_providers)
  FACE_ANALYSER.prepare(ctx_id = 0)
  return FACE_ANALYSER

@@ -39,12 +39,12 @@ def get_one_face(frame : Frame, position : int = 0) -> Optional[Face]:
  def get_many_faces(frame : Frame) -> List[Face]:
  try:
  faces = get_face_analyser().get(frame)
- if facefusion.globals.face_analyser_direction:
- faces = sort_by_direction(faces, facefusion.globals.face_analyser_direction)
- if facefusion.globals.face_analyser_age:
- faces = filter_by_age(faces, facefusion.globals.face_analyser_age)
- if facefusion.globals.face_analyser_gender:
- faces = filter_by_gender(faces, facefusion.globals.face_analyser_gender)
+ if DeepFakeAI.globals.face_analyser_direction:
+ faces = sort_by_direction(faces, DeepFakeAI.globals.face_analyser_direction)
+ if DeepFakeAI.globals.face_analyser_age:
+ faces = filter_by_age(faces, DeepFakeAI.globals.face_analyser_age)
+ if DeepFakeAI.globals.face_analyser_gender:
+ faces = filter_by_gender(faces, DeepFakeAI.globals.face_analyser_gender)
  return faces
  except (AttributeError, ValueError):
  return []
DeepFakeAI/face_reference.py CHANGED
@@ -1,6 +1,6 @@
  from typing import Optional

- from facefusion.typing import Face
+ from DeepFakeAI.typing import Face

  FACE_REFERENCE = None

DeepFakeAI/globals.py CHANGED
@@ -1,6 +1,6 @@
  from typing import List, Optional

- from facefusion.typing import FaceRecognition, FaceAnalyserDirection, FaceAnalyserAge, FaceAnalyserGender, TempFrameFormat
+ from DeepFakeAI.typing import FaceRecognition, FaceAnalyserDirection, FaceAnalyserAge, FaceAnalyserGender, TempFrameFormat

  source_path : Optional[str] = None
  target_path : Optional[str] = None
DeepFakeAI/metadata.py CHANGED
@@ -5,7 +5,7 @@ METADATA =\
  'version': '1.0.0',
  'license': 'MIT',
  'author': 'Henry Ruhs',
- 'url': 'https://facefusion.io'
+ 'url': 'https://DeepFakeAI.io'
  }


DeepFakeAI/predictor.py CHANGED
@@ -4,7 +4,7 @@ import opennsfw2
  from PIL import Image
  from keras import Model

- from facefusion.typing import Frame
+ from DeepFakeAI.typing import Frame

  PREDICTOR = None
  THREAD_LOCK = threading.Lock()
DeepFakeAI/processors/__pycache__/__init__.cpython-310.pyc CHANGED
Binary files a/DeepFakeAI/processors/__pycache__/__init__.cpython-310.pyc and b/DeepFakeAI/processors/__pycache__/__init__.cpython-310.pyc differ
 
DeepFakeAI/processors/frame/__pycache__/__init__.cpython-310.pyc CHANGED
Binary files a/DeepFakeAI/processors/frame/__pycache__/__init__.cpython-310.pyc and b/DeepFakeAI/processors/frame/__pycache__/__init__.cpython-310.pyc differ
 
DeepFakeAI/processors/frame/__pycache__/core.cpython-310.pyc CHANGED
Binary files a/DeepFakeAI/processors/frame/__pycache__/core.cpython-310.pyc and b/DeepFakeAI/processors/frame/__pycache__/core.cpython-310.pyc differ
 
DeepFakeAI/processors/frame/core.py CHANGED
@@ -8,8 +8,8 @@ from types import ModuleType
  from typing import Any, List, Callable
  from tqdm import tqdm

- import facefusion.globals
- from facefusion import wording
+ import DeepFakeAI.globals
+ from DeepFakeAI import wording

  FRAME_PROCESSORS_MODULES : List[ModuleType] = []
  FRAME_PROCESSORS_METHODS =\
@@ -28,7 +28,7 @@ FRAME_PROCESSORS_METHODS =\

  def load_frame_processor_module(frame_processor : str) -> Any:
  try:
- frame_processor_module = importlib.import_module('facefusion.processors.frame.modules.' + frame_processor)
+ frame_processor_module = importlib.import_module('DeepFakeAI.processors.frame.modules.' + frame_processor)
  for method_name in FRAME_PROCESSORS_METHODS:
  if not hasattr(frame_processor_module, method_name):
  raise NotImplementedError
@@ -52,16 +52,16 @@ def get_frame_processors_modules(frame_processors : List[str]) -> List[ModuleTyp
  def clear_frame_processors_modules() -> None:
  global FRAME_PROCESSORS_MODULES

- for frame_processor_module in get_frame_processors_modules(facefusion.globals.frame_processors):
+ for frame_processor_module in get_frame_processors_modules(DeepFakeAI.globals.frame_processors):
  frame_processor_module.clear_frame_processor()
  FRAME_PROCESSORS_MODULES = []


  def multi_process_frame(source_path : str, temp_frame_paths : List[str], process_frames: Callable[[str, List[str], Any], None], update: Callable[[], None]) -> None:
- with ThreadPoolExecutor(max_workers = facefusion.globals.execution_thread_count) as executor:
+ with ThreadPoolExecutor(max_workers = DeepFakeAI.globals.execution_thread_count) as executor:
  futures = []
  queue = create_queue(temp_frame_paths)
- queue_per_future = max(len(temp_frame_paths) // facefusion.globals.execution_thread_count * facefusion.globals.execution_queue_count, 1)
+ queue_per_future = max(len(temp_frame_paths) // DeepFakeAI.globals.execution_thread_count * DeepFakeAI.globals.execution_queue_count, 1)
  while not queue.empty():
  future = executor.submit(process_frames, source_path, pick_queue(queue, queue_per_future), update)
  futures.append(future)
@@ -97,17 +97,17 @@ def update_progress(progress : Any = None) -> None:
  progress.set_postfix(
  {
  'memory_usage': '{:.2f}'.format(memory_usage).zfill(5) + 'GB',
- 'execution_providers': facefusion.globals.execution_providers,
- 'execution_thread_count': facefusion.globals.execution_thread_count,
- 'execution_queue_count': facefusion.globals.execution_queue_count
+ 'execution_providers': DeepFakeAI.globals.execution_providers,
+ 'execution_thread_count': DeepFakeAI.globals.execution_thread_count,
+ 'execution_queue_count': DeepFakeAI.globals.execution_queue_count
  })
  progress.refresh()
  progress.update(1)


  def get_device() -> str:
- if 'CUDAExecutionProvider' in facefusion.globals.execution_providers:
+ if 'CUDAExecutionProvider' in DeepFakeAI.globals.execution_providers:
  return 'cuda'
- if 'CoreMLExecutionProvider' in facefusion.globals.execution_providers:
+ if 'CoreMLExecutionProvider' in DeepFakeAI.globals.execution_providers:
  return 'mps'
  return 'cpu'
DeepFakeAI/processors/frame/modules/__pycache__/__init__.cpython-310.pyc CHANGED
Binary files a/DeepFakeAI/processors/frame/modules/__pycache__/__init__.cpython-310.pyc and b/DeepFakeAI/processors/frame/modules/__pycache__/__init__.cpython-310.pyc differ
 
DeepFakeAI/processors/frame/modules/__pycache__/face_swapper.cpython-310.pyc CHANGED
Binary files a/DeepFakeAI/processors/frame/modules/__pycache__/face_swapper.cpython-310.pyc and b/DeepFakeAI/processors/frame/modules/__pycache__/face_swapper.cpython-310.pyc differ
 
DeepFakeAI/processors/frame/modules/face_enhancer.py CHANGED
@@ -3,13 +3,13 @@ import cv2
  import threading
  from gfpgan.utils import GFPGANer

- import facefusion.globals
- import facefusion.processors.frame.core as frame_processors
- from facefusion import wording
- from facefusion.core import update_status
- from facefusion.face_analyser import get_many_faces
- from facefusion.typing import Frame, Face
- from facefusion.utilities import conditional_download, resolve_relative_path, is_image, is_video
+ import DeepFakeAI.globals
+ import DeepFakeAI.processors.frame.core as frame_processors
+ from DeepFakeAI import wording
+ from DeepFakeAI.core import update_status
+ from DeepFakeAI.face_analyser import get_many_faces
+ from DeepFakeAI.typing import Frame, Face
+ from DeepFakeAI.utilities import conditional_download, resolve_relative_path, is_image, is_video

  FRAME_PROCESSOR = None
  THREAD_SEMAPHORE = threading.Semaphore()
@@ -39,12 +39,12 @@ def clear_frame_processor() -> None:

  def pre_check() -> bool:
  download_directory_path = resolve_relative_path('../.assets/models')
- conditional_download(download_directory_path, ['https://github.com/facefusion/facefusion-assets/releases/download/models/GFPGANv1.4.pth'])
+ conditional_download(download_directory_path, ['https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/models/GFPGANv1.4.pth'])
  return True


  def pre_process() -> bool:
- if not is_image(facefusion.globals.target_path) and not is_video(facefusion.globals.target_path):
+ if not is_image(DeepFakeAI.globals.target_path) and not is_video(DeepFakeAI.globals.target_path):
  update_status(wording.get('select_image_or_video_target') + wording.get('exclamation_mark'), NAME)
  return False
  return True
@@ -97,4 +97,4 @@ def process_image(source_path : str, target_path : str, output_path : str) -> No


  def process_video(source_path : str, temp_frame_paths : List[str]) -> None:
- facefusion.processors.frame.core.process_video(None, temp_frame_paths, process_frames)
+ DeepFakeAI.processors.frame.core.process_video(None, temp_frame_paths, process_frames)
DeepFakeAI/processors/frame/modules/face_swapper.py CHANGED
@@ -3,14 +3,14 @@ import cv2
  import insightface
  import threading

- import facefusion.globals
- import facefusion.processors.frame.core as frame_processors
- from facefusion import wording
- from facefusion.core import update_status
- from facefusion.face_analyser import get_one_face, get_many_faces, find_similar_faces
- from facefusion.face_reference import get_face_reference, set_face_reference
- from facefusion.typing import Face, Frame
- from facefusion.utilities import conditional_download, resolve_relative_path, is_image, is_video
+ import DeepFakeAI.globals
+ import DeepFakeAI.processors.frame.core as frame_processors
+ from DeepFakeAI import wording
+ from DeepFakeAI.core import update_status
+ from DeepFakeAI.face_analyser import get_one_face, get_many_faces, find_similar_faces
+ from DeepFakeAI.face_reference import get_face_reference, set_face_reference
+ from DeepFakeAI.typing import Face, Frame
+ from DeepFakeAI.utilities import conditional_download, resolve_relative_path, is_image, is_video

  FRAME_PROCESSOR = None
  THREAD_LOCK = threading.Lock()
@@ -23,7 +23,7 @@ def get_frame_processor() -> Any:
  with THREAD_LOCK:
  if FRAME_PROCESSOR is None:
  model_path = resolve_relative_path('../.assets/models/inswapper_128.onnx')
- FRAME_PROCESSOR = insightface.model_zoo.get_model(model_path, providers = facefusion.globals.execution_providers)
+ FRAME_PROCESSOR = insightface.model_zoo.get_model(model_path, providers = DeepFakeAI.globals.execution_providers)
  return FRAME_PROCESSOR


@@ -35,18 +35,18 @@ def clear_frame_processor() -> None:

  def pre_check() -> bool:
  download_directory_path = resolve_relative_path('../.assets/models')
- conditional_download(download_directory_path, ['https://github.com/facefusion/facefusion-assets/releases/download/models/inswapper_128.onnx'])
+ conditional_download(download_directory_path, ['https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/models/inswapper_128.onnx'])
  return True


  def pre_process() -> bool:
- if not is_image(facefusion.globals.source_path):
+ if not is_image(DeepFakeAI.globals.source_path):
  update_status(wording.get('select_image_source') + wording.get('exclamation_mark'), NAME)
  return False
- elif not get_one_face(cv2.imread(facefusion.globals.source_path)):
+ elif not get_one_face(cv2.imread(DeepFakeAI.globals.source_path)):
  update_status(wording.get('no_source_face_detected') + wording.get('exclamation_mark'), NAME)
  return False
- if not is_image(facefusion.globals.target_path) and not is_video(facefusion.globals.target_path):
+ if not is_image(DeepFakeAI.globals.target_path) and not is_video(DeepFakeAI.globals.target_path):
  update_status(wording.get('select_image_or_video_target') + wording.get('exclamation_mark'), NAME)
  return False
  return True
@@ -61,12 +61,12 @@ def swap_face(source_face : Face, target_face : Face, temp_frame : Frame) -> Fra


  def process_frame(source_face : Face, reference_face : Face, temp_frame : Frame) -> Frame:
- if 'reference' in facefusion.globals.face_recognition:
- similar_faces = find_similar_faces(temp_frame, reference_face, facefusion.globals.reference_face_distance)
+ if 'reference' in DeepFakeAI.globals.face_recognition:
+ similar_faces = find_similar_faces(temp_frame, reference_face, DeepFakeAI.globals.reference_face_distance)
  if similar_faces:
  for similar_face in similar_faces:
  temp_frame = swap_face(source_face, similar_face, temp_frame)
- if 'many' in facefusion.globals.face_recognition:
+ if 'many' in DeepFakeAI.globals.face_recognition:
  many_faces = get_many_faces(temp_frame)
  if many_faces:
  for target_face in many_faces:
@@ -76,7 +76,7 @@ def process_frame(source_face : Face, reference_face : Face, temp_frame : Frame)

  def process_frames(source_path : str, temp_frame_paths : List[str], update: Callable[[], None]) -> None:
  source_face = get_one_face(cv2.imread(source_path))
- reference_face = get_face_reference() if 'reference' in facefusion.globals.face_recognition else None
+ reference_face = get_face_reference() if 'reference' in DeepFakeAI.globals.face_recognition else None
  for temp_frame_path in temp_frame_paths:
  temp_frame = cv2.imread(temp_frame_path)
  result_frame = process_frame(source_face, reference_face, temp_frame)
@@ -88,7 +88,7 @@ def process_frames(source_path : str, temp_frame_paths : List[str], update: Call
  def process_image(source_path : str, target_path : str, output_path : str) -> None:
  source_face = get_one_face(cv2.imread(source_path))
  target_frame = cv2.imread(target_path)
- reference_face = get_one_face(target_frame, facefusion.globals.reference_face_position) if 'reference' in facefusion.globals.face_recognition else None
+ reference_face = get_one_face(target_frame, DeepFakeAI.globals.reference_face_position) if 'reference' in DeepFakeAI.globals.face_recognition else None
  result_frame = process_frame(source_face, reference_face, target_frame)
  cv2.imwrite(output_path, result_frame)

@@ -99,7 +99,7 @@ def process_video(source_path : str, temp_frame_paths : List[str]) -> None:


  def conditional_set_face_reference(temp_frame_paths : List[str]) -> None:
- if 'reference' in facefusion.globals.face_recognition and not get_face_reference():
- reference_frame = cv2.imread(temp_frame_paths[facefusion.globals.reference_frame_number])
- reference_face = get_one_face(reference_frame, facefusion.globals.reference_face_position)
+ if 'reference' in DeepFakeAI.globals.face_recognition and not get_face_reference():
+ reference_frame = cv2.imread(temp_frame_paths[DeepFakeAI.globals.reference_frame_number])
+ reference_face = get_one_face(reference_frame, DeepFakeAI.globals.reference_face_position)
  set_face_reference(reference_face)
DeepFakeAI/processors/frame/modules/frame_enhancer.py CHANGED
@@ -4,9 +4,9 @@ import threading
  from basicsr.archs.rrdbnet_arch import RRDBNet
  from realesrgan import RealESRGANer

- import facefusion.processors.frame.core as frame_processors
- from facefusion.typing import Frame, Face
- from facefusion.utilities import conditional_download, resolve_relative_path
+ import DeepFakeAI.processors.frame.core as frame_processors
+ from DeepFakeAI.typing import Frame, Face
+ from DeepFakeAI.utilities import conditional_download, resolve_relative_path

  FRAME_PROCESSOR = None
  THREAD_SEMAPHORE = threading.Semaphore()
@@ -47,7 +47,7 @@ def clear_frame_processor() -> None:

  def pre_check() -> bool:
  download_directory_path = resolve_relative_path('../.assets/models')
- conditional_download(download_directory_path, ['https://github.com/facefusion/facefusion-assets/releases/download/models/RealESRGAN_x4plus.pth'])
+ conditional_download(download_directory_path, ['https://github.com/DeepFakeAI/DeepFakeAI-assets/releases/download/models/RealESRGAN_x4plus.pth'])
  return True


DeepFakeAI/uis/__pycache__/__init__.cpython-310.pyc CHANGED
Binary files a/DeepFakeAI/uis/__pycache__/__init__.cpython-310.pyc and b/DeepFakeAI/uis/__pycache__/__init__.cpython-310.pyc differ
 
DeepFakeAI/uis/__pycache__/core.cpython-310.pyc CHANGED
Binary files a/DeepFakeAI/uis/__pycache__/core.cpython-310.pyc and b/DeepFakeAI/uis/__pycache__/core.cpython-310.pyc differ
 
DeepFakeAI/uis/__pycache__/typing.cpython-310.pyc CHANGED
Binary files a/DeepFakeAI/uis/__pycache__/typing.cpython-310.pyc and b/DeepFakeAI/uis/__pycache__/typing.cpython-310.pyc differ
 
DeepFakeAI/uis/components/__pycache__/__init__.cpython-310.pyc CHANGED
Binary files a/DeepFakeAI/uis/components/__pycache__/__init__.cpython-310.pyc and b/DeepFakeAI/uis/components/__pycache__/__init__.cpython-310.pyc differ
 
DeepFakeAI/uis/components/__pycache__/about.cpython-310.pyc CHANGED
Binary files a/DeepFakeAI/uis/components/__pycache__/about.cpython-310.pyc and b/DeepFakeAI/uis/components/__pycache__/about.cpython-310.pyc differ
 
DeepFakeAI/uis/components/__pycache__/execution.cpython-310.pyc CHANGED
Binary files a/DeepFakeAI/uis/components/__pycache__/execution.cpython-310.pyc and b/DeepFakeAI/uis/components/__pycache__/execution.cpython-310.pyc differ
 
DeepFakeAI/uis/components/__pycache__/face_analyser.cpython-310.pyc CHANGED
Binary files a/DeepFakeAI/uis/components/__pycache__/face_analyser.cpython-310.pyc and b/DeepFakeAI/uis/components/__pycache__/face_analyser.cpython-310.pyc differ
 
DeepFakeAI/uis/components/__pycache__/face_selector.cpython-310.pyc CHANGED
Binary files a/DeepFakeAI/uis/components/__pycache__/face_selector.cpython-310.pyc and b/DeepFakeAI/uis/components/__pycache__/face_selector.cpython-310.pyc differ
 
DeepFakeAI/uis/components/__pycache__/output.cpython-310.pyc CHANGED
Binary files a/DeepFakeAI/uis/components/__pycache__/output.cpython-310.pyc and b/DeepFakeAI/uis/components/__pycache__/output.cpython-310.pyc differ
 
DeepFakeAI/uis/components/__pycache__/output_settings.cpython-310.pyc CHANGED
Binary files a/DeepFakeAI/uis/components/__pycache__/output_settings.cpython-310.pyc and b/DeepFakeAI/uis/components/__pycache__/output_settings.cpython-310.pyc differ
 
DeepFakeAI/uis/components/__pycache__/preview.cpython-310.pyc CHANGED
Binary files a/DeepFakeAI/uis/components/__pycache__/preview.cpython-310.pyc and b/DeepFakeAI/uis/components/__pycache__/preview.cpython-310.pyc differ
 
DeepFakeAI/uis/components/__pycache__/processors.cpython-310.pyc CHANGED
Binary files a/DeepFakeAI/uis/components/__pycache__/processors.cpython-310.pyc and b/DeepFakeAI/uis/components/__pycache__/processors.cpython-310.pyc differ
 
DeepFakeAI/uis/components/__pycache__/settings.cpython-310.pyc CHANGED
Binary files a/DeepFakeAI/uis/components/__pycache__/settings.cpython-310.pyc and b/DeepFakeAI/uis/components/__pycache__/settings.cpython-310.pyc differ
 
DeepFakeAI/uis/components/__pycache__/source.cpython-310.pyc CHANGED
Binary files a/DeepFakeAI/uis/components/__pycache__/source.cpython-310.pyc and b/DeepFakeAI/uis/components/__pycache__/source.cpython-310.pyc differ
 
DeepFakeAI/uis/components/__pycache__/target.cpython-310.pyc CHANGED
Binary files a/DeepFakeAI/uis/components/__pycache__/target.cpython-310.pyc and b/DeepFakeAI/uis/components/__pycache__/target.cpython-310.pyc differ
 
DeepFakeAI/uis/components/__pycache__/temp_frame.cpython-310.pyc CHANGED
Binary files a/DeepFakeAI/uis/components/__pycache__/temp_frame.cpython-310.pyc and b/DeepFakeAI/uis/components/__pycache__/temp_frame.cpython-310.pyc differ
 
DeepFakeAI/uis/components/__pycache__/trim_frame.cpython-310.pyc CHANGED
Binary files a/DeepFakeAI/uis/components/__pycache__/trim_frame.cpython-310.pyc and b/DeepFakeAI/uis/components/__pycache__/trim_frame.cpython-310.pyc differ
 
DeepFakeAI/uis/components/about.py CHANGED
@@ -1,7 +1,7 @@
  from typing import Optional
  import gradio

- from facefusion import metadata
+ from DeepFakeAI import metadata

  ABOUT_HTML : Optional[gradio.HTML] = None

DeepFakeAI/uis/components/benchmark.py CHANGED
@@ -4,12 +4,12 @@ import tempfile
 import statistics
 import gradio
 
-import facefusion.globals
-from facefusion import wording
-from facefusion.capturer import get_video_frame_total
-from facefusion.core import conditional_process
-from facefusion.uis.typing import Update
-from facefusion.utilities import normalize_output_path, clear_temp
+import DeepFakeAI.globals
+from DeepFakeAI import wording
+from DeepFakeAI.capturer import get_video_frame_total
+from DeepFakeAI.core import conditional_process
+from DeepFakeAI.uis.typing import Update
+from DeepFakeAI.utilities import normalize_output_path, clear_temp
 
 BENCHMARK_RESULT_DATAFRAME : Optional[gradio.Dataframe] = None
 BENCHMARK_CYCLES_SLIDER : Optional[gradio.Button] = None
@@ -65,7 +65,7 @@ def listen() -> None:
 
 
 def update(benchmark_cycles : int) -> Update:
-    facefusion.globals.source_path = '.assets/examples/source.jpg'
+    DeepFakeAI.globals.source_path = '.assets/examples/source.jpg'
     target_paths =\
     [
         '.assets/examples/target-240p.mp4',
@@ -84,9 +84,9 @@ def benchmark(target_path : str, benchmark_cycles : int) -> List[Any]:
     process_times = []
     total_fps = 0.0
     for i in range(benchmark_cycles + 1):
-        facefusion.globals.target_path = target_path
-        facefusion.globals.output_path = normalize_output_path(facefusion.globals.source_path, facefusion.globals.target_path, tempfile.gettempdir())
-        video_frame_total = get_video_frame_total(facefusion.globals.target_path)
+        DeepFakeAI.globals.target_path = target_path
+        DeepFakeAI.globals.output_path = normalize_output_path(DeepFakeAI.globals.source_path, DeepFakeAI.globals.target_path, tempfile.gettempdir())
+        video_frame_total = get_video_frame_total(DeepFakeAI.globals.target_path)
         start_time = time.perf_counter()
         conditional_process()
         end_time = time.perf_counter()
@@ -101,7 +101,7 @@ def benchmark(target_path : str, benchmark_cycles : int) -> List[Any]:
     relative_fps = round(total_fps / benchmark_cycles, 2)
     return\
     [
-        facefusion.globals.target_path,
+        DeepFakeAI.globals.target_path,
         benchmark_cycles,
         average_run,
         fastest_run,
@@ -111,6 +111,6 @@ def benchmark(target_path : str, benchmark_cycles : int) -> List[Any]:
 
 
 def clear() -> Update:
-    if facefusion.globals.target_path:
-        clear_temp(facefusion.globals.target_path)
+    if DeepFakeAI.globals.target_path:
+        clear_temp(DeepFakeAI.globals.target_path)
     return gradio.update(value = None)
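
Note: the benchmark loop above times each call to conditional_process with time.perf_counter and derives a per-cycle FPS from the target video's frame total, then aggregates the cycles with statistics. Below is a minimal, self-contained sketch of that timing pattern only; the workload callable, the frame count, and the treatment of the extra `+ 1` iteration as a warm-up pass are assumptions (the loop body between the timing calls is not shown in this hunk), not the module's exact code.

import time
import statistics
from typing import Callable, Dict

def time_workload(run : Callable[[], None], frame_total : int, cycles : int) -> Dict[str, float]:
    # Sketch of the pattern in benchmark.py: run once extra as a warm-up,
    # then collect elapsed time and frames-per-second for the remaining cycles.
    process_times = []
    total_fps = 0.0
    for cycle in range(cycles + 1):
        start_time = time.perf_counter()
        run()
        end_time = time.perf_counter()
        process_time = end_time - start_time
        if cycle > 0:  # assumed warm-up exclusion
            process_times.append(process_time)
            total_fps += frame_total / process_time
    return {
        'average_run': round(statistics.mean(process_times), 2),
        'fastest_run': round(min(process_times), 2),
        'relative_fps': round(total_fps / cycles, 2)
    }

# Example (placeholder workload): time_workload(lambda: time.sleep(0.05), frame_total = 270, cycles = 3)
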
DeepFakeAI/uis/components/execution.py CHANGED
@@ -2,12 +2,12 @@ from typing import List, Optional
 import gradio
 import onnxruntime
 
-import facefusion.globals
-from facefusion import wording
-from facefusion.face_analyser import clear_face_analyser
-from facefusion.processors.frame.core import clear_frame_processors_modules
-from facefusion.uis.typing import Update
-from facefusion.utilities import encode_execution_providers, decode_execution_providers
+import DeepFakeAI.globals
+from DeepFakeAI import wording
+from DeepFakeAI.face_analyser import clear_face_analyser
+from DeepFakeAI.processors.frame.core import clear_frame_processors_modules
+from DeepFakeAI.uis.typing import Update
+from DeepFakeAI.utilities import encode_execution_providers, decode_execution_providers
 
 EXECUTION_PROVIDERS_CHECKBOX_GROUP : Optional[gradio.CheckboxGroup] = None
 EXECUTION_THREAD_COUNT_SLIDER : Optional[gradio.Slider] = None
@@ -23,18 +23,18 @@ def render() -> None:
     EXECUTION_PROVIDERS_CHECKBOX_GROUP = gradio.CheckboxGroup(
         label = wording.get('execution_providers_checkbox_group_label'),
         choices = encode_execution_providers(onnxruntime.get_available_providers()),
-        value = encode_execution_providers(facefusion.globals.execution_providers)
+        value = encode_execution_providers(DeepFakeAI.globals.execution_providers)
     )
     EXECUTION_THREAD_COUNT_SLIDER = gradio.Slider(
         label = wording.get('execution_thread_count_slider_label'),
-        value = facefusion.globals.execution_thread_count,
+        value = DeepFakeAI.globals.execution_thread_count,
         step = 1,
         minimum = 1,
         maximum = 128
     )
     EXECUTION_QUEUE_COUNT_SLIDER = gradio.Slider(
         label = wording.get('execution_queue_count_slider_label'),
-        value = facefusion.globals.execution_queue_count,
+        value = DeepFakeAI.globals.execution_queue_count,
         step = 1,
         minimum = 1,
         maximum = 16
@@ -50,15 +50,15 @@ def listen() -> None:
 def update_execution_providers(execution_providers : List[str]) -> Update:
     clear_face_analyser()
     clear_frame_processors_modules()
-    facefusion.globals.execution_providers = decode_execution_providers(execution_providers)
+    DeepFakeAI.globals.execution_providers = decode_execution_providers(execution_providers)
     return gradio.update(value = execution_providers)
 
 
 def update_execution_thread_count(execution_thread_count : int = 1) -> Update:
-    facefusion.globals.execution_thread_count = execution_thread_count
+    DeepFakeAI.globals.execution_thread_count = execution_thread_count
     return gradio.update(value = execution_thread_count)
 
 
 def update_execution_queue_count(execution_queue_count : int = 1) -> Update:
-    facefusion.globals.execution_queue_count = execution_queue_count
+    DeepFakeAI.globals.execution_queue_count = execution_queue_count
    return gradio.update(value = execution_queue_count)
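
Note: encode_execution_providers and decode_execution_providers are imported from DeepFakeAI.utilities, but their implementations are not part of this diff. The checkbox group above suggests a round trip between the provider names onnxruntime reports (e.g. 'CUDAExecutionProvider') and short labels shown in the UI. The following is a hypothetical reconstruction of that round trip, not the module's confirmed code; only onnxruntime.get_available_providers() is a real, documented call here.

from typing import List
import onnxruntime

def encode_execution_providers(execution_providers : List[str]) -> List[str]:
    # Assumed mapping: 'CUDAExecutionProvider' -> 'cuda', 'CPUExecutionProvider' -> 'cpu', ...
    return [ provider.replace('ExecutionProvider', '').lower() for provider in execution_providers ]

def decode_execution_providers(execution_providers : List[str]) -> List[str]:
    # Map the short labels back to the full names that onnxruntime actually exposes.
    available = onnxruntime.get_available_providers()
    return [ provider for provider, encoded in zip(available, encode_execution_providers(available)) if encoded in execution_providers ]

# Example: decode_execution_providers(['cpu']) would yield ['CPUExecutionProvider'] on any build.
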
DeepFakeAI/uis/components/face_analyser.py CHANGED
@@ -2,11 +2,11 @@ from typing import Optional
 
 import gradio
 
-import facefusion.choices
-import facefusion.globals
-from facefusion import wording
-from facefusion.uis import core as ui
-from facefusion.uis.typing import Update
+import DeepFakeAI.choices
+import DeepFakeAI.globals
+from DeepFakeAI import wording
+from DeepFakeAI.uis import core as ui
+from DeepFakeAI.uis.typing import Update
 
 FACE_ANALYSER_DIRECTION_DROPDOWN : Optional[gradio.Dropdown] = None
 FACE_ANALYSER_AGE_DROPDOWN : Optional[gradio.Dropdown] = None
@@ -22,18 +22,18 @@ def render() -> None:
     with gradio.Row():
         FACE_ANALYSER_DIRECTION_DROPDOWN = gradio.Dropdown(
             label = wording.get('face_analyser_direction_dropdown_label'),
-            choices = facefusion.choices.face_analyser_direction,
-            value = facefusion.globals.face_analyser_direction
+            choices = DeepFakeAI.choices.face_analyser_direction,
+            value = DeepFakeAI.globals.face_analyser_direction
         )
         FACE_ANALYSER_AGE_DROPDOWN = gradio.Dropdown(
             label = wording.get('face_analyser_age_dropdown_label'),
-            choices = ['none'] + facefusion.choices.face_analyser_age,
-            value = facefusion.globals.face_analyser_age or 'none'
+            choices = ['none'] + DeepFakeAI.choices.face_analyser_age,
+            value = DeepFakeAI.globals.face_analyser_age or 'none'
         )
         FACE_ANALYSER_GENDER_DROPDOWN = gradio.Dropdown(
             label = wording.get('face_analyser_gender_dropdown_label'),
-            choices = ['none'] + facefusion.choices.face_analyser_gender,
-            value = facefusion.globals.face_analyser_gender or 'none'
+            choices = ['none'] + DeepFakeAI.choices.face_analyser_gender,
+            value = DeepFakeAI.globals.face_analyser_gender or 'none'
         )
         ui.register_component('face_analyser_direction_dropdown', FACE_ANALYSER_DIRECTION_DROPDOWN)
         ui.register_component('face_analyser_age_dropdown', FACE_ANALYSER_AGE_DROPDOWN)
@@ -48,7 +48,7 @@ def listen() -> None:
 
 def update_dropdown(name : str, value : str) -> Update:
     if value == 'none':
-        setattr(facefusion.globals, name, None)
+        setattr(DeepFakeAI.globals, name, None)
     else:
-        setattr(facefusion.globals, name, value)
+        setattr(DeepFakeAI.globals, name, value)
     return gradio.update(value = value)
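
Note: update_dropdown above writes the selected value straight onto the DeepFakeAI.globals module via setattr, treating the sentinel string 'none' as "no filter" and storing None instead. A small self-contained illustration of that pattern, assuming a types.SimpleNamespace stand-in for the globals module (the namespace, attribute names, and sample value here are placeholders, not the project's actual choices):

from types import SimpleNamespace

# Stand-in for DeepFakeAI.globals, which exposes plain module-level attributes.
fake_globals = SimpleNamespace(face_analyser_age = None, face_analyser_gender = None)

def update_dropdown(name : str, value : str) -> None:
    # 'none' in the dropdown maps to None on the globals object; any other value is stored as-is.
    if value == 'none':
        setattr(fake_globals, name, None)
    else:
        setattr(fake_globals, name, value)

update_dropdown('face_analyser_age', 'adult')       # placeholder value
assert fake_globals.face_analyser_age == 'adult'
update_dropdown('face_analyser_age', 'none')
assert fake_globals.face_analyser_age is None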