import argparse
import os
import shutil

import gradio as gr
from huggingface_hub import snapshot_download

from scripts.inference import inference_process
# Download the repository contents into a directory
hallo_dir = snapshot_download(repo_id="fudan-generative-ai/hallo")
# Define the new directory path for the pretrained models
new_dir = 'pretrained_models'
# Ensure the new directory exists
os.makedirs(new_dir, exist_ok=True)
# Move all contents from the downloaded directory to the new directory
for filename in os.listdir(hallo_dir):
    shutil.move(os.path.join(hallo_dir, filename), os.path.join(new_dir, filename))
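# Possible simplification (assuming the installed huggingface_hub supports the
# local_dir argument of snapshot_download): download straight into the target
# directory and skip the move loop above, e.g.
#   hallo_dir = snapshot_download(repo_id="fudan-generative-ai/hallo", local_dir=new_dir)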
def run_inference(source_image, driving_audio, progress=gr.Progress(track_tqdm=True)):
    # Construct the argparse.Namespace object with all the attributes inference_process expects
    args = argparse.Namespace(
        config='configs/inference/default.yaml',  # Adjust this path as necessary
        source_image=source_image,
        driving_audio=driving_audio,
        output='output.mp4',  # You might want to manage output paths dynamically
        pose_weight=1.0,
        face_weight=1.0,
        lip_weight=1.0,
        face_expand_ratio=1.2,
        checkpoint=None  # Adjust or set this according to your checkpointing strategy
    )

    # Call the imported function
    inference_process(args)

    # Return the path to the generated video
    return 'output.mp4'  # Modify based on your output handling
iface = gr.Interface(
    fn=run_inference,
    inputs=[gr.Image(type="filepath"), gr.Audio(type="filepath")],
    outputs=gr.Video()
)
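# gr.Progress(track_tqdm=True) relies on the Gradio queue to stream progress updates;
# recent Gradio versions enable the queue by default, on older ones call iface.queue()
# before launching.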
iface.launch()