import os
import json
import shutil

import gradio as gr
import numpy as np
import torch
from PIL import Image

from lib.data import get_meanpose
from lib.network import get_autoencoder
from lib.util.general import get_config
from lib.util.motion import preprocess_mixamo, preprocess_test, postprocess
from lib.util.visualization import motion2video

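# This demo assumes the TransMoMo repository layout (lib/, configs/) and a
# CUDA-capable GPU; the pretrained checkpoint is expected at
# ./data/autoencoder_00200000.pt.
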
def load_and_preprocess(path, config, mean_pose, std_pose):
    """Load a (joints, 3, T) motion sequence and convert it to the 2D
    input format expected by the autoencoder."""
    motion3d = np.load(path)

    # Truncate the sequence to a multiple of 8 frames so it survives the
    # network's temporal downsampling without padding.
    _, _, T = motion3d.shape
    T = (T // 8) * 8
    motion3d = motion3d[:, :, :T]

    # Orthographic projection onto the xz-plane: keep x and z coordinates only.
    motion_proj = motion3d[:, [0, 2], :]

    motion_proj = preprocess_mixamo(motion_proj, unit=1.0)
    motion_proj, start = preprocess_test(motion_proj, mean_pose, std_pose, config.data.unit)

    # Flatten (joints, 2, T) to (joints * 2, T) and convert to a float tensor.
    motion_proj = motion_proj.reshape((-1, motion_proj.shape[-1]))
    motion_proj = torch.from_numpy(motion_proj).float()

    return motion_proj, start

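# A quick sanity check of the preprocessing path (a sketch, not part of the
# demo flow; assumes the repo's config and mean-pose files are present, and
# uses a hypothetical random (15, 3, 64) array in place of a real Mixamo
# skeleton sequence):
#
#   config = get_config("./configs/transmomo.yaml")
#   mean_pose, std_pose = get_meanpose("test", config.data)
#   np.save("/tmp/dummy.npy", np.random.randn(15, 3, 64).astype(np.float32))
#   x, start = load_and_preprocess("/tmp/dummy.npy", config, mean_pose, std_pose)
#   print(x.shape)  # expect a (J * 2, T) float tensor
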
def handle_motion_generation(npy1, npy2):
    # gr.File delivers each upload as a temp-file path (or as a file object
    # in older Gradio versions); copy the data into the working directory.
    path1 = './data/a.npy'
    path2 = './data/b.npy'
    shutil.copy(getattr(npy1, "name", npy1), path1)
    shutil.copy(getattr(npy2, "name", npy2), path2)

    config_path = './configs/transmomo.yaml'
    checkpoint_path = './data/autoencoder_00200000.pt'
    out_dir_path = './output'

    # Build the autoencoder, load the pretrained weights, and switch to
    # evaluation mode on the GPU.
    config = get_config(config_path)
    ae = get_autoencoder(config)
    ae.load_state_dict(torch.load(checkpoint_path))
    ae.cuda()
    ae.eval()
    mean_pose, std_pose = get_meanpose("test", config.data)

    os.makedirs(out_dir_path, exist_ok=True)
    out_path1 = os.path.join(out_dir_path, "adv.npy")

    x_a, x_a_start = load_and_preprocess(path1, config, mean_pose, std_pose)
    x_b, x_b_start = load_and_preprocess(path2, config, mean_pose, std_pose)

    x_a_batch = x_a.unsqueeze(0).cuda()
    x_b_batch = x_b.unsqueeze(0).cuda()

    # Retarget: motion from the first input, body structure from the second,
    # and view angle from the first again.
    x_ab = ae.cross2d(x_a_batch, x_b_batch, x_a_batch)
    x_ab = postprocess(x_ab, mean_pose, std_pose, config.data.unit, start=x_a_start)

    np.save(out_path1, x_ab)

    # Render the result; motion2video also dumps individual frames into
    # ./an-frames/, from which the first frame is returned for display.
    motion_data = x_ab
    height = 512
    width = 512
    save_path = './an.mp4'
    colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255)]
    bg_color = (255, 255, 255)
    fps = 25
    motion2video(motion_data, height, width, save_path, colors, bg_color=bg_color, transparency=False, fps=fps)

    first_frame_image = Image.open('./an-frames/0000.png')
    return first_frame_image

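# Note: handle_motion_generation reloads the config and checkpoint on every
# click. A minimal module-level caching sketch (same paths as above; the
# helper name is hypothetical):
#
#   _CACHE = {}
#   def _get_model(config_path, checkpoint_path):
#       if config_path not in _CACHE:
#           config = get_config(config_path)
#           ae = get_autoencoder(config)
#           ae.load_state_dict(torch.load(checkpoint_path))
#           _CACHE[config_path] = (config, ae.cuda().eval())
#       return _CACHE[config_path]
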
with gr.Blocks() as demo:
    gr.Markdown("Upload two `.npy` files to generate motion and visualize the first frame of the output animation.")

    with gr.Row():
        file1 = gr.File(file_types=[".npy"], label="Upload first .npy file")
        file2 = gr.File(file_types=[".npy"], label="Upload second .npy file")

    with gr.Row():
        generate_btn = gr.Button("Generate Motion")

    output_image = gr.Image(label="First Frame of the Generated Animation")

    generate_btn.click(
        fn=handle_motion_generation,
        inputs=[file1, file2],
        outputs=output_image,
    )

if __name__ == "__main__":
    demo.launch()
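    # demo.launch() serves locally (by default on http://127.0.0.1:7860);
    # passing share=True would additionally create a temporary public link.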