Spaces (Runtime error)
maduvantha committed · Commit e1ff00c · 1 Parent(s): 79ba87f
Upload app.py

app.py ADDED
@@ -0,0 +1,119 @@
import imageio
import imageio.v3 as iio
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from skimage.transform import resize
from IPython.display import HTML
import asyncio
import warnings
import time  # needed by dy() below
from demo import load_checkpoints
import torch
from animate import normalize_kp
from skimage import img_as_ubyte
from tqdm import tqdm
from scipy.spatial import ConvexHull
from urllib.request import Request, urlopen
from nextcord import Interaction, SlashOption, ChannelType
from nextcord.abc import GuildChannel
from nextcord.ext import commands
import nextcord

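# demo.load_checkpoints and animate.normalize_kp are the helpers from the
# first-order-model (First Order Motion Model) repository; this assumes demo.py,
# animate.py, config/vox-256.yaml and vox-cpk.pth.tar sit next to app.py.
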
client = commands.Bot(command_prefix='!')
testigaerverid = 989256818398203945  # guild id the slash command is registered to

# pick the driving-video frame whose facial landmarks best match the source image
def find_best_frame(source, driving, cpu=False):
    import face_alignment

    def normalize_kp(kp):
        kp = kp - kp.mean(axis=0, keepdims=True)
        area = ConvexHull(kp[:, :2]).volume
        area = np.sqrt(area)
        kp[:, :2] = kp[:, :2] / area
        return kp

    fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, flip_input=True,
                                      device='cpu' if cpu else 'cuda')
    kp_source = fa.get_landmarks(255 * source)[0]
    kp_source = normalize_kp(kp_source)
    norm = float('inf')
    frame_num = 0
    for i, image in tqdm(enumerate(driving)):
        kp_driving = fa.get_landmarks(255 * image)[0]
        kp_driving = normalize_kp(kp_driving)
        new_norm = (np.abs(kp_source - kp_driving) ** 2).sum()
        if new_norm < norm:
            norm = new_norm
            frame_num = i
    return frame_num
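
# find_best_frame is defined above but never called in this file. In the
# first-order-model demo it is used roughly as sketched below, assuming a
# make_animation() helper imported from demo.py (which this app does not import),
# so the lines are left commented out:
#
#     i = find_best_frame(source_image, driving_video, cpu=True)
#     driving_forward = driving_video[i:]
#     driving_backward = driving_video[:(i + 1)][::-1]
#     predictions_forward = make_animation(source_image, driving_forward, eg, cd, relative=True)
#     predictions_backward = make_animation(source_image, driving_backward, eg, cd, relative=True)
#     predictions = predictions_backward[::-1] + predictions_forward[1:]
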
# build a side-by-side animation of source, driving and (optionally) generated frames
def display(source, driving, generated=None):
    fig = plt.figure(figsize=(8 + 4 * (generated is not None), 6))

    ims = []
    for i in range(len(driving)):
        cols = [source]
        cols.append(driving[i])
        if generated is not None:
            cols.append(generated[i])
        im = plt.imshow(np.concatenate(cols, axis=1), animated=True)
        plt.axis('off')
        ims.append([im])

    ani = animation.ArtistAnimation(fig, ims, interval=50, repeat_delay=1000)
    plt.close()
    return ani
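
# display() returns a matplotlib animation, and the HTML import above is only useful
# in a notebook. A minimal sketch of that notebook-only usage, assuming source_image,
# driving_video and predictions are already defined:
#
#     HTML(display(source_image, driving_video, predictions).to_html5_video())
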
# unused helper generator: yields "1".."10", sleeping 5 seconds between values
def dy():
    for i in range(10):
        i = i + 1
        time.sleep(5)
        yield str(i)


@client.slash_command(name="repeat", description="whatever do you want", guild_ids=[testigaerverid])
async def repeat(interaction: Interaction, message: str, message2: str):
    channel = interaction.channel
    await interaction.response.send_message("jj")  # placeholder acknowledgement so the interaction does not time out
    warnings.filterwarnings("ignore")
    web_image = message
    request_site = Request(web_image, headers={"User-Agent": "Mozilla/5.0"})
    source_image = iio.imread(urlopen(request_site).read())
    urlforvideo = message2
    driving_video = iio.imread(urlforvideo)
    # Resize image and video to 256x256
    source_image = resize(source_image, (256, 256))[..., :3]
    driving_video = [resize(frame, (256, 256))[..., :3] for frame in driving_video]
    eg, cd = load_checkpoints('config/vox-256.yaml', 'vox-cpk.pth.tar', True)  # generator, keypoint detector (cpu=True)
    cpu = True
    print("came to this")
    with torch.no_grad():
        predictions = []
        source = torch.tensor(source_image[np.newaxis].astype(np.float32)).permute(0, 3, 1, 2)
        if not cpu:
            source = source.cuda()
        driving = torch.tensor(np.array(driving_video)[np.newaxis].astype(np.float32)).permute(0, 4, 1, 2, 3)
        kp_source = cd(source)
        kp_driving_initial = cd(driving[:, :, 0])
        progress_message = await channel.send("Progress: 0%")
        for frame_idx in tqdm(range(driving.shape[2])):

            driving_frame = driving[:, :, frame_idx]
            if not cpu:
                driving_frame = driving_frame.cuda()
            kp_driving = cd(driving_frame)
            kp_norm = normalize_kp(kp_source=kp_source, kp_driving=kp_driving,
                                   kp_driving_initial=kp_driving_initial, use_relative_movement=True,
                                   use_relative_jacobian=True, adapt_movement_scale=True)
            out = eg(source, kp_source=kp_source, kp_driving=kp_norm)
            predictions.append(np.transpose(out['prediction'].data.cpu().numpy(), [0, 2, 3, 1])[0])
            await progress_message.edit(content=f"{frame_idx}")

    print("can even print this, man")
    imageio.mimsave('../generated.mp4', [img_as_ubyte(frame) for frame in predictions])
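
    # The generated clip is written to disk but never sent back to Discord. A minimal
    # sketch of doing that (assuming the file is written successfully and fits the
    # Discord upload limit) would be:
    #
    #     await channel.send(file=nextcord.File('../generated.mp4'))
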
@client.event
async def on_ready():
    print("Bot is connected")



client.run("MTA1OTcwODIxNTY5MDAwNjU5OA.GjNyS_.QNudUyA7G-gHbMZPQDuPWIQdmldKFJOi5c6AdI")
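
# The bot token above is hard-coded in the source. A common alternative (sketch, assuming
# a DISCORD_TOKEN environment variable / Space secret is configured) would be:
#
#     import os
#     client.run(os.environ["DISCORD_TOKEN"])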