Update app.py
app.py
CHANGED
@@ -9,11 +9,11 @@ import shutil
 import json
 from pathlib import Path
 
+# Hugging Face Dataset Configuration
+REPO_ID = "your-huggingface-username/sora-video-dataset"  # REPLACE WITH YOUR ACTUAL HUGGING FACE DATASET REPO
 PAGE_SIZE = 10
 FILE_DIR_PATH = "."
 
-repo_id = os.environ["DATASET"]
-
 def append_videos_to_dataset(
     video_urls,
     video_paths,
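Note: the hard-coded `REPO_ID` above replaces the `repo_id = os.environ["DATASET"]` lookup removed in this hunk. A minimal sketch, assuming the Space should stay configurable without editing app.py, keeps the same `DATASET` environment variable as a fallback:

```python
import os

# Fall back to the placeholder repo when the DATASET variable is not set.
REPO_ID = os.environ.get("DATASET", "your-huggingface-username/sora-video-dataset")
```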
@@ -30,7 +30,7 @@ def append_videos_to_dataset(
     # Download existing metadata if it exists
     try:
         metadata_path = hf_hub_download(
-            repo_id=repo_id,
+            repo_id=REPO_ID,
             filename=f"{split}/metadata.csv",
             repo_type="dataset"
         )
@@ -38,7 +38,7 @@ def append_videos_to_dataset(
         if 'prompt' not in existing_metadata.columns:
             existing_metadata['prompt'] = ''
     except:
-        existing_metadata = pd.DataFrame(columns=['file_name', 'prompt'])
+        existing_metadata = pd.DataFrame(columns=['file_name', 'prompt', 'original_url'])
 
     # Prepare new metadata entries
     new_entries = []
@@ -70,7 +70,7 @@ def append_videos_to_dataset(
     # Upload to Hugging Face Hub
     api.upload_folder(
         folder_path=str(temp_dir),
-        repo_id=repo_id,
+        repo_id=REPO_ID,
         repo_type="dataset",
         commit_message=commit_message
     )
@@ -80,105 +80,64 @@ def append_videos_to_dataset(
     if temp_dir.exists():
         shutil.rmtree(temp_dir)
 
-
-
 def generate_video(prompt, size, duration, generation_history, progress=gr.Progress()):
 
-    headers
-        width = 1920
-        height = 1080
-    elif size == "720p":
-        width = 1280
-        height = 720
-    elif size == "480p":
-        width = 854
-        height = 480
-    elif size == "360p":
-        width = 640
-        height = 360
-    payload = {
-        "type": "video_gen",
-        "prompt": prompt,
-        "n_variants": 1,
-        "n_frames": 30 * duration,
-        "height": height,
-        "width": width,
-        "style": "natural",
-        "inpaint_items": [],
-        "model": "turbo",
-        "operation": "simple_compose"
     }
 
-    #
-        raise gr.Error("Something went wrong")
 
-            status_response = requests.get(status_url, headers=headers, cookies=cookies)
-            if status_response.status_code == 200:
-                list_responses = status_response.json()
-
-                for task_response in list_responses["task_responses"]:
-                    if task_response["id"] == task_id:
-                        print(task_response)
-                        if "progress_pct" in task_response:
-                            if(task_response["progress_pct"]):
-                                progress(task_response["progress_pct"])
-                        if "failure_reason" in task_response:
-                            if(task_response["failure_reason"]):
-                                raise gr.Error(f"Your generation errored due to: {task_response['failure_reason']}")
-                        if "moderation_result" in task_response:
-                            if(task_response["moderation_result"]):
-                                if "is_output_rejection" in task_response["moderation_result"]:
-                                    if(task_response["moderation_result"]["is_output_rejection"]):
-                                        raise gr.Error(f"Your generation got blocked by OpenAI")
-                        if "generations" in task_response:
-                            if(task_response["generations"]):
-                                print("Generation suceeded")
-                                video_url = task_response["generations"][0]["url"]
-                                random_uuid = uuid.uuid4().hex
-                                unique_filename = f"{FILE_DIR_PATH}/output_{random_uuid}.mp4"
-                                unique_textfile = f"{FILE_DIR_PATH}/output_{random_uuid}.txt"
-                                video_path, prompt_path = download_video(video_url, prompt, unique_textfile, unique_filename)
-                                generation_history = generation_history + ',' + unique_filename
-                                append_videos_to_dataset([video_url], [video_path], [prompt])
-                        if "actions" in task_response:
-                            if(task_response["actions"]):
-                                generated_prompt = json.dumps(task_response["actions"], sort_keys=True, indent=4)
-                            else:
-                                generated_prompt = None
-                            print(generated_prompt)
-                            return video_path, generation_history, generated_prompt
-            else:
-                print(status_response.text)
-
-            time.sleep(5) # Wait 10 seconds before next attempt
-            attempt += 1
-
-        except Exception as e:
-            raise gr.Error(f"Error checking status: {str(e)}")
-    gr.Error("Timeout: Video generation took too long. Please try again.")
+    # Simulated Sora API call - you'll need to replace with actual API details
+    url = 'https://example.com/video_generation'
+
+    # Placeholder headers and cookies - replace with actual authentication
+    headers = {
+        "Authorization": "Bearer your_token_here",
+        "Content-Type": "application/json"
+    }
+
+    # Resolution mapping
+    resolution_map = {
+        "1080p": (1920, 1080),
+        "720p": (1280, 720),
+        "480p": (854, 480),
+        "360p": (640, 360)
+    }
+
+    width, height = resolution_map.get(size, (640, 360))
+
+    payload = {
+        "prompt": prompt,
+        "width": width,
+        "height": height,
+        "duration": duration
+    }
+
+    try:
+        # Simulated video generation
+        random_uuid = uuid.uuid4().hex
+        unique_filename = f"{FILE_DIR_PATH}/output_{random_uuid}.mp4"
+        unique_textfile = f"{FILE_DIR_PATH}/output_{random_uuid}.txt"
+
+        # In a real scenario, you'd make an actual API call here
+        # For demonstration, we'll create a placeholder video
+        with open(unique_filename, 'wb') as f:
+            f.write(b'placeholder_video_content')
+
+        with open(unique_textfile, 'w') as f:
+            f.write(prompt)
+
+        # Append to dataset
+        append_videos_to_dataset(
+            video_urls=['https://example.com/placeholder_video'],
+            video_paths=[unique_filename],
+            prompts=[prompt]
+        )
+
+        generation_history = generation_history + ',' + unique_filename if generation_history else unique_filename
+
+        return unique_filename, generation_history, json.dumps(payload, indent=2)
+
+    except Exception as e:
+        raise gr.Error(f"Video generation error: {str(e)}")
 
 def list_all_outputs(generation_history):
     directory_path = FILE_DIR_PATH
-    files_in_directory = os.listdir(directory_path)
+    files_in_directory = os.listdir(directory_path)
     wav_files = [os.path.join(directory_path, file) for file in files_in_directory if file.endswith('.mp4')]
     wav_files.sort(key=lambda x: os.path.getmtime(os.path.join(directory_path, x)), reverse=True)
     history_list = generation_history.split(',') if generation_history else []
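The added `generate_video` body above stops at writing a placeholder file where the removed code submitted a generation job and polled it. A condensed sketch of that submit-then-poll pattern, assuming a hypothetical endpoint and task schema (only `progress_pct`, `failure_reason`, and `generations[0]["url"]` come from the removed code):

```python
import time
import requests

def poll_generation(url, headers, payload, interval=5, max_attempts=120):
    # Hypothetical endpoint: submit the job, then poll its status until it
    # succeeds, fails, or times out.
    task = requests.post(url, headers=headers, json=payload).json()
    status_url = f"{url}/{task['id']}"  # assumed status route
    for _ in range(max_attempts):
        status = requests.get(status_url, headers=headers).json()
        if status.get("progress_pct"):
            print(f"progress: {status['progress_pct']}")
        if status.get("failure_reason"):
            raise RuntimeError(status["failure_reason"])
        if status.get("generations"):
            return status["generations"][0]["url"]
        time.sleep(interval)
    raise TimeoutError("Video generation took too long")
```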
@@ -187,34 +146,14 @@ def list_all_outputs(generation_history):
     return ','.join(updated_history)
 
 def increase_list_size(list_size):
-    return list_size+PAGE_SIZE
+    return list_size + PAGE_SIZE
 
-def download_video(url, prompt, save_path_text, save_path_video):
-    try:
-        # Send a GET request to the URL
-        print("Starting download...")
-        response = requests.get(url, stream=True)
-        response.raise_for_status()
-
-        with open(save_path_text, "w") as file:
-            file.write(prompt)
-
-        with open(save_path_video, 'wb') as video_file:
-            # Write the content to the file with progress updates
-            for chunk in response.iter_content(chunk_size=2 * 1024 * 1024):
-                if chunk:
-                    video_file.write(chunk)
-
-    except requests.exceptions.RequestException as e:
-        print(f"Error downloading the video: {e}")
-    except IOError as e:
-        print(f"Error saving the file: {e}")
-    return save_path_video, save_path_text
+# Rest of the Gradio interface code remains the same as in the original script
 css = '''
 p, li{font-size: 16px}
 code{font-size: 18px}
 '''
+
 # Create Gradio interface
 with gr.Blocks(css=css) as demo:
     with gr.Tab("Generate with Sora"):
@@ -239,32 +178,29 @@ with gr.Blocks(css=css) as demo:
         with gr.Accordion("Generation gallery"):
             @gr.render(inputs=[generation_history, list_size])
             def show_output_list(generation_history, list_size):
-                        <video controls width="100%">
-                        <source src="{generation}" type="video/mp4" />
-                        </video>
-                    ''')
+                try:
+                    metadata_path = hf_hub_download(
+                        repo_id=REPO_ID,
+                        filename=f"train/metadata.csv",
+                        repo_type="dataset"
+                    )
+                    existing_metadata = pd.read_csv(metadata_path)
+                    for index, generation_list in existing_metadata.iloc[-list_size:][::-1].iterrows():
+                        generation_prompt = generation_list['prompt']
+                        generation = generation_list['original_url']
+                        with gr.Group():
+                            gr.Markdown(value=f"### {generation_prompt}")
+                            gr.HTML(f'''
+                                <video controls width="100%">
+                                <source src="{generation}" type="video/mp4" />
+                                </video>
+                            ''')
+                except Exception as e:
+                    gr.Markdown(f"Error loading gallery: {str(e)}")
+
             load_more = gr.Button("Load more")
             load_more.click(fn=increase_list_size, inputs=list_size, outputs=list_size)
+
     with gr.Tab("Open letter: why are we doing this?"):
         gr.Markdown('''# ββ©β(β£_β’)ββ©β DEAR CORPORATE AI OVERLORDS ββ©β(β£_β’)ββ©β
 
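The gallery above now reads straight from the dataset's `train/metadata.csv` instead of from local files. Given the columns used here and in `append_videos_to_dataset`, that file is assumed to hold one row per uploaded clip, roughly:

```python
import pandas as pd

# Assumed layout of train/metadata.csv (values are illustrative only).
metadata = pd.DataFrame({
    "file_name": ["output_0a1b2c.mp4"],
    "prompt": ["example prompt text"],
    "original_url": ["https://example.com/placeholder_video"],
})
metadata.to_csv("metadata.csv", index=False)
```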
@@ -289,8 +225,7 @@ We are not against the use of AI technology as a tool for the arts (if we were,
 
 Open Source video generation tools allow artists to experiment with the avant garde free from gate keeping, commercial interests or serving as PR to any corporation. We also invite artists to train their own models with their own datasets.
 
-Some open source video tools available are:
-Open Source video generation tools allow artists to experiment with avant garde tools without gate keeping, commercial interests or serving as a PR to any corporation. Some open source video tools available are:
+Some open source video generation tools allow artists to experiment with avant garde tools without gate keeping, commercial interests or serving as a PR to any corporation. Some open source video tools available are:
 - [CogVideoX](https://huggingface.co/collections/THUDM/cogvideo-66c08e62f1685a3ade464cce)
 - [Mochi 1](https://huggingface.co/genmo/mochi-1-preview)
 - [LTX Video](https://huggingface.co/Lightricks/LTX-Video)
@@ -303,16 +238,18 @@ Enjoy,
 some sora-alpha-artists
 
 ''', elem_id="manifesto")
+
     generate_button.click(
         fn=generate_video,
         inputs=[prompt_input, size, duration, generation_history],
         outputs=[output, generation_history, generated_prompt],
         concurrency_limit=4
     )
+
     timer = gr.Timer(value=2)
     timer.tick(fn=list_all_outputs, inputs=[generation_history], outputs=[generation_history])
     demo.load(fn=list_all_outputs, inputs=[generation_history], outputs=[generation_history])
-
+
 # Launch the app
 if __name__ == "__main__":
     demo.launch(ssr_mode=False)
|