AiActivity committed on
Commit
1894745
1 Parent(s): 9f53ace

Upload 3 files

Files changed (3)
  1. app_hf.py +28 -0
  2. app_ltx_video.py +14 -0
  3. app_together.py +51 -0
app_hf.py ADDED
@@ -0,0 +1,28 @@
+from utils import get_app
+
+demo = get_app(
+    models=[
+        "Qwen/Qwen2.5-Coder-32B-Instruct",
+        "Qwen/Qwen2.5-72B-Instruct",
+        "meta-llama/Llama-3.1-70B-Instruct",
+        "mistralai/Mixtral-8x7B-Instruct-v0.1",
+        "meta-llama/Llama-3.1-8B-Instruct",
+        "google/gemma-2-9b-it",
+        "mistralai/Mistral-7B-v0.1",
+        "meta-llama/Llama-2-7b-chat-hf",
+        "meta-llama/Llama-3.2-3B-Instruct",
+        "meta-llama/Llama-3.2-1B-Instruct",
+        "Qwen/Qwen2.5-1.5B-Instruct",
+        "microsoft/Phi-3.5-mini-instruct",
+        "HuggingFaceTB/SmolLM2-1.7B-Instruct",
+        "google/gemma-2-2b-it",
+        "meta-llama/Llama-3.2-3B",
+        "meta-llama/Llama-3.2-1B",
+        "openai-community/gpt2",
+    ],
+    default_model="HuggingFaceTB/SmolLM2-1.7B-Instruct",
+    src="models",
+)
+
+if __name__ == "__main__":
+    demo.launch()
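
The helper get_app is defined in utils.py, which is not part of this commit, so the sketch below is only an assumption of how it might behave: with src="models", each entry can be loaded through Gradio's gr.load, which pulls the hosted Hugging Face model behind a ready-made interface.

import gradio as gr

# Hypothetical sketch of utils.get_app; the actual implementation is not shown in this diff.
def get_app(models, default_model, src="models", **load_kwargs):
    # Put the default model's tab first so it is the one shown on open.
    ordered = [default_model] + [m for m in models if m != default_model]
    with gr.Blocks() as demo:
        for model in ordered:
            with gr.Tab(model):
                # src="models" makes gr.load fetch the hosted Hugging Face model.
                gr.load(name=model, src=src, **load_kwargs)
    return demo
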
app_ltx_video.py ADDED
@@ -0,0 +1,14 @@
+import os
+import fal_gradio
+from utils import get_app
+
+demo = get_app(
+    models=[
+        "fal-ai/ltx-video",
+    ],
+    default_model="fal-ai/ltx-video",
+    src=fal_gradio.registry,
+)
+
+if __name__ == "__main__":
+    demo.launch()
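
app_ltx_video.py follows the provider-gradio registry pattern: src is a registry callable rather than the string "models". Assuming get_app ultimately delegates to gr.load, the single-model case presumably reduces to the sketch below; a FAL_KEY environment variable is typically expected for authenticated fal.ai calls.

import gradio as gr
import fal_gradio

# Sketch of the direct gr.load equivalent (assumes get_app delegates to gr.load).
demo = gr.load(
    name="fal-ai/ltx-video",   # video-generation model hosted on fal.ai
    src=fal_gradio.registry,   # registry callable that builds the interface
)

if __name__ == "__main__":
    demo.launch()
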
app_together.py ADDED
@@ -0,0 +1,51 @@
+import os
+
+import together_gradio
+
+from utils import get_app
+
+demo = get_app(
+    models=[
+        "meta-llama/Llama-Vision-Free",
+        "meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo",
+        "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo",
+        "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
+        "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
+        "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",
+        "meta-llama/Meta-Llama-3-8B-Instruct-Turbo",
+        "meta-llama/Meta-Llama-3-70B-Instruct-Turbo",
+        "meta-llama/Llama-3.2-3B-Instruct-Turbo",
+        "meta-llama/Meta-Llama-3-8B-Instruct-Lite",
+        "meta-llama/Meta-Llama-3-70B-Instruct-Lite",
+        "meta-llama/Llama-3-8b-chat-hf",
+        "meta-llama/Llama-3-70b-chat-hf",
+        "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
+        "Qwen/Qwen2.5-Coder-32B-Instruct",
+        "microsoft/WizardLM-2-8x22B",
+        "google/gemma-2-27b-it",
+        "google/gemma-2-9b-it",
+        "databricks/dbrx-instruct",
+        "mistralai/Mixtral-8x7B-Instruct-v0.1",
+        "mistralai/Mixtral-8x22B-Instruct-v0.1",
+        "Qwen/Qwen2.5-7B-Instruct-Turbo",
+        "Qwen/Qwen2.5-72B-Instruct-Turbo",
+        "Qwen/Qwen2-72B-Instruct",
+        "deepseek-ai/deepseek-llm-67b-chat",
+        "google/gemma-2b-it",
+        "Gryphe/MythoMax-L2-13b",
+        "meta-llama/Llama-2-13b-chat-hf",
+        "mistralai/Mistral-7B-Instruct-v0.1",
+        "mistralai/Mistral-7B-Instruct-v0.2",
+        "mistralai/Mistral-7B-Instruct-v0.3",
+        "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
+        "togethercomputer/StripedHyena-Nous-7B",
+        "upstage/SOLAR-10.7B-Instruct-v1.0",
+    ],
+    default_model="meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo",
+    src=together_gradio.registry,
+    accept_token=not os.getenv("TOGETHER_API_KEY"),
+    multimodal=True,
+)
+
+if __name__ == "__main__":
+    demo.launch()
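
app_together.py adds two flags: accept_token=not os.getenv("TOGETHER_API_KEY") asks the user for a key in the UI only when the environment variable is absent, and multimodal=True matches the default vision model, which takes image plus text input. A single-model sketch, assuming get_app forwards these keyword arguments through gr.load to the Together registry:

import os

import gradio as gr
import together_gradio

# Sketch only: the keyword arguments below are assumed to be forwarded to the registry.
demo = gr.load(
    name="meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo",
    src=together_gradio.registry,
    accept_token=not os.getenv("TOGETHER_API_KEY"),  # prompt for a key only if unset
    multimodal=True,  # image + text input for the vision model
)

if __name__ == "__main__":
    demo.launch()
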