tricktreat committed · Commit cad9141 · 1 Parent(s): c6067f0

local models update

- models_server.py +22 -22
models_server.py CHANGED
@@ -156,20 +156,20 @@ def load_pipes(local_deployment):
 
     if local_deployment in ["full", "standard"]:
         standard_pipes = {
-            "nlpconnect/vit-gpt2-image-captioning":{
-                "model": VisionEncoderDecoderModel.from_pretrained(f"{local_models}nlpconnect/vit-gpt2-image-captioning"),
-                "feature_extractor": ViTImageProcessor.from_pretrained(f"{local_models}nlpconnect/vit-gpt2-image-captioning"),
-                "tokenizer": AutoTokenizer.from_pretrained(f"{local_models}nlpconnect/vit-gpt2-image-captioning"),
-                "device": "cuda:0"
-            },
-            "espnet/kan-bayashi_ljspeech_vits": {
-                "model": Text2Speech.from_pretrained("espnet/kan-bayashi_ljspeech_vits"),
-                "device": "cuda:0"
-            },
-            "lambdalabs/sd-image-variations-diffusers": {
-                "model": DiffusionPipeline.from_pretrained(f"{local_models}lambdalabs/sd-image-variations-diffusers"), #torch_dtype=torch.float16
-                "device": "cuda:0"
-            },
+            # "nlpconnect/vit-gpt2-image-captioning":{
+            #     "model": VisionEncoderDecoderModel.from_pretrained(f"{local_models}nlpconnect/vit-gpt2-image-captioning"),
+            #     "feature_extractor": ViTImageProcessor.from_pretrained(f"{local_models}nlpconnect/vit-gpt2-image-captioning"),
+            #     "tokenizer": AutoTokenizer.from_pretrained(f"{local_models}nlpconnect/vit-gpt2-image-captioning"),
+            #     "device": "cuda:0"
+            # },
+            # "espnet/kan-bayashi_ljspeech_vits": {
+            #     "model": Text2Speech.from_pretrained("espnet/kan-bayashi_ljspeech_vits"),
+            #     "device": "cuda:0"
+            # },
+            # "lambdalabs/sd-image-variations-diffusers": {
+            #     "model": DiffusionPipeline.from_pretrained(f"{local_models}lambdalabs/sd-image-variations-diffusers"), #torch_dtype=torch.float16
+            #     "device": "cuda:0"
+            # },
             "runwayml/stable-diffusion-v1-5": {
                 "model": DiffusionPipeline.from_pretrained(f"{local_models}runwayml/stable-diffusion-v1-5"),
                 "device": "cuda:0"
@@ -182,10 +182,10 @@ def load_pipes(local_deployment):
                 "model": pipeline(task="automatic-speech-recognition", model=f"{local_models}openai/whisper-base"),
                 "device": "cuda:0"
             },
-            "microsoft/speecht5_asr": {
-                "model": pipeline(task="automatic-speech-recognition", model=f"{local_models}microsoft/speecht5_asr"),
-                "device": "cuda:0"
-            },
+            # "microsoft/speecht5_asr": {
+            #     "model": pipeline(task="automatic-speech-recognition", model=f"{local_models}microsoft/speecht5_asr"),
+            #     "device": "cuda:0"
+            # },
             "Intel/dpt-large": {
                 "model": pipeline(task="depth-estimation", model=f"{local_models}Intel/dpt-large"),
                 "device": "cuda:0"
@@ -206,10 +206,10 @@ def load_pipes(local_deployment):
             # "model": pipeline(task="zero-shot-image-classification", model=f"openai/clip-vit-large-patch14"),
             # "device": "cuda:0"
             # },
-            "google/owlvit-base-patch32": {
-                "model": pipeline(task="zero-shot-object-detection", model=f"{local_models}google/owlvit-base-patch32"),
-                "device": "cuda:0"
-            },
+            # "google/owlvit-base-patch32": {
+            #     "model": pipeline(task="zero-shot-object-detection", model=f"{local_models}google/owlvit-base-patch32"),
+            #     "device": "cuda:0"
+            # },
             # "microsoft/DialoGPT-medium": {
             # "model": pipeline(task="conversational", model=f"microsoft/DialoGPT-medium"),
             # "device": "cuda:0"
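
Every entry in standard_pipes follows the same shape: a loaded model object plus the device string it should run on. Below is a minimal usage sketch for the "runwayml/stable-diffusion-v1-5" entry kept by this commit. It assumes load_pipes() returns the dict shown in the diff and that local_models is a local directory prefix; the variable names pipes and entry, the prompt, and the output path are illustrative, not taken from the repository.

# Minimal sketch (assumptions, not repository code):
# - local_models is assumed to be a local directory prefix such as "models/",
#   so f"{local_models}runwayml/stable-diffusion-v1-5" resolves to a folder on disk.
# - load_pipes() is assumed to return the standard_pipes-style dict above.
pipes = load_pipes("standard")

entry = pipes["runwayml/stable-diffusion-v1-5"]
pipe = entry["model"].to(entry["device"])  # move the diffusion pipeline to cuda:0

# DiffusionPipeline objects are callable; Stable Diffusion exposes results on .images
result = pipe("a watercolor painting of a lighthouse")
result.images[0].save("output.png")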