Spaces:
Sleeping
Sleeping
update demo
Browse files- Dockerfile +2 -2
- start.py +1 -1
Dockerfile
CHANGED
@@ -67,5 +67,5 @@ RUN pip install -r requirements.txt
|
|
67 |
|
68 |
RUN ls -lh
|
69 |
|
70 |
-
CMD ["bash", "start.sh"]
|
71 |
-
|
|
|
67 |
|
68 |
RUN ls -lh
|
69 |
|
70 |
+
# CMD ["bash", "start.sh"]
|
71 |
+
CMD ["python", "start.py"]
|
start.py
CHANGED
@@ -1,7 +1,7 @@
|
|
1 |
import subprocess
|
2 |
|
3 |
if __name__ == '__main__':
|
4 |
-
backend_comand = ['python3', 'gradio_demo/seed_llama_flask.py', '--image_transform', 'configs/transform/clip_transform.yaml', '--tokenizer', 'configs/tokenizer/
|
5 |
|
6 |
frontend_comand = ['python3', 'gradio_demo/seed_llama_gradio.py', '--server_port', '7860', '--request_address', 'http://127.0.0.1:7890/generate', '--model_type', 'seed-llama-14b']
|
7 |
|
|
|
1 |
import subprocess
|
2 |
|
3 |
if __name__ == '__main__':
|
4 |
+
backend_comand = ['python3', 'gradio_demo/seed_llama_flask.py', '--image_transform', 'configs/transform/clip_transform.yaml', '--tokenizer', 'configs/tokenizer/seed_llama_tokenizer_hf.yaml', '--model', 'configs/llm/seed_llama_14b_8bit.yaml', '--port', '7890', '--llm_device', 'cuda:0', '--tokenizer_device', 'cuda:0', '--offload_encoder', '--offload_decoder']
|
5 |
|
6 |
frontend_comand = ['python3', 'gradio_demo/seed_llama_gradio.py', '--server_port', '7860', '--request_address', 'http://127.0.0.1:7890/generate', '--model_type', 'seed-llama-14b']
|
7 |
|