masanorihirano committed on
Commit
135325d
1 Parent(s): 0e9fa83
Files changed (3) hide show
  1. Dockerfile +1 -0
  2. app.py +3 -6
  3. model_pull.py +2 -7
Dockerfile CHANGED
@@ -38,4 +38,5 @@ RUN --mount=type=secret,id=HF_TOKEN,mode=0444,required=true \
38
  git config --global credential.helper store && \
39
  huggingface-cli login --token $(cat /run/secrets/HF_TOKEN) --add-to-git-credential
40
  RUN poetry run python model_pull.py
 
41
  ENTRYPOINT ["/home/user/.local/bin/poetry", "run", "python", "app.py", "--host", "0.0.0.0", "--port", "7860"]
 
38
  git config --global credential.helper store && \
39
  huggingface-cli login --token $(cat /run/secrets/HF_TOKEN) --add-to-git-credential
40
  RUN poetry run python model_pull.py
41
+ EXPOSE 7860
42
  ENTRYPOINT ["/home/user/.local/bin/poetry", "run", "python", "app.py", "--host", "0.0.0.0", "--port", "7860"]
app.py CHANGED
@@ -2,17 +2,13 @@ from typing import Optional
2
 
3
  import gradio as gr
4
  import torch
5
- import transformers
6
  from peft import PeftModel
7
  from transformers import GenerationConfig
 
 
8
 
9
  print("starting server ...")
10
 
11
- assert (
12
- "LlamaTokenizer" in transformers._import_structure["models.llama"]
13
- ), "LLaMA is now in HuggingFace's main branch.\nPlease reinstall it: pip uninstall transformers && pip install git+https://github.com/huggingface/transformers.git"
14
- from transformers import LlamaForCausalLM # noqa
15
- from transformers import LlamaTokenizer # noqa
16
 
17
  BASE_MODEL = "decapoda-research/llama-13b-hf"
18
  LORA_WEIGHTS = "izumi-lab/llama-13b-japanese-lora-v0-1ep"
@@ -139,4 +135,5 @@ g = gr.Interface(
139
  description="izumi-lab/calm-7b-lora-v0-1ep is a 7B-parameter Calm model finetuned to follow instructions. It is trained on the [izumi-lab/llm-japanese-dataset](https://huggingface.co/datasets/izumi-lab/llm-japanese-dataset) dataset and makes use of the Huggingface Calm-7b implementation. For more information, please visit [the project's website](https://llm.msuzuki.me).",
140
  )
141
  g.queue(concurrency_count=1)
 
142
  g.launch()
 
2
 
3
  import gradio as gr
4
  import torch
 
5
  from peft import PeftModel
6
  from transformers import GenerationConfig
7
+ from transformers import LlamaForCausalLM
8
+ from transformers import LlamaTokenizer
9
 
10
  print("starting server ...")
11
 
 
 
 
 
 
12
 
13
  BASE_MODEL = "decapoda-research/llama-13b-hf"
14
  LORA_WEIGHTS = "izumi-lab/llama-13b-japanese-lora-v0-1ep"
 
135
  description="izumi-lab/calm-7b-lora-v0-1ep is a 7B-parameter Calm model finetuned to follow instructions. It is trained on the [izumi-lab/llm-japanese-dataset](https://huggingface.co/datasets/izumi-lab/llm-japanese-dataset) dataset and makes use of the Huggingface Calm-7b implementation. For more information, please visit [the project's website](https://llm.msuzuki.me).",
136
  )
137
  g.queue(concurrency_count=1)
138
+ print("loading completed")
139
  g.launch()
model_pull.py CHANGED
@@ -1,12 +1,7 @@
1
  import torch
2
- import transformers
3
  from peft import PeftModel
4
-
5
- assert (
6
- "LlamaTokenizer" in transformers._import_structure["models.llama"]
7
- ), "LLaMA is now in HuggingFace's main branch.\nPlease reinstall it: pip uninstall transformers && pip install git+https://github.com/huggingface/transformers.git"
8
- from transformers import LlamaForCausalLM # noqa
9
- from transformers import LlamaTokenizer # noqa
10
 
11
  BASE_MODEL = "decapoda-research/llama-13b-hf"
12
  LORA_WEIGHTS = "izumi-lab/llama-13b-japanese-lora-v0-1ep"
 
1
  import torch
 
2
  from peft import PeftModel
3
+ from transformers import LlamaForCausalLM
4
+ from transformers import LlamaTokenizer
 
 
 
 
5
 
6
  BASE_MODEL = "decapoda-research/llama-13b-hf"
7
  LORA_WEIGHTS = "izumi-lab/llama-13b-japanese-lora-v0-1ep"