wuxiaojun committed on
Commit
d9c12fa
1 Parent(s): de1d7f1

Specify BNB_CUDA_VERSION in bitsandbytes

Browse files
Files changed (2) hide show
  1. app.py +1 -0
  2. launch.py +5 -1
app.py CHANGED
@@ -2,5 +2,6 @@ import os
2
 
3
  os.system(
4
  f"git clone https://github.com/TimDettmers/bitsandbytes.git /home/user/app/bitsandbytes")
 
5
  os.system(f"cd /home/user/app/bitsandbytes && CUDA_VERSION=113 make cuda11x && python setup.py install")
6
  os.system(f"python /home/user/app/launch.py")
 
2
 
3
  os.system(
4
  f"git clone https://github.com/TimDettmers/bitsandbytes.git /home/user/app/bitsandbytes")
5
+ os.system(f"export BNB_CUDA_VERSION=113")
6
  os.system(f"cd /home/user/app/bitsandbytes && CUDA_VERSION=113 make cuda11x && python setup.py install")
7
  os.system(f"python /home/user/app/launch.py")
launch.py CHANGED
@@ -28,6 +28,7 @@ def is_chinese(text):
28
  AUTH_TOKEN = os.getenv("AUTH_TOKEN")
29
 
30
  LM_MODEL_PATH = "wuxiaojun/Ziya-LLaMA-13B-v1"
 
31
  lm_model = LlamaForCausalLM.from_pretrained(
32
  LM_MODEL_PATH,
33
  device_map="auto",
@@ -36,6 +37,7 @@ lm_model = LlamaForCausalLM.from_pretrained(
36
  quantization_config=BitsAndBytesConfig(load_in_4bit=True))
37
 
38
  TOKENIZER_PATH = "IDEA-CCNL/Ziya-LLaMA-13B-v1"
 
39
  # tokenizer = LlamaTokenizer.from_pretrained(LM_MODEL_PATH, use_auth_token=AUTH_TOKEN)
40
  tokenizer = LlamaTokenizer.from_pretrained(TOKENIZER_PATH)
41
 
@@ -43,8 +45,10 @@ tokenizer = LlamaTokenizer.from_pretrained(TOKENIZER_PATH)
43
  OPENAI_CLIP_MEAN = [0.48145466, 0.4578275, 0.40821073]
44
  OPENAI_CLIP_STD = [0.26862954, 0.26130258, 0.27577711]
45
  # demo.py is in the project path, so we can use local path ".". Otherwise you should use "IDEA-CCNL/Ziya-BLIP2-14B-Visual-v1"
 
 
46
  model = AutoModelForCausalLM.from_pretrained(
47
- "IDEA-CCNL/Ziya-BLIP2-14B-Visual-v1",
48
  trust_remote_code=True, use_auth_token=AUTH_TOKEN,
49
  torch_dtype=torch.float16)
50
  model.cuda() # if you use on cpu, comment this line
 
28
  AUTH_TOKEN = os.getenv("AUTH_TOKEN")
29
 
30
  LM_MODEL_PATH = "wuxiaojun/Ziya-LLaMA-13B-v1"
31
+ # LM_MODEL_PATH = "/cognitive_comp/wuxiaojun/pretrained/pytorch/huggingface/Ziya-LLaMA-13B-v1"
32
  lm_model = LlamaForCausalLM.from_pretrained(
33
  LM_MODEL_PATH,
34
  device_map="auto",
 
37
  quantization_config=BitsAndBytesConfig(load_in_4bit=True))
38
 
39
  TOKENIZER_PATH = "IDEA-CCNL/Ziya-LLaMA-13B-v1"
40
+ # TOKENIZER_PATH = "/cognitive_comp/wuxiaojun/pretrained/pytorch/huggingface/Ziya-LLaMA-13B-v1"
41
  # tokenizer = LlamaTokenizer.from_pretrained(LM_MODEL_PATH, use_auth_token=AUTH_TOKEN)
42
  tokenizer = LlamaTokenizer.from_pretrained(TOKENIZER_PATH)
43
 
 
45
  OPENAI_CLIP_MEAN = [0.48145466, 0.4578275, 0.40821073]
46
  OPENAI_CLIP_STD = [0.26862954, 0.26130258, 0.27577711]
47
  # demo.py is in the project path, so we can use local path ".". Otherwise you should use "IDEA-CCNL/Ziya-BLIP2-14B-Visual-v1"
48
+ visual_model_path = "IDEA-CCNL/Ziya-BLIP2-14B-Visual-v1"
49
+ # visual_model_path = "/cognitive_comp/wuxiaojun/pretrained/pytorch/huggingface/Ziya-BLIP2-14B-Visual-v1"
50
  model = AutoModelForCausalLM.from_pretrained(
51
+ visual_model_path,
52
  trust_remote_code=True, use_auth_token=AUTH_TOKEN,
53
  torch_dtype=torch.float16)
54
  model.cuda() # if you use on cpu, comment this line