LoRA weights
Files added:
- adapter_config.json +18 -0
- adapter_model.bin +3 -0
- export_hf_checkpoint.py +60 -0
- export_state_dict_checkpoint.py +122 -0
- generate.py +161 -0
adapter_config.json
ADDED
@@ -0,0 +1,18 @@
{
    "base_model_name_or_path": "decapoda-research/llama-7b-hf",
    "bias": "none",
    "enable_lora": null,
    "fan_in_fan_out": false,
    "inference_mode": true,
    "lora_alpha": 16,
    "lora_dropout": 0.05,
    "merge_weights": false,
    "modules_to_save": null,
    "peft_type": "LORA",
    "r": 8,
    "target_modules": [
        "q_proj",
        "v_proj"
    ],
    "task_type": "CAUSAL_LM"
}
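This config describes a rank-8 LoRA adapter (scaling alpha 16, dropout 0.05, inference mode) targeting the q_proj and v_proj attention projections of LLaMA-7B. As a minimal sketch of how PEFT consumes these two files, assuming "./alpaca-lora-7b" is a hypothetical local copy of this repo:

import torch
from peft import PeftModel
from transformers import LlamaForCausalLM

# PeftModel reads adapter_config.json and adapter_model.bin from the
# given directory (or Hub repo id) and attaches the LoRA layers.
base = LlamaForCausalLM.from_pretrained(
    "decapoda-research/llama-7b-hf", torch_dtype=torch.float16
)
model = PeftModel.from_pretrained(base, "./alpaca-lora-7b")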
adapter_model.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:013bd82a9a0cb574fd7491687d86a9af096a7fa597499d7d32cadb11a69a3afa
size 16822989
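adapter_model.bin is stored as a Git LFS pointer: the repo tracks only the blob's sha256 (oid) and byte size, and git lfs pull fetches the actual ~16 MB weight file. A quick integrity check against the pointer's values, as a sketch once the blob has been pulled:

import hashlib
import os

# Compare the downloaded blob against the pointer's oid and size above.
path = "adapter_model.bin"
expected = "013bd82a9a0cb574fd7491687d86a9af096a7fa597499d7d32cadb11a69a3afa"
assert os.path.getsize(path) == 16822989
with open(path, "rb") as f:
    assert hashlib.sha256(f.read()).hexdigest() == expected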
export_hf_checkpoint.py
ADDED
@@ -0,0 +1,60 @@
import os

import torch
from peft import PeftModel

import transformers

assert (
    "LlamaTokenizer" in transformers._import_structure["models.llama"]
), "LLaMA is now in HuggingFace's main branch.\nPlease reinstall it: pip uninstall transformers && pip install git+https://github.com/huggingface/transformers.git"
from transformers import LlamaTokenizer, LlamaForCausalLM


LORA_WEIGHTS = os.environ.get("LORA_WEIGHTS", "tloen/alpaca-lora-7b")
OUTPUT_DIR = os.environ.get("OUTPUT_DIR", "./hf_ckpt")

tokenizer = LlamaTokenizer.from_pretrained("decapoda-research/llama-7b-hf")

base_model = LlamaForCausalLM.from_pretrained(
    "decapoda-research/llama-7b-hf",
    load_in_8bit=False,
    torch_dtype=torch.float16,
    device_map={"": "cpu"},
)

first_weight = base_model.model.layers[0].self_attn.q_proj.weight
first_weight_old = first_weight.clone()

lora_model = PeftModel.from_pretrained(
    base_model,
    LORA_WEIGHTS,
    device_map={"": "cpu"},
    torch_dtype=torch.float16,
)

lora_weight = lora_model.base_model.model.model.layers[0].self_attn.q_proj.weight

# loading the adapter must not modify the base weights yet
assert torch.allclose(first_weight_old, first_weight)

# merge weights
for layer in lora_model.base_model.model.model.layers:
    layer.self_attn.q_proj.merge_weights = True
    layer.self_attn.v_proj.merge_weights = True

# switching to eval mode triggers the merge on the layers flagged above
lora_model.train(False)

# did we do anything?
assert not torch.allclose(first_weight_old, first_weight)

# strip the PEFT wrapper prefix and drop the (now merged) LoRA tensors
lora_model_sd = lora_model.state_dict()
deloreanized_sd = {
    k.replace("base_model.model.", ""): v
    for k, v in lora_model_sd.items()
    if "lora" not in k
}

LlamaForCausalLM.save_pretrained(
    base_model, OUTPUT_DIR, state_dict=deloreanized_sd, max_shard_size="400MB"
)
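The exported directory is a plain Hugging Face checkpoint with the LoRA deltas folded into the base weights, so it loads without peft installed. A minimal sketch (the script does not save the tokenizer, so that still comes from the base repo):

import torch
from transformers import LlamaForCausalLM, LlamaTokenizer

# Load the merged checkpoint produced by export_hf_checkpoint.py.
model = LlamaForCausalLM.from_pretrained("./hf_ckpt", torch_dtype=torch.float16)
tokenizer = LlamaTokenizer.from_pretrained("decapoda-research/llama-7b-hf")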
export_state_dict_checkpoint.py
ADDED
@@ -0,0 +1,122 @@
import os
import json

import torch
from peft import PeftModel

import transformers

assert (
    "LlamaTokenizer" in transformers._import_structure["models.llama"]
), "LLaMA is now in HuggingFace's main branch.\nPlease reinstall it: pip uninstall transformers && pip install git+https://github.com/huggingface/transformers.git"
from transformers import LlamaTokenizer, LlamaForCausalLM

tokenizer = LlamaTokenizer.from_pretrained("decapoda-research/llama-7b-hf")

LORA_WEIGHTS = os.environ.get("LORA_WEIGHTS", "tloen/alpaca-lora-7b")
OUTPUT_DIR = os.environ.get("OUTPUT_DIR", "./ckpt")

base_model = LlamaForCausalLM.from_pretrained(
    "decapoda-research/llama-7b-hf",
    load_in_8bit=False,
    torch_dtype=torch.float16,
    device_map={"": "cpu"},
)

lora_model = PeftModel.from_pretrained(
    base_model,
    LORA_WEIGHTS,
    device_map={"": "cpu"},
    torch_dtype=torch.float16,
)

# merge weights
for layer in lora_model.base_model.model.model.layers:
    layer.self_attn.q_proj.merge_weights = True
    layer.self_attn.v_proj.merge_weights = True

lora_model.train(False)

lora_model_sd = lora_model.state_dict()

# hyperparameters of the original 7B LLaMA checkpoint
params = {
    "dim": 4096,
    "multiple_of": 256,
    "n_heads": 32,
    "n_layers": 32,
    "norm_eps": 1e-06,
    "vocab_size": -1,
}
n_layers = params["n_layers"]
n_heads = params["n_heads"]
dim = params["dim"]
dims_per_head = dim // n_heads
base = 10000.0
inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))


def permute(w):
    return (
        w.view(n_heads, dim // n_heads // 2, 2, dim).transpose(1, 2).reshape(dim, dim)
    )


def unpermute(w):
    return (
        w.view(n_heads, 2, dim // n_heads // 2, dim).transpose(1, 2).reshape(dim, dim)
    )


def translate_state_dict_key(k):
    k = k.replace("base_model.model.", "")
    if k == "model.embed_tokens.weight":
        return "tok_embeddings.weight"
    elif k == "model.norm.weight":
        return "norm.weight"
    elif k == "lm_head.weight":
        return "output.weight"
    elif k.startswith("model.layers."):
        layer = k.split(".")[2]
        if k.endswith(".self_attn.q_proj.weight"):
            return f"layers.{layer}.attention.wq.weight"
        elif k.endswith(".self_attn.k_proj.weight"):
            return f"layers.{layer}.attention.wk.weight"
        elif k.endswith(".self_attn.v_proj.weight"):
            return f"layers.{layer}.attention.wv.weight"
        elif k.endswith(".self_attn.o_proj.weight"):
            return f"layers.{layer}.attention.wo.weight"
        elif k.endswith(".mlp.gate_proj.weight"):
            return f"layers.{layer}.feed_forward.w1.weight"
        elif k.endswith(".mlp.down_proj.weight"):
            return f"layers.{layer}.feed_forward.w2.weight"
        elif k.endswith(".mlp.up_proj.weight"):
            return f"layers.{layer}.feed_forward.w3.weight"
        elif k.endswith(".input_layernorm.weight"):
            return f"layers.{layer}.attention_norm.weight"
        elif k.endswith(".post_attention_layernorm.weight"):
            return f"layers.{layer}.ffn_norm.weight"
        elif k.endswith("rotary_emb.inv_freq") or "lora" in k:
            return None
        else:
            print(layer, k)
            raise NotImplementedError
    else:
        print(k)
        raise NotImplementedError


new_state_dict = {}
for k, v in lora_model_sd.items():
    new_k = translate_state_dict_key(k)
    if new_k is not None:
        # HF stores q/k projections in a layout permuted for its rotary
        # embeddings; undo it for the original LLaMA format
        if "wq" in new_k or "wk" in new_k:
            new_state_dict[new_k] = unpermute(v)
        else:
            new_state_dict[new_k] = v

os.makedirs(OUTPUT_DIR, exist_ok=True)

torch.save(new_state_dict, OUTPUT_DIR + "/consolidated.00.pth")

with open(OUTPUT_DIR + "/params.json", "w") as f:
    json.dump(params, f)
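Only unpermute is applied here: the Hugging Face conversion permutes wq/wk for its rotary-embedding implementation, and unpermute restores Meta's original ordering (permute is its inverse, kept for reference). A quick property check, as a sketch reusing the two functions defined above:

import torch

# permute and unpermute are inverse bijections on (dim, dim) matrices.
w = torch.randn(4096, 4096)
assert torch.equal(unpermute(permute(w)), w)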
generate.py
ADDED
@@ -0,0 +1,161 @@
import os
import torch
from peft import PeftModel
import transformers
import gradio as gr

assert (
    "LlamaTokenizer" in transformers._import_structure["models.llama"]
), "LLaMA is now in HuggingFace's main branch.\nPlease reinstall it: pip uninstall transformers && pip install git+https://github.com/huggingface/transformers.git"
from transformers import LlamaTokenizer, LlamaForCausalLM, GenerationConfig

tokenizer = LlamaTokenizer.from_pretrained("decapoda-research/llama-7b-hf")

BASE_MODEL = "decapoda-research/llama-7b-hf"
LORA_WEIGHTS = os.environ.get("LORA_WEIGHTS", "tloen/alpaca-lora-7b")

if torch.cuda.is_available():
    device = "cuda"
else:
    device = "cpu"

try:
    if torch.backends.mps.is_available():
        device = "mps"
except Exception:  # older torch builds lack the mps backend
    pass

if device == "cuda":
    model = LlamaForCausalLM.from_pretrained(
        BASE_MODEL,
        load_in_8bit=True,
        torch_dtype=torch.float16,
        device_map="auto",
    )
    model = PeftModel.from_pretrained(model, LORA_WEIGHTS, torch_dtype=torch.float16)
elif device == "mps":
    model = LlamaForCausalLM.from_pretrained(
        BASE_MODEL,
        device_map={"": device},
        torch_dtype=torch.float16,
    )
    model = PeftModel.from_pretrained(
        model,
        LORA_WEIGHTS,
        device_map={"": device},
        torch_dtype=torch.float16,
    )
else:
    model = LlamaForCausalLM.from_pretrained(
        BASE_MODEL, device_map={"": device}, low_cpu_mem_usage=True
    )
    model = PeftModel.from_pretrained(
        model,
        LORA_WEIGHTS,
        device_map={"": device},
    )


def generate_prompt(instruction, input=None):
    if input:
        return f"""Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.

### Instruction:
{instruction}

### Input:
{input}

### Response:"""
    else:
        return f"""Below is an instruction that describes a task. Write a response that appropriately completes the request.

### Instruction:
{instruction}

### Response:"""


model.eval()
if torch.__version__ >= "2":
    model = torch.compile(model)


def evaluate(
    instruction,
    input=None,
    temperature=0.1,
    top_p=0.75,
    top_k=40,
    num_beams=4,
    max_new_tokens=128,
    **kwargs,
):
    prompt = generate_prompt(instruction, input)
    inputs = tokenizer(prompt, return_tensors="pt")
    input_ids = inputs["input_ids"].to(device)
    generation_config = GenerationConfig(
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
        num_beams=num_beams,
        **kwargs,
    )
    with torch.no_grad():
        generation_output = model.generate(
            input_ids=input_ids,
            generation_config=generation_config,
            return_dict_in_generate=True,
            output_scores=True,
            max_new_tokens=max_new_tokens,
        )
    s = generation_output.sequences[0]
    output = tokenizer.decode(s)
    return output.split("### Response:")[1].strip()


gr.Interface(
    fn=evaluate,
    inputs=[
        gr.components.Textbox(
            lines=2, label="Instruction", placeholder="Tell me about alpacas."
        ),
        gr.components.Textbox(lines=2, label="Input", placeholder="none"),
        gr.components.Slider(minimum=0, maximum=1, value=0.1, label="Temperature"),
        gr.components.Slider(minimum=0, maximum=1, value=0.75, label="Top p"),
        gr.components.Slider(minimum=0, maximum=100, step=1, value=40, label="Top k"),
        gr.components.Slider(minimum=1, maximum=4, step=1, value=4, label="Beams"),
        gr.components.Slider(
            minimum=1, maximum=2000, step=1, value=128, label="Max tokens"
        ),
    ],
    outputs=[
        gr.components.Textbox(
            lines=5,
            label="Output",
        )
    ],
    title="🦙🌲 Alpaca-LoRA",
    description="Alpaca-LoRA is a 7B-parameter LLaMA model finetuned to follow instructions. It is trained on the [Stanford Alpaca](https://github.com/tatsu-lab/stanford_alpaca) dataset and makes use of the Huggingface LLaMA implementation. For more information, please visit [the project's website](https://github.com/tloen/alpaca-lora).",
).launch(share=True)

# Old testing code follows.

"""
if __name__ == "__main__":
    # testing code for readme
    for instruction in [
        "Tell me about alpacas.",
        "Tell me about the president of Mexico in 2019.",
        "Tell me about the king of France in 2019.",
        "List all Canadian provinces in alphabetical order.",
        "Write a Python program that prints the first 10 Fibonacci numbers.",
        "Write a program that prints the numbers from 1 to 100. But for multiples of three print 'Fizz' instead of the number and for the multiples of five print 'Buzz'. For numbers which are multiples of both three and five print 'FizzBuzz'.",
        "Tell me five words that rhyme with 'shock'.",
        "Translate the sentence 'I have no mouth but I must scream' into Spanish.",
        "Count up from 1 to 500.",
    ]:
        print("Instruction:", instruction)
        print("Response:", evaluate(instruction))
        print()
"""
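Since evaluate forwards extra keyword arguments into GenerationConfig, it can also be driven headlessly, much like the commented-out testing loop above. A sketch, assuming the model setup in this script has already run:

# Generation parameters can be overridden per call; extra kwargs
# (e.g. repetition_penalty) pass through to GenerationConfig.
answer = evaluate(
    "List all Canadian provinces in alphabetical order.",
    temperature=0.2,
    num_beams=1,
    max_new_tokens=256,
)
print(answer)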