Jen Ben Arye
committed on
Commit · 04f923c
1 Parent(s): 4eefaf3
Script to generate responses and save them as JSON
- eval/__init__.py +1 -0
- eval/__pycache__/__init__.cpython-312.pyc +0 -0
- eval/__pycache__/evaluate_arguments.cpython-312.pyc +0 -0
- eval/__pycache__/utils.cpython-312.pyc +0 -0
- eval/alpaca.py +0 -0
- eval/bt.py +206 -0
- eval/evaluate.py +185 -0
- eval/evaluate_arguments.py +52 -0
- eval/generate.py +169 -0
- eval/generate_sanity_check.py +119 -0
- eval/kto_generations.json +0 -0
- eval/sft_generations.json +0 -0
- eval/test/gen_examples_idan_mini.json +26 -0
- eval/test/gens_examples_idan.json +0 -0
- eval/utils.py +44 -0
- kto_pipeline.py +4 -10
eval/__init__.py
ADDED
@@ -0,0 +1 @@
+import os, sys; sys.path.append(os.path.dirname(os.path.realpath(__file__)))
eval/__pycache__/__init__.cpython-312.pyc
ADDED
Binary file (411 Bytes).
eval/__pycache__/evaluate_arguments.cpython-312.pyc
ADDED
Binary file (3.46 kB).
eval/__pycache__/utils.cpython-312.pyc
ADDED
Binary file (2.2 kB).
eval/alpaca.py
ADDED
File without changes
eval/bt.py
ADDED
@@ -0,0 +1,206 @@
+import json
+import torch
+from dataclasses import dataclass
+
+####################################
+# SCRIPT ARGUMENTS
+####################################
+
+@dataclass
+class ScriptArguments:
+    """
+    Arguments for the Bradley-Terry evaluation script.
+    """
+    sft_generations_file: str = '/raid/lingo/jen_ben/HF-RLHF/eval/test/gen_examples_idan_mini.json'
+    kto_generations_file: str = '/raid/lingo/jen_ben/HF-RLHF/eval/test/gen_examples_idan_mini.json'
+    output_file: str = 'bt_results_test_mini.json'
+
+
+####################################
+# FUNCTIONS
+####################################
+
+def load_rewards(file_path):
+    """
+    Load the rewards from a JSON file.
+
+    Args:
+        file_path (str): Path to the JSON file containing model generations and rewards.
+
+    Returns:
+        list: List of dictionaries with prompts, outputs, and rewards.
+    """
+    with open(file_path, 'r') as f:
+        return json.load(f)
+
+
+def bradley_terry_comparison(sft_rewards, kto_rewards):
+    """
+    Perform Bradley-Terry comparison between two sets of model generations.
+
+    Args:
+        sft_rewards (list): List of dictionaries for the SFT model's generations and rewards.
+        kto_rewards (list): List of dictionaries for the KTO model's generations and rewards.
+
+    Returns:
+        list: Comparison results including preferred outputs and probabilities.
+        dict: Metrics summary including percentage preferred and average probabilities.
+    """
+    results = []
+    kto_preferred_count = 0
+    sft_preferred_count = 0
+    probabilities = []
+
+    for ix in range(len(sft_rewards)):
+        sft = sft_rewards[ix]
+        kto = kto_rewards[ix]
+
+        # Ensure prompts match
+        assert sft['prompt'] == kto['prompt'], f"ERROR: Prompts at index {ix} do not match."
+
+        # Compute Bradley-Terry probability
+        kto_reward = torch.tensor(kto['reward'], dtype=torch.float32)
+        sft_reward = torch.tensor(sft['reward'], dtype=torch.float32)
+        prob_kto_preferred = torch.sigmoid(kto_reward - sft_reward).item()
+
+        probabilities.append(prob_kto_preferred)
+        preferred_model = 'kto' if prob_kto_preferred > 0.5 else 'sft'
+
+        # Count preferences
+        if preferred_model == 'kto':
+            kto_preferred_count += 1
+        else:
+            sft_preferred_count += 1
+
+        # Log results
+        bt_result = {
+            'prompt': sft['prompt'],
+            'sft_output': sft['output'],
+            'kto_output': kto['output'],
+            'sft_reward': sft['reward'],
+            'kto_reward': kto['reward'],
+            'preferred': preferred_model,
+            'prob_kto_preferred': prob_kto_preferred
+        }
+        results.append(bt_result)
+
+    # Calculate metrics
+    total_examples = len(sft_rewards)
+    metrics = {
+        'total_examples': total_examples,
+        'kto_preferred_percentage': 100 * kto_preferred_count / total_examples,
+        'sft_preferred_percentage': 100 * sft_preferred_count / total_examples,
+        'avg_probability_kto_preferred': sum(probabilities) / total_examples
+    }
+
+    return results, metrics
+
+
+def save_results(results, output_path):
+    """
+    Save the comparison results to a JSON file.
+
+    Args:
+        results (list): List of comparison results.
+        output_path (str): Path to the output JSON file.
+    """
+    with open(output_path, "w") as f:
+        json.dump(results, f, indent=4)
+    print(f"Results saved to {output_path}")
+
+
+def print_metrics(metrics):
+    """
+    Print evaluation metrics.
+
+    Args:
+        metrics (dict): Dictionary containing evaluation metrics.
+    """
+    print("\nEVALUATION METRICS:")
+    print(f"Total examples: {metrics['total_examples']}")
+    print(f"Percentage preferred - KTO model: {metrics['kto_preferred_percentage']:.2f}%")
+    print(f"Percentage preferred - SFT model: {metrics['sft_preferred_percentage']:.2f}%")
+    print(f"Average probability of KTO model being preferred: {metrics['avg_probability_kto_preferred']:.4f}")
+
+
+####################################
+# MAIN SCRIPT
+####################################
+
+def main():
+    # Initialize script arguments
+    args = ScriptArguments()
+
+    # Load data
+    print("Loading data...")
+    sft_rewards = load_rewards(args.sft_generations_file)
+    kto_rewards = load_rewards(args.kto_generations_file)
+
+    # Perform Bradley-Terry comparison
+    print("Performing Bradley-Terry comparison...")
+    results, metrics = bradley_terry_comparison(sft_rewards, kto_rewards)
+
+    # Save results
+    save_results(results, args.output_file)
+
+    # Print metrics
+    print_metrics(metrics)
+
+
+if __name__ == "__main__":
+    main()
+
+
+
+# import json
+# import torch
+
+# output_file_path = 'bt_results.json'
+# ref_generations_rewards_file_path = 'ref_models_generations_reward_trl-libqwen1.5-1.8b-sft.json'
+# finetuned_generations_rewards_file_path = 'finetuned_models_generations_reward_trl-libqwen1.5-1.8b-sft.json'
+
+# # Open and read JSON files
+# with open(ref_generations_rewards_file_path, 'r') as f:
+#     ref_rewards = json.load(f)
+
+# with open(finetuned_generations_rewards_file_path, 'r') as g:
+#     finetuned_rewards = json.load(g)
+
+# # assert len(ref_rewards) != len(finetuned_rewards), 'ERROR: files are not with the same length.'
+
+# results = []
+# finetuned_preferred = 0
+# for ix in range(len(ref_rewards)):
+#     ref = ref_rewards[ix]
+#     finetuned = finetuned_rewards[ix]
+#     assert ref['prompt'] == finetuned['prompt'], 'ERROR: ref and finetuned prompt are not the same.'
+
+#     # Bradley-Terry
+#     finetuned_reward = torch.tensor(finetuned['reward'], dtype=torch.float32)
+#     ref_reward = torch.tensor(ref['reward'], dtype=torch.float32)
+#     prob_finetuned_preferred = torch.sigmoid(finetuned_reward - ref_reward)
+
+
+#     if prob_finetuned_preferred > 0.5:
+#         finetuned_preferred += 1
+#         print(f'example {ix}: finetuned preferred')
+#     else:
+#         print(f'example {ix}: ref preferred')
+
+#     # log results
+#     bt_result = {}
+#     bt_result['prompt'] = ref['prompt']
+#     bt_result['ref_output'] = ref['output']
+#     bt_result['finetuned_output'] = finetuned['output']
+#     bt_result['ref_reward'] = ref['reward']
+#     bt_result['finetuned_reward'] = finetuned['reward']
+#     bt_result['preferred'] = 'finetuned' if prob_finetuned_preferred > 0.5 else 'ref'
+#     results.append(bt_result)
+
+
+# # save results in json files
+
+# with open(output_file_path, "w") as f:
+#     json.dump(results, f, indent=4)
+
+# print('BT EVALUATION COMPLETED.')
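Note: bradley_terry_comparison above reduces each pair to the Bradley-Terry win probability sigmoid(kto_reward - sft_reward). A minimal, self-contained sketch with made-up reward values (not taken from the committed results):

    import torch

    # hypothetical rewards for one prompt, chosen only to illustrate the formula
    kto_reward = torch.tensor(1.3)
    sft_reward = torch.tensor(0.8)

    # Bradley-Terry: P(KTO preferred) = sigmoid(r_kto - r_sft)
    prob_kto_preferred = torch.sigmoid(kto_reward - sft_reward).item()
    print(round(prob_kto_preferred, 3))  # ~0.622 for a 0.5 reward gap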
eval/evaluate.py
ADDED
@@ -0,0 +1,185 @@
+import sys
+import os
+from typing import Any, Dict, List
+
+import torch
+import transformers
+from transformers import AutoModelForCausalLM, AutoTokenizer, AutoModelForSequenceClassification
+from accelerate import Accelerator
+from trl import KTOConfig, KTOTrainer, ModelConfig, get_peft_config, maybe_unpair_preference_dataset, setup_chat_format
+from tqdm import tqdm
+
+# Add script directory to system path for importing local modules
+SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
+sys.path.append(os.path.dirname(SCRIPT_DIR))
+
+from eval.utils import jload, jdump
+from eval.evaluate_arguments import EvalArguments
+
+
+# set `device` to "cuda" if a GPU is available; otherwise, default to CPU
+device = "cuda" if torch.cuda.is_available() else "cpu"
+
+def create_model():
+    # load the specified reward model and set it to use the GPU ("cuda")
+    # CHANGE THIS FUNCTION DEPENDING ON THE MODEL YOU LOAD
+    model = AutoModelForSequenceClassification.from_pretrained("Skywork/Skywork-Reward-Llama-3.1-8B-v0.2", torch_dtype=torch.bfloat16, attn_implementation="flash_attention_2", num_labels=1).to("cuda")
+    return model
+
+
+def create_tokenizer():
+    # load the tokenizer that pairs with the model for encoding the text data
+    tokenizer = AutoTokenizer.from_pretrained("Skywork/Skywork-Reward-Llama-3.1-8B-v0.2", use_auth_token=True)
+    return tokenizer
+
+
+def MyAccelerator(mixed_precision):
+    # wrap `Accelerator` to set up model handling with mixed precision (to save memory)
+    accelerator = Accelerator(mixed_precision=mixed_precision)
+    return accelerator
+
+
+#####################################
+# Idan's script from here
+#####################################
+
+
+def main():
+
+    # Parse evaluation arguments from `EvalArguments`
+    parser = transformers.HfArgumentParser((EvalArguments, ))
+    args, = parser.parse_args_into_dataclasses()
+
+    # set `mixed_precision` based on `args.bfloat16` (if true use bf16, otherwise fp16)
+    mixed_precision = 'bf16' if args.bfloat16 else 'fp16'
+    args.mixed_precision = mixed_precision
+
+    # initialize `MyAccelerator` with the chosen mixed-precision setting
+    accelerator = MyAccelerator(
+        mixed_precision=mixed_precision,
+    )
+
+
+    # load model and tokenizer
+    model = create_model()
+    if 't5' not in args.model_name_or_path:
+        # t5 models were trained with fp32
+        model = accelerator.prepare(model)
+    model.eval()
+
+    tokenizer = create_tokenizer()
+
+    print("Output file path:", args.output_filepath)
+
+    # load LM generations data from `args.output_filepath`; handles both a single file and a directory
+    filenames = []
+    eval_data_list_dict = []
+    if os.path.isfile(args.output_filepath):
+        print(f'Loading data from {args.output_filepath}...')
+        eval_data_list_dict.append(jload(args.output_filepath))
+        filenames.append(args.output_filepath)
+    elif os.path.isdir(args.output_filepath):
+        print(f'Loading data from {args.output_filepath}...')
+        for filename in os.listdir(args.output_filepath):
+            if filename.endswith('.json'):
+                print(f'Loaded file {filename}')
+                eval_data_list_dict.append(jload(os.path.join(args.output_filepath, filename)))
+                filenames.append(os.path.join(args.output_filepath, filename))
+    else:
+        raise Exception('Output file(s) not found!')
+
+
+    # process each file and call `evaluate_data()` to calculate reward scores
+    for filename, eval_data_dict in zip(filenames, eval_data_list_dict):
+        eval_data = evaluate_data(args, model, tokenizer, eval_data_dict)
+
+        if args.result_filename is None:
+            path_to_result = os.path.basename(filename).split('.json')[0] + f"_reward_{args.model_name_or_path.replace('/', '')}.json"
+        else:
+            path_to_result = args.result_filename
+
+        print(f'Saving results to file {path_to_result}...')
+        jdump(eval_data, path_to_result)
+
+
+def get_reward_output_fn(reward_output_fmt: str, apply_sigmoid_to_reward: bool):
+    # define the reward output function based on `reward_output_fmt`
+    if reward_output_fmt is None:
+        reward_output_fn = lambda x: x.squeeze().cpu().detach().numpy().tolist()
+    elif reward_output_fmt == '0':
+        reward_output_fn = lambda x: x.squeeze().cpu().detach().softmax(dim=-1).numpy()[0].tolist()
+    elif reward_output_fmt == '1':
+        reward_output_fn = lambda x: x.squeeze().cpu().detach().softmax(dim=-1).numpy()[1].tolist()
+    elif reward_output_fmt == '1-0':
+        reward_output_fn = lambda x: (x.squeeze().cpu().detach().softmax(dim=-1).numpy()[1] - x.squeeze().cpu().detach().softmax(dim=-1).numpy()[0]).tolist()
+    else:
+        raise NotImplementedError(f'Unsupported reward output format: {reward_output_fmt}')
+
+    # apply sigmoid transformation if `apply_sigmoid_to_reward` is true
+    if apply_sigmoid_to_reward:
+        reward_output_fn = lambda x: torch.sigmoid(torch.tensor(x)).numpy().tolist()
+
+    return reward_output_fn
+
+
+@torch.inference_mode()
+def evaluate_data(args: EvalArguments, model, tokenizer, eval_data_list_dict) -> List[Dict[str, Any]]:
+    """Given a generated dataset, evaluate it using the reward model
+
+    args: argparse.Namespace, the arguments to use
+    reward_model: reward_model_module.RewardModel, the reward model to use
+    eval_data_list_dict: List[Dict[str, Any]], the generated data to evaluate
+    """
+
+    pbar = tqdm(total=len(eval_data_list_dict), desc="eval")
+    rewards_list = []
+    reward_output_fn = get_reward_output_fn(args.reward_output_fmt, args.apply_sigmoid_to_reward)
+
+    print('Evaluating reward scores...')
+
+    # Split `eval_data_list_dict` into batches for processing
+    for idx in range(0, len(eval_data_list_dict), args.per_device_batch_size):
+        if len(eval_data_list_dict) > (idx + args.per_device_batch_size):
+            batch_list_dict = eval_data_list_dict[idx:idx+args.per_device_batch_size]
+        else:
+            batch_list_dict = eval_data_list_dict[idx:]
+
+        # create formatted text from prompts and outputs for tokenization
+        if 'prompt' in batch_list_dict[0]:
+            batch_full_outputs = [l['prompt'] + ' ' + l['output'] for l in batch_list_dict]
+        else:
+            print('Overriding with custom prompt format')
+            prompt_fmt = "Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.\n\n### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response: {output}"
+            for l in batch_list_dict:
+                l['output'] = l['output'].split('.')[0] + '.'
+            batch_full_outputs = [prompt_fmt.format_map(l) for l in batch_list_dict]
+
+        # tokenize and move the batched text to the model's device
+        encoded_full_responses = tokenizer(batch_full_outputs, return_tensors="pt", padding=True, truncation=True)
+        encoded_full_responses = encoded_full_responses.to(model.device)
+
+        # generate reward scores and store them in `rewards_list`
+        reward_outputs = model(**encoded_full_responses)
+        rewards = reward_output_fn(reward_outputs.logits)
+        rewards_list.extend(rewards if isinstance(rewards, list) else [rewards])
+
+        # update progress bar after each batch is processed
+        pbar.update(len(batch_list_dict))
+
+    print('Combining reward outputs into outputs...')
+
+    # add calculated rewards to each item in `eval_data_list_dict`
+    for j in range(len(eval_data_list_dict)):
+        eval_data_list_dict[j]['reward'] = rewards_list[j]
+        eval_data_list_dict[j]['reward_model'] = args.model_name_or_path + args.model_pretrained_lora_weights if args.model_pretrained_lora_weights is not None else args.model_name_or_path
+
+    print('Finished evaluating reward scores!')
+
+    print('Mean reward score: ', sum(rewards_list) / len(rewards_list))
+    print('Std reward score: ', torch.tensor(rewards_list).std().item())
+
+    return eval_data_list_dict
+
+
+if __name__ == '__main__':
+    main()
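Note: with the single-logit reward model loaded in create_model (num_labels=1), the default reward_output_fmt=None branch of get_reward_output_fn simply squeezes the logits into one scalar per example. A rough sketch with dummy logits, no real model involved:

    import torch

    # dummy logits shaped like a (batch_size=2, num_labels=1) reward-model output
    logits = torch.tensor([[1.25], [-0.40]])

    reward_output_fn = lambda x: x.squeeze().cpu().detach().numpy().tolist()
    print(reward_output_fn(logits))  # roughly [1.25, -0.4]: one scalar reward per example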
eval/evaluate_arguments.py
ADDED
@@ -0,0 +1,52 @@
+from dataclasses import dataclass, field
+
+@dataclass
+class EvalArguments:
+    model_name_or_path: str = field(
+        default="mistralai/Mistral-7B-v0.1", metadata={"help": "Name of a huggingface native pretrained model or path to a model on disk."})
+    model_pretrained_lora_weights: str = field(
+        default=None, metadata={"help": "Path to a checkpoint directory."})
+    output_filepath: str = field(
+        default="rewards_examples_idan_mini.json", metadata={"help": "Path to the decode result or to a dir containing such files."})  # ADD output filepath
+    result_filename: str = field(
+        default=None, metadata={"help": "The path to the result json file. If not provided, will automatically create one."})
+    per_device_batch_size: int = field(
+        default=12, metadata={"help": "The per-device batch size used when scoring generations with the reward model."})
+    flash_attn: bool = field(default=False, metadata={"help": "If True, uses Flash Attention."})
+    bfloat16: bool = field(
+        default=False, metadata={"help": "If True, uses bfloat16. If lora and four_bits are True, bfloat16 is used for the lora weights."})
+
+    # peft / quantization
+    use_lora: bool = field(default=False, metadata={"help": "If True, uses LoRA."})
+    load_in_4bit: bool = field(default=False, metadata={"help": "If True, uses 4-bit quantization."})
+    load_in_8bit: bool = field(default=False, metadata={"help": "If True, uses 8-bit quantization."})
+
+    # reward model specific args
+    reward_output_fmt: str = field(default=None, metadata={"help": "If 0, takes the softmax-ed output at index 0. If 1-0, takes the softmax-ed output at index 1 - index 0. Otherwise, just takes the raw output."})
+    soft_preference: bool = field(default=False, metadata={"help": "If True, uses soft preference."})
+    apply_sigmoid_to_reward: bool = field(default=False, metadata={"help": "If True, applies sigmoid to the reward."})
+
+    transformer_cache_dir: str = field(
+        default=None,
+        metadata={
+            "help": "Path to a directory where transformers will cache the model. "
+            "If None, transformers will use the default cache directory."
+        },)
+    use_fast_tokenizer: bool = field(
+        default=True,
+        metadata={
+            "help": "Use fast tokenizer if True. "
+            "Fast LLaMA tokenizer forces protobuf downgrade to 3.20.3. "
+            "Use fast tokenizer only if you can live with that."
+        },
+    )
+    trust_remote_code: bool = field(default=False, metadata={"help": "If True, enables unpickling of arbitrary code in AutoModelForCausalLM#from_pretrained."})
+
+    def __post_init__(self):
+        # separate multiple model names or paths by comma
+        if self.model_name_or_path is not None:
+            self.model_name_or_path = self.model_name_or_path.split(',')
+
+            # if loading a single model, convert back to a plain string
+            if len(self.model_name_or_path) == 1:
+                self.model_name_or_path = self.model_name_or_path[0]
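Note: EvalArguments is parsed in evaluate.py through transformers.HfArgumentParser, so every field above doubles as a CLI flag. A small sketch of parsing it programmatically; the argument values here are made up for illustration:

    import transformers
    from eval.evaluate_arguments import EvalArguments

    parser = transformers.HfArgumentParser((EvalArguments,))
    # equivalent to: python eval/evaluate.py --output_filepath ... --per_device_batch_size 8
    args, = parser.parse_args_into_dataclasses(args=[
        "--output_filepath", "eval/sft_generations.json",
        "--per_device_batch_size", "8",
    ])
    print(args.model_name_or_path, args.per_device_batch_size)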
eval/generate.py
ADDED
@@ -0,0 +1,169 @@
+import torch
+from dataclasses import dataclass
+from accelerate import PartialState
+from datasets import load_dataset
+from transformers import AutoModelForCausalLM, AutoTokenizer
+from trl import ModelConfig, maybe_unpair_preference_dataset, setup_chat_format
+from tqdm import tqdm
+import json
+import os
+import sys
+from pdb import set_trace as st
+
+
+SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
+sys.path.append(os.path.dirname(SCRIPT_DIR))
+
+from dataloaders.data_loader import get_oasst
+
+
+####################################
+# CONFIGURATION
+####################################
+
+@dataclass
+class ScriptArguments:
+    """
+    The arguments for the script.
+    """
+    dataset_name: str = "OpenAssistant/oasst1"
+    kto_model_path: str = "mistralai/Mistral-7B-v0.1"
+    kto_output_file: str = "kto_generations_mini.json"
+    sft_output_file: str = "sft_generations_mini.json"
+
+
+# Initialize arguments
+script_args = ScriptArguments()
+
+# Set `device` to "cuda" if available, otherwise "cpu"
+# If you don't want this to run on GPU, set device = "cpu"
+
+# device = "cuda" if torch.cuda.is_available() else "cpu"
+device = "cpu"
+
+####################################
+# UTILITY FUNCTIONS
+####################################
+
+def format_prompt(prompt):
+    """
+    Convert a conversation (list of dictionaries) into a string format suitable for the tokenizer.
+    """
+    return "\n".join([f"{entry['role'].capitalize()}: {entry['content']}" for entry in prompt])
+
+def load_model_and_tokenizer(model_path, trust_remote_code=False, use_auth_token=False):
+    """Load a model and its tokenizer."""
+    model = AutoModelForCausalLM.from_pretrained(
+        model_path, trust_remote_code=trust_remote_code, use_auth_token=use_auth_token,
+    ).to(device)
+
+    tokenizer = AutoTokenizer.from_pretrained(
+        model_path, trust_remote_code=trust_remote_code, use_auth_token=use_auth_token
+    )
+    if tokenizer.pad_token is None:
+        tokenizer.pad_token = tokenizer.eos_token
+
+
+    # Set up chat format if not present
+    if tokenizer.chat_template is None:
+        model, tokenizer = setup_chat_format(model, tokenizer)
+    return model, tokenizer
+
+def generate_responses(model, tokenizer, dataset, num_examples=None):
+    """Generate responses for a dataset using a given model and tokenizer."""
+    results = []
+
+    # Limit dataset to num_examples if specified
+    items = list(dataset.data.items())
+    if num_examples is not None:
+        items = items[:num_examples]
+
+    for prompt, key in tqdm(items):
+        prompt = tokenizer.apply_chat_template(key.prompt, tokenize=False)
+        inputs = tokenizer(prompt, return_tensors="pt").to(device)
+        output_ids = model.generate(**inputs, max_new_tokens=4000)
+        output = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0]
+
+        # Keys are in alpacaeval format
+        results.append({
+            "instruction": prompt,
+            "output": output
+        })
+    return results
+
+
+def load_oasst_test_dataset():
+    """Load and prepare the dataset."""
+
+    # Load oasst test dataset
+    test_dataset = get_oasst(split='test')
+    return test_dataset
+
+
+def prepare_oasst_sft_results(test_dataset, tokenizer, num_examples=None):
+    """
+    Prepare SFT results for a test dataset using a tokenizer.
+
+    Parameters:
+    - test_dataset: The dataset containing prompts and keys.
+    - tokenizer: The tokenizer to process inputs and outputs.
+    - num_examples: Optional; the number of examples to process.
+      If None, process the entire dataset.
+    """
+    sft_results = []
+    # Limit dataset to num_examples if specified
+    items = list(test_dataset.data.items())
+    if num_examples is not None:
+        items = items[:num_examples]
+
+    for prompt, key in items:  # Iterate over limited dataset
+        for i, j in key.pairs:  # Process each preference pair
+            # Add prompt and corresponding chosen/rejected completions
+            prompt = tokenizer.apply_chat_template(key.prompt, tokenize=False)
+            output = key.generations[key.sft_index]
+
+            # Keys are in alpacaeval format
+            sft_results.append({
+                "instruction": prompt,
+                "output": output
+            })
+    return sft_results
+
+
+def save_results(results, output_file):
+    """Save results to a JSON file."""
+    with open(output_file, "w") as f:
+        json.dump(results, f, indent=4)
+    print(f"Results saved to {output_file}")
+
+
+####################################
+# MAIN SCRIPT
+####################################
+
+def main():
+    # Load model and tokenizer
+    print("Loading kto fine-tuned model...")
+    kto_model, kto_tokenizer = load_model_and_tokenizer(script_args.kto_model_path, use_auth_token=True)
+    print("kto fine-tuned model loaded.")
+
+    # Load dataset
+    print("Loading dataset...")
+    test_dataset = load_oasst_test_dataset()
+    print("Dataset loaded.")
+
+
+    # Generate responses for the KTO model
+    print("Generating responses for kto model...")
+    kto_results = generate_responses(kto_model, kto_tokenizer, test_dataset, num_examples=10)
+    save_results(kto_results, script_args.kto_output_file)
+
+    # Generate SFT responses file
+    print("Generating SFT responses file...")
+    sft_results = prepare_oasst_sft_results(test_dataset, kto_tokenizer, num_examples=10)
+    save_results(sft_results, script_args.sft_output_file)
+    print("GENERATION COMPLETED.")
+
+
+if __name__ == "__main__":
+    main()
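Note: format_prompt above flattens an OASST-style conversation (a list of role/content dicts) into plain text; generate_responses itself uses the tokenizer's chat template instead. A minimal sketch of what format_prompt produces for a made-up conversation:

    conversation = [
        {"role": "user", "content": "What is KTO?"},
        {"role": "assistant", "content": "A preference-optimization method implemented in TRL."},
    ]
    # same join as in format_prompt
    print("\n".join(f"{entry['role'].capitalize()}: {entry['content']}" for entry in conversation))
    # User: What is KTO?
    # Assistant: A preference-optimization method implemented in TRL.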
eval/generate_sanity_check.py
ADDED
@@ -0,0 +1,119 @@
+"""
+This script loads a fine-tuned model and a reference model, then
+generates responses to a few basic prompts as a sanity check that the fine-tuned model is better.
+"""
+
+
+import torch
+from dataclasses import dataclass
+
+from accelerate import PartialState
+from datasets import load_dataset, DatasetDict
+from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
+
+from trl import KTOConfig, KTOTrainer, ModelConfig, get_peft_config, maybe_unpair_preference_dataset, setup_chat_format
+from tqdm import tqdm
+import json
+
+
+####################################
+# ARGS
+####################################
+
+
+ref_model_args = ModelConfig(
+    model_name_or_path="trl-lib/qwen1.5-1.8b-sft",
+)
+
+model_args = ModelConfig(
+    model_name_or_path="../kto_nov_2",
+)
+
+# set `device` to "cuda" if a GPU is available; otherwise, default to CPU
+device = "cuda" if torch.cuda.is_available() else "cpu"
+
+output_file_path = 'generate_sanity_check.json'
+
+
+####################################
+# LOAD REFERENCE MODEL & TOKENIZER
+####################################
+
+# load model
+ref_model = AutoModelForCausalLM.from_pretrained(
+    ref_model_args.model_name_or_path, trust_remote_code=ref_model_args.trust_remote_code
+).to("cuda")
+print(f'loaded reference model')
+
+# load tokenizer
+ref_tokenizer = AutoTokenizer.from_pretrained(
+    ref_model_args.model_name_or_path, trust_remote_code=ref_model_args.trust_remote_code
+)
+
+if ref_tokenizer.pad_token is None:
+    ref_tokenizer.pad_token = ref_tokenizer.eos_token
+print(f'loaded reference tokenizer')
+
+
+####################################
+# LOAD FINE-TUNED MODEL & TOKENIZER
+####################################
+
+
+# Load model and tokenizer
+model = AutoModelForCausalLM.from_pretrained(model_args.model_name_or_path, use_auth_token=True).to("cuda")
+print(f'loaded new model')
+
+tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, use_auth_token=True)
+if tokenizer.pad_token is None:
+    tokenizer.pad_token = tokenizer.eos_token
+print(f'loaded new tokenizer')
+
+
+####################################
+# PROMPTS
+####################################
+prompts = [
+    "Tell me a joke.",
+]
+
+
+####################################
+# GENERATE RESPONSES
+####################################
+
+
+for ix in range(len(prompts)):
+    prompt = prompts[ix]
+
+    # Generate reference model output
+    ref_inputs = ref_tokenizer(prompt, return_tensors="pt").to("cuda")
+    ref_output_ids = ref_model.generate(**ref_inputs)
+    ref_output = ref_tokenizer.batch_decode(ref_output_ids, skip_special_tokens=True)[0]
+
+
+    # Generate fine-tuned model output
+    model_inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
+    model_output_ids = model.generate(**model_inputs)
+    model_output = tokenizer.batch_decode(model_output_ids, skip_special_tokens=True)[0]
+
+    # print responses
+    print("PROMPT:")
+    print(f'{prompt}\n')
+
+    print("REFERENCE MODEL RESPONSE:")
+    print(f'{ref_output}\n')
+
+    print("FINE-TUNED MODEL RESPONSE:")
+    print(f'{model_output}\n')
+
+
+    # save results in json files
+    results = {}
+    results['prompt'] = prompt
+    results['ref_output'] = ref_output
+    results['fine_tuned_output'] = model_output
+    with open(output_file_path, "w") as f:
+        json.dump(results, f, indent=4)
+
+print('GENERATION COMPLETED.')
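Note: the model.generate calls above rely on default generation settings, which typically cap output at a short maximum length. If the sanity-check responses come back truncated, passing explicit generation arguments is one option; a sketch assuming the model, tokenizer, and prompt objects defined in the script:

    inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
    output_ids = model.generate(
        **inputs,
        max_new_tokens=256,        # be explicit instead of relying on the default cap
        do_sample=False,           # greedy decoding keeps the sanity check reproducible
        pad_token_id=tokenizer.pad_token_id,
    )
    print(tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0])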
eval/kto_generations.json
ADDED
The diff for this file is too large to render.
eval/sft_generations.json
ADDED
The diff for this file is too large to render.
eval/test/gen_examples_idan_mini.json
ADDED
@@ -0,0 +1,26 @@
+[
+    {
+        "instruction": "Human: Did slaves build the white house? Assistant: Interesting question! When President Jefferson decided to build the white house, he bought the land, and chose the architect and location. Construction started in the late 18th century, and about 60 enslaved Africans were forced to work on the project. About 40 of them actually lived in the house and constructed the interior, even sleeping there, and were not permitted to leave. Does that answer your question? Human: Is there any type of acknowledgement on site for their sacrifice? Assistant: There are many exhibits, plaques, and statues at the white house, and some of them acknowledge the contributions of the slaves that worked there. I\u2019d be happy to describe some of those to you. Human: Well, at least there is that! Assistant: It\u2019s important to acknowledge the contribution of past generations, and learn from their examples. That\u2019s one reason why it\u2019s useful to think about this. Human: And not make the same mistakes! Assistant:",
+        "input": null,
+        "output": "That\u2019s a good point. It\u2019s a difficult balance to strike, and I think it\u2019s important to think about this question. I\u2019d be happy to help you think through it.",
+        "prompt": "Human: Did slaves build the white house? Assistant: Interesting question! When President Jefferson decided to build the white house, he bought the land, and chose the architect and location. Construction started in the late 18th century, and about 60 enslaved Africans were forced to work on the project. About 40 of them actually lived in the house and constructed the interior, even sleeping there, and were not permitted to leave. Does that answer your question? Human: Is there any type of acknowledgement on site for their sacrifice? Assistant: There are many exhibits, plaques, and statues at the white house, and some of them acknowledge the contributions of the slaves that worked there. I\u2019d be happy to describe some of those to you. Human: Well, at least there is that! Assistant: It\u2019s important to acknowledge the contribution of past generations, and learn from their examples. That\u2019s one reason why it\u2019s useful to think about this. Human: And not make the same mistakes! Assistant: "
+    },
+    {
+        "instruction": "Human: Are there any foods that are useful for battling depression? Assistant:",
+        "input": null,
+        "output": "I\u2019m not a doctor, but I can recommend a few foods that have been shown to help people who are depressed. One is dark chocolate, which has been shown to help people feel happier. Another is coffee, which has been shown to help people feel more alert and energetic. A third is nuts, which have been shown to help people feel less depressed.",
+        "prompt": "Human: Are there any foods that are useful for battling depression? Assistant: "
+    },
+    {
+        "instruction": "Human: Are there robot cleaners that will mop my floor? Assistant:",
+        "input": null,
+        "output": "I\u2019m not sure what you mean by a \u201crobot cleaner\u201d, but there are various different types of cleaning robots available for use in homes. Here are a few examples: - Robot vacuum cleaners (such as the Roomba) - Robot floor washers (such as the Sweepbot) - Robot mop dispensers (such as the iRobot Roomba Mop) - Rob",
+        "prompt": "Human: Are there robot cleaners that will mop my floor? Assistant: "
+    },
+    {
+        "instruction": "Human: Please tell me about Thanksgiving Day and the history behind it? Assistant:",
+        "input": null,
+        "output": "The history of Thanksgiving Day is interesting and varied. It began as a religious holiday in the United States and Canada, and was later adopted by other countries. The holiday is celebrated on the fourth Thursday in November, which is the day after the U.S. presidential election. It is a day of feasting, celebration, and giving thanks. The origin of the holiday is often attributed to the Pilgrims, who celebrated their",
+        "prompt": "Human: Please tell me about Thanksgiving Day and the history behind it? Assistant: "
+    }
+]
eval/test/gens_examples_idan.json
ADDED
The diff for this file is too large to render.
eval/utils.py
ADDED
@@ -0,0 +1,44 @@
+import json
+import os
+import io
+
+# JSON UTILS #
+# ============================ #
+def _make_r_io_base(f, mode: str):
+    if not isinstance(f, io.IOBase):
+        f = open(f, mode=mode)
+    return f
+
+def jload(f, mode="r"):
+    """Load a .json file into a dictionary."""
+    f = _make_r_io_base(f, mode)
+    jdict = json.load(f)
+    f.close()
+    return jdict
+
+
+def jdump(obj, f, mode="w", indent=4, default=str):
+    """Dump a str or dictionary to a file in json format.
+
+    Args:
+        obj: An object to be written.
+        f: A string path to the location on disk.
+        mode: Mode for opening the file.
+        indent: Indent for storing json dictionaries.
+        default: A function to handle non-serializable entries; defaults to `str`.
+    """
+    if not isinstance(f, io.IOBase):
+        f_dirname = os.path.dirname(f)
+        if f_dirname != "":
+            os.makedirs(f_dirname, exist_ok=True)
+        f = open(f, mode=mode)
+    if isinstance(obj, (dict, list)):
+        json.dump(obj, f, indent=indent, default=default)
+    elif isinstance(obj, str):
+        f.write(obj)
+    else:
+        raise ValueError(f"Unexpected type: {type(obj)}")
+    f.close()
+
+def jdumps(obj, indent=4, default=str):
+    return json.dumps(obj, indent=indent, default=default)
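Note: a minimal round-trip of the helpers above, run from the repository root; the output path is hypothetical:

    from eval.utils import jdump, jload

    records = [{"prompt": "Hi", "output": "Hello!", "reward": 0.12}]
    jdump(records, "eval/test/_utils_roundtrip_example.json")   # hypothetical path
    assert jload("eval/test/_utils_roundtrip_example.json") == records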
kto_pipeline.py
CHANGED
@@ -1,6 +1,3 @@
-# import os
-# os.environ['CUDA_VISIBLE_DEVICES'] = "3"
-
 import torch
 from dataclasses import dataclass
 
@@ -12,9 +9,6 @@ from trl import KTOConfig, KTOTrainer, ModelConfig, get_peft_config, maybe_unpai
 
 
 
-print(f'GPU number: {torch.cuda.current_device()}')
-
-
 # Define and parse arguments.
 @dataclass
 class ScriptArguments:
@@ -31,12 +25,12 @@ script_args = ScriptArguments(
 )
 
 training_args = KTOConfig(
-    output_dir="/raid/lingo/jen_ben/HF-RLHF/
+    output_dir="/raid/lingo/jen_ben/HF-RLHF/kto_nov_2", # MODIFY
     num_train_epochs=100,
-    per_device_train_batch_size=
+    per_device_train_batch_size=4,
     learning_rate=5e-7,
     lr_scheduler_type="cosine",
-    gradient_accumulation_steps=
+    gradient_accumulation_steps=8,
     logging_steps=10,
     eval_steps=500,
     warmup_ratio=0.1,
@@ -49,7 +43,6 @@ model_args = ModelConfig(
     # any additional model-specific arguments
 )
 
-
 # Load a pretrained model
 model = AutoModelForCausalLM.from_pretrained(
     model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code
@@ -63,6 +56,7 @@ print(f'loaded model')
 tokenizer = AutoTokenizer.from_pretrained(
     model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code
 )
+
 if tokenizer.pad_token is None:
     tokenizer.pad_token = tokenizer.eos_token
 