AnnaPalatkina committed on
Commit
92cb663
1 Parent(s): 5943c14

add finetuning

Files changed (2)
  1. fine_tune.py +236 -0
  2. requirements.txt +28 -0
fine_tune.py ADDED
@@ -0,0 +1,236 @@
+ from transformers import AutoTokenizer, AutoModelForSequenceClassification, get_linear_schedule_with_warmup
+ from sklearn.metrics import classification_report, f1_score
+ from torch.utils.data import Dataset, DataLoader
+ from argparse import ArgumentParser
+ from str2bool import str2bool
+ from torch import nn
+ import pandas as pd
+ import numpy as np
+ import torch
+ import os
+
+
+ parser = ArgumentParser()
+ parser.add_argument("-dataframe", required=True, help="Path to dataframe with columns ['text', 'label', 'split']")  # 'data/small_dataset.csv'
+ parser.add_argument("-model", required=True, help='Pre-trained model from huggingface or path to a local folder with config.json')  # '../norbert3-x-small/'
+ parser.add_argument("-custom_wrapper", default=False, type=lambda x: bool(str2bool(x)), help='Boolean argument - True to use the custom wrapper, False to use AutoModelForSequenceClassification')  # True
+ parser.add_argument("-lr", default='1e-05', help='Learning rate.')
+ parser.add_argument("-max_length", default='512', help='Max length of the sequence in tokens.')
+ parser.add_argument("-warmup", default='2', help='The number of steps for the warmup phase.')
+ parser.add_argument("-batch_size", default='4', help='Batch size.')
+ parser.add_argument("-epochs", default='20', help='Number of epochs for training.')
+ args = parser.parse_args()
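+ # Example invocation, reusing the sample paths from the argument comments above
+ # (both paths are illustrative placeholders, not files shipped with this commit):
+ #   python fine_tune.py -dataframe data/small_dataset.csv -model ../norbert3-x-small/ -custom_wrapper False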
+
+ device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+
+ class SentimentDataset(Dataset):
+     def __init__(self, texts, targets, tokenizer, max_len):
+         self.texts = texts
+         self.targets = targets
+         self.tokenizer = tokenizer
+         self.max_len = max_len
+
+     def __len__(self):
+         return len(self.texts)
+
+     def __getitem__(self, item):
+         text = str(self.texts[item])
+         target = self.targets[item]
+         encoding = self.tokenizer.encode_plus(
+             text,
+             add_special_tokens=True,
+             max_length=self.max_len,
+             return_token_type_ids=False,
+             padding='max_length',
+             return_attention_mask=True,
+             truncation=True,
+             return_tensors='pt',
+         )
+
+         return {
+             'text': text,
+             'input_ids': encoding['input_ids'].flatten(),
+             'attention_mask': encoding['attention_mask'].flatten(),
+             'targets': torch.tensor(target, dtype=torch.long)
+         }
+
+
+ def create_data_loader(df, tokenizer, max_len, batch_size):
+     ds = SentimentDataset(
+         texts=df.text.to_numpy(),
+         targets=df.label.to_numpy(),
+         tokenizer=tokenizer,
+         max_len=max_len
+     )
+     return DataLoader(
+         ds,
+         batch_size=batch_size
+     )
+
+ class SentimentClassifier(nn.Module):
+
+     def __init__(self, n_classes):
+         super(SentimentClassifier, self).__init__()
+
+         if not args.custom_wrapper:
+             self.bert = AutoModelForSequenceClassification.from_pretrained(args.model, num_labels=n_classes, ignore_mismatched_sizes=True)
+         if args.custom_wrapper:
+             from modeling_norbert import NorbertForSequenceClassification
+             self.bert = NorbertForSequenceClassification.from_pretrained(args.model, num_labels=n_classes, ignore_mismatched_sizes=True)
+
+     def forward(self, input_ids, attention_mask):
+         bert_output = self.bert(
+             input_ids=input_ids,
+             attention_mask=attention_mask,
+             return_dict=True
+         )
+         logits = bert_output.logits
+         return logits
+
+
+ def train_epoch(
+     model,
+     data_loader,
+     loss_fn,
+     optimizer,
+     device,
+     scheduler,
+     n_examples
+ ):
+     y_true, y_pred = [], []
+     model = model.train()
+     losses = []
+     correct_predictions = 0
+
+     for d in data_loader:
+         input_ids = d["input_ids"].to(device)
+         attention_mask = d["attention_mask"].to(device)
+         targets = d["targets"].to(device)
+         y_true += targets.tolist()
+         outputs = model(
+             input_ids=input_ids,
+             attention_mask=attention_mask
+         )
+         preds_idxs = torch.max(outputs, dim=1).indices
+         # .tolist() also works for CUDA tensors, unlike .numpy() on a GPU tensor
+         y_pred += preds_idxs.tolist()
+         loss = loss_fn(outputs, targets)
+         correct_predictions += torch.sum(preds_idxs == targets)
+
+         losses.append(loss.item())
+         loss.backward()
+         nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
+         optimizer.step()
+         scheduler.step()
+         optimizer.zero_grad()
+
+     f1 = f1_score(y_true, y_pred, average='macro')
+     return correct_predictions.double() / n_examples, np.mean(losses), f1
+
+ def eval_model(model, data_loader, loss_fn, device, n_examples):
+     model = model.eval()
+     losses = []
+     correct_predictions = 0
+     y_true, y_pred = [], []
+     with torch.no_grad():
+         for d in data_loader:
+             input_ids = d["input_ids"].to(device)
+             attention_mask = d["attention_mask"].to(device)
+             targets = d["targets"].to(device)
+             y_true += targets.tolist()
+             outputs = model(
+                 input_ids=input_ids,
+                 attention_mask=attention_mask
+             )
+             _, preds = torch.max(outputs, dim=1)
+             y_pred += preds.tolist()
+             loss = loss_fn(outputs, targets)
+             correct_predictions += torch.sum(preds == targets)
+             losses.append(loss.item())
+     f1 = f1_score(y_true, y_pred, average='macro')
+     report = classification_report(y_true, y_pred)
+     return correct_predictions.double() / n_examples, np.mean(losses), f1, report
+
+
+ df = pd.read_csv(args.dataframe)
+
+ df_train = df[df['split'] == 'train']
+ df_val = df[df['split'] == 'dev']
+ df_test = df[df['split'] == 'test']
+
+ print(f'Train samples: {len(df_train)}')
+ print(f'Validation samples: {len(df_val)}')
+ print(f'Test samples: {len(df_test)}')
+
+ tokenizer = AutoTokenizer.from_pretrained(args.model)
+
+ max_length = int(args.max_length)
+ batch_size = int(args.batch_size)
+ epochs = int(args.epochs)
+
+ train_data_loader = create_data_loader(df_train, tokenizer, max_length, batch_size)
+ val_data_loader = create_data_loader(df_val, tokenizer, max_length, batch_size)
+ test_data_loader = create_data_loader(df_test, tokenizer, max_length, batch_size)
+
+ class_names = df.label.unique()
+ model = SentimentClassifier(len(class_names))
+ model = model.to(device)
+
+ loss_fn = nn.CrossEntropyLoss().to(device)
+ optimizer = torch.optim.AdamW(model.parameters(), lr=float(args.lr))
+ total_steps = len(train_data_loader) * epochs
+ scheduler = get_linear_schedule_with_warmup(
+     optimizer,
+     num_warmup_steps=int(args.warmup),
+     num_training_steps=total_steps
+ )
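+ # get_linear_schedule_with_warmup increases the learning rate linearly over the
+ # first `warmup` optimizer steps, then decays it linearly to zero over the rest.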
+
+ for epoch in range(epochs):
+     print(f'Epoch {epoch + 1}/{epochs}')
+     print('-' * 10)
+     train_acc, train_loss, train_f1 = train_epoch(
+         model,
+         train_data_loader,
+         loss_fn,
+         optimizer,
+         device,
+         scheduler,
+         len(df_train)
+     )
+     print()
+     print(f'Train loss {train_loss} -- accuracy {train_acc} -- f1 {train_f1}')
+
+     # save a checkpoint after every epoch
+     model_name = args.model.split('/')[-1] if args.model.split('/')[-1] != '' else args.model.split('/')[-2]
+     os.makedirs('saved_models', exist_ok=True)
+     torch.save(model.state_dict(), f'saved_models/{model_name}_epoch_{epoch + 1}.bin')
+
+     val_acc, val_loss, val_f1, report = eval_model(
+         model,
+         val_data_loader,
+         loss_fn,
+         device,
+         len(df_val)
+     )
+     print()
+     print(f'Val loss {val_loss} -- accuracy {val_acc} -- f1 {val_f1}')
+     print(report)
+
+
+ test_acc, test_loss, test_f1, test_report = eval_model(
+     model,
+     test_data_loader,
+     loss_fn,
+     device,
+     len(df_test)
+ )
+
+
+ print()
+ print('-------------TESTING-----------------')
+ print()
+ print(f'Test accuracy {test_acc}, f1 {test_f1}')
+ print(test_report)
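+
+ # A minimal sketch (not executed during training) of how a saved checkpoint
+ # could be reloaded for inference later; the epoch suffix is illustrative:
+ #   model = SentimentClassifier(len(class_names))
+ #   model.load_state_dict(torch.load(f'saved_models/{model_name}_epoch_{epochs}.bin'))
+ #   model.eval()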
requirements.txt ADDED
@@ -0,0 +1,28 @@
+ certifi==2022.12.7
+ charset-normalizer==3.0.1
+ docopt==0.6.2
+ filelock==3.9.0
+ huggingface-hub==0.11.1
+ idna==3.4
+ joblib==1.2.0
+ numpy==1.24.1
+ packaging==23.0
+ pandas==1.5.2
+ pipreqs==0.4.11
+ python-dateutil==2.8.2
+ pytz==2022.7
+ PyYAML==6.0
+ regex==2022.10.31
+ requests==2.28.2
+ scikit-learn==1.2.0
+ scipy==1.10.0
+ six==1.16.0
+ str2bool==1.1
+ threadpoolctl==3.1.0
+ tokenizers==0.13.2
+ torch==1.13.1
+ tqdm==4.64.1
+ transformers==4.25.1
+ typing_extensions==4.4.0
+ urllib3==1.26.14
+ yarg==0.1.9