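"""Fine-tune roberta-base for 8-way stigma classification of conversations.

Reads ./dataset/train.csv and ./dataset/test.csv, trains a
RobertaForSequenceClassification head, evaluates on the test split, and
writes per-example predictions to ./results/roberta_predictions.csv.
"""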
import ast
import logging
import os
import random
from collections import Counter

import numpy as np
import pandas as pd
import torch
from datasets import Dataset
from sklearn.metrics import (accuracy_score, classification_report, cohen_kappa_score,
                             f1_score, precision_score, recall_score)
from transformers import (RobertaForSequenceClassification, RobertaTokenizer,
                          Trainer, TrainingArguments)
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger('social_stigma')
def seed_everything(seed=42):
    """Fix all RNG seeds (Python, NumPy, PyTorch, CUDA) for reproducibility."""
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

seed_everything(42)
train_df = pd.read_csv('./dataset/train.csv')
test_df = pd.read_csv('./dataset/test.csv')
train_dataset = Dataset.from_pandas(train_df)
test_dataset = Dataset.from_pandas(test_df)
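# Assumed input schema (inferred from encoder_text below): each CSV row carries a
# 'conversations' column holding a stringified list of {'role': ..., 'content': ...}
# message dicts, plus a string 'label' column matching the keys of label_dict.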
# Local roberta-base checkpoint; the 8-way classification head is freshly initialized.
model_id = 'hf_models/roberta-base'
tokenizer = RobertaTokenizer.from_pretrained(model_id)
model = RobertaForSequenceClassification.from_pretrained(model_id, num_labels=8)
label_dict = {
    'Non-stigmatized': 0,
    'Stigmatized (responsibility)': 1,
    'Stigmatized (social distance)': 2,
    'Stigmatized (fear/dangerousness)': 3,
    'Stigmatized (anger)': 4,
    'Stigmatized (coercion segregation)': 5,
    'Stigmatized (helping)': 6,
    'Stigmatized (pity)': 7,
}
id2label_custom = {v: k for k, v in label_dict.items()} # {0: 'Non-stigmatized', 1: 'Stigmatized (responsibility)', ...}
model.config.id2label = id2label_custom
model.config.label2id = label_dict
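# Storing the mappings on the config means save_pretrained() writes them to
# config.json, so the exported checkpoint carries human-readable label names.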
def encoder_text(examples):
    """Flatten each stringified conversation into 'role: content ...' text and tokenize."""
    texts = []
    for conv in examples["conversations"]:
        conv = ast.literal_eval(conv)  # parse the stringified list of message dicts
        conversation_text = " ".join(f"{msg['role']}: {msg['content']}" for msg in conv)
        texts.append(conversation_text)
    # Conversations longer than 512 tokens are truncated.
    return tokenizer(texts, max_length=512, padding='max_length', truncation=True)

def encoder_label(examples):
    """Map the string label to its integer id."""
    examples['label'] = label_dict[examples['label']]
    return examples
# batched=True lets encoder_text tokenize whole batches of rows at once;
# Trainer handles device placement itself, so no manual .to(device) is needed.
train_dataset = train_dataset.map(encoder_text, batched=True)
train_dataset = train_dataset.map(encoder_label)
test_dataset = test_dataset.map(encoder_text, batched=True)
test_dataset = test_dataset.map(encoder_label)
def classification_metrics(y_true, y_pred, labels):
    """Per-class one-vs-rest metrics plus macro and support-weighted averages."""
    classification_scores = {}
    class_counts = Counter(y_true)  # occurrences of each class (the per-class support)
    overall_accuracy = accuracy_score(y_true, y_pred)
    sum_precision = sum_recall = sum_f1 = 0
    for label in labels:
        # Binarize: treat the current class as positive, everything else as negative.
        y_true_binary = [1 if y == label else 0 for y in y_true]
        y_pred_binary = [1 if y == label else 0 for y in y_pred]
        kappa = cohen_kappa_score(y_true_binary, y_pred_binary)
        recall_val = recall_score(y_true_binary, y_pred_binary)
        precision_val = precision_score(y_true_binary, y_pred_binary)
        f1_val = f1_score(y_true_binary, y_pred_binary)
        support = class_counts[label]
        acc = accuracy_score(y_true_binary, y_pred_binary)
        sum_precision += precision_val
        sum_recall += recall_val
        sum_f1 += f1_val
        classification_scores[label] = {
            'precision': precision_val,
            'recall': recall_val,
            'f1-score': f1_val,
            'support': support,
            'cohen_kappa': kappa,
            'accuracy': acc
        }
    macro_avg_precision = sum_precision / len(labels)
    macro_avg_recall = sum_recall / len(labels)
    macro_avg_f1 = sum_f1 / len(labels)
    # Weighted averages use the per-class support as weights.
    supports = [score['support'] for score in classification_scores.values()]
    weighted_avg_precision = np.average([score['precision'] for score in classification_scores.values()], weights=supports)
    weighted_avg_recall = np.average([score['recall'] for score in classification_scores.values()], weights=supports)
    weighted_avg_f1 = np.average([score['f1-score'] for score in classification_scores.values()], weights=supports)
    classification_scores['accuracy'] = overall_accuracy
    classification_scores['macro avg'] = {'precision': macro_avg_precision, 'recall': macro_avg_recall, 'f1-score': macro_avg_f1}
    classification_scores['weighted avg'] = {'precision': weighted_avg_precision, 'recall': weighted_avg_recall, 'f1-score': weighted_avg_f1}
    return classification_scores
def compute_metrics(eval_pred):
    logits, labels = eval_pred
    predictions = np.argmax(logits, axis=-1)
    # sklearn metrics take (y_true, y_pred) in that order; the weighted averages
    # depend on it, since weights come from the support of y_true.
    f1 = f1_score(labels, predictions, average='weighted')
    recall_val = recall_score(labels, predictions, average='weighted')
    precision_val = precision_score(labels, predictions, average='weighted')
    accuracy_val = accuracy_score(labels, predictions)
    cr = classification_report(labels, predictions)
    cohen_kappa_val = cohen_kappa_score(labels, predictions)
    return {'f1_score': f1,
            'recall_score': recall_val,
            'precision_score': precision_val,
            'accuracy_score': accuracy_val,
            'classification_report': cr,
            'cohen_kappa': cohen_kappa_val}
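# Trainer prefixes these keys with 'eval_', so metric_for_best_model='f1_score'
# below resolves to the logged 'eval_f1_score'.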
train_arg = TrainingArguments(
    output_dir='./results/{}'.format(model_id),
    num_train_epochs=3,
    per_device_train_batch_size=12,
    per_device_eval_batch_size=12,
    logging_dir='./logs/{}'.format(model_id),
    use_mps_device=False,
    gradient_accumulation_steps=4,
    seed=42,
    learning_rate=1e-5,
    eval_steps=50,
    save_strategy='steps',
    save_steps=50,
    evaluation_strategy='steps',
    load_best_model_at_end=True,  # reload the best checkpoint (by eval_f1_score) when training ends
    metric_for_best_model='f1_score',
    report_to='tensorboard',
)
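# With per_device_train_batch_size=12 and gradient_accumulation_steps=4, the
# effective train batch size is 12 * 4 = 48 per device.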
logger.info(train_arg)
trainer = Trainer(
    model=model,
    args=train_arg,
    train_dataset=train_dataset,
    eval_dataset=test_dataset,
    compute_metrics=compute_metrics
)
logger.info('Training in progress')
trainer.train()
logger.info('Finished training')
logger.info('Evaluation in progress')
trainer.evaluate()
logger.info('Finished evaluating')
model.save_pretrained('./model_directory/{}'.format(model_id))
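# Saving the tokenizer alongside the model keeps the export self-contained
# (optional; the tokenizer is unchanged from the base checkpoint, so this is
# purely for reloading convenience).
tokenizer.save_pretrained('./model_directory/{}'.format(model_id))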
### prediction
predictions = trainer.predict(test_dataset)
predicted_labels = [model.config.id2label[pred] for pred in predictions.predictions.argmax(axis=1)]
true_labels = test_dataset['label']
true_labels_str = [model.config.id2label[label] for label in true_labels]
# Pin the label order so target_names line up with the report rows
# (classification_report otherwise sorts the labels alphabetically).
classification_rep = classification_report(true_labels_str, predicted_labels,
                                           labels=list(label_dict.keys()),
                                           target_names=list(label_dict.keys()))
cohen_kappa_val = cohen_kappa_score(true_labels_str, predicted_labels)
print(classification_rep)
print(cohen_kappa_val)
print('classification_metrics------------------',
      classification_metrics(y_true=true_labels,
                             y_pred=[label_dict[label] for label in predicted_labels],
                             labels=list(label_dict.values())))
def conversation_to_text(conv_str):
    """Parse a stringified conversation and flatten it to 'role: content ...' text."""
    try:
        conv = ast.literal_eval(conv_str)
        return " ".join(f"{msg['role']}: {msg['content']}" for msg in conv)
    except Exception as e:
        logger.error("Error parsing conversation: %s", e)
        return ""
test_texts = [conversation_to_text(example["conversations"]) for example in test_dataset]
output_data = {
    'text': test_texts,
    'ground_truth': true_labels_str,
    'pred_label': predicted_labels
}
print(predicted_labels)
output_df = pd.DataFrame(output_data)
output_df.to_csv('./results/roberta_predictions.csv', index=False, encoding='utf-8')
print('Saved successfully!')
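# Illustrative sketch, assuming the in-memory `model`/`tokenizer` trained above:
# classify a single raw conversation string without building a Dataset.
def predict_single(conv_str):
    text = conversation_to_text(conv_str)
    inputs = tokenizer(text, max_length=512, truncation=True, return_tensors='pt')
    inputs = {k: v.to(model.device) for k, v in inputs.items()}
    with torch.no_grad():
        logits = model(**inputs).logits
    return model.config.id2label[int(logits.argmax(dim=-1))]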