eval_rm.py (forked from LAION-AI/Open-Assistant)
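
"""Evaluate a reward model on a ranking dataset (SHP, HellaSwag, or HFDataset) and report the chosen metrics."""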
import argparse
from collections import defaultdict

import numpy as np
import torch
from model_training.custom_datasets.rank_datasets import HellaSwagDataset, HFDataset, SHPDataset
from model_training.custom_datasets.ranking_collator import RankingDataCollator
from model_training.metrics import RewardMetrics
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from transformers.trainer_utils import EvalPrediction
from utils import write_to_json

DATASETS = ["SHP", "Hellaswag", "HFdataset"]


def get_ranking_dataset(dataset, split):
    """Return the ranking dataset matching the given name."""
    dataset = dataset.lower()
    if dataset == "shp":
        return SHPDataset(split=split)
    elif dataset == "hellaswag":
        return HellaSwagDataset(split=split)
    elif dataset == "hfdataset":
        return HFDataset(split=split)
    else:
        raise ValueError(f"Invalid dataset name, available {DATASETS}")


def batch_inference(inputs, model):
    """Score one collated batch and wrap the logits and example ids as an EvalPrediction."""
    batch, cu_lens = inputs
    batch = {k: v.to(model.device) for k, v in batch.items()}
    logits = (
        model(
            input_ids=batch["input_ids"],
            attention_mask=batch["attention_mask"],
        )
        .logits.detach()
        .cpu()
        .numpy()
    )

    # cu_lens holds cumulative reply offsets: replies between two consecutive
    # offsets belong to the same example, so they share the same label id.
    labels = []
    for i, (s, e) in enumerate(zip(cu_lens[:-1], cu_lens[1:])):
        labels.extend([i] * (e - s))
    labels = np.array(labels).reshape(-1, 1)

    return EvalPrediction(predictions=logits.T, label_ids=labels.T)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Evaluate a reward model on a ranking dataset")
    parser.add_argument("--dataset", type=str, help="name of the evaluation dataset")
    parser.add_argument("--split", type=str, help="dataset splits separated by comma", default="train")
    parser.add_argument("--model", type=str, help="path or URL of the model")
    parser.add_argument("--metrics", type=str, help="metrics to evaluate, separated by comma", default="accuracy")
    parser.add_argument("--max_length", type=int, help="maximum input length", default=None)
    parser.add_argument("--batch_size", type=int, help="batch size", default=8)
    parser.add_argument("--device", type=str, help="device", default="cuda")
    args = parser.parse_args().__dict__

    if args.get("device") != "cpu":
        device = torch.device(args.get("device")) if torch.cuda.is_available() else torch.device("cpu")
    else:
        device = torch.device("cpu")

    model_name = args.get("model")
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForSequenceClassification.from_pretrained(model_name)
    model.eval()
    model.to(device)
    max_length = args.get("max_length") or model.config.max_position_embeddings

    splits = args.get("split").split(",")
    dataset = get_ranking_dataset(args.get("dataset"), split=splits)
    collate_fn = RankingDataCollator(tokenizer, max_length=max_length)
    dataloader = DataLoader(dataset, collate_fn=collate_fn, batch_size=args.get("batch_size"))

    metrics = args.get("metrics").split(",")
    compute_metrics = RewardMetrics(metrics)
    score_dict = defaultdict(float)
    for data in dataloader:
        eval_pred = batch_inference(data, model)
        results = compute_metrics(eval_pred)
        for metric in metrics:
            score_dict[metric] += results.get(metric)

    # Average each metric over the number of batches and round for reporting.
    score_dict = {k: str(round(v / len(dataloader), 3)) for k, v in score_dict.items()}
    results = {
        "model": model_name,
        "dataset": args.get("dataset"),
        "split": splits,
    }
    results.update(score_dict)
    print("RESULTS", results)
    write_to_json(f"rm-eval-{model_name.split('/')[-1]}-results.json", results)
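

# Example invocation (illustrative sketch only; the model name and split below are
# assumptions, not values prescribed by this script):
#
#   python eval_rm.py \
#       --dataset shp \
#       --split validation \
#       --model OpenAssistant/reward-model-deberta-v3-large-v2 \
#       --metrics accuracy \
#       --batch_size 8
#
# This prints the averaged metrics and writes them to
# rm-eval-reward-model-deberta-v3-large-v2-results.json.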