-
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathbatch_evaluate.py
More file actions
86 lines (72 loc) · 2.81 KB
/
batch_evaluate.py
File metadata and controls
86 lines (72 loc) · 2.81 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
from copy import deepcopy
import json
from pathlib import Path
import tqdm
import yaml
import evaluate
def load_yaml(path: Path):
    """Parse a YAML file at *path* and return the deserialized contents."""
    with open(path, "r") as handle:
        return yaml.safe_load(handle)
def load_json(path: Path):
    """Read the JSON file at *path* and return the deserialized contents."""
    with open(path, "r") as handle:
        return json.load(handle)
def batch_evaluate(splits=None, min_scores=None, save_test_images=True) -> None:
    """Evaluate every successful run of the listed Ray Tune experiments.

    For each run folder the saved ``args.yaml`` is merged onto the
    evaluator's argparse defaults, and ``evaluate.evaluate`` is called once
    per (min_score, split) combination. Afterwards each run is checked for
    the expected metrics/details JSON files and missing ones are reported.

    Args:
        splits: Dataset splits to evaluate. Defaults to ``["val", "test"]``.
        min_scores: Detection score thresholds to evaluate at.
            Defaults to ``[0.3, 0.5, 0.7, 0.9]``.
        save_test_images: When True, additionally save 2D/3D visualisations
            for the "test" split at the 0.7 and 0.9 thresholds.
    """
    # None sentinels avoid the mutable-default-argument pitfall while
    # keeping the original call signature and default behaviour.
    if splits is None:
        splits = ["val", "test"]
    if min_scores is None:
        min_scores = [0.3, 0.5, 0.7, 0.9]
    dataset_dir = Path("../../data/fleckenzwerg_dataset_imagepose_cleaned").resolve()
    ray_results_dir = Path("./ray_results")
    experiments = [
        "DatasetVersions",
        "HeadNetworks",
        "LossFunctions",
        "Augmentations",
        "Longer",
    ]
    # Collect run folders per experiment, skipping crashed trials
    # (presumably Ray writes error.txt into failed trial folders).
    runs = {
        name: [
            run_dir
            for run_dir in (ray_results_dir / name).iterdir()
            if run_dir.is_dir()
            and not (run_dir / "error.txt").is_file()
            # and (not (run_dir / f"val_0.9_metrics.json").is_file() or "val" not in splits) # Skip already evaluated.
            # and (not (run_dir / f"test_0.9_metrics.json").is_file() or "test" not in splits) # Skip already evaluated.
        ]
        for name in experiments
    }
    # Running the parser once with dummy dataset path to get the defaults.
    defaults = evaluate.parser.parse_args(["dummy"]).__dict__
    # Build one independent args dict per (run, min_score, split) combination.
    args_list = []
    for experiment, folders in runs.items():
        for folder in folders:
            args = deepcopy(defaults)
            args.update(load_yaml(folder / "args.yaml"))  # Trial hyper-parameters.
            args["checkpoint"] = (folder / "last.pth.tar").as_posix()
            args["root"] = dataset_dir.as_posix()
            args["workers"] = 4
            args["batch_size"] = 16
            for min_score in min_scores:
                args_score = deepcopy(args)
                args_score["min_score"] = min_score
                for split in splits:
                    args_split = deepcopy(args_score)
                    args_split["split"] = split
                    # Visualisations only for the test split at the two
                    # strictest thresholds, to limit output volume.
                    if save_test_images and split == "test" and min_score in (0.7, 0.9):
                        args_split["save_vis2d"] = True
                        args_split["save_vis3d"] = True
                    args_list.append(args_split)
    for args in tqdm.tqdm(args_list, desc="Evaluating"):
        evaluate.evaluate(args)
    # Sanity check: warn about any run/split with missing result files.
    # NOTE: the original read `min_score` as a variable leaked from the loop
    # above (so it only ever checked the last threshold, and would raise
    # NameError for empty min_scores); now all requested thresholds are checked.
    for experiment, folders in runs.items():
        for folder in folders:
            for split in splits:
                if not all(
                    (folder / f"{split}_{min_score}_details.json").is_file()
                    and (folder / f"{split}_{min_score}_metrics.json").is_file()
                    for min_score in min_scores
                ):
                    print(
                        f"WARNING: {experiment} / {folder.name} / {split} did not evaluate!"
                    )
if __name__ == "__main__":
batch_evaluate(save_test_images=True)