forked from jsiekier/SPN_LSM
-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathmain_analysis.py
More file actions
173 lines (124 loc) · 6.01 KB
/
main_analysis.py
File metadata and controls
173 lines (124 loc) · 6.01 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
import pickle as pkl
import argparse
from counterfactuals.analyse_model import analyse_pipeline
from end_to_end_train import load_dataset
from evaluation.counterfactual_evaluation import eval_counterfactuals, df_to_latex_summary, \
performance_to_latex_summary, cf_performance_to_latex_summary, z_eval_cf_performance_to_latex_summary, plot_cfs
import os
import pandas as pd
# Set the cache directory for TensorFlow Hub
os.environ["TFHUB_CACHE_DIR"] = "tfhub_modules"
def get_performance_metrics(model_names, fold_idxs, num_train_eval_runs=3, grid_idx=0, add_info=1, path=''):
    """Collect (or compute) classification/reconstruction metrics per model and emit a LaTeX summary.

    Results are cached as ``rec.csv`` / ``clf.csv`` inside each model's grid folder;
    if both exist they are loaded instead of re-running the analysis pipeline.
    NOTE(review): relies on module-level globals ``save_folder`` and ``dataset_name``
    being set by the ``__main__`` block before this is called.

    Args:
        model_names: iterable of model folder names under ``save_folder``.
        fold_idxs: cross-validation fold indices forwarded to ``analyse_pipeline``.
        num_train_eval_runs: number of train/eval repetitions per model.
        grid_idx: grid-search index selecting the ``grid<idx>`` sub-folder.
        add_info: extra flag forwarded to ``analyse_pipeline``.
        path: optional path forwarded to ``analyse_pipeline``.
    """
    all_results_clf = []
    all_results_rec = []
    all_params = {}
    for model_name in model_names:
        print(model_name)
        data_path_grid = save_folder + model_name + '/grid' + str(grid_idx) + '/'
        params_path = os.path.join(data_path_grid, 'grid_params.pkl')
        if not os.path.exists(params_path):
            print(f"Parameter file not found for {model_name}, skipping...")
            continue
        # Fix: the original `pkl.load(open(...))` leaked the file handle.
        with open(params_path, 'rb') as f:
            params = pkl.load(f)
        all_params[model_name] = params
        # Reuse cached results when both CSVs are present.
        rec_path = os.path.join(data_path_grid, "rec.csv")
        clf_path = os.path.join(data_path_grid, "clf.csv")
        if os.path.exists(rec_path) and os.path.exists(clf_path):
            print(f"Loading existing results for {model_name}")
            result_rec = pd.read_csv(rec_path).to_dict('records')
            result_clf = pd.read_csv(clf_path).to_dict('records')
        else:
            print(f"Running analysis for {model_name}")
            result_clf, result_rec = analyse_pipeline(dataset_name, params, num_train_eval_runs, data_path_grid,
                                                      add_info, fold_idxs, model_name, path=path)
            # Persist so subsequent runs can skip the (expensive) pipeline.
            pd.DataFrame(result_rec).to_csv(rec_path, index=False)
            pd.DataFrame(result_clf).to_csv(clf_path, index=False)
            print('Results saved')
        all_results_rec.extend(result_rec)
        all_results_clf.extend(result_clf)
    # Print aggregated metrics as a LaTeX table.
    performance_to_latex_summary(all_results_clf, all_results_rec, all_params)
def get_cf_metrics(model_names, possible_values, fold_idxs, grid_idx=0):
    """Evaluate counterfactuals per model and emit LaTeX summaries.

    Results are cached as ``cf_new.csv`` / ``z_eval_new.csv`` inside each model's
    grid folder; they are reused only when both files exist and ``cf_new.csv`` is
    non-empty (an empty file indicates an aborted previous run).
    NOTE(review): relies on module-level globals ``save_folder`` and ``dataset_name``
    being set by the ``__main__`` block before this is called.

    Args:
        model_names: iterable of model folder names under ``save_folder``.
        possible_values: candidate target values for counterfactual generation.
        fold_idxs: cross-validation fold indices forwarded to ``eval_counterfactuals``.
        grid_idx: grid-search index selecting the ``grid<idx>`` sub-folder
            (new, defaulted — matches ``get_performance_metrics``).
    """
    all_results = []
    all_results_z_eval = []
    all_params = {}
    for model_name in model_names:
        print(model_name)
        data_path_grid = save_folder + model_name + '/grid' + str(grid_idx) + '/'
        # Fix: the original `pkl.load(open(...))` leaked the file handle.
        with open(os.path.join(data_path_grid, 'grid_params.pkl'), 'rb') as f:
            params = pkl.load(f)
        train, test, num_classes = load_dataset(dataset_name, binary=False,
                                                load_net=params.load_pretrain_model,
                                                machine=params.machine, grid_params=params)
        cf_path = os.path.join(data_path_grid, "cf_new.csv")
        z_eval_path = os.path.join(data_path_grid, "z_eval_new.csv")
        # Fix: original checked only cf_path for existence but then read
        # z_eval_path as well; require both before attempting to load.
        reuse_cached = False
        if os.path.exists(cf_path) and os.path.exists(z_eval_path):
            with open(cf_path, "r") as f:
                # A first character that is only whitespace means the file is empty.
                reuse_cached = f.read(1).replace('\n', '').replace('\t', '') != ""
        if reuse_cached:
            print(f"Loading existing results for {model_name}")
            cf_result = pd.read_csv(cf_path).to_dict('records')
            results_z_eval = pd.read_csv(z_eval_path).to_dict('records')
        else:
            # Single analysis branch (originally duplicated for the
            # "missing file" and "empty file" cases).
            print(f"Running analysis for {model_name}")
            cf_result, results_z_eval = eval_counterfactuals(dataset_name, data_path_grid, model_name,
                                                             fold_idxs=fold_idxs,
                                                             possible_values=possible_values, test=test)
            pd.DataFrame(cf_result).to_csv(cf_path, index=False)
            pd.DataFrame(results_z_eval).to_csv(z_eval_path, index=False)
            print('Results saved')
        all_results.extend(cf_result)
        all_results_z_eval.extend(results_z_eval)
        all_params[model_name] = params
    cf_performance_to_latex_summary(all_results, all_params)
    z_eval_cf_performance_to_latex_summary(all_results_z_eval, all_params)
def plot_generated_cfs(model_names, possible_values, fold_idxs=(0,), grid_idx=0):
    """Plot generated counterfactuals for each model via ``plot_cfs``.

    Bug fix: the original referenced an undefined global ``fold_idx``
    (only ``fold_idxs`` exists in ``__main__``), which raised a NameError
    at call time. Folds are now an explicit, defaulted parameter, keeping
    the two-argument call signature backward-compatible. The unused
    ``grid_params.pkl`` load (which also leaked its file handle) was removed.
    NOTE(review): relies on module-level globals ``save_folder`` and
    ``dataset_name`` being set by the ``__main__`` block.

    Args:
        model_names: iterable of model folder names under ``save_folder``.
        possible_values: candidate target values forwarded to ``plot_cfs``.
        fold_idxs: fold indices to plot (new, defaulted to fold 0).
        grid_idx: grid-search index selecting the ``grid<idx>`` sub-folder.
    """
    for model_name in model_names:
        print(model_name)
        data_path_grid = save_folder + model_name + '/grid' + str(grid_idx) + '/'
        plot_cfs(dataset_name,
                 data_path_grid,
                 model_name,
                 fold_idxs=list(fold_idxs),
                 possible_values=possible_values)
if __name__ == '__main__':
    # Parse the command-line arguments.
    parser = argparse.ArgumentParser(description="Process a file path.")
    parser.add_argument(
        "--filepath",
        type=str,
        default='test_model',
        help="Path to the input file."
    )
    args = parser.parse_args()

    # Module-level configuration read by the analysis functions above.
    dataset_name = 'chexpert'
    fold_idxs = list(range(3))
    save_folder = 'cnn_spn_models/'
    # Bug fix: the parsed --filepath was previously assigned to an unused
    # variable while model_names was hard-coded to ['test_model'], silently
    # ignoring the CLI argument. The default value preserves old behavior.
    model_names = [args.filepath]

    get_performance_metrics(model_names, fold_idxs, path='')
    possible_values = [0, 1]
    get_cf_metrics(model_names, possible_values, fold_idxs)
    plot_generated_cfs(model_names, possible_values)