forked from mmichelis/dense-object-nets-python3
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathmain.py
More file actions
126 lines (83 loc) · 4.9 KB
/
main.py
File metadata and controls
126 lines (83 loc) · 4.9 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
### This file replaces the Jupyter Notebooks that you could run.
# You can pick if you want to train/evaluate or not.
TRAIN = False
EVALUATE = True

import sys, os

# Make the project packages importable without installing them.
sys.path.append('modules')
sys.path.append('dense_correspondence/dataset')

### Set a few environment variables that would've been normally set in Docker
os.environ["DC_SOURCE_DIR"] = os.getcwd()
# Assuming you put the data dir in dense-correspondence/Data.
os.environ["DC_DATA_DIR"] = os.path.join(os.getcwd(), "Data", "pdc")

import dense_correspondence_manipulation.utils.utils as utils
utils.add_dense_correspondence_to_python_path()
from dense_correspondence.training.training import *
import logging

utils.set_default_cuda_visible_devices()
utils.set_cuda_visible_devices([0])  # use this to manually set CUDA_VISIBLE_DEVICES

from dense_correspondence.training.training import DenseCorrespondenceTraining
from dense_correspondence.dataset.spartan_dataset_masked import SpartanDataset

logging.basicConfig(level=logging.INFO)

from dense_correspondence.evaluation.evaluation import DenseCorrespondenceEvaluation

import numpy as np
np.random.seed(42)  # Even this doesn't help... It's absolute chaos random.

# Dataset configuration: which scenes/objects the dataset is built from.
config_filename = os.path.join(utils.getDenseCorrespondenceSourceDir(), 'config', 'dense_correspondence',
                               'dataset', 'composite', 'caterpillar_upright.yaml')
config = utils.getDictFromYamlFilename(config_filename)

# Training hyper-parameter configuration.
train_config_file = os.path.join(utils.getDenseCorrespondenceSourceDir(), 'config', 'dense_correspondence',
                                 'training', 'training.yaml')
train_config = utils.getDictFromYamlFilename(train_config_file)

dataset = SpartanDataset(config=config)

logging_dir = "trained_models/tutorials"
num_iterations = 3500
d = 3  # the descriptor dimension
name = "caterpillar_%d" % (d)

# Override the YAML defaults with the values chosen above.
train_config["training"]["logging_dir_name"] = name
train_config["training"]["logging_dir"] = logging_dir
train_config["dense_correspondence_network"]["descriptor_dimension"] = d
train_config["training"]["num_iterations"] = num_iterations

# All of the saved data for this network will be located in the
# code/data/pdc/trained_models/tutorials/caterpillar_3 folder
if TRAIN:
    # Restored from commented-out Python 2 print statements in the original.
    print("training descriptor of dimension %d" % (d))
    train = DenseCorrespondenceTraining(dataset=dataset, config=train_config)
    train.run()
    print("finished training descriptor of dimension %d" % (d))

model_folder = os.path.join(logging_dir, name)
model_folder = utils.convert_data_relative_path_to_absolute_path(model_folder)
if EVALUATE:
    # Quantitative evaluation: writes the analysis CSVs (train/test/cross_scene)
    # under the model folder, which the plotting below consumes.
    DCE = DenseCorrespondenceEvaluation
    num_image_pairs = 100
    DCE.run_evaluation_on_network(model_folder, num_image_pairs=num_image_pairs)

    # NOTE(review): indentation was lost in this copy of the file; the plotting
    # and qualitative evaluation below are assumed to belong inside the
    # EVALUATE branch since they consume the CSVs produced above — confirm
    # against the upstream notebook/script.
    from dense_correspondence.evaluation.evaluation import DenseCorrespondenceEvaluationPlotter as DCEP
    import matplotlib.pyplot as plt

    dc_data_dir = utils.get_data_dir()
    folder_name = "tutorials"
    net_to_plot = os.path.join(folder_name, "caterpillar_3")
    network_name = net_to_plot

    # One figure per analysis split. The original computed each CSV path twice
    # and plotted it twice with identical arguments (the second call merely
    # overlaid the same curves via previous_fig_axes); a single call per split
    # draws the same plot without the redundant work.
    for split, out_file in (("train", "Training.png"),
                            ("test", "Test.png"),
                            ("cross_scene", "CrossScene.png")):
        path_to_csv = os.path.join(dc_data_dir, "trained_models", network_name,
                                   "analysis/%s/data.csv" % split)
        fig_axes = DCEP.run_on_single_dataframe(path_to_csv, label=network_name, save=False)
        plt.savefig(out_file)

    # Qualitative evaluation: load the trained network and visualize
    # correspondences on randomly chosen image pairs.
    config_filename = os.path.join(utils.getDenseCorrespondenceSourceDir(), 'config',
                                   'dense_correspondence', 'evaluation', 'evaluation.yaml')
    config = utils.getDictFromYamlFilename(config_filename)
    default_config = utils.get_defaults_config()
    # utils.set_cuda_visible_devices([0])

    dce = DenseCorrespondenceEvaluation(config)
    DCE = DenseCorrespondenceEvaluation
    network_name = "caterpillar_3"
    dcn = dce.load_network_from_config(network_name)
    dataset = dcn.load_training_dataset()
    DenseCorrespondenceEvaluation.evaluate_network_qualitative(dcn, dataset=dataset, randomize=True)