train.py
from pytorch_lightning import seed_everything, Trainer
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.profiler import PyTorchProfiler
from sparse_vae import *
from hparam_presets import hparam_presets
from omegaconf import OmegaConf
from warnings import filterwarnings
import sys
import torch


def main(args):
    model_str = args[1]
    seed_everything(7295)  # Reproducibility

    config = OmegaConf.create({
        # Override Trainer defaults but still allow them to be overridden by the command line
        'trainer': {
            'accumulate_grad_batches': 2,
            'checkpoint_callback': False,
            'precision': 16
        }
    })
    filterwarnings('ignore', category=UserWarning, module='torch')

    hparam_class = None
    model_class = None
    experiment = None
    if model_str == 'lstm-vae':
        hparam_class = LSTMVAEHparams
        model_class = LSTMVAE
        experiment = 'lstm-vae'
    elif model_str == 'lstm-lm':
        hparam_class = LSTMLanguageModelHparams
        model_class = LSTMLanguageModel
        experiment = 'lstm-lm'
    elif model_str == 'transformer-lm':
        hparam_class = TransformerHparams
        model_class = TransformerLanguageModel
        experiment = 'transformer-lm'
    elif model_str == 'transformer-vae':
        hparam_class = TransformerVAEHparams
        model_class = TransformerVAE
        experiment = 'transformer-vae'
    else:
        print(f"Unrecognized model type '{model_str}'.")
        exit(1)

    # config.data = OmegaConf.structured(TextDataModuleHparams)
    config.model = OmegaConf.structured(hparam_class)
    config.merge_with_dotlist(args[2:])

    if preset := config.get('preset'):
        preset_config = hparam_presets.get(preset)
        assert preset_config, f"Preset name '{preset}' not recognized."
        config.merge_with(preset_config)

    if torch.cuda.is_available() and 'gpus' not in config.trainer:
        config.trainer.gpus = [select_best_gpu()]

    if config.get('anomaly_detection'):
        torch.autograd.set_detect_anomaly(True)

    print(f"Training {experiment}...")
    if ckpt_name := config.get('from_checkpoint'):
        config.trainer.resume_from_checkpoint = str(get_checkpoint_path_for_name(experiment, ckpt_name))

    model = model_class(config.model)
    data = TextDataModule(**config.get('data', {}))

    if config.get('fp16_weights'):
        torch.set_default_dtype(torch.float16)

    if config.get('no_log'):
        logger = False
    else:
        logger = TensorBoardLogger(
            save_dir='sparse-vae-logs',
            name=experiment,
            version=config.get('name')
        )

    # profiler = PyTorchProfiler(
    #     profile_memory=True,
    #     sort_by_key='cuda_memory_usage',
    #     use_cuda=True
    # ) if config.get('profile') else None

    trainer = Trainer(**config.trainer, logger=logger)
    trainer.fit(model, datamodule=data)


if __name__ == "__main__":
    main(sys.argv)
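
Usage note (inferred from how main() parses its arguments; the file itself does not document this): the first positional argument selects the model type (lstm-vae, lstm-lm, transformer-lm, or transformer-vae), and any remaining arguments are merged into the config as OmegaConf dot-list overrides, e.g. python train.py transformer-vae trainer.max_epochs=10 no_log=true. The exact override keys available under model depend on the hparam classes exported by sparse_vae and on the presets in hparam_presets.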