-
Notifications
You must be signed in to change notification settings - Fork 5
Expand file tree
/
Copy pathdiffmodeler_train.json
More file actions
66 lines (64 loc) · 2.03 KB
/
diffmodeler_train.json
File metadata and controls
66 lines (64 loc) · 2.03 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
{
"phase": "train",
"data": {
"input_channel": [0],
"output_channel":[0] //extend these channel lists if you have multi-channel training examples
},
"model": {
"path": "best_model/diffusion_best.pth.tar",
"unet": {
"in_channel": 3,
"out_channel": 1,
"inner_channel": 32,
"norm_groups": 16,
"channel_multiplier": [
1,
2,
4,
8,
8 //may need adjusting if box_size is changed to 32.
],
"attn_res": [
8 //to disable attention, set this to any number NOT present in "channel_multiplier"
],
"res_blocks": 2,
"dropout": 0.2
},
"diffusion": {
"box_size": 64,
"stride": 32, //32 saves time; for better performance you can choose 16.
"channels": 1, //sample channel for denoising
"loss_type": "solo_dens", //has no effect: the loss type is already fixed in the released version
"conditional": true // true: conditional generation (super_resolution); false: unconditional generation
},
"beta_schedule": { // use manual beta_schedule for acceleration
"train": {
"n_timestep": 100,
"infer_clip":1
},
"val": {
"n_timestep": 100,
"infer_clip":1
}
}
},
"train": {
"num_workers": 4,
"batch_size": 16, //adjust based on your GPU memory; should not be smaller than 8. If the GPU is too small, consider the gradient-accumulation option.
"epoch": 30,
"clip_grad": 1,
"rand_seed": 888,
"portion": 0.8,
"save_checkpoint_freq": 1,
"print_freq": 200,
"optimizer": {
"type": "adam",
"lr": 1e-4,
"min_lr": 1e-6
}
},
"resume": {
"flag": false,
"path": "" //set this path and change "flag" to true to resume training
}
}