Tune config for 32GB GPU memory
@@ -7,7 +7,7 @@
 "out_dir": "./results",
 "device": "auto",
 "timesteps": 600,
-"batch_size": 32,
+"batch_size": 24,
 "seq_len": 128,
 "epochs": 10,
 "max_batches": 4000,
@@ -26,16 +26,16 @@
 "use_tanh_eps": false,
 "eps_scale": 1.0,
 "model_time_dim": 128,
-"model_hidden_dim": 768,
+"model_hidden_dim": 640,
 "model_num_layers": 2,
 "model_dropout": 0.1,
 "model_ff_mult": 2,
 "model_pos_dim": 64,
 "model_use_pos_embed": true,
 "backbone_type": "transformer",
-"transformer_num_layers": 4,
+"transformer_num_layers": 3,
 "transformer_nhead": 4,
-"transformer_ff_dim": 1024,
+"transformer_ff_dim": 768,
 "transformer_dropout": 0.1,
 "disc_mask_scale": 0.9,
 "cont_loss_weighting": "inv_std",
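For context, the four lowered knobs are exactly the ones that scale the dominant memory terms: activation memory grows with batch_size × seq_len × model_hidden_dim per layer, and parameter/optimizer memory with layer count and widths. The sketch below is a rough back-of-envelope estimator, not the training script's actual accounting; the fp32 assumption, the AdamW-style 4× parameter state, the per-layer activation constants, and the `estimate_gib` helper itself are all assumptions for illustration (only the config field values come from this commit). The absolute numbers omit the rest of the run's footprint, so the interesting output is the relative shrink.

```python
# Hypothetical sketch: compare the old and new config's dominant memory terms.
# Assumes fp32 tensors, AdamW-style optimizer state (weights + grads + two
# moment buffers ~= 4x params), and crude per-layer activation constants.

def estimate_gib(batch_size, seq_len, hidden_dim, num_layers, ff_dim, nhead):
    bytes_per = 4  # fp32

    # Per encoder layer: attention projections (4 * d^2) plus the
    # feed-forward block (2 * d * ff_dim); biases and norms ignored.
    params = num_layers * (4 * hidden_dim**2 + 2 * hidden_dim * ff_dim)
    optimizer_state = 4 * params * bytes_per

    # Dominant activations kept for backward: a handful of
    # (batch, seq, hidden) tensors per layer plus the
    # (batch, nhead, seq, seq) attention weights. Constants are a guess.
    activations = (num_layers * batch_size * seq_len
                   * (8 * hidden_dim + nhead * seq_len) * bytes_per)

    return (optimizer_state + activations) / 2**30

old = estimate_gib(batch_size=32, seq_len=128, hidden_dim=768,
                   num_layers=4, ff_dim=1024, nhead=4)
new = estimate_gib(batch_size=24, seq_len=128, hidden_dim=640,
                   num_layers=3, ff_dim=768, nhead=4)
print(f"old ~ {old:.2f} GiB, new ~ {new:.2f} GiB, ratio {new / old:.2f}x")
```

Under these assumptions the new config lands at roughly half the old footprint, which is the direction a 32GB-memory budget calls for; the untouched fields (seq_len, model_num_layers, dropout rates, and the rest) contribute either linearly or not at all to the dominant terms.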