Remove useless tests to speed up the aux_tests
Edresson committed Mar 10, 2022
1 parent 247da8e commit a436fe4
Showing 4 changed files with 17 additions and 16 deletions.
21 changes: 11 additions & 10 deletions tests/aux_tests/test_speaker_encoder_train.py
@@ -24,20 +24,21 @@ def run_test_train():

 config = SpeakerEncoderConfig(
     batch_size=4,
-    num_classes_in_batch=2,
+    num_classes_in_batch=4,
     num_utter_per_class=2,
-    eval_num_classes_in_batch=2,
+    eval_num_classes_in_batch=4,
     eval_num_utter_per_class=2,
     num_loader_workers=1,
-    epochs=2,
+    epochs=1,
     print_step=1,
-    save_step=1,
+    save_step=2,
     print_eval=True,
     run_eval=True,
     audio=BaseAudioConfig(num_mels=80),
 )
 config.audio.do_trim_silence = True
 config.audio.trim_db = 60
+config.loss = "ge2e"
 config.save_json(config_path)

 print(config)
@@ -72,14 +72,14 @@ def run_test_train():
 shutil.rmtree(continue_path)

 # test model with ge2e loss function
-config.loss = "ge2e"
-config.save_json(config_path)
-run_test_train()
+# config.loss = "ge2e"
+# config.save_json(config_path)
+# run_test_train()

 # test model with angleproto loss function
-config.loss = "angleproto"
-config.save_json(config_path)
-run_test_train()
+# config.loss = "angleproto"
+# config.save_json(config_path)
+# run_test_train()

 # test model with softmaxproto loss function
 config.loss = "softmaxproto"
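The new values keep the sampling structure of the speaker-encoder batch while shrinking the run: a GE2E-style batch is drawn as num_classes_in_batch speakers with num_utter_per_class utterances each. A minimal sketch of that scheme, assuming a simple random sampler (compose_batch and utterances_by_speaker are hypothetical names for illustration, not the TTS API):

import random

def compose_batch(utterances_by_speaker, num_classes_in_batch=4, num_utter_per_class=2):
    # Hypothetical GE2E-style sampler, not TTS code: draw
    # `num_classes_in_batch` speakers, then `num_utter_per_class`
    # utterances from each, so one batch here holds 4 * 2 = 8 samples.
    speakers = random.sample(sorted(utterances_by_speaker), num_classes_in_batch)
    batch = []
    for speaker in speakers:
        utterances = random.sample(utterances_by_speaker[speaker], num_utter_per_class)
        batch.extend((speaker, utterance) for utterance in utterances)
    return batch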
4 changes: 2 additions & 2 deletions tests/inputs/test_glow_tts.json
@@ -66,8 +66,8 @@
     "use_mas": false, // use Monotonic Alignment Search if true. Otherwise use pre-computed attention alignments.

     // TRAINING
-    "batch_size": 2, // Batch size for training. Lower values than 32 might cause hard to learn attention. It is overwritten by 'gradual_training'.
-    "eval_batch_size":1,
+    "batch_size": 8, // Batch size for training. Lower values than 32 might cause hard to learn attention. It is overwritten by 'gradual_training'.
+    "eval_batch_size": 8,
     "r": 1, // Number of decoder frames to predict per iteration. Set the initial values if gradual training is enabled.
     "loss_masking": true, // enable / disable loss masking against the sequence padding.
     "data_dep_init_iter": 1,
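Note that these test configs are JSON with // line comments, so a plain json.load would reject them. A minimal sketch of reading such a file, assuming no string value ever contains "//" (load_config_with_comments is a hypothetical helper, not the loader the project uses):

import json
import re

def load_config_with_comments(path):
    # Strip // line comments, then parse what remains as ordinary JSON.
    # This naive regex would also eat "//" inside string values, which
    # these test files happen not to contain.
    with open(path, "r", encoding="utf-8") as f:
        text = f.read()
    return json.loads(re.sub(r"//[^\n]*", "", text))

config = load_config_with_comments("tests/inputs/test_glow_tts.json")
assert config["batch_size"] == 8 and config["eval_batch_size"] == 8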
4 changes: 2 additions & 2 deletions tests/inputs/test_tacotron2_config.json
@@ -61,8 +61,8 @@
     "reinit_layers": [], // give a list of layer names to restore from the given checkpoint. If not defined, it reloads all heuristically matching layers.

     // TRAINING
-    "batch_size": 1, // Batch size for training. Lower values than 32 might cause hard to learn attention. It is overwritten by 'gradual_training'.
-    "eval_batch_size":1,
+    "batch_size": 8, // Batch size for training. Lower values than 32 might cause hard to learn attention. It is overwritten by 'gradual_training'.
+    "eval_batch_size": 8,
     "r": 7, // Number of decoder frames to predict per iteration. Set the initial values if gradual training is enabled.
     "gradual_training": [[0, 7, 4], [1, 5, 2]], //set gradual training steps [first_step, r, batch_size]. If it is null, gradual training is disabled. For Tacotron, you might need to reduce the 'batch_size' as you proceeed.
     "loss_masking": true, // enable / disable loss masking against the sequence padding.
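The gradual_training schedule above (also present in the Tacotron config below) is a list of [first_step, r, batch_size] entries; the last entry whose first_step has been reached is the one in effect. A minimal sketch of that lookup, as an illustration rather than the trainer's actual code:

def gradual_training_values(schedule, global_step):
    # Walk the schedule in order; the last entry whose first_step is
    # <= global_step supplies the current r and batch_size.
    r, batch_size = None, None
    for first_step, new_r, new_batch_size in schedule:
        if global_step >= first_step:
            r, batch_size = new_r, new_batch_size
    return r, batch_size

schedule = [[0, 7, 4], [1, 5, 2]]
assert gradual_training_values(schedule, 0) == (7, 4)    # start: r=7, batch_size=4
assert gradual_training_values(schedule, 1000) == (5, 2) # from step 1 on: r=5, batch_size=2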
4 changes: 2 additions & 2 deletions tests/inputs/test_tacotron_config.json
@@ -61,8 +61,8 @@
     "reinit_layers": [], // give a list of layer names to restore from the given checkpoint. If not defined, it reloads all heuristically matching layers.

     // TRAINING
-    "batch_size": 1, // Batch size for training. Lower values than 32 might cause hard to learn attention. It is overwritten by 'gradual_training'.
-    "eval_batch_size":1,
+    "batch_size": 8, // Batch size for training. Lower values than 32 might cause hard to learn attention. It is overwritten by 'gradual_training'.
+    "eval_batch_size": 8,
     "r": 7, // Number of decoder frames to predict per iteration. Set the initial values if gradual training is enabled.
     "gradual_training": [[0, 7, 4], [1, 5, 2]], //set gradual training steps [first_step, r, batch_size]. If it is null, gradual training is disabled. For Tacotron, you might need to reduce the 'batch_size' as you proceeed.
     "loss_masking": true, // enable / disable loss masking against the sequence padding.
