From ff274a5d4358eb34b5d023552350d32fb8bab57f Mon Sep 17 00:00:00 2001 From: aarora8 Date: Sun, 17 Nov 2019 15:17:19 -0500 Subject: [PATCH 1/2] 'Adding chime6 with paderborn gss' --- egs/chime5/s5b/local/nnet3/compare_wer.sh | 1 + egs/chime5/s5b/local/nnet3/decode.sh | 4 +- egs/chime5/s5b/local/run_recog.sh | 62 ++- egs/chime5/s5b/local/run_wpe.sh | 3 +- egs/chime6/s5_track1/RESULTS | 21 + egs/chime6/s5_track1/cmd.sh | 15 + egs/chime6/s5_track1/conf/beamformit.cfg | 50 +++ egs/chime6/s5_track1/conf/mfcc.conf | 2 + egs/chime6/s5_track1/conf/mfcc_hires.conf | 10 + egs/chime6/s5_track1/conf/online_cmvn.conf | 1 + egs/chime6/s5_track1/conf/queue.conf | 10 + .../s5_track1/local/chain/compare_wer.sh | 131 ++++++ .../local/chain/multi_condition/run_tdnn.sh | 1 + .../chain/multi_condition/run_tdnn_lstm.sh | 1 + .../tuning/run_cnn_tdnn_lstm_1a.sh | 329 +++++++++++++++ .../multi_condition/tuning/run_tdnn_1a.sh | 286 +++++++++++++ .../multi_condition/tuning/run_tdnn_1b.sh | 311 +++++++++++++++ .../tuning/run_tdnn_lstm_1a.sh | 323 +++++++++++++++ egs/chime6/s5_track1/local/chain/run_tdnn.sh | 1 + .../chain/tuning/run_cnn_tdnn_lstm_1a.sh | 304 ++++++++++++++ .../local/chain/tuning/run_tdnn_1a.sh | 270 +++++++++++++ .../local/chain/tuning/run_tdnn_1b.sh | 247 ++++++++++++ .../local/chain/tuning/run_tdnn_lstm_1a.sh | 297 ++++++++++++++ egs/chime6/s5_track1/local/check_tools.sh | 71 ++++ .../s5_track1/local/copy_lat_dir_parallel.sh | 97 +++++ egs/chime6/s5_track1/local/distant_audio_list | 376 ++++++++++++++++++ egs/chime6/s5_track1/local/extract_noises.py | 83 ++++ .../s5_track1/local/extract_vad_weights.sh | 86 ++++ egs/chime6/s5_track1/local/get_location.py | 52 +++ .../s5_track1/local/install_pb_chime5.sh | 21 + egs/chime6/s5_track1/local/json2text.py | 88 ++++ egs/chime6/s5_track1/local/make_noise_list.py | 17 + .../s5_track1/local/nnet3/compare_wer.sh | 132 ++++++ egs/chime6/s5_track1/local/nnet3/decode.sh | 164 ++++++++ .../multi_condition/run_ivector_common.sh | 194 +++++++++ .../local/nnet3/run_ivector_common.sh | 151 +++++++ egs/chime6/s5_track1/local/prepare_data.sh | 151 +++++++ egs/chime6/s5_track1/local/prepare_dict.sh | 124 ++++++ egs/chime6/s5_track1/local/replace_uttid.py | 20 + .../s5_track1/local/reverberate_lat_dir.sh | 93 +++++ egs/chime6/s5_track1/local/run_beamformit.sh | 87 ++++ egs/chime6/s5_track1/local/run_gss.sh | 54 +++ egs/chime6/s5_track1/local/run_recog.sh | 192 +++++++++ egs/chime6/s5_track1/local/run_wpe.py | 59 +++ egs/chime6/s5_track1/local/run_wpe.sh | 86 ++++ egs/chime6/s5_track1/local/score.sh | 1 + .../s5_track1/local/score_for_submit.sh | 123 ++++++ egs/chime6/s5_track1/local/train_lms_srilm.sh | 261 ++++++++++++ egs/chime6/s5_track1/local/wer_output_filter | 25 ++ egs/chime6/s5_track1/local/worn_audio_list | 64 +++ egs/chime6/s5_track1/path.sh | 7 + egs/chime6/s5_track1/run.sh | 329 +++++++++++++++ egs/chime6/s5_track1/steps | 1 + egs/chime6/s5_track1/utils | 1 + 54 files changed, 5853 insertions(+), 37 deletions(-) mode change 100755 => 100644 egs/chime5/s5b/local/nnet3/compare_wer.sh create mode 100644 egs/chime6/s5_track1/RESULTS create mode 100644 egs/chime6/s5_track1/cmd.sh create mode 100755 egs/chime6/s5_track1/conf/beamformit.cfg create mode 100644 egs/chime6/s5_track1/conf/mfcc.conf create mode 100644 egs/chime6/s5_track1/conf/mfcc_hires.conf create mode 100644 egs/chime6/s5_track1/conf/online_cmvn.conf create mode 100644 egs/chime6/s5_track1/conf/queue.conf create mode 100755 egs/chime6/s5_track1/local/chain/compare_wer.sh create mode 120000 
egs/chime6/s5_track1/local/chain/multi_condition/run_tdnn.sh create mode 120000 egs/chime6/s5_track1/local/chain/multi_condition/run_tdnn_lstm.sh create mode 100755 egs/chime6/s5_track1/local/chain/multi_condition/tuning/run_cnn_tdnn_lstm_1a.sh create mode 100755 egs/chime6/s5_track1/local/chain/multi_condition/tuning/run_tdnn_1a.sh create mode 100755 egs/chime6/s5_track1/local/chain/multi_condition/tuning/run_tdnn_1b.sh create mode 100755 egs/chime6/s5_track1/local/chain/multi_condition/tuning/run_tdnn_lstm_1a.sh create mode 120000 egs/chime6/s5_track1/local/chain/run_tdnn.sh create mode 100755 egs/chime6/s5_track1/local/chain/tuning/run_cnn_tdnn_lstm_1a.sh create mode 100755 egs/chime6/s5_track1/local/chain/tuning/run_tdnn_1a.sh create mode 100755 egs/chime6/s5_track1/local/chain/tuning/run_tdnn_1b.sh create mode 100755 egs/chime6/s5_track1/local/chain/tuning/run_tdnn_lstm_1a.sh create mode 100755 egs/chime6/s5_track1/local/check_tools.sh create mode 100755 egs/chime6/s5_track1/local/copy_lat_dir_parallel.sh create mode 100644 egs/chime6/s5_track1/local/distant_audio_list create mode 100755 egs/chime6/s5_track1/local/extract_noises.py create mode 100755 egs/chime6/s5_track1/local/extract_vad_weights.sh create mode 100755 egs/chime6/s5_track1/local/get_location.py create mode 100755 egs/chime6/s5_track1/local/install_pb_chime5.sh create mode 100755 egs/chime6/s5_track1/local/json2text.py create mode 100755 egs/chime6/s5_track1/local/make_noise_list.py create mode 100755 egs/chime6/s5_track1/local/nnet3/compare_wer.sh create mode 100755 egs/chime6/s5_track1/local/nnet3/decode.sh create mode 100755 egs/chime6/s5_track1/local/nnet3/multi_condition/run_ivector_common.sh create mode 100755 egs/chime6/s5_track1/local/nnet3/run_ivector_common.sh create mode 100755 egs/chime6/s5_track1/local/prepare_data.sh create mode 100755 egs/chime6/s5_track1/local/prepare_dict.sh create mode 100755 egs/chime6/s5_track1/local/replace_uttid.py create mode 100755 egs/chime6/s5_track1/local/reverberate_lat_dir.sh create mode 100755 egs/chime6/s5_track1/local/run_beamformit.sh create mode 100755 egs/chime6/s5_track1/local/run_gss.sh create mode 100755 egs/chime6/s5_track1/local/run_recog.sh create mode 100755 egs/chime6/s5_track1/local/run_wpe.py create mode 100755 egs/chime6/s5_track1/local/run_wpe.sh create mode 120000 egs/chime6/s5_track1/local/score.sh create mode 100755 egs/chime6/s5_track1/local/score_for_submit.sh create mode 100755 egs/chime6/s5_track1/local/train_lms_srilm.sh create mode 100755 egs/chime6/s5_track1/local/wer_output_filter create mode 100644 egs/chime6/s5_track1/local/worn_audio_list create mode 100644 egs/chime6/s5_track1/path.sh create mode 100755 egs/chime6/s5_track1/run.sh create mode 120000 egs/chime6/s5_track1/steps create mode 120000 egs/chime6/s5_track1/utils diff --git a/egs/chime5/s5b/local/nnet3/compare_wer.sh b/egs/chime5/s5b/local/nnet3/compare_wer.sh old mode 100755 new mode 100644 index 095e85cc338..fa627acd27b --- a/egs/chime5/s5b/local/nnet3/compare_wer.sh +++ b/egs/chime5/s5b/local/nnet3/compare_wer.sh @@ -130,3 +130,4 @@ done echo echo + diff --git a/egs/chime5/s5b/local/nnet3/decode.sh b/egs/chime5/s5b/local/nnet3/decode.sh index 7af09f36a13..8fa54e0d4a6 100755 --- a/egs/chime5/s5b/local/nnet3/decode.sh +++ b/egs/chime5/s5b/local/nnet3/decode.sh @@ -35,6 +35,8 @@ post_decode_acwt=1.0 # important to change this when using chain models extra_left_context_initial=0 extra_right_context_final=0 +graph_affix= + score_opts="--min-lmwt 6 --max-lmwt 13" . 
./cmd.sh @@ -94,7 +96,7 @@ if [ $stage -le 2 ]; then fi fi -decode_dir=$dir/decode_${data_set}${affix} +decode_dir=$dir/decode${graph_affix}_${data_set}${affix} # generate the lattices if [ $stage -le 3 ]; then echo "Generating lattices, stage 1" diff --git a/egs/chime5/s5b/local/run_recog.sh b/egs/chime5/s5b/local/run_recog.sh index 5c74c9ff242..989a5f95d01 100755 --- a/egs/chime5/s5b/local/run_recog.sh +++ b/egs/chime5/s5b/local/run_recog.sh @@ -28,8 +28,8 @@ json_dir=${chime5_corpus}/transcriptions audio_dir=${chime5_corpus}/audio # training and test data -train_set=train_worn_u100k -test_sets="eval_${enhancement}_ref" +train_set=train_worn_simu_u400k +test_sets="eval_${enhancement}_dereverb_ref" # This script also needs the phonetisaurus g2p, srilm, beamformit ./local/check_tools.sh || exit 1 @@ -38,18 +38,27 @@ if [ $stage -le 4 ]; then # Beamforming using reference arrays # enhanced WAV directory enhandir=enhan + dereverb_dir=${PWD}/wav/wpe/ for dset in eval; do for mictype in u01 u02 u03 u04 u05 u06; do - local/run_beamformit.sh --cmd "$train_cmd" \ + local/run_wpe.sh --nj 4 --cmd "$train_cmd --mem 120G" \ ${audio_dir}/${dset} \ + ${dereverb_dir}/${dset} \ + ${mictype} + done + done + for dset in dev eval; do + for mictype in u01 u02 u03 u04 u05 u06; do + local/run_beamformit.sh --cmd "$train_cmd" \ + ${dereverb_dir}/${dset} \ ${enhandir}/${dset}_${enhancement}_${mictype} \ ${mictype} done done - + for dset in eval; do local/prepare_data.sh --mictype ref "$PWD/${enhandir}/${dset}_${enhancement}_u0*" \ - ${json_dir}/${dset} data/${dset}_${enhancement}_ref + ${json_dir}/${dset} data/${dset}_${enhancement}_dereverb_ref done fi @@ -92,28 +101,13 @@ if [ $stage -le 7 ]; then done fi -if [ $stage -le 17 ]; then - nnet3_affix=_${train_set}_cleaned - for datadir in ${test_sets}; do - utils/copy_data_dir.sh data/$datadir data/${datadir}_hires - done - for datadir in ${test_sets}; do - steps/make_mfcc.sh --nj 20 --mfcc-config conf/mfcc_hires.conf \ - --cmd "$train_cmd" data/${datadir}_hires || exit 1; - steps/compute_cmvn_stats.sh data/${datadir}_hires || exit 1; - utils/fix_data_dir.sh data/${datadir}_hires || exit 1; - done - for data in $test_sets; do - steps/online/nnet2/extract_ivectors_online.sh --cmd "$train_cmd" --nj 20 \ - data/${data}_hires exp/nnet3${nnet3_affix}/extractor \ - exp/nnet3${nnet3_affix}/ivectors_${data}_hires - done -fi +nnet3_affix=_${train_set}_cleaned_rvb + +lm_suffix= if [ $stage -le 18 ]; then # First the options that are passed through to run_ivector_common.sh # (some of which are also used in this script directly). - lm_suffix= # The rest are configs specific to this script. Most of the parameters # are just hardcoded at this level, in the commands below. 
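# Note (illustrative, not from the patch): the decode.sh change above threads the new
# graph_affix option into the decode directory name, so decodes against different
# graphs/LMs no longer collide. With the empty lm_suffix used here, the two-stage
# decode in the hunk below writes to directories such as
#   exp/chain${nnet3_affix}/tdnn1b_sp/decode_dev_${enhancement}_dereverb_ref_2stage
# while a non-empty graph affix (e.g. a hypothetical _rescore) would instead give
#   .../decode_rescore_dev_${enhancement}_dereverb_ref_2stage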
@@ -138,16 +132,14 @@ if [ $stage -le 18 ]; then for data in $test_sets; do ( - steps/nnet3/decode.sh \ - --acwt 1.0 --post-decode-acwt 10.0 \ - --extra-left-context $chunk_left_context \ - --extra-right-context $chunk_right_context \ - --extra-left-context-initial 0 \ - --extra-right-context-final 0 \ - --frames-per-chunk $frames_per_chunk \ - --nj 8 --cmd "$decode_cmd" --num-threads 4 \ - --online-ivector-dir exp/nnet3${nnet3_affix}/ivectors_${data}_hires \ - $tree_dir/graph${lm_suffix} data/${data}_hires ${dir}/decode${lm_suffix}_${data} || exit 1 + local/nnet3/decode.sh --affix 2stage --pass2-decode-opts "--min-active 1000" \ + --acwt 1.0 --post-decode-acwt 10.0 \ + --frames-per-chunk 150 --nj $decode_nj \ + --ivector-dir exp/nnet3${nnet3_affix} \ + --graph-affix ${lm_suffix} \ + data/${data} data/lang${lm_suffix} \ + $tree_dir/graph${lm_suffix} \ + exp/chain${nnet3_affix}/tdnn1b_sp ) || touch $dir/.error & done wait @@ -159,6 +151,6 @@ if [ $stage -le 20 ]; then # please specify both dev and eval set directories so that the search parameters # (insertion penalty and language model weight) will be tuned using the dev set local/score_for_submit.sh \ - --dev exp/chain_${train_set}_cleaned/tdnn1a_sp/decode_dev_${enhancement}_ref \ - --eval exp/chain_${train_set}_cleaned/tdnn1a_sp/decode_eval_${enhancement}_ref + --dev exp/chain${nnet3_affix}/tdnn1b_sp/decode${lm_suffix}_dev_${enhancement}_dereverb_ref_2stage \ + --eval exp/chain${nnet3_affix}/tdnn1b_sp/decode${lm_suffix}_eval_${enhancement}_dereverb_ref_2stage fi diff --git a/egs/chime5/s5b/local/run_wpe.sh b/egs/chime5/s5b/local/run_wpe.sh index 1c4b1c80291..ed512e69aae 100755 --- a/egs/chime5/s5b/local/run_wpe.sh +++ b/egs/chime5/s5b/local/run_wpe.sh @@ -33,7 +33,8 @@ set -o pipefail miniconda_dir=$HOME/miniconda3/ if [ ! -d $miniconda_dir ]; then - echo "$miniconda_dir does not exist. Please run '../../../tools/extras/install_miniconda.sh' and '../../../tools/extras/install_wpe.sh';" + echo "$miniconda_dir does not exist. Please run '$KALDI_ROOT/tools/extras/install_miniconda.sh'." 
+ exit 1 fi # check if WPE is installed diff --git a/egs/chime6/s5_track1/RESULTS b/egs/chime6/s5_track1/RESULTS new file mode 100644 index 00000000000..00cc98401bc --- /dev/null +++ b/egs/chime6/s5_track1/RESULTS @@ -0,0 +1,21 @@ + +# tri2 +%WER 88.52 [ 52121 / 58881, 2023 ins, 30285 del, 19813 sub ] exp/tri2/decode_dev_gss/wer_17_0.5 + +# tri3 +%WER 85.72 [ 50471 / 58881, 3079 ins, 23787 del, 23605 sub ] exp/tri3/decode_dev_gss/wer_17_0.5 + +# nnet3 tdnn+chain +%WER 41.21 [ 24267 / 58881, 2428 ins, 7606 del, 14233 sub ] exp/chain_train_worn_simu_u400k_cleaned_rvb/tdnn1b_sp/decode_dev_worn_2stage/wer_11_0.0 +%WER 62.30 [ 36684 / 58881, 3048 ins, 17964 del, 15672 sub ] exp/chain_train_worn_simu_u400k_cleaned_rvb/tdnn1b_sp/decode_dev_gss_2stage/wer_11_0.0 + +# result with the challenge submission format (Nov 17, 2019) +# after the fix of speaker ID across arrays +==== development set ==== +session S02 room dining: #words 8288, #errors 5106, wer 61.60 % +session S02 room kitchen: #words 12696, #errors 9177, wer 72.28 % +session S02 room living: #words 15460, #errors 8388, wer 54.25 % +session S09 room dining: #words 5766, #errors 3821, wer 66.26 % +session S09 room kitchen: #words 8911, #errors 5594, wer 62.77 % +session S09 room living: #words 7760, #errors 4598, wer 59.25 % +overall: #words 58881, #errors 36684, wer 62.30 % diff --git a/egs/chime6/s5_track1/cmd.sh b/egs/chime6/s5_track1/cmd.sh new file mode 100644 index 00000000000..9702501f1a7 --- /dev/null +++ b/egs/chime6/s5_track1/cmd.sh @@ -0,0 +1,15 @@ +# you can change cmd.sh depending on what type of queue you are using. +# If you have no queueing system and want to run on a local machine, you +# can change all instances 'queue.pl' to run.pl (but be careful and run +# commands one by one: most recipes will exhaust the memory on your +# machine). queue.pl works with GridEngine (qsub). slurm.pl works +# with slurm. Different queues are configured differently, with different +# queue names and different ways of specifying things like memory; +# to account for these differences you can create and edit the file +# conf/queue.conf to match your queue's configuration. Search for +# conf/queue.conf in http://kaldi-asr.org/doc/queue.html for more information, +# or search for the string 'default_config' in utils/queue.pl or utils/slurm.pl. 
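# Illustrative alternatives (not part of the recipe): on a single machine without a
# grid engine the same file could read, for example,
#   export train_cmd="run.pl --mem 2G"
#   export decode_cmd="run.pl --mem 4G"
# and on a SLURM cluster
#   export train_cmd="slurm.pl --mem 2G"
#   export decode_cmd="slurm.pl --mem 4G"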
+ +export train_cmd="retry.pl queue.pl --mem 2G" +export decode_cmd="queue.pl --mem 4G" + diff --git a/egs/chime6/s5_track1/conf/beamformit.cfg b/egs/chime6/s5_track1/conf/beamformit.cfg new file mode 100755 index 00000000000..70fdd858651 --- /dev/null +++ b/egs/chime6/s5_track1/conf/beamformit.cfg @@ -0,0 +1,50 @@ +#BeamformIt sample configuration file for AMI data (http://groups.inf.ed.ac.uk/ami/download/) + +# scrolling size to compute the delays +scroll_size = 250 + +# cross correlation computation window size +window_size = 500 + +#amount of maximum points for the xcorrelation taken into account +nbest_amount = 4 + +#flag wether to apply an automatic noise thresholding +do_noise_threshold = 1 + +#Percentage of frames with lower xcorr taken as noisy +noise_percent = 10 + +######## acoustic modelling parameters + +#transition probabilities weight for multichannel decoding +trans_weight_multi = 25 +trans_weight_nbest = 25 + +### + +#flag wether to print the feaures after setting them, or not +print_features = 1 + +#flag wether to use the bad frames in the sum process +do_avoid_bad_frames = 1 + +#flag to use the best channel (SNR) as a reference +#defined from command line +do_compute_reference = 1 + +#flag wether to use a uem file or not(process all the file) +do_use_uem_file = 0 + +#flag wether to use an adaptative weights scheme or fixed weights +do_adapt_weights = 1 + +#flag wether to output the sph files or just run the system to create the auxiliary files +do_write_sph_files = 1 + +####directories where to store/retrieve info#### +#channels_file = ./cfg-files/channels + +#show needs to be passed as argument normally, here a default one is given just in case +#show_id = Ttmp + diff --git a/egs/chime6/s5_track1/conf/mfcc.conf b/egs/chime6/s5_track1/conf/mfcc.conf new file mode 100644 index 00000000000..32988403b00 --- /dev/null +++ b/egs/chime6/s5_track1/conf/mfcc.conf @@ -0,0 +1,2 @@ +--use-energy=false +--sample-frequency=16000 diff --git a/egs/chime6/s5_track1/conf/mfcc_hires.conf b/egs/chime6/s5_track1/conf/mfcc_hires.conf new file mode 100644 index 00000000000..fd64b62eb16 --- /dev/null +++ b/egs/chime6/s5_track1/conf/mfcc_hires.conf @@ -0,0 +1,10 @@ +# config for high-resolution MFCC features, intended for neural network training. +# Note: we keep all cepstra, so it has the same info as filterbank features, +# but MFCC is more easily compressible (because less correlated) which is why +# we prefer this method. +--use-energy=false # use average of log energy, not energy. 
+--sample-frequency=16000 +--num-mel-bins=40 +--num-ceps=40 +--low-freq=40 +--high-freq=-400 diff --git a/egs/chime6/s5_track1/conf/online_cmvn.conf b/egs/chime6/s5_track1/conf/online_cmvn.conf new file mode 100644 index 00000000000..7748a4a4dd3 --- /dev/null +++ b/egs/chime6/s5_track1/conf/online_cmvn.conf @@ -0,0 +1 @@ +# configuration file for apply-cmvn-online, used in the script ../local/run_online_decoding.sh diff --git a/egs/chime6/s5_track1/conf/queue.conf b/egs/chime6/s5_track1/conf/queue.conf new file mode 100644 index 00000000000..73103195684 --- /dev/null +++ b/egs/chime6/s5_track1/conf/queue.conf @@ -0,0 +1,10 @@ +command qsub -v PATH -cwd -S /bin/bash -j y -l arch=*64* +option mem=* -l mem_free=$0,ram_free=$0 +option mem=0 # Do not add anything to qsub_opts +option num_threads=* -pe smp $0 +option num_threads=1 # Do not add anything to qsub_opts +option max_jobs_run=* -tc $0 +default gpu=0 +option gpu=0 -q all.q -l hostname='!b19*' +option gpu=* -l gpu=$0 -q g.q -l hostname='!b19*' + diff --git a/egs/chime6/s5_track1/local/chain/compare_wer.sh b/egs/chime6/s5_track1/local/chain/compare_wer.sh new file mode 100755 index 00000000000..cd6be14ed88 --- /dev/null +++ b/egs/chime6/s5_track1/local/chain/compare_wer.sh @@ -0,0 +1,131 @@ +#!/bin/bash + +# this script is used for comparing decoding results between systems. +# e.g. local/chain/compare_wer.sh exp/chain/tdnn_{c,d}_sp +# For use with discriminatively trained systems you specify the epochs after a colon: +# for instance, +# local/chain/compare_wer.sh exp/chain/tdnn_c_sp exp/chain/tdnn_c_sp_smbr:{1,2,3} + + +if [ $# == 0 ]; then + echo "Usage: $0: [--looped] [--online] [ ... ]" + echo "e.g.: $0 exp/chain/tdnn_{b,c}_sp" + echo "or (with epoch numbers for discriminative training):" + echo "$0 exp/chain/tdnn_b_sp_disc:{1,2,3}" + exit 1 +fi + +echo "# $0 $*" + +include_looped=false +if [ "$1" == "--looped" ]; then + include_looped=true + shift +fi +include_online=false +if [ "$1" == "--online" ]; then + include_online=true + shift +fi + + +used_epochs=false + +# this function set_names is used to separate the epoch-related parts of the name +# [for discriminative training] and the regular parts of the name. 
+# If called with a colon-free directory name, like: +# set_names exp/chain/tdnn_lstm1e_sp_bi_smbr +# it will set dir=exp/chain/tdnn_lstm1e_sp_bi_smbr and epoch_infix="" +# If called with something like: +# set_names exp/chain/tdnn_d_sp_smbr:3 +# it will set dir=exp/chain/tdnn_d_sp_smbr and epoch_infix="_epoch3" + + +set_names() { + if [ $# != 1 ]; then + echo "compare_wer_general.sh: internal error" + exit 1 # exit the program + fi + dirname=$(echo $1 | cut -d: -f1) + epoch=$(echo $1 | cut -s -d: -f2) + if [ -z $epoch ]; then + epoch_infix="" + else + used_epochs=true + epoch_infix=_epoch${epoch} + fi +} + + + +echo -n "# System " +for x in $*; do printf "% 10s" " $(basename $x)"; done +echo + +strings=( + "#WER dev_clean_2 (tgsmall) " + "#WER dev_clean_2 (tglarge) ") + +for n in 0 1; do + echo -n "${strings[$n]}" + for x in $*; do + set_names $x # sets $dirname and $epoch_infix + decode_names=(tgsmall_dev_clean_2 tglarge_dev_clean_2) + + wer=$(cat $dirname/decode_${decode_names[$n]}/wer_* | utils/best_wer.sh | awk '{print $2}') + printf "% 10s" $wer + done + echo + if $include_looped; then + echo -n "# [looped:] " + for x in $*; do + set_names $x # sets $dirname and $epoch_infix + wer=$(cat $dirname/decode_looped_${decode_names[$n]}/wer_* | utils/best_wer.sh | awk '{print $2}') + printf "% 10s" $wer + done + echo + fi + if $include_online; then + echo -n "# [online:] " + for x in $*; do + set_names $x # sets $dirname and $epoch_infix + wer=$(cat ${dirname}_online/decode_${decode_names[$n]}/wer_* | utils/best_wer.sh | awk '{print $2}') + printf "% 10s" $wer + done + echo + fi +done + + +if $used_epochs; then + exit 0; # the diagnostics aren't comparable between regular and discriminatively trained systems. +fi + + +echo -n "# Final train prob " +for x in $*; do + prob=$(grep Overall $x/log/compute_prob_train.final.log | grep -v xent | awk '{printf("%.4f", $8)}') + printf "% 10s" $prob +done +echo + +echo -n "# Final valid prob " +for x in $*; do + prob=$(grep Overall $x/log/compute_prob_valid.final.log | grep -v xent | awk '{printf("%.4f", $8)}') + printf "% 10s" $prob +done +echo + +echo -n "# Final train prob (xent)" +for x in $*; do + prob=$(grep Overall $x/log/compute_prob_train.final.log | grep -w xent | awk '{printf("%.4f", $8)}') + printf "% 10s" $prob +done +echo + +echo -n "# Final valid prob (xent)" +for x in $*; do + prob=$(grep Overall $x/log/compute_prob_valid.final.log | grep -w xent | awk '{printf("%.4f", $8)}') + printf "% 10s" $prob +done +echo diff --git a/egs/chime6/s5_track1/local/chain/multi_condition/run_tdnn.sh b/egs/chime6/s5_track1/local/chain/multi_condition/run_tdnn.sh new file mode 120000 index 00000000000..61f8f499182 --- /dev/null +++ b/egs/chime6/s5_track1/local/chain/multi_condition/run_tdnn.sh @@ -0,0 +1 @@ +tuning/run_tdnn_1b.sh \ No newline at end of file diff --git a/egs/chime6/s5_track1/local/chain/multi_condition/run_tdnn_lstm.sh b/egs/chime6/s5_track1/local/chain/multi_condition/run_tdnn_lstm.sh new file mode 120000 index 00000000000..8e647598556 --- /dev/null +++ b/egs/chime6/s5_track1/local/chain/multi_condition/run_tdnn_lstm.sh @@ -0,0 +1 @@ +tuning/run_tdnn_lstm_1a.sh \ No newline at end of file diff --git a/egs/chime6/s5_track1/local/chain/multi_condition/tuning/run_cnn_tdnn_lstm_1a.sh b/egs/chime6/s5_track1/local/chain/multi_condition/tuning/run_cnn_tdnn_lstm_1a.sh new file mode 100755 index 00000000000..65d7caed24c --- /dev/null +++ b/egs/chime6/s5_track1/local/chain/multi_condition/tuning/run_cnn_tdnn_lstm_1a.sh @@ -0,0 +1,329 @@ +#!/bin/bash 
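# Note (illustrative, not from the patch): run_tdnn.sh and run_tdnn_lstm.sh above are
# symlinks into tuning/, pointing at the variants used for the reported results
# (run_tdnn_1b.sh and run_tdnn_lstm_1a.sh). Trying another tuning variant only
# requires repointing the link, e.g.
#   ln -sfn tuning/run_tdnn_1a.sh egs/chime6/s5_track1/local/chain/multi_condition/run_tdnn.sh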
+ +# Set -e here so that we catch if any executable fails immediately +set -euo pipefail + +# First the options that are passed through to run_ivector_common.sh +# (some of which are also used in this script directly). +stage=0 +nj=96 +decode_nj=40 +train_set_clean=train_worn_cleaned +train_set_noisy=train_u400k_cleaned +combined_train_set=train_worn_u400k_cleaned +test_sets="dev_worn" +gmm=tri3_cleaned +nnet3_affix=_train_worn_u400k_cleaned_rvb +lm_suffix= +noise_list= + +# The rest are configs specific to this script. Most of the parameters +# are just hardcoded at this level, in the commands below. +affix=_rvb_1a # affix for the TDNN directory name +tree_affix= +train_stage=-10 +get_egs_stage=-10 +decode_iter= + +common_egs_dir= + +hidden_dim=1024 +cell_dim=1024 +projection_dim=256 + +# training options +num_epochs=2 # 2 works better than 4 +chunk_width=140,100,160 +chunk_left_context=40 +chunk_right_context=0 +dropout_schedule='0,0@0.20,0.3@0.50,0' +xent_regularize=0.025 +label_delay=5 + +# decode options +extra_left_context=50 +extra_right_context=0 + +# training options +srand=0 +remove_egs=true + +#decode options +test_online_decoding=false # if true, it will run the last decoding stage. + + +# End configuration section. +echo "$0 $@" # Print the command line for logging + +. ./cmd.sh +. ./path.sh +. ./utils/parse_options.sh + +if ! cuda-compiled; then + cat <$lang/topo + fi +fi + +lat_dir_clean=exp/chain${nnet3_affix}/${gmm}_${train_set_clean}_sp_lats +lat_dir_noisy=exp/chain${nnet3_affix}/${gmm}_${train_set_noisy}_sp_lats +lat_dir=exp/chain${nnet3_affix}/${gmm}_${combined_train_set}_sp_rvb_lats + +if [ $stage -le 11 ]; then + steps/align_fmllr_lats.sh --nj ${nj} --cmd "$train_cmd" --generate-ali-from-lats true \ + data/${train_set_noisy}_sp data/lang $gmm_dir $lat_dir_noisy || exit 1 + rm $lat_dir_noisy/fsts.*.gz +fi + +if [ $stage -le 12 ]; then + # Get the alignments as lattices (gives the chain training more freedom). + # use the same num-jobs as the alignments + steps/align_fmllr_lats.sh --nj ${nj} --cmd "$train_cmd" --generate-ali-from-lats true \ + data/${train_set_clean}_sp data/lang $gmm_dir $lat_dir_clean || exit 1 + rm $lat_dir_clean/fsts.*.gz +fi + +if [ $stage -le 13 ]; then + local/reverberate_lat_dir.sh --cmd "$train_cmd" --num-data-reps 2 \ + data/${combined_train_set}_sp_rvb_hires $lat_dir_noisy \ + $lat_dir_clean $lat_dir || exit 1 +fi + +if [ $stage -le 14 ]; then + # Build a tree using our new topology. We know we have alignments for the + # speed-perturbed data (local/nnet3/run_ivector_common.sh made them), so use + # those. The num-leaves is always somewhat less than the num-leaves from + # the GMM baseline. + if [ -f $tree_dir/final.mdl ]; then + echo "$0: $tree_dir/final.mdl already exists, refusing to overwrite it." 
+ exit 1; + fi + steps/nnet3/chain/build_tree.sh \ + --frame-subsampling-factor 3 \ + --context-opts "--context-width=2 --central-position=1" \ + --cmd "$train_cmd" 3500 data/${train_set_clean}_sp \ + $lang $lat_dir_clean $tree_dir +fi + +if [ $stage -le 15 ]; then + mkdir -p $dir + echo "$0: creating neural net configs using the xconfig parser"; + + num_targets=$(tree-info $tree_dir/tree |grep num-pdfs|awk '{print $2}') + learning_rate_factor=$(echo "print 0.5/$xent_regularize" | python) + + lstm_opts="decay-time=40" + + mkdir -p $dir/configs + cat < $dir/configs/network.xconfig + input dim=100 name=ivector + input dim=40 name=input + + # please note that it is important to have input layer with the name=input + # as the layer immediately preceding the fixed-affine-layer to enable + # the use of short notation for the descriptor + fixed-affine-layer name=lda input=Append(-1,0,1,ReplaceIndex(ivector, t, 0)) affine-transform-file=$dir/configs/lda.mat + idct-layer name=idct input=input dim=40 cepstral-lifter=22 affine-transform-file=$dir/configs/idct.mat + + conv-relu-batchnorm-layer name=cnn1 input=idct height-in=40 height-out=20 height-subsample-out=2 time-offsets=-1,0,1 height-offsets=-1,0,1 num-filters-out=256 learning-rate-factor=0.333 max-change=0.25 + conv-relu-batchnorm-layer name=cnn2 input=cnn1 height-in=20 height-out=20 time-offsets=-1,0,1 height-offsets=-1,0,1 num-filters-out=128 + + relu-batchnorm-layer name=affine1 input=lda dim=512 + + # the first splicing is moved before the lda layer, so no splicing here + relu-batchnorm-layer name=tdnn1 input=cnn2 dim=1024 + relu-batchnorm-layer name=tdnn2 input=Append(-1,0,1,affine1) dim=1024 + relu-batchnorm-layer name=tdnn3 input=Append(-1,0,1) dim=1024 + + # check steps/libs/nnet3/xconfig/lstm.py for the other options and defaults + fast-lstmp-layer name=lstm1 cell-dim=1024 recurrent-projection-dim=256 non-recurrent-projection-dim=256 delay=-3 dropout-proportion=0.0 $lstm_opts + relu-batchnorm-layer name=tdnn4 input=Append(-3,0,3) dim=1024 + relu-batchnorm-layer name=tdnn5 input=Append(-3,0,3) dim=1024 + relu-batchnorm-layer name=tdnn6 input=Append(-3,0,3) dim=1024 + fast-lstmp-layer name=lstm2 cell-dim=1024 recurrent-projection-dim=256 non-recurrent-projection-dim=256 delay=-3 dropout-proportion=0.0 $lstm_opts + relu-batchnorm-layer name=tdnn7 input=Append(-3,0,3) dim=1024 + relu-batchnorm-layer name=tdnn8 input=Append(-3,0,3) dim=1024 + relu-batchnorm-layer name=tdnn9 input=Append(-3,0,3) dim=1024 + fast-lstmp-layer name=lstm3 cell-dim=1024 recurrent-projection-dim=256 non-recurrent-projection-dim=256 delay=-3 dropout-proportion=0.0 $lstm_opts + + ## adding the layers for chain branch + output-layer name=output input=lstm3 output-delay=$label_delay include-log-softmax=false dim=$num_targets max-change=1.5 + + # adding the layers for xent branch + # This block prints the configs for a separate output that will be + # trained with a cross-entropy objective in the 'chain' models... this + # has the effect of regularizing the hidden parts of the model. we use + # 0.5 / args.xent_regularize as the learning rate factor- the factor of + # 0.5 / args.xent_regularize is suitable as it means the xent + # final-layer learns at a rate independent of the regularization + # constant; and the 0.5 was tuned so as to make the relative progress + # similar in the xent and regular final layers. 
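  # Worked example (added for clarity): with the xent_regularize=0.025 set at the top of
  # this script, the learning_rate_factor computed above is 0.5 / 0.025 = 20, so the xent
  # output layer trains with a 20x larger learning rate, compensating for the small
  # weight given to its objective.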
+ output-layer name=output-xent input=lstm3 output-delay=$label_delay dim=$num_targets learning-rate-factor=$learning_rate_factor max-change=1.5 + +EOF + steps/nnet3/xconfig_to_configs.py --xconfig-file $dir/configs/network.xconfig --config-dir $dir/configs/ +fi + +if [ $stage -le 16 ]; then + if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! -d $dir/egs/storage ]; then + utils/create_split_dir.pl \ + /export/b0{3,4,5,6}/$USER/kaldi-data/egs/chime5-$(date +'%m_%d_%H_%M')/s5/$dir/egs/storage $dir/egs/storage + fi + + mkdir -p $dir/egs + touch $dir/egs/.nodelete # keep egs around when that run dies. + + steps/nnet3/chain/train.py --stage=$train_stage \ + --cmd="$train_cmd --mem 4G" \ + --feat.online-ivector-dir=$train_ivector_dir \ + --feat.cmvn-opts="--norm-means=false --norm-vars=false" \ + --chain.xent-regularize $xent_regularize \ + --chain.leaky-hmm-coefficient=0.1 \ + --chain.l2-regularize=0.00005 \ + --chain.apply-deriv-weights=false \ + --chain.lm-opts="--num-extra-lm-states=2000" \ + --trainer.dropout-schedule $dropout_schedule \ + --trainer.num-chunk-per-minibatch 32 \ + --trainer.frames-per-iter 1500000 \ + --trainer.max-param-change 2.0 \ + --trainer.num-epochs $num_epochs \ + --trainer.srand=$srand \ + --trainer.optimization.shrink-value 0.99 \ + --trainer.optimization.num-jobs-initial=3 \ + --trainer.optimization.num-jobs-final=16 \ + --trainer.optimization.initial-effective-lrate=0.001 \ + --trainer.optimization.final-effective-lrate=0.0001 \ + --trainer.optimization.momentum=0.0 \ + --trainer.deriv-truncate-margin 8 \ + --egs.stage $get_egs_stage \ + --egs.opts="--frames-overlap-per-eg 0" \ + --egs.chunk-width=$chunk_width \ + --egs.chunk-left-context=$chunk_left_context \ + --egs.chunk-right-context=$chunk_right_context \ + --egs.chunk-left-context-initial=0 \ + --egs.chunk-right-context-final=0 \ + --egs.dir="$common_egs_dir" \ + --cleanup.remove-egs=$remove_egs \ + --feat-dir=$train_data_dir \ + --tree-dir=$tree_dir \ + --lat-dir=$lat_dir \ + --dir=$dir || exit 1; +fi + +if [ $stage -le 17 ]; then + # Note: it's not important to give mkgraph.sh the lang directory with the + # matched topology (since it gets the topology file from the model). + utils/mkgraph.sh \ + --self-loop-scale 1.0 data/lang${lm_suffix}/ \ + $tree_dir $tree_dir/graph${lm_suffix} || exit 1; +fi + +if [ -z "$extra_left_context" ]; then + extra_left_context=$chunk_left_context +fi +if [ -z "$extra_right_context" ]; then + extra_right_context=$chunk_right_context +fi + +if [ $stage -le 18 ]; then + frames_per_chunk=$(echo $chunk_width | cut -d, -f1) + rm $dir/.error 2>/dev/null || true + + for data in $test_sets; do + ( + steps/nnet3/decode.sh \ + --acwt 1.0 --post-decode-acwt 10.0 \ + --extra-left-context $extra_left_context \ + --extra-right-context $extra_right_context \ + --extra-left-context-initial 0 \ + --extra-right-context-final 0 \ + --frames-per-chunk $frames_per_chunk \ + --nj $decode_nj --cmd "$decode_cmd" --num-threads 4 \ + --online-ivector-dir exp/nnet3${nnet3_affix}/ivectors_${data}_hires \ + $tree_dir/graph${lm_suffix} data/${data}_hires ${dir}/decode${lm_suffix}_${data} || exit 1 + ) || touch $dir/.error & + done + wait + [ -f $dir/.error ] && echo "$0: there was a problem while decoding" && exit 1 +fi + +# Not testing the 'looped' decoding separately, because for +# TDNN systems it would give exactly the same results as the +# normal decoding. + +if $test_online_decoding && [ $stage -le 19 ]; then + # note: if the features change (e.g. 
you add pitch features), you will have to + # change the options of the following command line. + steps/online/nnet3/prepare_online_decoding.sh \ + --mfcc-config conf/mfcc_hires.conf \ + $lang exp/nnet3${nnet3_affix}/extractor ${dir} ${dir}_online + + rm $dir/.error 2>/dev/null || true + + for data in $test_sets; do + ( + nspk=$(wc -l $lang/topo + fi +fi + +lat_dir_clean=exp/chain${nnet3_affix}/${gmm}_${train_set_clean}_sp_lats +lat_dir_noisy=exp/chain${nnet3_affix}/${gmm}_${train_set_noisy}_sp_lats +lat_dir=exp/chain${nnet3_affix}/${gmm}_${combined_train_set}_sp_rvb_lats + +if [ $stage -le 11 ]; then + steps/align_fmllr_lats.sh --nj ${nj} --cmd "$train_cmd" --generate-ali-from-lats true \ + data/${train_set_noisy}_sp data/lang $gmm_dir $lat_dir_noisy || exit 1 + rm $lat_dir_noisy/fsts.*.gz +fi + +if [ $stage -le 12 ]; then + # Get the alignments as lattices (gives the chain training more freedom). + # use the same num-jobs as the alignments + steps/align_fmllr_lats.sh --nj ${nj} --cmd "$train_cmd" --generate-ali-from-lats true \ + data/${train_set_clean}_sp data/lang $gmm_dir $lat_dir_clean || exit 1 + rm $lat_dir_clean/fsts.*.gz +fi + +if [ $stage -le 13 ]; then + local/reverberate_lat_dir.sh --cmd "$train_cmd" --num-data-reps 2 \ + data/${combined_train_set}_sp_rvb_hires $lat_dir_noisy \ + $lat_dir_clean $lat_dir || exit 1 +fi + +if [ $stage -le 14 ]; then + # Build a tree using our new topology. We know we have alignments for the + # speed-perturbed data (local/nnet3/run_ivector_common.sh made them), so use + # those. The num-leaves is always somewhat less than the num-leaves from + # the GMM baseline. + if [ -f $tree_dir/final.mdl ]; then + echo "$0: $tree_dir/final.mdl already exists, refusing to overwrite it." + exit 1; + fi + steps/nnet3/chain/build_tree.sh \ + --frame-subsampling-factor 3 \ + --cmd "$train_cmd" 3500 data/${train_set_clean}_sp \ + $lang $lat_dir_clean $tree_dir +fi + +if [ $stage -le 15 ]; then + mkdir -p $dir + echo "$0: creating neural net configs using the xconfig parser"; + + num_targets=$(tree-info $tree_dir/tree |grep num-pdfs|awk '{print $2}') + learning_rate_factor=$(echo "print 0.5/$xent_regularize" | python) + opts="l2-regularize=0.05" + output_opts="l2-regularize=0.01 bottleneck-dim=320" + + mkdir -p $dir/configs + cat < $dir/configs/network.xconfig + input dim=100 name=ivector + input dim=40 name=input + + # please note that it is important to have input layer with the name=input + # as the layer immediately preceding the fixed-affine-layer to enable + # the use of short notation for the descriptor + fixed-affine-layer name=lda input=Append(-2,-1,0,1,2,ReplaceIndex(ivector, t, 0)) affine-transform-file=$dir/configs/lda.mat + + # the first splicing is moved before the lda layer, so no splicing here + relu-batchnorm-layer name=tdnn1 $opts dim=$hidden_dim + relu-batchnorm-layer name=tdnn2 $opts dim=$hidden_dim input=Append(-1,0,1) + relu-batchnorm-layer name=tdnn3 $opts dim=$hidden_dim + relu-batchnorm-layer name=tdnn4 $opts dim=$hidden_dim input=Append(-1,0,1) + relu-batchnorm-layer name=tdnn5 $opts dim=$hidden_dim + relu-batchnorm-layer name=tdnn6 $opts dim=$hidden_dim input=Append(-3,0,3) + relu-batchnorm-layer name=tdnn7 $opts dim=$hidden_dim input=Append(-3,0,3) + relu-batchnorm-layer name=tdnn8 $opts dim=$hidden_dim input=Append(-6,-3,0) + + ## adding the layers for chain branch + relu-batchnorm-layer name=prefinal-chain $opts dim=$hidden_dim target-rms=0.5 + output-layer name=output include-log-softmax=false $output_opts dim=$num_targets 
max-change=1.5 + + # adding the layers for xent branch + # This block prints the configs for a separate output that will be + # trained with a cross-entropy objective in the 'chain' models... this + # has the effect of regularizing the hidden parts of the model. we use + # 0.5 / args.xent_regularize as the learning rate factor- the factor of + # 0.5 / args.xent_regularize is suitable as it means the xent + # final-layer learns at a rate independent of the regularization + # constant; and the 0.5 was tuned so as to make the relative progress + # similar in the xent and regular final layers. + relu-batchnorm-layer name=prefinal-xent input=tdnn8 $opts dim=$hidden_dim target-rms=0.5 + output-layer name=output-xent $output_opts dim=$num_targets learning-rate-factor=$learning_rate_factor max-change=1.5 +EOF + steps/nnet3/xconfig_to_configs.py --xconfig-file $dir/configs/network.xconfig --config-dir $dir/configs/ +fi + +if [ $stage -le 16 ]; then + if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! -d $dir/egs/storage ]; then + utils/create_split_dir.pl \ + /export/b0{3,4,5,6}/$USER/kaldi-data/egs/chime5-$(date +'%m_%d_%H_%M')/s5/$dir/egs/storage $dir/egs/storage + fi + + mkdir -p $dir/egs + touch $dir/egs/.nodelete # keep egs around when that run dies. + + steps/nnet3/chain/train.py --stage=$train_stage \ + --cmd="$train_cmd --mem 4G" \ + --feat.online-ivector-dir=$train_ivector_dir \ + --feat.cmvn-opts="--norm-means=false --norm-vars=false" \ + --chain.xent-regularize $xent_regularize \ + --chain.leaky-hmm-coefficient=0.1 \ + --chain.l2-regularize=0.00005 \ + --chain.apply-deriv-weights=false \ + --chain.lm-opts="--num-extra-lm-states=2000" \ + --trainer.num-chunk-per-minibatch 256,128,64 \ + --trainer.frames-per-iter 1500000 \ + --trainer.max-param-change 2.0 \ + --trainer.num-epochs $num_epochs \ + --trainer.srand=$srand \ + --trainer.optimization.num-jobs-initial=3 \ + --trainer.optimization.num-jobs-final=16 \ + --trainer.optimization.initial-effective-lrate=0.001 \ + --trainer.optimization.final-effective-lrate=0.0001 \ + --trainer.optimization.momentum=0.0 \ + --egs.stage $get_egs_stage \ + --egs.opts="--frames-overlap-per-eg 0" \ + --egs.chunk-width=$chunk_width \ + --egs.dir="$common_egs_dir" \ + --cleanup.remove-egs=$remove_egs \ + --feat-dir=$train_data_dir \ + --tree-dir=$tree_dir \ + --lat-dir=$lat_dir \ + --dir=$dir || exit 1; +fi + +if [ $stage -le 17 ]; then + # Note: it's not important to give mkgraph.sh the lang directory with the + # matched topology (since it gets the topology file from the model). + utils/mkgraph.sh \ + --self-loop-scale 1.0 data/lang${lm_suffix}/ \ + $tree_dir $tree_dir/graph${lm_suffix} || exit 1; +fi + +if [ $stage -le 18 ]; then + frames_per_chunk=$(echo $chunk_width | cut -d, -f1) + rm $dir/.error 2>/dev/null || true + + for data in $test_sets; do + ( + steps/nnet3/decode.sh \ + --acwt 1.0 --post-decode-acwt 10.0 \ + --frames-per-chunk $frames_per_chunk \ + --nj $decode_nj --cmd "$decode_cmd" --num-threads 4 \ + --online-ivector-dir exp/nnet3${nnet3_affix}/ivectors_${data}_hires \ + $tree_dir/graph${lm_suffix} data/${data}_hires ${dir}/decode${lm_suffix}_${data} || exit 1 + ) || touch $dir/.error & + done + wait + [ -f $dir/.error ] && echo "$0: there was a problem while decoding" && exit 1 +fi + +# Not testing the 'looped' decoding separately, because for +# TDNN systems it would give exactly the same results as the +# normal decoding. + +if $test_online_decoding && [ $stage -le 19 ]; then + # note: if the features change (e.g. 
you add pitch features), you will have to + # change the options of the following command line. + steps/online/nnet3/prepare_online_decoding.sh \ + --mfcc-config conf/mfcc_hires.conf \ + $lang exp/nnet3${nnet3_affix}/extractor ${dir} ${dir}_online + + rm $dir/.error 2>/dev/null || true + + for data in $test_sets; do + ( + nspk=$(wc -l 6034 combine=-0.058->-0.057 (over 8) xent:train/valid[261,393,final]=(-1.20,-0.897,-0.894/-1.20,-0.919,-0.911) logprob:train/valid[261,393,final]=(-0.090,-0.059,-0.058/-0.098,-0.073,-0.073) + +set -e + +# configs for 'chain' +stage=0 +nj=96 +decode_nj=40 +train_set_clean=train_worn_cleaned +train_set_noisy=train_u400k_cleaned +combined_train_set=train_worn_u400k_cleaned +test_sets="dev_worn dev_beamformit_ref" +gmm=tri3_cleaned +nnet3_affix=_train_worn_u400k_cleaned_rvb +lm_suffix= +noise_list= + +# The rest are configs specific to this script. Most of the parameters +# are just hardcoded at this level, in the commands below. +affix=_rvb_1b # affix for the TDNN directory name +tree_affix= +train_stage=-10 +get_egs_stage=-10 +decode_iter= + +num_epochs=4 +common_egs_dir= +# training options +# training chunk-options +chunk_width=140,100,160 +xent_regularize=0.1 +dropout_schedule='0,0@0.20,0.5@0.50,0' + +# training options +srand=0 +remove_egs=true + +#decode options +test_online_decoding=false # if true, it will run the last decoding stage. + +# End configuration section. +echo "$0 $@" # Print the command line for logging + +. ./cmd.sh +. ./path.sh +. ./utils/parse_options.sh + +if ! cuda-compiled; then + cat <$lang/topo + fi +fi + +lat_dir_clean=exp/chain${nnet3_affix}/${gmm}_${train_set_clean}_sp_lats +lat_dir_noisy=exp/chain${nnet3_affix}/${gmm}_${train_set_noisy}_sp_lats +lat_dir=exp/chain${nnet3_affix}/${gmm}_${combined_train_set}_sp_rvb_lats + +if [ $stage -le 11 ]; then + steps/align_fmllr_lats.sh --nj ${nj} --cmd "$train_cmd" --generate-ali-from-lats true \ + data/${train_set_noisy}_sp data/lang $gmm_dir $lat_dir_noisy || exit 1 + rm $lat_dir_noisy/fsts.*.gz +fi + +if [ $stage -le 12 ]; then + # Get the alignments as lattices (gives the chain training more freedom). + # use the same num-jobs as the alignments + steps/align_fmllr_lats.sh --nj ${nj} --cmd "$train_cmd" --generate-ali-from-lats true \ + data/${train_set_clean}_sp data/lang $gmm_dir $lat_dir_clean || exit 1 + rm $lat_dir_clean/fsts.*.gz +fi + +if [ $stage -le 13 ]; then + local/reverberate_lat_dir.sh --cmd "$train_cmd" --num-data-reps 2 \ + data/${combined_train_set}_sp_rvb_hires $lat_dir_noisy \ + $lat_dir_clean $lat_dir || exit 1 +fi + +if [ $stage -le 14 ]; then + # Build a tree using our new topology. We know we have alignments for the + # speed-perturbed data (local/nnet3/run_ivector_common.sh made them), so use + # those. The num-leaves is always somewhat less than the num-leaves from + # the GMM baseline. + if [ -f $tree_dir/final.mdl ]; then + echo "$0: $tree_dir/final.mdl already exists, refusing to overwrite it." 
+ exit 1; + fi + steps/nnet3/chain/build_tree.sh \ + --frame-subsampling-factor 3 \ + --cmd "$train_cmd" 3500 data/${train_set_clean}_sp \ + $lang $lat_dir_clean $tree_dir +fi + +if [ $stage -le 15 ]; then + mkdir -p $dir + echo "$0: creating neural net configs using the xconfig parser"; + + num_targets=$(tree-info $tree_dir/tree |grep num-pdfs|awk '{print $2}') + learning_rate_factor=$(echo "print 0.5/$xent_regularize" | python) + affine_opts="l2-regularize=0.01 dropout-proportion=0.0 dropout-per-dim=true dropout-per-dim-continuous=true" + tdnnf_opts="l2-regularize=0.01 dropout-proportion=0.0 bypass-scale=0.66" + linear_opts="l2-regularize=0.01 orthonormal-constraint=-1.0" + prefinal_opts="l2-regularize=0.01" + output_opts="l2-regularize=0.002" + + mkdir -p $dir/configs + cat < $dir/configs/network.xconfig + input dim=100 name=ivector + input dim=40 name=input + + # please note that it is important to have input layer with the name=input + # as the layer immediately preceding the fixed-affine-layer to enable + # the use of short notation for the descriptor + fixed-affine-layer name=lda input=Append(-1,0,1,ReplaceIndex(ivector, t, 0)) affine-transform-file=$dir/configs/lda.mat + + # the first splicing is moved before the lda layer, so no splicing here + relu-batchnorm-dropout-layer name=tdnn1 $affine_opts dim=1536 + tdnnf-layer name=tdnnf2 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=1 + tdnnf-layer name=tdnnf3 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=1 + tdnnf-layer name=tdnnf4 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=1 + tdnnf-layer name=tdnnf5 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=0 + tdnnf-layer name=tdnnf6 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3 + tdnnf-layer name=tdnnf7 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3 + tdnnf-layer name=tdnnf8 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3 + tdnnf-layer name=tdnnf9 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3 + tdnnf-layer name=tdnnf10 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3 + tdnnf-layer name=tdnnf11 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3 + tdnnf-layer name=tdnnf12 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3 + tdnnf-layer name=tdnnf13 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3 + tdnnf-layer name=tdnnf14 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3 + tdnnf-layer name=tdnnf15 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3 + linear-component name=prefinal-l dim=256 $linear_opts + + prefinal-layer name=prefinal-chain input=prefinal-l $prefinal_opts big-dim=1536 small-dim=256 + output-layer name=output include-log-softmax=false dim=$num_targets $output_opts + + prefinal-layer name=prefinal-xent input=prefinal-l $prefinal_opts big-dim=1536 small-dim=256 + output-layer name=output-xent dim=$num_targets learning-rate-factor=$learning_rate_factor $output_opts +EOF + steps/nnet3/xconfig_to_configs.py --xconfig-file $dir/configs/network.xconfig --config-dir $dir/configs/ +fi + +if [ $stage -le 16 ]; then + if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! -d $dir/egs/storage ]; then + utils/create_split_dir.pl \ + /export/b0{3,4,5,6}/$USER/kaldi-data/egs/chime5-$(date +'%m_%d_%H_%M')/s5/$dir/egs/storage $dir/egs/storage + fi + + mkdir -p $dir/egs + touch $dir/egs/.nodelete # keep egs around when that run dies. 
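  # Usage note (illustrative, not from the patch): if the train.py call below dies part
  # way through, it can normally be resumed from the last finished iteration by rerunning
  # this script with the outer and inner stage options, for instance (the iteration
  # number 100 is only an example):
  #   local/chain/multi_condition/tuning/run_tdnn_1b.sh --stage 16 --train-stage 100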
+ + steps/nnet3/chain/train.py --stage=$train_stage \ + --cmd="$train_cmd --mem 4G" \ + --feat.online-ivector-dir=$train_ivector_dir \ + --feat.cmvn-opts="--norm-means=false --norm-vars=false" \ + --chain.xent-regularize $xent_regularize \ + --chain.leaky-hmm-coefficient 0.1 \ + --chain.l2-regularize 0.0 \ + --chain.apply-deriv-weights false \ + --chain.lm-opts="--num-extra-lm-states=2000" \ + --trainer.dropout-schedule "$dropout_schedule" \ + --trainer.add-option="--optimization.memory-compression-level=2" \ + --trainer.num-chunk-per-minibatch 64 \ + --trainer.frames-per-iter 1500000 \ + --trainer.max-param-change 2.0 \ + --trainer.num-epochs $num_epochs \ + --trainer.optimization.num-jobs-initial 3 \ + --trainer.optimization.num-jobs-final 16 \ + --trainer.optimization.initial-effective-lrate 0.00025 \ + --trainer.optimization.final-effective-lrate 0.000025 \ + --egs.stage $get_egs_stage \ + --egs.opts "--frames-overlap-per-eg 0" \ + --egs.chunk-width $chunk_width \ + --egs.dir "$common_egs_dir" \ + --cleanup.remove-egs $remove_egs \ + --feat-dir=$train_data_dir \ + --tree-dir=$tree_dir \ + --lat-dir=$lat_dir \ + --dir=$dir || exit 1; +fi + +if [ $stage -le 17 ]; then + # Note: it's not important to give mkgraph.sh the lang directory with the + # matched topology (since it gets the topology file from the model). + utils/mkgraph.sh \ + --self-loop-scale 1.0 data/lang${lm_suffix}/ \ + $tree_dir $tree_dir/graph${lm_suffix} || exit 1; +fi + +if [ $stage -le 18 ]; then + frames_per_chunk=$(echo $chunk_width | cut -d, -f1) + rm $dir/.error 2>/dev/null || true + + for data in $test_sets; do + ( + steps/nnet3/decode.sh \ + --acwt 1.0 --post-decode-acwt 10.0 \ + --frames-per-chunk $frames_per_chunk \ + --nj $decode_nj --cmd "$decode_cmd" --num-threads 4 \ + --online-ivector-dir exp/nnet3${nnet3_affix}/ivectors_${data}_hires \ + $tree_dir/graph${lm_suffix} data/${data}_hires ${dir}/decode${lm_suffix}_${data} || exit 1 + ) || touch $dir/.error & + done + wait + [ -f $dir/.error ] && echo "$0: there was a problem while decoding" && exit 1 +fi + +# Not testing the 'looped' decoding separately, because for +# TDNN systems it would give exactly the same results as the +# normal decoding. + +if $test_online_decoding && [ $stage -le 19 ]; then + # note: if the features change (e.g. you add pitch features), you will have to + # change the options of the following command line. + steps/online/nnet3/prepare_online_decoding.sh \ + --mfcc-config conf/mfcc_hires.conf \ + $lang exp/nnet3${nnet3_affix}/extractor ${dir} ${dir}_online + + rm $dir/.error 2>/dev/null || true + + for data in $test_sets; do + ( + nspk=$(wc -l $lang/topo + fi +fi + +lat_dir_clean=exp/chain${nnet3_affix}/${gmm}_${train_set_clean}_sp_lats +lat_dir_noisy=exp/chain${nnet3_affix}/${gmm}_${train_set_noisy}_sp_lats +lat_dir=exp/chain${nnet3_affix}/${gmm}_${combined_train_set}_sp_rvb_lats + +if [ $stage -le 11 ]; then + steps/align_fmllr_lats.sh --nj ${nj} --cmd "$train_cmd" --generate-ali-from-lats true \ + data/${train_set_noisy}_sp data/lang $gmm_dir $lat_dir_noisy || exit 1 + rm $lat_dir_noisy/fsts.*.gz +fi + +if [ $stage -le 12 ]; then + # Get the alignments as lattices (gives the chain training more freedom). 
+ # use the same num-jobs as the alignments + steps/align_fmllr_lats.sh --nj ${nj} --cmd "$train_cmd" --generate-ali-from-lats true \ + data/${train_set_clean}_sp data/lang $gmm_dir $lat_dir_clean || exit 1 + rm $lat_dir_clean/fsts.*.gz +fi + +if [ $stage -le 13 ]; then + local/reverberate_lat_dir.sh --cmd "$train_cmd" --num-data-reps 2 \ + data/${combined_train_set}_sp_rvb_hires $lat_dir_noisy \ + $lat_dir_clean $lat_dir || exit 1 +fi + +if [ $stage -le 14 ]; then + # Build a tree using our new topology. We know we have alignments for the + # speed-perturbed data (local/nnet3/run_ivector_common.sh made them), so use + # those. The num-leaves is always somewhat less than the num-leaves from + # the GMM baseline. + if [ -f $tree_dir/final.mdl ]; then + echo "$0: $tree_dir/final.mdl already exists, refusing to overwrite it." + exit 1; + fi + steps/nnet3/chain/build_tree.sh \ + --frame-subsampling-factor 3 \ + --context-opts "--context-width=2 --central-position=1" \ + --cmd "$train_cmd" 3500 data/${train_set_clean}_sp \ + $lang $lat_dir_clean $tree_dir +fi + +if [ $stage -le 15 ]; then + mkdir -p $dir + echo "$0: creating neural net configs using the xconfig parser"; + + num_targets=$(tree-info $tree_dir/tree |grep num-pdfs|awk '{print $2}') + learning_rate_factor=$(echo "print 0.5/$xent_regularize" | python) + + lstm_opts="decay-time=40" + + mkdir -p $dir/configs + cat < $dir/configs/network.xconfig + input dim=100 name=ivector + input dim=40 name=input + + # please note that it is important to have input layer with the name=input + # as the layer immediately preceding the fixed-affine-layer to enable + # the use of short notation for the descriptor + fixed-affine-layer name=lda input=Append(-2,-1,0,1,2,ReplaceIndex(ivector, t, 0)) affine-transform-file=$dir/configs/lda.mat + + # the first splicing is moved before the lda layer, so no splicing here + relu-batchnorm-layer name=tdnn1 dim=$hidden_dim + relu-batchnorm-layer name=tdnn2 input=Append(-1,0,1) dim=$hidden_dim + relu-batchnorm-layer name=tdnn3 input=Append(-1,0,1) dim=$hidden_dim + + fast-lstmp-layer name=lstm1 cell-dim=$cell_dim recurrent-projection-dim=$projection_dim non-recurrent-projection-dim=$projection_dim delay=-3 dropout-proportion=0.0 $lstm_opts + relu-batchnorm-layer name=tdnn4 input=Append(-3,0,3) dim=$hidden_dim + relu-batchnorm-layer name=tdnn5 input=Append(-3,0,3) dim=$hidden_dim + fast-lstmp-layer name=lstm2 cell-dim=$cell_dim recurrent-projection-dim=$projection_dim non-recurrent-projection-dim=$projection_dim delay=-3 dropout-proportion=0.0 $lstm_opts + relu-batchnorm-layer name=tdnn6 input=Append(-3,0,3) dim=$hidden_dim + relu-batchnorm-layer name=tdnn7 input=Append(-3,0,3) dim=$hidden_dim + fast-lstmp-layer name=lstm3 cell-dim=$cell_dim recurrent-projection-dim=$projection_dim non-recurrent-projection-dim=$projection_dim delay=-3 dropout-proportion=0.0 $lstm_opts + relu-batchnorm-layer name=tdnn8 input=Append(-3,0,3) dim=$hidden_dim + relu-batchnorm-layer name=tdnn9 input=Append(-3,0,3) dim=$hidden_dim + fast-lstmp-layer name=lstm4 cell-dim=$cell_dim recurrent-projection-dim=$projection_dim non-recurrent-projection-dim=$projection_dim delay=-3 dropout-proportion=0.0 $lstm_opts + + ## adding the layers for chain branch + output-layer name=output input=lstm4 output-delay=$label_delay include-log-softmax=false dim=$num_targets max-change=1.5 + + # adding the layers for xent branch + # This block prints the configs for a separate output that will be + # trained with a cross-entropy objective in the 'chain' models... 
this + # has the effect of regularizing the hidden parts of the model. we use + # 0.5 / args.xent_regularize as the learning rate factor- the factor of + # 0.5 / args.xent_regularize is suitable as it means the xent + # final-layer learns at a rate independent of the regularization + # constant; and the 0.5 was tuned so as to make the relative progress + # similar in the xent and regular final layers. + output-layer name=output-xent input=lstm4 output-delay=$label_delay dim=$num_targets learning-rate-factor=$learning_rate_factor max-change=1.5 + +EOF + steps/nnet3/xconfig_to_configs.py --xconfig-file $dir/configs/network.xconfig --config-dir $dir/configs/ +fi + +if [ $stage -le 16 ]; then + if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! -d $dir/egs/storage ]; then + utils/create_split_dir.pl \ + /export/b0{3,4,5,6}/$USER/kaldi-data/egs/chime5-$(date +'%m_%d_%H_%M')/s5/$dir/egs/storage $dir/egs/storage + fi + + mkdir -p $dir/egs + touch $dir/egs/.nodelete # keep egs around when that run dies. + + steps/nnet3/chain/train.py --stage=$train_stage \ + --cmd="$train_cmd --mem 4G" \ + --feat.online-ivector-dir=$train_ivector_dir \ + --feat.cmvn-opts="--norm-means=false --norm-vars=false" \ + --chain.xent-regularize $xent_regularize \ + --chain.leaky-hmm-coefficient=0.1 \ + --chain.l2-regularize=0.00005 \ + --chain.apply-deriv-weights=false \ + --chain.lm-opts="--num-extra-lm-states=2000" \ + --trainer.dropout-schedule $dropout_schedule \ + --trainer.num-chunk-per-minibatch 64,32 \ + --trainer.frames-per-iter 1500000 \ + --trainer.max-param-change 2.0 \ + --trainer.num-epochs $num_epochs \ + --trainer.srand=$srand \ + --trainer.optimization.shrink-value 0.99 \ + --trainer.optimization.num-jobs-initial=3 \ + --trainer.optimization.num-jobs-final=16 \ + --trainer.optimization.initial-effective-lrate=0.001 \ + --trainer.optimization.final-effective-lrate=0.0001 \ + --trainer.optimization.momentum=0.0 \ + --trainer.deriv-truncate-margin 8 \ + --egs.stage $get_egs_stage \ + --egs.opts="--frames-overlap-per-eg 0" \ + --egs.chunk-width=$chunk_width \ + --egs.chunk-left-context=$chunk_left_context \ + --egs.chunk-right-context=$chunk_right_context \ + --egs.chunk-left-context-initial=0 \ + --egs.chunk-right-context-final=0 \ + --egs.dir="$common_egs_dir" \ + --cleanup.remove-egs=$remove_egs \ + --feat-dir=$train_data_dir \ + --tree-dir=$tree_dir \ + --lat-dir=$lat_dir \ + --dir=$dir || exit 1; +fi + +if [ $stage -le 17 ]; then + # Note: it's not important to give mkgraph.sh the lang directory with the + # matched topology (since it gets the topology file from the model). 
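  # Note (added for clarity): --self-loop-scale 1.0 below is the standard setting for
  # 'chain' graphs, matching the acoustic scale of 1.0 used during decoding. Once the
  # decoding stage further down has finished, the best operating point can be checked
  # with, for example,
  #   cat $dir/decode${lm_suffix}_dev_worn/wer_* | utils/best_wer.sh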
+ utils/mkgraph.sh \ + --self-loop-scale 1.0 data/lang${lm_suffix}/ \ + $tree_dir $tree_dir/graph${lm_suffix} || exit 1; +fi + +if [ -z "$extra_left_context" ]; then + extra_left_context=$chunk_left_context +fi +if [ -z "$extra_right_context" ]; then + extra_right_context=$chunk_right_context +fi + +if [ $stage -le 18 ]; then + frames_per_chunk=$(echo $chunk_width | cut -d, -f1) + rm $dir/.error 2>/dev/null || true + + for data in $test_sets; do + ( + steps/nnet3/decode.sh \ + --acwt 1.0 --post-decode-acwt 10.0 \ + --extra-left-context $extra_left_context \ + --extra-right-context $extra_right_context \ + --extra-left-context-initial 0 \ + --extra-right-context-final 0 \ + --frames-per-chunk $frames_per_chunk \ + --nj $decode_nj --cmd "$decode_cmd" --num-threads 4 \ + --online-ivector-dir exp/nnet3${nnet3_affix}/ivectors_${data}_hires \ + $tree_dir/graph${lm_suffix} data/${data}_hires ${dir}/decode${lm_suffix}_${data} || exit 1 + ) || touch $dir/.error & + done + wait + [ -f $dir/.error ] && echo "$0: there was a problem while decoding" && exit 1 +fi + +# Not testing the 'looped' decoding separately, because for +# TDNN systems it would give exactly the same results as the +# normal decoding. + +if $test_online_decoding && [ $stage -le 19 ]; then + # note: if the features change (e.g. you add pitch features), you will have to + # change the options of the following command line. + steps/online/nnet3/prepare_online_decoding.sh \ + --mfcc-config conf/mfcc_hires.conf \ + $lang exp/nnet3${nnet3_affix}/extractor ${dir} ${dir}_online + + rm $dir/.error 2>/dev/null || true + + for data in $test_sets; do + ( + nspk=$(wc -l $lang/topo + fi +fi + +if [ $stage -le 11 ]; then + # Get the alignments as lattices (gives the chain training more freedom). + # use the same num-jobs as the alignments + steps/align_fmllr_lats.sh --nj ${nj} --cmd "$train_cmd" ${lores_train_data_dir} \ + data/lang $gmm_dir $lat_dir + rm $lat_dir/fsts.*.gz # save space +fi + +if [ $stage -le 12 ]; then + # Build a tree using our new topology. We know we have alignments for the + # speed-perturbed data (local/nnet3/run_ivector_common.sh made them), so use + # those. The num-leaves is always somewhat less than the num-leaves from + # the GMM baseline. + if [ -f $tree_dir/final.mdl ]; then + echo "$0: $tree_dir/final.mdl already exists, refusing to overwrite it." 
+ exit 1; + fi + steps/nnet3/chain/build_tree.sh \ + --frame-subsampling-factor 3 \ + --context-opts "--context-width=2 --central-position=1" \ + --cmd "$train_cmd" 3500 ${lores_train_data_dir} \ + $lang $ali_dir $tree_dir +fi + +if [ $stage -le 13 ]; then + mkdir -p $dir + echo "$0: creating neural net configs using the xconfig parser"; + + num_targets=$(tree-info $tree_dir/tree |grep num-pdfs|awk '{print $2}') + learning_rate_factor=$(echo "print 0.5/$xent_regularize" | python) + + lstm_opts="decay-time=40" + + mkdir -p $dir/configs + cat < $dir/configs/network.xconfig + input dim=100 name=ivector + input dim=40 name=input + + # please note that it is important to have input layer with the name=input + # as the layer immediately preceding the fixed-affine-layer to enable + # the use of short notation for the descriptor + fixed-affine-layer name=lda input=Append(-1,0,1,ReplaceIndex(ivector, t, 0)) affine-transform-file=$dir/configs/lda.mat + idct-layer name=idct input=input dim=40 cepstral-lifter=22 affine-transform-file=$dir/configs/idct.mat + + conv-relu-batchnorm-layer name=cnn1 input=idct height-in=40 height-out=20 height-subsample-out=2 time-offsets=-1,0,1 height-offsets=-1,0,1 num-filters-out=256 learning-rate-factor=0.333 max-change=0.25 + conv-relu-batchnorm-layer name=cnn2 input=cnn1 height-in=20 height-out=20 time-offsets=-1,0,1 height-offsets=-1,0,1 num-filters-out=128 + + relu-batchnorm-layer name=affine1 input=lda dim=512 + + # the first splicing is moved before the lda layer, so no splicing here + relu-batchnorm-layer name=tdnn1 input=cnn2 dim=1024 + relu-batchnorm-layer name=tdnn2 input=Append(-1,0,1,affine1) dim=1024 + relu-batchnorm-layer name=tdnn3 input=Append(-1,0,1) dim=1024 + + # check steps/libs/nnet3/xconfig/lstm.py for the other options and defaults + fast-lstmp-layer name=lstm1 cell-dim=1024 recurrent-projection-dim=256 non-recurrent-projection-dim=256 delay=-3 dropout-proportion=0.0 $lstm_opts + relu-batchnorm-layer name=tdnn4 input=Append(-3,0,3) dim=1024 + relu-batchnorm-layer name=tdnn5 input=Append(-3,0,3) dim=1024 + relu-batchnorm-layer name=tdnn6 input=Append(-3,0,3) dim=1024 + fast-lstmp-layer name=lstm2 cell-dim=1024 recurrent-projection-dim=256 non-recurrent-projection-dim=256 delay=-3 dropout-proportion=0.0 $lstm_opts + relu-batchnorm-layer name=tdnn7 input=Append(-3,0,3) dim=1024 + relu-batchnorm-layer name=tdnn8 input=Append(-3,0,3) dim=1024 + relu-batchnorm-layer name=tdnn9 input=Append(-3,0,3) dim=1024 + fast-lstmp-layer name=lstm3 cell-dim=1024 recurrent-projection-dim=256 non-recurrent-projection-dim=256 delay=-3 dropout-proportion=0.0 $lstm_opts + + ## adding the layers for chain branch + output-layer name=output input=lstm3 output-delay=$label_delay include-log-softmax=false dim=$num_targets max-change=1.5 + + # adding the layers for xent branch + # This block prints the configs for a separate output that will be + # trained with a cross-entropy objective in the 'chain' models... this + # has the effect of regularizing the hidden parts of the model. we use + # 0.5 / args.xent_regularize as the learning rate factor- the factor of + # 0.5 / args.xent_regularize is suitable as it means the xent + # final-layer learns at a rate independent of the regularization + # constant; and the 0.5 was tuned so as to make the relative progress + # similar in the xent and regular final layers. 
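+  # As a concrete example of the formula described above: with xent_regularize=0.1
+  # the factor below is 0.5 / 0.1 = 5.0, and with xent_regularize=0.025 it is
+  # 0.5 / 0.025 = 20.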
+ output-layer name=output-xent input=lstm3 output-delay=$label_delay dim=$num_targets learning-rate-factor=$learning_rate_factor max-change=1.5 + +EOF + + steps/nnet3/xconfig_to_configs.py --xconfig-file $dir/configs/network.xconfig --config-dir $dir/configs/ +fi + +if [ $stage -le 14 ]; then + if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! -d $dir/egs/storage ]; then + utils/create_split_dir.pl \ + /export/b0{3,4,5,6}/$USER/kaldi-data/egs/chime5-$(date +'%m_%d_%H_%M')/s5/$dir/egs/storage $dir/egs/storage + fi + + mkdir -p $dir/egs + touch $dir/egs/.nodelete # keep egs around when that run dies. + + steps/nnet3/chain/train.py --stage=$train_stage \ + --cmd="$train_cmd --mem 4G" \ + --feat.online-ivector-dir=$train_ivector_dir \ + --feat.cmvn-opts="--norm-means=false --norm-vars=false" \ + --chain.xent-regularize $xent_regularize \ + --chain.leaky-hmm-coefficient=0.1 \ + --chain.l2-regularize=0.00005 \ + --chain.apply-deriv-weights=false \ + --chain.lm-opts="--num-extra-lm-states=2000" \ + --trainer.dropout-schedule $dropout_schedule \ + --trainer.num-chunk-per-minibatch 64,32 \ + --trainer.frames-per-iter 1500000 \ + --trainer.max-param-change 2.0 \ + --trainer.num-epochs $num_epochs \ + --trainer.srand=$srand \ + --trainer.optimization.shrink-value 0.99 \ + --trainer.optimization.num-jobs-initial=3 \ + --trainer.optimization.num-jobs-final=16 \ + --trainer.optimization.initial-effective-lrate=0.001 \ + --trainer.optimization.final-effective-lrate=0.0001 \ + --trainer.optimization.momentum=0.0 \ + --trainer.deriv-truncate-margin 8 \ + --egs.stage $get_egs_stage \ + --egs.opts="--frames-overlap-per-eg 0" \ + --egs.chunk-width=$chunk_width \ + --egs.chunk-left-context=$chunk_left_context \ + --egs.chunk-right-context=$chunk_right_context \ + --egs.chunk-left-context-initial=0 \ + --egs.chunk-right-context-final=0 \ + --egs.dir="$common_egs_dir" \ + --cleanup.remove-egs=$remove_egs \ + --feat-dir=$train_data_dir \ + --tree-dir=$tree_dir \ + --lat-dir=$lat_dir \ + --dir=$dir || exit 1; +fi + +if [ $stage -le 15 ]; then + # Note: it's not important to give mkgraph.sh the lang directory with the + # matched topology (since it gets the topology file from the model). + utils/mkgraph.sh \ + --self-loop-scale 1.0 data/lang${lm_suffix}/ \ + $tree_dir $tree_dir/graph${lm_suffix} || exit 1; +fi + +if [ $stage -le 16 ]; then + frames_per_chunk=$(echo $chunk_width | cut -d, -f1) + rm $dir/.error 2>/dev/null || true + + for data in $test_sets; do + ( + steps/nnet3/decode.sh \ + --acwt 1.0 --post-decode-acwt 10.0 \ + --extra-left-context $chunk_left_context \ + --extra-right-context $chunk_right_context \ + --extra-left-context-initial 0 \ + --extra-right-context-final 0 \ + --frames-per-chunk $frames_per_chunk \ + --nj 8 --cmd "$decode_cmd" --num-threads 4 \ + --online-ivector-dir exp/nnet3${nnet3_affix}/ivectors_${data}_hires \ + $tree_dir/graph${lm_suffix} data/${data}_hires ${dir}/decode${lm_suffix}_${data} || exit 1 + ) || touch $dir/.error & + done + wait + [ -f $dir/.error ] && echo "$0: there was a problem while decoding" && exit 1 +fi + +# Not testing the 'looped' decoding separately, because for +# TDNN systems it would give exactly the same results as the +# normal decoding. + +if $test_online_decoding && [ $stage -le 17 ]; then + # note: if the features change (e.g. you add pitch features), you will have to + # change the options of the following command line. 
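+  # This recipe uses plain hires MFCCs (conf/mfcc_hires.conf), so passing only
+  # --mfcc-config is sufficient here; if pitch or other features were added, the
+  # matching feature options would also have to be supplied (see the usage message
+  # of steps/online/nnet3/prepare_online_decoding.sh for what is available).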
+ steps/online/nnet3/prepare_online_decoding.sh \ + --mfcc-config conf/mfcc_hires.conf \ + $lang exp/nnet3${nnet3_affix}/extractor ${dir} ${dir}_online + + rm $dir/.error 2>/dev/null || true + + for data in $test_sets; do + ( + nspk=$(wc -l $lang/topo + fi +fi + +if [ $stage -le 11 ]; then + # Get the alignments as lattices (gives the chain training more freedom). + # use the same num-jobs as the alignments + steps/align_fmllr_lats.sh --nj ${nj} --cmd "$train_cmd" ${lores_train_data_dir} \ + data/lang $gmm_dir $lat_dir + rm $lat_dir/fsts.*.gz # save space +fi + +if [ $stage -le 12 ]; then + # Build a tree using our new topology. We know we have alignments for the + # speed-perturbed data (local/nnet3/run_ivector_common.sh made them), so use + # those. The num-leaves is always somewhat less than the num-leaves from + # the GMM baseline. + if [ -f $tree_dir/final.mdl ]; then + echo "$0: $tree_dir/final.mdl already exists, refusing to overwrite it." + exit 1; + fi + steps/nnet3/chain/build_tree.sh \ + --frame-subsampling-factor 3 \ + --context-opts "--context-width=2 --central-position=1" \ + --cmd "$train_cmd" 3500 ${lores_train_data_dir} \ + $lang $ali_dir $tree_dir +fi + + +if [ $stage -le 13 ]; then + mkdir -p $dir + echo "$0: creating neural net configs using the xconfig parser"; + + num_targets=$(tree-info $tree_dir/tree |grep num-pdfs|awk '{print $2}') + learning_rate_factor=$(echo "print 0.5/$xent_regularize" | python) + opts="l2-regularize=0.05" + output_opts="l2-regularize=0.01 bottleneck-dim=320" + + mkdir -p $dir/configs + cat < $dir/configs/network.xconfig + input dim=100 name=ivector + input dim=40 name=input + + # please note that it is important to have input layer with the name=input + # as the layer immediately preceding the fixed-affine-layer to enable + # the use of short notation for the descriptor + fixed-affine-layer name=lda input=Append(-2,-1,0,1,2,ReplaceIndex(ivector, t, 0)) affine-transform-file=$dir/configs/lda.mat + + # the first splicing is moved before the lda layer, so no splicing here + relu-batchnorm-layer name=tdnn1 $opts dim=512 + relu-batchnorm-layer name=tdnn2 $opts dim=512 input=Append(-1,0,1) + relu-batchnorm-layer name=tdnn3 $opts dim=512 + relu-batchnorm-layer name=tdnn4 $opts dim=512 input=Append(-1,0,1) + relu-batchnorm-layer name=tdnn5 $opts dim=512 + relu-batchnorm-layer name=tdnn6 $opts dim=512 input=Append(-3,0,3) + relu-batchnorm-layer name=tdnn7 $opts dim=512 input=Append(-3,0,3) + relu-batchnorm-layer name=tdnn8 $opts dim=512 input=Append(-6,-3,0) + + ## adding the layers for chain branch + relu-batchnorm-layer name=prefinal-chain $opts dim=512 target-rms=0.5 + output-layer name=output include-log-softmax=false $output_opts dim=$num_targets max-change=1.5 + + # adding the layers for xent branch + # This block prints the configs for a separate output that will be + # trained with a cross-entropy objective in the 'chain' models... this + # has the effect of regularizing the hidden parts of the model. we use + # 0.5 / args.xent_regularize as the learning rate factor- the factor of + # 0.5 / args.xent_regularize is suitable as it means the xent + # final-layer learns at a rate independent of the regularization + # constant; and the 0.5 was tuned so as to make the relative progress + # similar in the xent and regular final layers. 
+ relu-batchnorm-layer name=prefinal-xent input=tdnn8 $opts dim=512 target-rms=0.5 + output-layer name=output-xent $output_opts dim=$num_targets learning-rate-factor=$learning_rate_factor max-change=1.5 +EOF + steps/nnet3/xconfig_to_configs.py --xconfig-file $dir/configs/network.xconfig --config-dir $dir/configs/ +fi + +if [ $stage -le 14 ]; then + if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! -d $dir/egs/storage ]; then + utils/create_split_dir.pl \ + /export/b0{3,4,5,6}/$USER/kaldi-data/egs/chime5-$(date +'%m_%d_%H_%M')/s5/$dir/egs/storage $dir/egs/storage + fi + + steps/nnet3/chain/train.py --stage=$train_stage \ + --cmd="$decode_cmd" \ + --feat.online-ivector-dir=$train_ivector_dir \ + --feat.cmvn-opts="--norm-means=false --norm-vars=false" \ + --chain.xent-regularize $xent_regularize \ + --chain.leaky-hmm-coefficient=0.1 \ + --chain.l2-regularize=0.00005 \ + --chain.apply-deriv-weights=false \ + --chain.lm-opts="--num-extra-lm-states=2000" \ + --trainer.srand=$srand \ + --trainer.max-param-change=2.0 \ + --trainer.num-epochs=10 \ + --trainer.frames-per-iter=3000000 \ + --trainer.optimization.num-jobs-initial=2 \ + --trainer.optimization.num-jobs-final=4 \ + --trainer.optimization.initial-effective-lrate=0.001 \ + --trainer.optimization.final-effective-lrate=0.0001 \ + --trainer.optimization.shrink-value=1.0 \ + --trainer.num-chunk-per-minibatch=256,128,64 \ + --trainer.optimization.momentum=0.0 \ + --egs.chunk-width=$chunk_width \ + --egs.chunk-left-context=$chunk_left_context \ + --egs.chunk-right-context=$chunk_right_context \ + --egs.chunk-left-context-initial=0 \ + --egs.chunk-right-context-final=0 \ + --egs.dir="$common_egs_dir" \ + --egs.opts="--frames-overlap-per-eg 0" \ + --cleanup.remove-egs=$remove_egs \ + --use-gpu=true \ + --feat-dir=$train_data_dir \ + --tree-dir=$tree_dir \ + --lat-dir=$lat_dir \ + --dir=$dir || exit 1; +fi + +if [ $stage -le 15 ]; then + # Note: it's not important to give mkgraph.sh the lang directory with the + # matched topology (since it gets the topology file from the model). + utils/mkgraph.sh \ + --self-loop-scale 1.0 data/lang${lm_suffix}/ \ + $tree_dir $tree_dir/graph${lm_suffix} || exit 1; +fi + +if [ $stage -le 16 ]; then + frames_per_chunk=$(echo $chunk_width | cut -d, -f1) + rm $dir/.error 2>/dev/null || true + + for data in $test_sets; do + ( + steps/nnet3/decode.sh \ + --acwt 1.0 --post-decode-acwt 10.0 \ + --frames-per-chunk $frames_per_chunk \ + --nj 8 --cmd "$decode_cmd" --num-threads 4 \ + --online-ivector-dir exp/nnet3${nnet3_affix}/ivectors_${data}_hires \ + $tree_dir/graph${lm_suffix} data/${data}_hires ${dir}/decode${lm_suffix}_${data} || exit 1 + ) || touch $dir/.error & + done + wait + [ -f $dir/.error ] && echo "$0: there was a problem while decoding" && exit 1 +fi + +# Not testing the 'looped' decoding separately, because for +# TDNN systems it would give exactly the same results as the +# normal decoding. + +if $test_online_decoding && [ $stage -le 17 ]; then + # note: if the features change (e.g. you add pitch features), you will have to + # change the options of the following command line. 
+ steps/online/nnet3/prepare_online_decoding.sh \ + --mfcc-config conf/mfcc_hires.conf \ + $lang exp/nnet3${nnet3_affix}/extractor ${dir} ${dir}_online + + rm $dir/.error 2>/dev/null || true + + for data in $test_sets; do + ( + nspk=$(wc -l 2776 combine=-0.134->-0.133 (over 3) xent:train/valid[285,428,final]=(-2.37,-1.95,-1.95/-2.19,-1.90,-1.91) logprob:train/valid[285,428,final]=(-0.201,-0.125,-0.124/-0.198,-0.147,-0.148) + +set -e + +# configs for 'chain' +stage=0 +nj=96 +train_set=train_worn_u400k +test_sets="dev_worn dev_beamformit_ref" +gmm=tri3 +nnet3_affix=_train_worn_u400k +lm_suffix= + +# The rest are configs specific to this script. Most of the parameters +# are just hardcoded at this level, in the commands below. +affix=1b # affix for the TDNN directory name +tree_affix= +train_stage=-10 +get_egs_stage=-10 +decode_iter= + +num_epochs=4 +common_egs_dir= +# training options +# training chunk-options +chunk_width=140,100,160 +xent_regularize=0.1 +dropout_schedule='0,0@0.20,0.5@0.50,0' + +# training options +srand=0 +remove_egs=true + +#decode options +test_online_decoding=false # if true, it will run the last decoding stage. + +# End configuration section. +echo "$0 $@" # Print the command line for logging + +. ./cmd.sh +. ./path.sh +. ./utils/parse_options.sh + +if ! cuda-compiled; then + cat <$lang/topo + fi +fi + +if [ $stage -le 11 ]; then + # Get the alignments as lattices (gives the chain training more freedom). + # use the same num-jobs as the alignments + steps/align_fmllr_lats.sh --nj ${nj} --cmd "$train_cmd" --generate-ali-from-lats true \ + ${lores_train_data_dir} \ + data/lang $gmm_dir $lat_dir + rm $lat_dir/fsts.*.gz # save space +fi + +if [ $stage -le 12 ]; then + # Build a tree using our new topology. We know we have alignments for the + # speed-perturbed data (local/nnet3/run_ivector_common.sh made them), so use + # those. The num-leaves is always somewhat less than the num-leaves from + # the GMM baseline. + if [ -f $tree_dir/final.mdl ]; then + echo "$0: $tree_dir/final.mdl already exists, refusing to overwrite it." 
+ exit 1; + fi + steps/nnet3/chain/build_tree.sh \ + --frame-subsampling-factor 3 \ + --cmd "$train_cmd" 3500 ${lores_train_data_dir} \ + $lang $lat_dir $tree_dir +fi + +if [ $stage -le 13 ]; then + mkdir -p $dir + echo "$0: creating neural net configs using the xconfig parser"; + + num_targets=$(tree-info $tree_dir/tree |grep num-pdfs|awk '{print $2}') + learning_rate_factor=$(echo "print 0.5/$xent_regularize" | python) + affine_opts="l2-regularize=0.01 dropout-proportion=0.0 dropout-per-dim=true dropout-per-dim-continuous=true" + tdnnf_opts="l2-regularize=0.01 dropout-proportion=0.0 bypass-scale=0.66" + linear_opts="l2-regularize=0.01 orthonormal-constraint=-1.0" + prefinal_opts="l2-regularize=0.01" + output_opts="l2-regularize=0.002" + + mkdir -p $dir/configs + cat < $dir/configs/network.xconfig + input dim=100 name=ivector + input dim=40 name=input + + # please note that it is important to have input layer with the name=input + # as the layer immediately preceding the fixed-affine-layer to enable + # the use of short notation for the descriptor + fixed-affine-layer name=lda input=Append(-1,0,1,ReplaceIndex(ivector, t, 0)) affine-transform-file=$dir/configs/lda.mat + + # the first splicing is moved before the lda layer, so no splicing here + relu-batchnorm-dropout-layer name=tdnn1 $affine_opts dim=1536 + tdnnf-layer name=tdnnf2 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=1 + tdnnf-layer name=tdnnf3 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=1 + tdnnf-layer name=tdnnf4 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=1 + tdnnf-layer name=tdnnf5 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=0 + tdnnf-layer name=tdnnf6 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3 + tdnnf-layer name=tdnnf7 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3 + tdnnf-layer name=tdnnf8 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3 + tdnnf-layer name=tdnnf9 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3 + tdnnf-layer name=tdnnf10 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3 + tdnnf-layer name=tdnnf11 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3 + tdnnf-layer name=tdnnf12 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3 + tdnnf-layer name=tdnnf13 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3 + tdnnf-layer name=tdnnf14 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3 + tdnnf-layer name=tdnnf15 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3 + linear-component name=prefinal-l dim=256 $linear_opts + + prefinal-layer name=prefinal-chain input=prefinal-l $prefinal_opts big-dim=1536 small-dim=256 + output-layer name=output include-log-softmax=false dim=$num_targets $output_opts + + prefinal-layer name=prefinal-xent input=prefinal-l $prefinal_opts big-dim=1536 small-dim=256 + output-layer name=output-xent dim=$num_targets learning-rate-factor=$learning_rate_factor $output_opts +EOF + steps/nnet3/xconfig_to_configs.py --xconfig-file $dir/configs/network.xconfig --config-dir $dir/configs/ +fi + +if [ $stage -le 14 ]; then + if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! 
-d $dir/egs/storage ]; then + utils/create_split_dir.pl \ + /export/b0{3,4,5,6}/$USER/kaldi-data/egs/chime5-$(date +'%m_%d_%H_%M')/s5/$dir/egs/storage $dir/egs/storage + fi + + steps/nnet3/chain/train.py --stage $train_stage \ + --cmd "$train_cmd --mem 4G" \ + --feat.online-ivector-dir=$train_ivector_dir \ + --feat.cmvn-opts "--norm-means=false --norm-vars=false" \ + --chain.xent-regularize $xent_regularize \ + --chain.leaky-hmm-coefficient 0.1 \ + --chain.l2-regularize 0.0 \ + --chain.apply-deriv-weights false \ + --chain.lm-opts="--num-extra-lm-states=2000" \ + --trainer.dropout-schedule "$dropout_schedule" \ + --trainer.add-option="--optimization.memory-compression-level=2" \ + --egs.dir "$common_egs_dir" \ + --egs.stage $get_egs_stage \ + --egs.opts "--frames-overlap-per-eg 0" \ + --egs.chunk-width $chunk_width \ + --trainer.num-chunk-per-minibatch 64 \ + --trainer.frames-per-iter 1500000 \ + --trainer.num-epochs $num_epochs \ + --trainer.optimization.num-jobs-initial 3 \ + --trainer.optimization.num-jobs-final 16 \ + --trainer.optimization.initial-effective-lrate 0.00025 \ + --trainer.optimization.final-effective-lrate 0.000025 \ + --trainer.max-param-change 2.0 \ + --cleanup.remove-egs $remove_egs \ + --feat-dir=$train_data_dir \ + --tree-dir=$tree_dir \ + --lat-dir=$lat_dir \ + --dir $dir || exit 1; + +fi + +if [ $stage -le 15 ]; then + # Note: it's not important to give mkgraph.sh the lang directory with the + # matched topology (since it gets the topology file from the model). + utils/mkgraph.sh \ + --self-loop-scale 1.0 data/lang${lm_suffix}/ \ + $tree_dir $tree_dir/graph${lm_suffix} || exit 1; +fi + +if [ $stage -le 16 ]; then + frames_per_chunk=$(echo $chunk_width | cut -d, -f1) + rm $dir/.error 2>/dev/null || true + + for data in $test_sets; do + ( + steps/nnet3/decode.sh \ + --acwt 1.0 --post-decode-acwt 10.0 \ + --frames-per-chunk $frames_per_chunk \ + --nj 8 --cmd "$decode_cmd" --num-threads 4 \ + --online-ivector-dir exp/nnet3${nnet3_affix}/ivectors_${data}_hires \ + $tree_dir/graph${lm_suffix} data/${data}_hires ${dir}/decode${lm_suffix}_${data} || exit 1 + ) || touch $dir/.error & + done + wait + [ -f $dir/.error ] && echo "$0: there was a problem while decoding" && exit 1 +fi + +exit 0; diff --git a/egs/chime6/s5_track1/local/chain/tuning/run_tdnn_lstm_1a.sh b/egs/chime6/s5_track1/local/chain/tuning/run_tdnn_lstm_1a.sh new file mode 100755 index 00000000000..e3d8e6ac4dc --- /dev/null +++ b/egs/chime6/s5_track1/local/chain/tuning/run_tdnn_lstm_1a.sh @@ -0,0 +1,297 @@ +#!/bin/bash + +# Set -e here so that we catch if any executable fails immediately +set -euo pipefail + +# First the options that are passed through to run_ivector_common.sh +# (some of which are also used in this script directly). +stage=0 +nj=96 +train_set=train_worn_u400k_cleaned +test_sets="dev_worn dev_beamformit_ref" +gmm=tri3_cleaned +nnet3_affix=_train_worn_u400k_cleaned +lm_suffix= + +# The rest are configs specific to this script. Most of the parameters +# are just hardcoded at this level, in the commands below. 
+affix=_1a # affix for the TDNN directory name +tree_affix= +train_stage=-10 +get_egs_stage=-10 +decode_iter= + +common_egs_dir= + +hidden_dim=1024 +cell_dim=1024 +projection_dim=256 + +# training options +num_epochs=2 # 2 works better than 4 +chunk_width=140,100,160 +chunk_left_context=40 +chunk_right_context=0 +dropout_schedule='0,0@0.20,0.3@0.50,0' +xent_regularize=0.025 +label_delay=5 + +# decode options +extra_left_context=50 +extra_right_context=0 + +# training options +srand=0 +remove_egs=true + +#decode options +test_online_decoding=false # if true, it will run the last decoding stage. + + +# End configuration section. +echo "$0 $@" # Print the command line for logging + +. ./cmd.sh +. ./path.sh +. ./utils/parse_options.sh + +if ! cuda-compiled; then + cat <$lang/topo + fi +fi + +if [ $stage -le 11 ]; then + # Get the alignments as lattices (gives the chain training more freedom). + # use the same num-jobs as the alignments + steps/align_fmllr_lats.sh --nj ${nj} --cmd "$train_cmd" ${lores_train_data_dir} \ + data/lang $gmm_dir $lat_dir + rm $lat_dir/fsts.*.gz # save space +fi + +if [ $stage -le 12 ]; then + # Build a tree using our new topology. We know we have alignments for the + # speed-perturbed data (local/nnet3/run_ivector_common.sh made them), so use + # those. The num-leaves is always somewhat less than the num-leaves from + # the GMM baseline. + if [ -f $tree_dir/final.mdl ]; then + echo "$0: $tree_dir/final.mdl already exists, refusing to overwrite it." + exit 1; + fi + steps/nnet3/chain/build_tree.sh \ + --frame-subsampling-factor 3 \ + --context-opts "--context-width=2 --central-position=1" \ + --cmd "$train_cmd" 3500 ${lores_train_data_dir} \ + $lang $ali_dir $tree_dir +fi + +if [ $stage -le 13 ]; then + mkdir -p $dir + echo "$0: creating neural net configs using the xconfig parser"; + + num_targets=$(tree-info $tree_dir/tree |grep num-pdfs|awk '{print $2}') + learning_rate_factor=$(echo "print 0.5/$xent_regularize" | python) + + lstm_opts="decay-time=40" + + mkdir -p $dir/configs + cat < $dir/configs/network.xconfig + input dim=100 name=ivector + input dim=40 name=input + + # please note that it is important to have input layer with the name=input + # as the layer immediately preceding the fixed-affine-layer to enable + # the use of short notation for the descriptor + fixed-affine-layer name=lda input=Append(-2,-1,0,1,2,ReplaceIndex(ivector, t, 0)) affine-transform-file=$dir/configs/lda.mat + + # the first splicing is moved before the lda layer, so no splicing here + relu-batchnorm-layer name=tdnn1 dim=$hidden_dim + relu-batchnorm-layer name=tdnn2 input=Append(-1,0,1) dim=$hidden_dim + relu-batchnorm-layer name=tdnn3 input=Append(-1,0,1) dim=$hidden_dim + + fast-lstmp-layer name=lstm1 cell-dim=$cell_dim recurrent-projection-dim=$projection_dim non-recurrent-projection-dim=$projection_dim delay=-3 dropout-proportion=0.0 $lstm_opts + relu-batchnorm-layer name=tdnn4 input=Append(-3,0,3) dim=$hidden_dim + relu-batchnorm-layer name=tdnn5 input=Append(-3,0,3) dim=$hidden_dim + fast-lstmp-layer name=lstm2 cell-dim=$cell_dim recurrent-projection-dim=$projection_dim non-recurrent-projection-dim=$projection_dim delay=-3 dropout-proportion=0.0 $lstm_opts + relu-batchnorm-layer name=tdnn6 input=Append(-3,0,3) dim=$hidden_dim + relu-batchnorm-layer name=tdnn7 input=Append(-3,0,3) dim=$hidden_dim + fast-lstmp-layer name=lstm3 cell-dim=$cell_dim recurrent-projection-dim=$projection_dim non-recurrent-projection-dim=$projection_dim delay=-3 dropout-proportion=0.0 $lstm_opts + 
relu-batchnorm-layer name=tdnn8 input=Append(-3,0,3) dim=$hidden_dim + relu-batchnorm-layer name=tdnn9 input=Append(-3,0,3) dim=$hidden_dim + fast-lstmp-layer name=lstm4 cell-dim=$cell_dim recurrent-projection-dim=$projection_dim non-recurrent-projection-dim=$projection_dim delay=-3 dropout-proportion=0.0 $lstm_opts + + ## adding the layers for chain branch + output-layer name=output input=lstm4 output-delay=$label_delay include-log-softmax=false dim=$num_targets max-change=1.5 + + # adding the layers for xent branch + # This block prints the configs for a separate output that will be + # trained with a cross-entropy objective in the 'chain' models... this + # has the effect of regularizing the hidden parts of the model. we use + # 0.5 / args.xent_regularize as the learning rate factor- the factor of + # 0.5 / args.xent_regularize is suitable as it means the xent + # final-layer learns at a rate independent of the regularization + # constant; and the 0.5 was tuned so as to make the relative progress + # similar in the xent and regular final layers. + output-layer name=output-xent input=lstm4 output-delay=$label_delay dim=$num_targets learning-rate-factor=$learning_rate_factor max-change=1.5 + +EOF + steps/nnet3/xconfig_to_configs.py --xconfig-file $dir/configs/network.xconfig --config-dir $dir/configs/ +fi + +if [ $stage -le 14 ]; then + if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! -d $dir/egs/storage ]; then + utils/create_split_dir.pl \ + /export/b0{3,4,5,6}/$USER/kaldi-data/egs/chime5-$(date +'%m_%d_%H_%M')/s5/$dir/egs/storage $dir/egs/storage + fi + + mkdir -p $dir/egs + touch $dir/egs/.nodelete # keep egs around when that run dies. + + steps/nnet3/chain/train.py --stage=$train_stage \ + --cmd="$train_cmd --mem 4G" \ + --feat.online-ivector-dir=$train_ivector_dir \ + --feat.cmvn-opts="--norm-means=false --norm-vars=false" \ + --chain.xent-regularize $xent_regularize \ + --chain.leaky-hmm-coefficient=0.1 \ + --chain.l2-regularize=0.00005 \ + --chain.apply-deriv-weights=false \ + --chain.lm-opts="--num-extra-lm-states=2000" \ + --trainer.dropout-schedule $dropout_schedule \ + --trainer.num-chunk-per-minibatch 64,32 \ + --trainer.frames-per-iter 1500000 \ + --trainer.max-param-change 2.0 \ + --trainer.num-epochs $num_epochs \ + --trainer.srand=$srand \ + --trainer.optimization.shrink-value 0.99 \ + --trainer.optimization.num-jobs-initial=3 \ + --trainer.optimization.num-jobs-final=16 \ + --trainer.optimization.initial-effective-lrate=0.001 \ + --trainer.optimization.final-effective-lrate=0.0001 \ + --trainer.optimization.momentum=0.0 \ + --trainer.deriv-truncate-margin 8 \ + --egs.stage $get_egs_stage \ + --egs.opts="--frames-overlap-per-eg 0" \ + --egs.chunk-width=$chunk_width \ + --egs.chunk-left-context=$chunk_left_context \ + --egs.chunk-right-context=$chunk_right_context \ + --egs.chunk-left-context-initial=0 \ + --egs.chunk-right-context-final=0 \ + --egs.dir="$common_egs_dir" \ + --cleanup.remove-egs=$remove_egs \ + --feat-dir=$train_data_dir \ + --tree-dir=$tree_dir \ + --lat-dir=$lat_dir \ + --dir=$dir || exit 1; +fi + +if [ $stage -le 15 ]; then + # Note: it's not important to give mkgraph.sh the lang directory with the + # matched topology (since it gets the topology file from the model). 
+ utils/mkgraph.sh \ + --self-loop-scale 1.0 data/lang${lm_suffix}/ \ + $tree_dir $tree_dir/graph${lm_suffix} || exit 1; +fi + +if [ $stage -le 16 ]; then + frames_per_chunk=$(echo $chunk_width | cut -d, -f1) + rm $dir/.error 2>/dev/null || true + + for data in $test_sets; do + ( + steps/nnet3/decode.sh \ + --acwt 1.0 --post-decode-acwt 10.0 \ + --extra-left-context $chunk_left_context \ + --extra-right-context $chunk_right_context \ + --extra-left-context-initial 0 \ + --extra-right-context-final 0 \ + --frames-per-chunk $frames_per_chunk \ + --nj 8 --cmd "$decode_cmd" --num-threads 4 \ + --online-ivector-dir exp/nnet3${nnet3_affix}/ivectors_${data}_hires \ + $tree_dir/graph${lm_suffix} data/${data}_hires ${dir}/decode${lm_suffix}_${data} || exit 1 + ) || touch $dir/.error & + done + wait + [ -f $dir/.error ] && echo "$0: there was a problem while decoding" && exit 1 +fi + +# Not testing the 'looped' decoding separately, because for +# TDNN systems it would give exactly the same results as the +# normal decoding. + +if $test_online_decoding && [ $stage -le 17 ]; then + # note: if the features change (e.g. you add pitch features), you will have to + # change the options of the following command line. + steps/online/nnet3/prepare_online_decoding.sh \ + --mfcc-config conf/mfcc_hires.conf \ + $lang exp/nnet3${nnet3_affix}/extractor ${dir} ${dir}_online + + rm $dir/.error 2>/dev/null || true + + for data in $test_sets; do + ( + nspk=$(wc -l ) + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED +# WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, +# MERCHANTABLITY OR NON-INFRINGEMENT. +# See the Apache 2 License for the specific language governing permissions and +# limitations under the License. + +[ -f ./path.sh ] && . ./path.sh + +command -v uconv &>/dev/null \ + || { echo >&2 "uconv not found on PATH. You will have to install ICU4C"; exit 1; } + +command -v ngram &>/dev/null \ + || { echo >&2 "srilm not found on PATH. Please use the script $KALDI_ROOT/tools/extras/install_srilm.sh to install it"; exit 1; } + +if [ -z ${LIBLBFGS} ]; then + echo >&2 "SRILM is not compiled with the support of MaxEnt models." + echo >&2 "You should use the script in \$KALDI_ROOT/tools/install_srilm.sh" + echo >&2 "which will take care of compiling the SRILM with MaxEnt support" + exit 1; +fi + +sox=`command -v sox 2>/dev/null` \ + || { echo >&2 "sox not found on PATH. Please install it manually (you will need version 14.4.0 and higher)."; exit 1; } + +# If sox is found on path, check if the version is correct +if [ ! -z "$sox" ]; then + sox_version=`$sox --version 2>&1| head -1 | sed -e 's?.*: ??' -e 's?.* ??'` + if [[ ! $sox_version =~ v14.4.* ]]; then + echo "Unsupported sox version $sox_version found on path. You will need version v14.4.0 and higher." + exit 1 + fi +fi + +command -v phonetisaurus-align &>/dev/null \ + || { echo >&2 "Phonetisaurus not found on PATH. Please use the script $KALDI_ROOT/tools/extras/install_phonetisaurus.sh to install it"; exit 1; } + +command -v BeamformIt &>/dev/null \ + || { echo >&2 "BeamformIt not found on PATH. 
Please use the script $KALDI_ROOT/tools/extras/install_beamformit.sh to install it"; exit 1; } + +miniconda_dir=$HOME/miniconda3/ +if [ ! -d $miniconda_dir ]; then + echo "$miniconda_dir does not exist. Please run '../../../tools/extras/install_miniconda.sh'" +fi + +# check if WPE is installed +result=`$miniconda_dir/bin/python -c "\ +try: + import nara_wpe + print('1') +except ImportError: + print('0')"` + +if [ "$result" == "1" ]; then + echo "WPE is installed" +else + echo "WPE is not installed. Please run ../../../tools/extras/install_wpe.sh" + exit 1 +fi + +exit 0 diff --git a/egs/chime6/s5_track1/local/copy_lat_dir_parallel.sh b/egs/chime6/s5_track1/local/copy_lat_dir_parallel.sh new file mode 100755 index 00000000000..82839604c9e --- /dev/null +++ b/egs/chime6/s5_track1/local/copy_lat_dir_parallel.sh @@ -0,0 +1,97 @@ +#!/bin/bash + +cmd=queue.pl +nj=40 +stage=0 +speed_perturb=true + +. ./path.sh +. utils/parse_options.sh + +if [ $# -ne 4 ]; then + echo "Usage: $0 " + exit 1 +fi + +utt_map=$1 +data=$2 +srcdir=$3 +dir=$4 + +mkdir -p $dir + +cp $srcdir/{phones.txt,tree,final.mdl} $dir || exit 1 +cp $srcdir/{final.alimdl,final.occs,splice_opts,cmvn_opts,delta_opts,final.mat,full.mat} 2>/dev/null || true + +nj_src=$(cat $srcdir/num_jobs) || exit 1 + +if [ $stage -le 1 ]; then + $cmd JOB=1:$nj_src $dir/log/copy_lats_orig.JOB.log \ + lattice-copy "ark:gunzip -c $srcdir/lat.JOB.gz |" \ + ark,scp:$dir/lat_orig.JOB.ark,$dir/lat_orig.JOB.scp || exit 1 +fi + +for n in $(seq $nj_src); do + cat $dir/lat_orig.$n.scp +done > $dir/lat_orig.scp || exit 1 + +if $speed_perturb; then + for s in 0.9 1.1; do + awk -v s=$s '{print "sp"s"-"$1" sp"s"-"$2}' $utt_map + done | cat - $utt_map | sort -k1,1 > $dir/utt_map + utt_map=$dir/utt_map +fi + +if [ $stage -le 2 ]; then + utils/filter_scp.pl -f 2 $dir/lat_orig.scp < $utt_map | \ + utils/apply_map.pl -f 2 $dir/lat_orig.scp > \ + $dir/lat.scp || exit 1 + + if [ ! -s $dir/lat.scp ]; then + echo "$0: $dir/lat.scp is empty. Something went wrong!" + exit 1 + fi +fi + +utils/split_data.sh $data $nj + +if [ $stage -le 3 ]; then + $cmd JOB=1:$nj $dir/log/copy_lats.JOB.log \ + lattice-copy "scp:utils/filter_scp.pl $data/split$nj/JOB/utt2spk $dir/lat.scp |" \ + "ark:|gzip -c > $dir/lat.JOB.gz" || exit 1 +fi + +echo $nj > $dir/num_jobs + +if [ -f $srcdir/ali.1.gz ]; then + if [ $stage -le 4 ]; then + $cmd JOB=1:$nj_src $dir/log/copy_ali_orig.JOB.log \ + copy-int-vector "ark:gunzip -c $srcdir/ali.JOB.gz |" \ + ark,scp:$dir/ali_orig.JOB.ark,$dir/ali_orig.JOB.scp || exit 1 + fi + + for n in $(seq $nj_src); do + cat $dir/ali_orig.$n.scp + done > $dir/ali_orig.scp || exit 1 + + if [ $stage -le 5 ]; then + utils/filter_scp.pl -f 2 $dir/ali_orig.scp < $utt_map | \ + utils/apply_map.pl -f 2 $dir/ali_orig.scp > \ + $dir/ali.scp || exit 1 + + if [ ! -s $dir/ali.scp ]; then + echo "$0: $dir/ali.scp is empty. Something went wrong!" 
+ exit 1 + fi + fi + + utils/split_data.sh $data $nj + + if [ $stage -le 6 ]; then + $cmd JOB=1:$nj $dir/log/copy_ali.JOB.log \ + copy-int-vector "scp:utils/filter_scp.pl $data/split$nj/JOB/utt2spk $dir/ali.scp |" \ + "ark:|gzip -c > $dir/ali.JOB.gz" || exit 1 + fi +fi + +rm $dir/lat_orig.*.{ark,scp} $dir/ali_orig.*.{ark,scp} 2>/dev/null || true diff --git a/egs/chime6/s5_track1/local/distant_audio_list b/egs/chime6/s5_track1/local/distant_audio_list new file mode 100644 index 00000000000..fc7aff15cd0 --- /dev/null +++ b/egs/chime6/s5_track1/local/distant_audio_list @@ -0,0 +1,376 @@ +S03_U01.CH1 +S03_U01.CH2 +S03_U01.CH3 +S03_U01.CH4 +S03_U02.CH1 +S03_U02.CH2 +S03_U02.CH3 +S03_U02.CH4 +S03_U03.CH1 +S03_U03.CH2 +S03_U03.CH3 +S03_U03.CH4 +S03_U04.CH1 +S03_U04.CH2 +S03_U04.CH3 +S03_U04.CH4 +S03_U05.CH1 +S03_U05.CH2 +S03_U05.CH3 +S03_U05.CH4 +S03_U06.CH1 +S03_U06.CH2 +S03_U06.CH3 +S03_U06.CH4 +S04_U01.CH1 +S04_U01.CH2 +S04_U01.CH3 +S04_U01.CH4 +S04_U02.CH1 +S04_U02.CH2 +S04_U02.CH3 +S04_U02.CH4 +S04_U03.CH1 +S04_U03.CH2 +S04_U03.CH3 +S04_U03.CH4 +S04_U04.CH1 +S04_U04.CH2 +S04_U04.CH3 +S04_U04.CH4 +S04_U05.CH1 +S04_U05.CH2 +S04_U05.CH3 +S04_U05.CH4 +S04_U06.CH1 +S04_U06.CH2 +S04_U06.CH3 +S04_U06.CH4 +S05_U01.CH1 +S05_U01.CH2 +S05_U01.CH3 +S05_U01.CH4 +S05_U02.CH1 +S05_U02.CH2 +S05_U02.CH3 +S05_U02.CH4 +S05_U04.CH1 +S05_U04.CH2 +S05_U04.CH3 +S05_U04.CH4 +S05_U05.CH1 +S05_U05.CH2 +S05_U05.CH3 +S05_U05.CH4 +S05_U06.CH1 +S05_U06.CH2 +S05_U06.CH3 +S05_U06.CH4 +S06_U01.CH1 +S06_U01.CH2 +S06_U01.CH3 +S06_U01.CH4 +S06_U02.CH1 +S06_U02.CH2 +S06_U02.CH3 +S06_U02.CH4 +S06_U03.CH1 +S06_U03.CH2 +S06_U03.CH3 +S06_U03.CH4 +S06_U04.CH1 +S06_U04.CH2 +S06_U04.CH3 +S06_U04.CH4 +S06_U05.CH1 +S06_U05.CH2 +S06_U05.CH3 +S06_U05.CH4 +S06_U06.CH1 +S06_U06.CH2 +S06_U06.CH3 +S06_U06.CH4 +S07_U01.CH1 +S07_U01.CH2 +S07_U01.CH3 +S07_U01.CH4 +S07_U02.CH1 +S07_U02.CH2 +S07_U02.CH3 +S07_U02.CH4 +S07_U03.CH1 +S07_U03.CH2 +S07_U03.CH3 +S07_U03.CH4 +S07_U04.CH1 +S07_U04.CH2 +S07_U04.CH3 +S07_U04.CH4 +S07_U05.CH1 +S07_U05.CH2 +S07_U05.CH3 +S07_U05.CH4 +S07_U06.CH1 +S07_U06.CH2 +S07_U06.CH3 +S07_U06.CH4 +S08_U01.CH1 +S08_U01.CH2 +S08_U01.CH3 +S08_U01.CH4 +S08_U02.CH1 +S08_U02.CH2 +S08_U02.CH3 +S08_U02.CH4 +S08_U03.CH1 +S08_U03.CH2 +S08_U03.CH3 +S08_U03.CH4 +S08_U04.CH1 +S08_U04.CH2 +S08_U04.CH3 +S08_U04.CH4 +S08_U05.CH1 +S08_U05.CH2 +S08_U05.CH3 +S08_U05.CH4 +S08_U06.CH1 +S08_U06.CH2 +S08_U06.CH3 +S08_U06.CH4 +S12_U01.CH1 +S12_U01.CH2 +S12_U01.CH3 +S12_U01.CH4 +S12_U02.CH1 +S12_U02.CH2 +S12_U02.CH3 +S12_U02.CH4 +S12_U03.CH1 +S12_U03.CH2 +S12_U03.CH3 +S12_U03.CH4 +S12_U04.CH1 +S12_U04.CH2 +S12_U04.CH3 +S12_U04.CH4 +S12_U05.CH1 +S12_U05.CH2 +S12_U05.CH3 +S12_U05.CH4 +S12_U06.CH1 +S12_U06.CH2 +S12_U06.CH3 +S12_U06.CH4 +S13_U01.CH1 +S13_U01.CH2 +S13_U01.CH3 +S13_U01.CH4 +S13_U02.CH1 +S13_U02.CH2 +S13_U02.CH3 +S13_U02.CH4 +S13_U03.CH1 +S13_U03.CH2 +S13_U03.CH3 +S13_U03.CH4 +S13_U04.CH1 +S13_U04.CH2 +S13_U04.CH3 +S13_U04.CH4 +S13_U05.CH1 +S13_U05.CH2 +S13_U05.CH3 +S13_U05.CH4 +S13_U06.CH1 +S13_U06.CH2 +S13_U06.CH3 +S13_U06.CH4 +S16_U01.CH1 +S16_U01.CH2 +S16_U01.CH3 +S16_U01.CH4 +S16_U02.CH1 +S16_U02.CH2 +S16_U02.CH3 +S16_U02.CH4 +S16_U03.CH1 +S16_U03.CH2 +S16_U03.CH3 +S16_U03.CH4 +S16_U04.CH1 +S16_U04.CH2 +S16_U04.CH3 +S16_U04.CH4 +S16_U05.CH1 +S16_U05.CH2 +S16_U05.CH3 +S16_U05.CH4 +S16_U06.CH1 +S16_U06.CH2 +S16_U06.CH3 +S16_U06.CH4 +S17_U01.CH1 +S17_U01.CH2 +S17_U01.CH3 +S17_U01.CH4 +S17_U02.CH1 +S17_U02.CH2 +S17_U02.CH3 +S17_U02.CH4 +S17_U03.CH1 +S17_U03.CH2 +S17_U03.CH3 +S17_U03.CH4 +S17_U04.CH1 +S17_U04.CH2 +S17_U04.CH3 +S17_U04.CH4 
+S17_U05.CH1 +S17_U05.CH2 +S17_U05.CH3 +S17_U05.CH4 +S17_U06.CH1 +S17_U06.CH2 +S17_U06.CH3 +S17_U06.CH4 +S18_U01.CH1 +S18_U01.CH2 +S18_U01.CH3 +S18_U01.CH4 +S18_U02.CH1 +S18_U02.CH2 +S18_U02.CH3 +S18_U02.CH4 +S18_U03.CH1 +S18_U03.CH2 +S18_U03.CH3 +S18_U03.CH4 +S18_U04.CH1 +S18_U04.CH2 +S18_U04.CH3 +S18_U04.CH4 +S18_U05.CH1 +S18_U05.CH2 +S18_U05.CH3 +S18_U05.CH4 +S18_U06.CH1 +S18_U06.CH2 +S18_U06.CH3 +S18_U06.CH4 +S19_U01.CH1 +S19_U01.CH2 +S19_U01.CH3 +S19_U01.CH4 +S19_U02.CH1 +S19_U02.CH2 +S19_U02.CH3 +S19_U02.CH4 +S19_U03.CH1 +S19_U03.CH2 +S19_U03.CH3 +S19_U03.CH4 +S19_U04.CH1 +S19_U04.CH2 +S19_U04.CH3 +S19_U04.CH4 +S19_U05.CH1 +S19_U05.CH2 +S19_U05.CH3 +S19_U05.CH4 +S19_U06.CH1 +S19_U06.CH2 +S19_U06.CH3 +S19_U06.CH4 +S20_U01.CH1 +S20_U01.CH2 +S20_U01.CH3 +S20_U01.CH4 +S20_U02.CH1 +S20_U02.CH2 +S20_U02.CH3 +S20_U02.CH4 +S20_U03.CH1 +S20_U03.CH2 +S20_U03.CH3 +S20_U03.CH4 +S20_U04.CH1 +S20_U04.CH2 +S20_U04.CH3 +S20_U04.CH4 +S20_U05.CH1 +S20_U05.CH2 +S20_U05.CH3 +S20_U05.CH4 +S20_U06.CH1 +S20_U06.CH2 +S20_U06.CH3 +S20_U06.CH4 +S22_U01.CH1 +S22_U01.CH2 +S22_U01.CH3 +S22_U01.CH4 +S22_U02.CH1 +S22_U02.CH2 +S22_U02.CH3 +S22_U02.CH4 +S22_U04.CH1 +S22_U04.CH2 +S22_U04.CH3 +S22_U04.CH4 +S22_U05.CH1 +S22_U05.CH2 +S22_U05.CH3 +S22_U05.CH4 +S22_U06.CH1 +S22_U06.CH2 +S22_U06.CH3 +S22_U06.CH4 +S23_U01.CH1 +S23_U01.CH2 +S23_U01.CH3 +S23_U01.CH4 +S23_U02.CH1 +S23_U02.CH2 +S23_U02.CH3 +S23_U02.CH4 +S23_U03.CH1 +S23_U03.CH2 +S23_U03.CH3 +S23_U03.CH4 +S23_U04.CH1 +S23_U04.CH2 +S23_U04.CH3 +S23_U04.CH4 +S23_U05.CH1 +S23_U05.CH2 +S23_U05.CH3 +S23_U05.CH4 +S23_U06.CH1 +S23_U06.CH2 +S23_U06.CH3 +S23_U06.CH4 +S24_U01.CH1 +S24_U01.CH2 +S24_U01.CH3 +S24_U01.CH4 +S24_U02.CH1 +S24_U02.CH2 +S24_U02.CH3 +S24_U02.CH4 +S24_U03.CH1 +S24_U03.CH2 +S24_U03.CH3 +S24_U03.CH4 +S24_U04.CH1 +S24_U04.CH2 +S24_U04.CH3 +S24_U04.CH4 +S24_U05.CH1 +S24_U05.CH2 +S24_U05.CH3 +S24_U05.CH4 +S24_U06.CH1 +S24_U06.CH2 +S24_U06.CH3 +S24_U06.CH4 diff --git a/egs/chime6/s5_track1/local/extract_noises.py b/egs/chime6/s5_track1/local/extract_noises.py new file mode 100755 index 00000000000..f7b7f752d9e --- /dev/null +++ b/egs/chime6/s5_track1/local/extract_noises.py @@ -0,0 +1,83 @@ +#!/usr/bin/env python3 + +import argparse +import json +import logging +import os +import sys +import scipy.io.wavfile as siw +import math +import numpy as np + + +def get_args(): + parser = argparse.ArgumentParser( + """Extract noises from the corpus based on the non-speech regions. + e.g. {} /export/corpora4/CHiME5/audio/train/ \\ + /export/corpora4/CHiME5/transcriptions/train/ \\ + /export/b05/zhiqiw/noise/""".format(sys.argv[0])) + + parser.add_argument("--segment-length", default=20) + parser.add_argument("audio_dir", help="""Location of the CHiME5 Audio files. e.g. /export/corpora4/CHiME5/audio/train/""") + parser.add_argument("trans_dir", help="""Location of the CHiME5 Transcriptions. e.g. /export/corpora4/CHiME5/transcriptions/train/""") + parser.add_argument("audio_list", help="""List of ids of the CHiME5 recordings from which noise is extracted. e.g. local/distant_audio_list""") + parser.add_argument("out_dir", help="Output directory to write noise files. e.g. 
/export/b05/zhiqiw/noise/") + + args = parser.parse_args() + return args + + +def Trans_time(time, fs): + units = time.split(':') + time_second = float(units[0]) * 3600 + float(units[1]) * 60 + float(units[2]) + return int(time_second*fs) + + +def Get_time(conf, tag, mic, fs): + for i in conf: + st = Trans_time(i['start_time'][mic], fs) + ed = Trans_time(i['end_time'][mic], fs) + tag[st:ed] = 0 + return tag + + +def write_noise(out_dir, seg, audio, sig, tag, fs, cnt): + sig_noise = sig[np.nonzero(tag)] + for i in range(math.floor(len(sig_noise)/(seg*fs))): + siw.write(out_dir +'/noise'+str(cnt)+'.wav', fs, sig_noise[i*seg*fs:(i+1)*seg*fs]) + cnt += 1 + return cnt + + +def main(): + args = get_args() + + if not os.path.exists(args.out_dir): + os.makedirs(args.out_dir) + + wav_list = open(args.audio_list).readlines() + + cnt = 1 + for i, audio in enumerate(wav_list): + parts = audio.strip().split('.') + if len(parts) == 2: + # Assuming distant mic with name like S03_U01.CH1 + session, mic = parts[0].split('_') + channel = parts[1] + base_name = session + "_" + mic + "." + channel + else: + # Assuming close talk mic with name like S03_P09 + session, mic = audio.strip().split('_') + base_name = session + "_" + mic + fs, sig = siw.read(args.audio_dir + "/" + base_name + '.wav') + tag = np.ones(len(sig)) + if i == 0 or session != session_p: + with open(args.trans_dir + "/" + session + '.json') as f: + conf = json.load(f) + tag = Get_time(conf, tag, mic, fs) + cnt = write_noise(args.out_dir, args.segment_length, audio, sig, tag, fs, cnt) + session_p = session + + +if __name__ == '__main__': + main() diff --git a/egs/chime6/s5_track1/local/extract_vad_weights.sh b/egs/chime6/s5_track1/local/extract_vad_weights.sh new file mode 100755 index 00000000000..250b021bd8f --- /dev/null +++ b/egs/chime6/s5_track1/local/extract_vad_weights.sh @@ -0,0 +1,86 @@ +#!/bin/bash + +# Copyright 2016 Johns Hopkins University (Author: Daniel Povey, Vijayaditya Peddinti) +# 2019 Vimal Manohar +# Apache 2.0. + +# This script converts lattices available from a first pass decode into a per-frame weights file +# The ctms generated from the lattices are filtered. Silence frames are assigned a low weight (e.g.0.00001) +# and voiced frames have a weight of 1. + +set -e + +stage=1 +cmd=run.pl +silence_weight=0.00001 +#end configuration section. + +. ./cmd.sh + +[ -f ./path.sh ] && . ./path.sh +. utils/parse_options.sh || exit 1; +if [ $# -ne 4 ]; then + echo "Usage: $0 [--cmd (run.pl|queue.pl...)] " + echo " Options:" + echo " --cmd (run.pl|queue.pl...) # specify how to run the sub-processes." + exit 1; +fi + +data_dir=$1 +lang=$2 # Note: may be graph directory not lang directory, but has the necessary stuff copied. +decode_dir=$3 +output_wts_file_gz=$4 + +if [ $stage -le 1 ]; then + echo "$0: generating CTM from input lattices" + steps/get_ctm_conf.sh --cmd "$cmd" \ + --use-segments false \ + $data_dir \ + $lang \ + $decode_dir +fi + +if [ $stage -le 2 ]; then + name=`basename $data_dir` + # we just take the ctm from LMWT 10, it doesn't seem to affect the results a lot + ctm=$decode_dir/score_10/$name.ctm + echo "$0: generating weights file from ctm $ctm" + + pad_frames=0 # this did not seem to be helpful but leaving it as an option. + feat-to-len scp:$data_dir/feats.scp ark,t:- >$decode_dir/utt.lengths + if [ ! 
-f $ctm ]; then echo "$0: expected ctm to exist: $ctm"; exit 1; fi + + cat $ctm | awk '$6 == 1.0 && $4 < 1.0' | \ + grep -v -w mm | grep -v -w mhm | grep -v -F '[noise]' | \ + grep -v -F '[laughter]' | grep -v -F '' | \ + perl -e ' $lengths=shift @ARGV; $pad_frames=shift @ARGV; $silence_weight=shift @ARGV; + $pad_frames >= 0 || die "bad pad-frames value $pad_frames"; + open(L, "<$lengths") || die "opening lengths file"; + @all_utts = (); + $utt2ref = { }; + while () { + ($utt, $len) = split(" ", $_); + push @all_utts, $utt; + $array_ref = [ ]; + for ($n = 0; $n < $len; $n++) { ${$array_ref}[$n] = $silence_weight; } + $utt2ref{$utt} = $array_ref; + } + while () { + @A = split(" ", $_); + @A == 6 || die "bad ctm line $_"; + $utt = $A[0]; $beg = $A[2]; $len = $A[3]; + $beg_int = int($beg * 100) - $pad_frames; + $len_int = int($len * 100) + 2*$pad_frames; + $array_ref = $utt2ref{$utt}; + !defined $array_ref && die "No length info for utterance $utt"; + for ($t = $beg_int; $t < $beg_int + $len_int; $t++) { + if ($t >= 0 && $t < @$array_ref) { + ${$array_ref}[$t] = 1; + } + } + } + foreach $utt (@all_utts) { $array_ref = $utt2ref{$utt}; + print $utt, " [ ", join(" ", @$array_ref), " ]\n"; + } ' $decode_dir/utt.lengths $pad_frames $silence_weight | \ + gzip -c > $output_wts_file_gz +fi diff --git a/egs/chime6/s5_track1/local/get_location.py b/egs/chime6/s5_track1/local/get_location.py new file mode 100755 index 00000000000..5493e1b6012 --- /dev/null +++ b/egs/chime6/s5_track1/local/get_location.py @@ -0,0 +1,52 @@ +#!/usr/bin/env python3 +import json +from datetime import timedelta +from glob import glob +import sys, io + + +output = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8') +location_dict = {} + +json_file_location= sys.argv[1] + '/*.json' +json_files = glob(json_file_location) + +for file in json_files: + with open(file, 'r') as f: + session_dict = json.load(f) + + for uttid in session_dict: + try: + ref=uttid['ref'] + speaker_id = uttid['speaker'] + location = uttid['location'] + session_id=uttid['session_id'] + words = uttid['words'] + + end_time_hh=str(uttid['end_time'][speaker_id]) + time = end_time_hh.strip().split(':') + hrs, mins, secs = float(time[0]), float(time[1]), float(time[2]) + end_time = timedelta(hours=hrs, minutes=mins, seconds=secs).total_seconds() + end_time = '{0:7.2f}'.format(end_time) + end_time = "".join(end_time.strip().split('.')) + end_time = int(end_time) + end_time = str(end_time).zfill(7) + + start_time_hh = str(uttid['start_time'][speaker_id]) + time = start_time_hh.strip().split(':') + hrs, mins, secs = float(time[0]), float(time[1]), float(time[2]) + start_time = timedelta(hours=hrs, minutes=mins, seconds=secs).total_seconds() + start_time = '{0:7.2f}'.format(start_time) + start_time = "".join(start_time.strip().split('.')) + start_time = int(start_time) + start_time =str(start_time).zfill(7) + + utt = "{0}_{1}-{2}-{3}".format(speaker_id, session_id, start_time, end_time) + location_dict[utt]=(location) + except: + continue + +for key in sorted(location_dict.keys()): + utt= "{0} {1}".format(key, location_dict[key]) + output.write(utt+ '\n') + diff --git a/egs/chime6/s5_track1/local/install_pb_chime5.sh b/egs/chime6/s5_track1/local/install_pb_chime5.sh new file mode 100755 index 00000000000..b907b5ee66e --- /dev/null +++ b/egs/chime6/s5_track1/local/install_pb_chime5.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +# Installs pb_chime5 +# miniconda should be installed in $HOME/miniconda3/ + +miniconda_dir=$HOME/miniconda3/ + +if [ ! 
-d $miniconda_dir ]; then + echo "$miniconda_dir does not exist. Please run 'tools/extras/install_miniconda.sh" && exit 1; +fi + +git clone https://github.com/fgnt/pb_chime5.git +cd pb_chime5 +# Download submodule dependencies # https://stackoverflow.com/a/3796947/5766934 +git submodule init +git submodule update + +$miniconda_dir/bin/python -m pip install cython +$miniconda_dir/bin/python -m pip install pymongo +$miniconda_dir/bin/python -m pip install -e pb_bss/ +$miniconda_dir/bin/python -m pip install -e . diff --git a/egs/chime6/s5_track1/local/json2text.py b/egs/chime6/s5_track1/local/json2text.py new file mode 100755 index 00000000000..3de66064848 --- /dev/null +++ b/egs/chime6/s5_track1/local/json2text.py @@ -0,0 +1,88 @@ +#!/usr/bin/env python3 + +# Copyright 2017 Johns Hopkins University (Shinji Watanabe) +# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) + +import json +import argparse +import logging +import sys + + +def hms_to_seconds(hms): + hour = hms.split(':')[0] + minute = hms.split(':')[1] + second = hms.split(':')[2].split('.')[0] + + # .xx (10 ms order) + ms10 = hms.split(':')[2].split('.')[1] + + # total seconds + seconds = int(hour) * 3600 + int(minute) * 60 + int(second) + + return '{:07d}'.format(int(str(seconds) + ms10)) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('json', type=str, help='JSON transcription file') + parser.add_argument('--mictype', type=str, + choices=['ref', 'worn', 'gss', 'u01', 'u02', 'u03', 'u04', 'u05', 'u06'], + help='Type of microphones') + args = parser.parse_args() + + # logging info + log_format = "%(asctime)s (%(module)s:%(lineno)d) %(levelname)s:%(message)s" + logging.basicConfig(level=logging.INFO, format=log_format) + + logging.debug("reading %s", args.json) + with open(args.json, 'rt', encoding="utf-8") as f: + j = json.load(f) + + for x in j: + if '[redacted]' not in x['words']: + session_id = x['session_id'] + speaker_id = x['speaker'] + if args.mictype == 'ref': + mictype = x['ref'] + elif args.mictype == 'worn' or args.mictype == 'gss': + mictype = 'original' + else: + mictype = args.mictype.upper() # convert from u01 to U01 + + # add location tag for scoring (only for dev and eval sets) + if 'location' in x.keys(): + location = x['location'].upper() + else: + location = 'NOLOCATION' + + start_time = x['start_time'][mictype] + end_time = x['end_time'][mictype] + + # remove meta chars and convert to lower + words = x['words'].replace('"', '')\ + .replace('.', '')\ + .replace('?', '')\ + .replace(',', '')\ + .replace(':', '')\ + .replace(';', '')\ + .replace('!', '').lower() + + # remove multiple spaces + words = " ".join(words.split()) + + # convert to seconds, e.g., 1:10:05.55 -> 3600 + 600 + 5.55 = 4205.55 + start_time = hms_to_seconds(start_time) + end_time = hms_to_seconds(end_time) + + uttid = speaker_id + '_' + session_id + if not args.mictype in ['worn', 'gss']: + uttid += '_' + mictype + + if args.mictype == 'gss': + uttid += '-' + start_time + '-' + end_time + else: + uttid += '_' + location + '-' + start_time + '-' + end_time + + if end_time > start_time: + sys.stdout.buffer.write((uttid + ' ' + words + '\n').encode("utf-8")) diff --git a/egs/chime6/s5_track1/local/make_noise_list.py b/egs/chime6/s5_track1/local/make_noise_list.py new file mode 100755 index 00000000000..5aaf7fa4062 --- /dev/null +++ b/egs/chime6/s5_track1/local/make_noise_list.py @@ -0,0 +1,17 @@ +#!/usr/bin/env python3 + +import glob +import os +import sys + + +if len(sys.argv) != 2: + print 
("Usage: {} ".format(sys.argv[0])) + raise SystemExit(1) + + +for line in glob.glob("{}/*.wav".format(sys.argv[1])): + fname = os.path.basename(line.strip()) + + print ("--noise-id {} --noise-type point-source " + "--bg-fg-type foreground {}".format(fname, line.strip())) diff --git a/egs/chime6/s5_track1/local/nnet3/compare_wer.sh b/egs/chime6/s5_track1/local/nnet3/compare_wer.sh new file mode 100755 index 00000000000..095e85cc338 --- /dev/null +++ b/egs/chime6/s5_track1/local/nnet3/compare_wer.sh @@ -0,0 +1,132 @@ +#!/bin/bash + +# this script is used for comparing decoding results between systems. +# e.g. local/chain/compare_wer.sh exp/chain/tdnn_{c,d}_sp +# For use with discriminatively trained systems you specify the epochs after a colon: +# for instance, +# local/chain/compare_wer.sh exp/chain/tdnn_c_sp exp/chain/tdnn_c_sp_smbr:{1,2,3} + + +if [ $# == 0 ]; then + echo "Usage: $0: [--looped] [--online] [ ... ]" + echo "e.g.: $0 exp/chain/tdnn_{b,c}_sp" + echo "or (with epoch numbers for discriminative training):" + echo "$0 exp/chain/tdnn_b_sp_disc:{1,2,3}" + exit 1 +fi + +echo "# $0 $*" + +include_looped=false +if [ "$1" == "--looped" ]; then + include_looped=true + shift +fi +include_online=false +if [ "$1" == "--online" ]; then + include_online=true + shift +fi + + +used_epochs=false + +# this function set_names is used to separate the epoch-related parts of the name +# [for discriminative training] and the regular parts of the name. +# If called with a colon-free directory name, like: +# set_names exp/chain/tdnn_lstm1e_sp_bi_smbr +# it will set dir=exp/chain/tdnn_lstm1e_sp_bi_smbr and epoch_infix="" +# If called with something like: +# set_names exp/chain/tdnn_d_sp_smbr:3 +# it will set dir=exp/chain/tdnn_d_sp_smbr and epoch_infix="_epoch3" + + +set_names() { + if [ $# != 1 ]; then + echo "compare_wer_general.sh: internal error" + exit 1 # exit the program + fi + dirname=$(echo $1 | cut -d: -f1) + epoch=$(echo $1 | cut -s -d: -f2) + if [ -z $epoch ]; then + epoch_infix="" + else + used_epochs=true + epoch_infix=_epoch${epoch} + fi +} + + + +echo -n "# System " +for x in $*; do printf "% 10s" " $(basename $x)"; done +echo + +strings=( + "#WER dev_clean_2 (tgsmall) " + "#WER dev_clean_2 (tglarge) ") + +for n in 0 1; do + echo -n "${strings[$n]}" + for x in $*; do + set_names $x # sets $dirname and $epoch_infix + decode_names=(tgsmall_dev_clean_2 tglarge_dev_clean_2) + + wer=$(cat $dirname/decode_${decode_names[$n]}/wer_* | utils/best_wer.sh | awk '{print $2}') + printf "% 10s" $wer + done + echo + if $include_looped; then + echo -n "# [looped:] " + for x in $*; do + set_names $x # sets $dirname and $epoch_infix + wer=$(cat $dirname/decode_looped_${decode_names[$n]}/wer_* | utils/best_wer.sh | awk '{print $2}') + printf "% 10s" $wer + done + echo + fi + if $include_online; then + echo -n "# [online:] " + for x in $*; do + set_names $x # sets $dirname and $epoch_infix + wer=$(cat ${dirname}_online/decode_${decode_names[$n]}/wer_* | utils/best_wer.sh | awk '{print $2}') + printf "% 10s" $wer + done + echo + fi +done + + +if $used_epochs; then + exit 0; # the diagnostics aren't comparable between regular and discriminatively trained systems. 
+fi + +echo -n "# Final train prob " +for x in $*; do + prob=$(grep Overall $x/log/compute_prob_train.{final,combined}.log 2>/dev/null | grep log-like | awk '{printf("%.4f", $8)}') + printf "% 10s" $prob +done +echo + +echo -n "# Final valid prob " +for x in $*; do + prob=$(grep Overall $x/log/compute_prob_valid.{final,combined}.log 2>/dev/null | grep log-like | awk '{printf("%.4f", $8)}') + printf "% 10s" $prob +done +echo + +echo -n "# Final train acc " +for x in $*; do + prob=$(grep Overall $x/log/compute_prob_train.{final,combined}.log 2>/dev/null | grep accuracy | awk '{printf("%.4f", $8)}') + printf "% 10s" $prob +done +echo + +echo -n "# Final valid acc " +for x in $*; do + prob=$(grep Overall $x/log/compute_prob_valid.{final,combined}.log 2>/dev/null | grep accuracy | awk '{printf("%.4f", $8)}') + printf "% 10s" $prob +done +echo + +echo diff --git a/egs/chime6/s5_track1/local/nnet3/decode.sh b/egs/chime6/s5_track1/local/nnet3/decode.sh new file mode 100755 index 00000000000..8fa54e0d4a6 --- /dev/null +++ b/egs/chime6/s5_track1/local/nnet3/decode.sh @@ -0,0 +1,164 @@ +#!/bin/bash + +# Copyright 2016 Johns Hopkins University (Author: Daniel Povey, Vijayaditya Peddinti) +# 2019 Vimal Manohar +# Apache 2.0. + +# This script does 2-stage decoding where the first stage is used to get +# reliable frames for i-vector extraction. + +set -e + +# general opts +iter= +stage=0 +nj=30 +affix= # affix for decode directory + +# ivector opts +max_count=75 # parameter for extract_ivectors.sh +sub_speaker_frames=6000 +ivector_scale=0.75 +get_weights_from_ctm=true +weights_file= # use weights from this archive (must be compressed using gunzip) +silence_weight=0.00001 # apply this weight to silence frames during i-vector extraction +ivector_dir=exp/nnet3 + +# decode opts +pass2_decode_opts="--min-active 1000" +lattice_beam=8 +extra_left_context=0 # change for (B)LSTM +extra_right_context=0 # change for BLSTM +frames_per_chunk=50 # change for (B)LSTM +acwt=0.1 # important to change this when using chain models +post_decode_acwt=1.0 # important to change this when using chain models +extra_left_context_initial=0 +extra_right_context_final=0 + +graph_affix= + +score_opts="--min-lmwt 6 --max-lmwt 13" + +. ./cmd.sh +[ -f ./path.sh ] && . ./path.sh +. utils/parse_options.sh || exit 1; + +if [ $# -ne 4 ]; then + echo "Usage: $0 [options] " + echo " Options:" + echo " --stage (0|1|2) # start scoring script from part-way through." + echo "e.g.:" + echo "$0 data/dev data/lang exp/tri5a/graph_pp exp/nnet3/tdnn" + exit 1; +fi + +data=$1 # data directory +lang=$2 # data/lang +graph=$3 #exp/tri5a/graph_pp +dir=$4 # exp/nnet3/tdnn + +model_affix=`basename $dir` +ivector_affix=${affix:+_$affix}_chain_${model_affix}${iter:+_iter$iter} +affix=${affix:+_${affix}}${iter:+_iter${iter}} + +if [ $stage -le 1 ]; then + if [ ! 
-s ${data}_hires/feats.scp ]; then + utils/copy_data_dir.sh $data ${data}_hires + steps/make_mfcc.sh --mfcc-config conf/mfcc_hires.conf --nj $nj --cmd "$train_cmd" ${data}_hires + steps/compute_cmvn_stats.sh ${data}_hires + utils/fix_data_dir.sh ${data}_hires + fi +fi + +data_set=$(basename $data) +if [ $stage -le 2 ]; then + echo "Extracting i-vectors, stage 1" + steps/online/nnet2/extract_ivectors_online.sh --cmd "$train_cmd" --nj $nj \ + --max-count $max_count \ + ${data}_hires $ivector_dir/extractor \ + $ivector_dir/ivectors_${data_set}${ivector_affix}_stage1; + # float comparisons are hard in bash + if [ `bc <<< "$ivector_scale != 1"` -eq 1 ]; then + ivector_scale_affix=_scale$ivector_scale + else + ivector_scale_affix= + fi + + if [ ! -z "$ivector_scale_affix" ]; then + echo "$0: Scaling iVectors, stage 1" + srcdir=$ivector_dir/ivectors_${data_set}${ivector_affix}_stage1 + outdir=$ivector_dir/ivectors_${data_set}${ivector_affix}${ivector_scale_affix}_stage1 + mkdir -p $outdir + $train_cmd $outdir/log/scale_ivectors.log \ + copy-matrix --scale=$ivector_scale scp:$srcdir/ivector_online.scp ark:- \| \ + copy-feats --compress=true ark:- ark,scp:$outdir/ivector_online.ark,$outdir/ivector_online.scp; + cp $srcdir/ivector_period $outdir/ivector_period + fi +fi + +decode_dir=$dir/decode${graph_affix}_${data_set}${affix} +# generate the lattices +if [ $stage -le 3 ]; then + echo "Generating lattices, stage 1" + steps/nnet3/decode.sh --nj $nj --cmd "$decode_cmd" \ + --acwt $acwt --post-decode-acwt $post_decode_acwt \ + --extra-left-context $extra_left_context \ + --extra-right-context $extra_right_context \ + --extra-left-context-initial $extra_left_context_initial \ + --extra-right-context-final $extra_right_context_final \ + --frames-per-chunk "$frames_per_chunk" \ + --online-ivector-dir $ivector_dir/ivectors_${data_set}${ivector_affix}${ivector_scale_affix}_stage1 \ + --skip-scoring true ${iter:+--iter $iter} \ + $graph ${data}_hires ${decode_dir}_stage1; +fi + +if [ $stage -le 4 ]; then + if $get_weights_from_ctm; then + if [ ! -z $weights_file ]; then + echo "$0: Using provided vad weights file $weights_file" + ivector_extractor_weights=$weights_file + else + echo "$0 : Generating vad weights file" + ivector_extractor_weights=${decode_dir}_stage1/weights${affix}.gz + local/extract_vad_weights.sh --silence-weight $silence_weight \ + --cmd "$decode_cmd" ${iter:+--iter $iter} \ + ${data}_hires $lang \ + ${decode_dir}_stage1 $ivector_extractor_weights + fi + else + # get weights from best path decoding + ivector_extractor_weights=${decode_dir}_stage1 + fi +fi + +if [ $stage -le 5 ]; then + echo "Extracting i-vectors, stage 2 with weights from $ivector_extractor_weights" + # this does offline decoding, except we estimate the iVectors per + # speaker, excluding silence (based on alignments from a DNN decoding), with a + # different script. This is just to demonstrate that script. + # the --sub-speaker-frames is optional; if provided, it will divide each speaker + # up into "sub-speakers" of at least that many frames... can be useful if + # acoustic conditions drift over time within the speaker's data. 
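+  # The weights archive used below comes from stage 4: per-frame speech/silence
+  # weights for each utterance (silence frames scaled down to $silence_weight),
+  # produced by local/extract_vad_weights.sh from the stage-1 decode (or taken
+  # from --weights-file / the stage-1 lattices, depending on the options above).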
+ steps/online/nnet2/extract_ivectors.sh --cmd "$train_cmd" --nj $nj \ + --silence-weight $silence_weight \ + --sub-speaker-frames $sub_speaker_frames --max-count $max_count \ + ${data}_hires $lang $ivector_dir/extractor \ + $ivector_extractor_weights $ivector_dir/ivectors_${data_set}${ivector_affix}; +fi + +if [ $stage -le 6 ]; then + echo "Generating lattices, stage 2 with --acwt $acwt" + rm -f ${decode_dir}/.error + steps/nnet3/decode.sh --nj $nj --cmd "$decode_cmd" $pass2_decode_opts \ + --acwt $acwt --post-decode-acwt $post_decode_acwt \ + --extra-left-context $extra_left_context \ + --extra-right-context $extra_right_context \ + --extra-left-context-initial $extra_left_context_initial \ + --extra-right-context-final $extra_right_context_final \ + --frames-per-chunk "$frames_per_chunk" \ + --skip-scoring false ${iter:+--iter $iter} --lattice-beam $lattice_beam \ + --online-ivector-dir $ivector_dir/ivectors_${data_set}${ivector_affix} \ + $graph ${data}_hires ${decode_dir} || touch ${decode_dir}/.error + [ -f ${decode_dir}/.error ] && echo "$0: Error decoding" && exit 1; +fi +exit 0 diff --git a/egs/chime6/s5_track1/local/nnet3/multi_condition/run_ivector_common.sh b/egs/chime6/s5_track1/local/nnet3/multi_condition/run_ivector_common.sh new file mode 100755 index 00000000000..8f823f4306e --- /dev/null +++ b/egs/chime6/s5_track1/local/nnet3/multi_condition/run_ivector_common.sh @@ -0,0 +1,194 @@ +#!/bin/bash + +set -euo pipefail + +# This script is called from local/nnet3/run_tdnn.sh and +# local/chain/run_tdnn.sh (and may eventually be called by more +# scripts). It contains the common feature preparation and +# iVector-related parts of the script. See those scripts for examples +# of usage. + +stage=0 +train_set_clean=train_worn +train_set_noisy=train_u400k +combined_train_set=train_worn_u400k +test_sets="dev_worn" +nj=96 +include_clean=false + +noise_list= +num_data_reps=2 +snrs="20:10:15:5:0" +foreground_snrs="20:10:15:5:0" +background_snrs="20:10:15:5:0" + +nnet3_affix=_train_worn_u400k_rvb + +. ./cmd.sh +. ./path.sh +. utils/parse_options.sh + +if [ $stage -le 0 ]; then + # Perturb the original data. We need this Although the nnet will be trained by high resolution data, we still have to + # perturb the normal data to get the alignment _sp stands for speed-perturbed + echo "$0: preparing directory for low-resolution speed-perturbed data (for alignment)" + utils/data/perturb_data_dir_speed_3way.sh data/${train_set_clean} data/${train_set_clean}_sp + + utils/data/perturb_data_dir_speed_3way.sh data/${train_set_noisy} data/${train_set_noisy}_sp + + for datadir in ${train_set_clean}_sp ${train_set_noisy}_sp $test_sets; do + utils/copy_data_dir.sh data/$datadir data/${datadir}_hires + done + + for datadir in ${train_set_clean}_sp ${train_set_noisy}_sp; do + utils/data/perturb_data_dir_volume.sh data/${datadir}_hires + done +fi + + +if [ $stage -le 1 ]; then + for datadir in ${train_set_clean}_sp ${train_set_noisy}_sp; do + mfccdir=data/${datadir}/data + if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! -d $mfccdir/storage ]; then + utils/create_split_dir.pl /export/b0{5,6,7,8}/$USER/kaldi-data/mfcc/chime5-$(date +'%m_%d_%H_%M')/s5/$mfccdir/storage $mfccdir/storage + fi + + steps/make_mfcc.sh --nj 20 \ + --cmd "$train_cmd" data/${datadir} || exit 1; + steps/compute_cmvn_stats.sh data/${datadir} || exit 1; + utils/fix_data_dir.sh data/${datadir} || exit 1; + done +fi + +if [ $stage -le 2 ]; then + if [ ! 
-d RIRS_NOISES/ ]; then + # Download the package that includes the real RIRs, simulated RIRs, isotropic noises and point-source noises + wget --no-check-certificate http://www.openslr.org/resources/28/rirs_noises.zip + unzip rirs_noises.zip + fi + + if [ -z "$noise_list" ]; then + noise_list=RIRS_NOISES/pointsource_noises/noise_list + fi + + # This is the config for the system using simulated RIRs and point-source noises + rvb_opts+=(--rir-set-parameters "0.5, RIRS_NOISES/simulated_rirs/smallroom/rir_list") + rvb_opts+=(--rir-set-parameters "0.5, RIRS_NOISES/simulated_rirs/mediumroom/rir_list") + rvb_opts+=(--noise-set-parameters $noise_list) + + python steps/data/reverberate_data_dir.py \ + "${rvb_opts[@]}" \ + --prefix "rev" \ + --foreground-snrs $foreground_snrs \ + --background-snrs $background_snrs \ + --speech-rvb-probability 1 \ + --pointsource-noise-addition-probability 1 \ + --isotropic-noise-addition-probability 1 \ + --num-replications $num_data_reps \ + --max-noises-per-minute 1 \ + --source-sampling-rate 16000 \ + data/${train_set_clean}_sp_hires data/${train_set_clean}_sp_rvb_hires +fi + +if [ $stage -le 3 ]; then + # Create high-resolution MFCC features (with 40 cepstra instead of 13). + # this shows how you can split across multiple file-systems. + echo "$0: creating high-resolution MFCC features" + for datadir in ${train_set_clean}_sp_rvb ${train_set_noisy}_sp ${test_sets}; do + mfccdir=data/${datadir}_hires/data + if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! -d $mfccdir/storage ]; then + utils/create_split_dir.pl /export/b0{5,6,7,8}/$USER/kaldi-data/mfcc/chime5-$(date +'%m_%d_%H_%M')/s5/$mfccdir/storage $mfccdir/storage + fi + + steps/make_mfcc.sh --nj 20 --mfcc-config conf/mfcc_hires.conf \ + --cmd "$train_cmd" data/${datadir}_hires || exit 1; + steps/compute_cmvn_stats.sh data/${datadir}_hires || exit 1; + utils/fix_data_dir.sh data/${datadir}_hires || exit 1; + done +fi + +temp_data_root=exp/nnet3${nnet3_affix}/diag_ubm + +if [ $stage -le 4 ]; then + echo "$0: computing a subset of data to train the diagonal UBM." + # We'll use about a quarter of the data. + mkdir -p exp/nnet3${nnet3_affix}/diag_ubm + optional_clean= + if $include_clean; then + optional_clean=data/${train_set_clean}_sp_hires + fi + utils/combine_data.sh data/${combined_train_set}_sp_rvb_hires \ + ${optional_clean} \ + data/${train_set_clean}_sp_rvb_hires data/${train_set_noisy}_sp_hires + + num_utts_total=$(wc -l < data/${combined_train_set}_sp_rvb_hires/utt2spk) + num_utts=$[$num_utts_total/4] + utils/data/subset_data_dir.sh data/${combined_train_set}_sp_rvb_hires \ + $num_utts ${temp_data_root}/${combined_train_set}_sp_rvb_hires_subset + + echo "$0: computing a PCA transform from the hires data." + steps/online/nnet2/get_pca_transform.sh --cmd "$train_cmd" \ + --splice-opts "--left-context=3 --right-context=3" \ + --max-utts 10000 --subsample 2 \ + ${temp_data_root}/${combined_train_set}_sp_rvb_hires_subset \ + exp/nnet3${nnet3_affix}/pca_transform + + echo "$0: training the diagonal UBM." + # Use 512 Gaussians in the UBM. + steps/online/nnet2/train_diag_ubm.sh --cmd "$train_cmd" --nj 30 \ + --num-frames 700000 \ + --num-threads 8 \ + data/${combined_train_set}_sp_rvb_hires 512 \ + exp/nnet3${nnet3_affix}/pca_transform exp/nnet3${nnet3_affix}/diag_ubm +fi + +if [ $stage -le 5 ]; then + # Train the iVector extractor. Use all of the speed-perturbed data since iVector extractors + # can be sensitive to the amount of data. The script defaults to an iVector dimension of + # 100. 
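+  # (If a different dimension is needed, check the --ivector-dim option of
+  # steps/online/nnet2/train_ivector_extractor.sh; 100 is the default used here.)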
+ echo "$0: training the iVector extractor" + steps/online/nnet2/train_ivector_extractor.sh --cmd "$train_cmd" --nj 20 \ + data/${combined_train_set}_sp_rvb_hires exp/nnet3${nnet3_affix}/diag_ubm \ + exp/nnet3${nnet3_affix}/extractor || exit 1; +fi + + +if [ $stage -le 6 ]; then + # We extract iVectors on the speed-perturbed training data after combining + # short segments, which will be what we train the system on. With + # --utts-per-spk-max 2, the script pairs the utterances into twos, and treats + # each of these pairs as one speaker; this gives more diversity in iVectors.. + # Note that these are extracted 'online'. + + # note, we don't encode the 'max2' in the name of the ivectordir even though + # that's the data we extract the ivectors from, as it's still going to be + # valid for the non-'max2' data, the utterance list is the same. + + for datadir in ${combined_train_set}_sp_rvb; do + ivectordir=exp/nnet3${nnet3_affix}/ivectors_${datadir}_hires + if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! -d $ivectordir/storage ]; then + utils/create_split_dir.pl /export/b0{5,6,7,8}/$USER/kaldi-data/ivectors/chime5-$(date +'%m_%d_%H_%M')/s5/$ivectordir/storage $ivectordir/storage + fi + + # having a larger number of speakers is helpful for generalization, and to + # handle per-utterance decoding well (iVector starts at zero). + temp_data_root=${ivectordir} + utils/data/modify_speaker_info.sh --utts-per-spk-max 2 \ + data/${datadir}_hires ${temp_data_root}/${datadir}_hires_max2 + + steps/online/nnet2/extract_ivectors_online.sh --cmd "$train_cmd" --nj ${nj} \ + ${temp_data_root}/${datadir}_hires_max2 \ + exp/nnet3${nnet3_affix}/extractor $ivectordir + done + + # Also extract iVectors for the test data, but in this case we don't need the speed + # perturbation (sp). + for data in $test_sets; do + steps/online/nnet2/extract_ivectors_online.sh --cmd "$train_cmd" --nj 20 \ + data/${data}_hires exp/nnet3${nnet3_affix}/extractor \ + exp/nnet3${nnet3_affix}/ivectors_${data}_hires + done +fi + +exit 0 + diff --git a/egs/chime6/s5_track1/local/nnet3/run_ivector_common.sh b/egs/chime6/s5_track1/local/nnet3/run_ivector_common.sh new file mode 100755 index 00000000000..3910e1812a3 --- /dev/null +++ b/egs/chime6/s5_track1/local/nnet3/run_ivector_common.sh @@ -0,0 +1,151 @@ +#!/bin/bash + +set -euo pipefail + +# This script is called from local/nnet3/run_tdnn.sh and +# local/chain/run_tdnn.sh (and may eventually be called by more +# scripts). It contains the common feature preparation and +# iVector-related parts of the script. See those scripts for examples +# of usage. + +stage=0 +train_set=train_worn_u100k +test_sets="dev_worn dev_beamformit_ref" +gmm=tri3 +nj=96 + +nnet3_affix=_train_worn_u100k + +. ./cmd.sh +. ./path.sh +. utils/parse_options.sh + +gmm_dir=exp/${gmm} +ali_dir=exp/${gmm}_ali_${train_set}_sp + +for f in ${gmm_dir}/final.mdl; do + if [ ! 
-f $f ]; then + echo "$0: expected file $f to exist" + exit 1 + fi +done + +if [ $stage -le 1 ]; then + # Although the nnet will be trained by high resolution data, we still have to + # perturb the normal data to get the alignment _sp stands for speed-perturbed + echo "$0: preparing directory for low-resolution speed-perturbed data (for alignment)" + utils/data/perturb_data_dir_speed_3way.sh data/${train_set} data/${train_set}_sp + echo "$0: making MFCC features for low-resolution speed-perturbed data" + steps/make_mfcc.sh --cmd "$train_cmd" --nj 20 data/${train_set}_sp || exit 1; + steps/compute_cmvn_stats.sh data/${train_set}_sp || exit 1; + utils/fix_data_dir.sh data/${train_set}_sp +fi + +if [ $stage -le 2 ]; then + echo "$0: aligning with the perturbed low-resolution data" + steps/align_fmllr.sh --nj ${nj} --cmd "$train_cmd" \ + data/${train_set}_sp data/lang $gmm_dir $ali_dir || exit 1 +fi + +if [ $stage -le 3 ]; then + # Create high-resolution MFCC features (with 40 cepstra instead of 13). + # this shows how you can split across multiple file-systems. + echo "$0: creating high-resolution MFCC features" + mfccdir=data/${train_set}_sp_hires/data + if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! -d $mfccdir/storage ]; then + utils/create_split_dir.pl /export/b1{5,6,8,9}/$USER/kaldi-data/mfcc/chime5-$(date +'%m_%d_%H_%M')/s5/$mfccdir/storage $mfccdir/storage + fi + + for datadir in ${train_set}_sp ${test_sets}; do + utils/copy_data_dir.sh data/$datadir data/${datadir}_hires + done + + # do volume-perturbation on the training data prior to extracting hires + # features; this helps make trained nnets more invariant to test data volume. + utils/data/perturb_data_dir_volume.sh data/${train_set}_sp_hires || exit 1; + + for datadir in ${train_set}_sp ${test_sets}; do + steps/make_mfcc.sh --nj 20 --mfcc-config conf/mfcc_hires.conf \ + --cmd "$train_cmd" data/${datadir}_hires || exit 1; + steps/compute_cmvn_stats.sh data/${datadir}_hires || exit 1; + utils/fix_data_dir.sh data/${datadir}_hires || exit 1; + done +fi + +if [ $stage -le 4 ]; then + echo "$0: computing a subset of data to train the diagonal UBM." + # We'll use about a quarter of the data. + mkdir -p exp/nnet3${nnet3_affix}/diag_ubm + temp_data_root=exp/nnet3${nnet3_affix}/diag_ubm + + num_utts_total=$(wc -l &2 "$0" "$@" +if [ $# -ne 3 ] ; then + echo >&2 "$0" "$@" + echo >&2 "$0: Error: wrong number of arguments" + echo -e >&2 "Usage:\n $0 [opts] " + echo -e >&2 "eg:\n $0 /corpora/chime5/audio/train /corpora/chime5/transcriptions/train data/train" + exit 1 +fi + +set -e -o pipefail + +adir=$1 +jdir=$2 +dir=$3 + +json_count=$(find -L $jdir -name "*.json" | wc -l) +wav_count=$(find -L $adir -name "*.wav" | wc -l) + +if [ "$json_count" -eq 0 ]; then + echo >&2 "We expect that the directory $jdir will contain json files." + echo >&2 "That implies you have supplied a wrong path to the data." + exit 1 +fi +if [ "$wav_count" -eq 0 ]; then + echo >&2 "We expect that the directory $adir will contain wav files." + echo >&2 "That implies you have supplied a wrong path to the data." 
+ exit 1 +fi + +echo "$0: Converting transcription to text" + +mkdir -p $dir + +for file in $jdir/*json; do + ./local/json2text.py --mictype $mictype $file +done | \ + sed -e "s/\[inaudible[- 0-9]*\]/[inaudible]/g" |\ + sed -e 's/ - / /g' |\ + sed -e 's/mm-/mm/g' > $dir/text.orig + +echo "$0: Creating datadir $dir for type=\"$mictype\"" + +if [ $mictype == "worn" ]; then + # convert the filenames to wav.scp format, use the basename of the file + # as a the wav.scp key, add .L and .R for left and right channel + # i.e. each file will have two entries (left and right channel) + find -L $adir -name "S[0-9]*_P[0-9]*.wav" | \ + perl -ne '{ + chomp; + $path = $_; + next unless $path; + @F = split "/", $path; + ($f = $F[@F-1]) =~ s/.wav//; + @F = split "_", $f; + print "${F[1]}_${F[0]}.L sox $path -t wav - remix 1 |\n"; + print "${F[1]}_${F[0]}.R sox $path -t wav - remix 2 |\n"; + }' | sort > $dir/wav.scp + + # generate the transcripts for both left and right channel + # from the original transcript in the form + # P09_S03-0006072-0006147 gimme the baker + # create left and right channel transcript + # P09_S03.L-0006072-0006147 gimme the baker + # P09_S03.R-0006072-0006147 gimme the baker + sed -n 's/ *$//; h; s/-/\.L-/p; g; s/-/\.R-/p' $dir/text.orig | sort > $dir/text +elif [ $mictype == "ref" ]; then + # fixed reference array + + # first get a text, which will be used to extract reference arrays + perl -ne 's/-/.ENH-/;print;' $dir/text.orig | sort > $dir/text + + find -L $adir | grep "\.wav" | sort > $dir/wav.flist + # following command provide the argument for grep to extract only reference arrays + grep `cut -f 1 -d"-" $dir/text | awk -F"_" '{print $2 "_" $3}' | sed -e "s/\.ENH//" | sort | uniq | sed -e "s/^/ -e /" | tr "\n" " "` $dir/wav.flist > $dir/wav.flist2 + paste -d" " \ + <(awk -F "/" '{print $NF}' $dir/wav.flist2 | sed -e "s/\.wav/.ENH/") \ + $dir/wav.flist2 | sort > $dir/wav.scp +elif [ $mictype == "gss" ]; then + find -L $adir -name "P[0-9]*_S[0-9]*.wav" | \ + perl -ne '{ + chomp; + $path = $_; + next unless $path; + @F = split "/", $path; + ($f = $F[@F-1]) =~ s/.wav//; + $f =~ m/(P[0-9]*_S[0-9]*)_(\d+)-(\d+)/; + print "$1-$2-$3 $path\n"; + }' | sort > $dir/wav.scp + + cat $dir/text.orig | sort > $dir/text +else + # array mic case + # convert the filenames to wav.scp format, use the basename of the file + # as a the wav.scp key + find -L $adir -name "*.wav" -ipath "*${mictype}*" |\ + perl -ne '$p=$_;chomp $_;@F=split "/";$F[$#F]=~s/\.wav//;print "$F[$#F] $p";' |\ + sort -u > $dir/wav.scp + + # convert the transcripts from + # P09_S03-0006072-0006147 gimme the baker + # to the per-channel transcripts + # P09_S03_U01_NOLOCATION.CH1-0006072-0006147 gimme the baker + # P09_S03_U01_NOLOCATION.CH2-0006072-0006147 gimme the baker + # P09_S03_U01_NOLOCATION.CH3-0006072-0006147 gimme the baker + # P09_S03_U01_NOLOCATION.CH4-0006072-0006147 gimme the baker + perl -ne '$l=$_; + for($i=1; $i<=4; $i++) { + ($x=$l)=~ s/-/.CH\Q$i\E-/; + print $x;}' $dir/text.orig | sort > $dir/text + +fi +$cleanup && rm -f $dir/text.* $dir/wav.scp.* $dir/wav.flist + +# Prepare 'segments', 'utt2spk', 'spk2utt' +if [ $mictype == "worn" ]; then + cut -d" " -f 1 $dir/text | \ + awk -F"-" '{printf("%s %s %08.2f %08.2f\n", $0, $1, $2/100.0, $3/100.0)}' |\ + sed -e "s/_[A-Z]*\././2" \ + > $dir/segments +elif [ $mictype == "ref" ]; then + cut -d" " -f 1 $dir/text | \ + awk -F"-" '{printf("%s %s %08.2f %08.2f\n", $0, $1, $2/100.0, $3/100.0)}' |\ + sed -e "s/_[A-Z]*\././2" |\ + sed -e "s/ P.._/ /" > $dir/segments +elif [ 
$mictype != "gss" ]; then + cut -d" " -f 1 $dir/text | \ + awk -F"-" '{printf("%s %s %08.2f %08.2f\n", $0, $1, $2/100.0, $3/100.0)}' |\ + sed -e "s/_[A-Z]*\././2" |\ + sed -e 's/ P.._/ /' > $dir/segments +fi + +cut -f 1 -d ' ' $dir/text | \ + perl -ne 'chomp;$utt=$_;s/_.*//;print "$utt $_\n";' > $dir/utt2spk + +utils/utt2spk_to_spk2utt.pl $dir/utt2spk > $dir/spk2utt + +# Check that data dirs are okay! +utils/validate_data_dir.sh --no-feats $dir || exit 1 diff --git a/egs/chime6/s5_track1/local/prepare_dict.sh b/egs/chime6/s5_track1/local/prepare_dict.sh new file mode 100755 index 00000000000..09083d0e795 --- /dev/null +++ b/egs/chime6/s5_track1/local/prepare_dict.sh @@ -0,0 +1,124 @@ +#!/bin/bash +# Copyright (c) 2018, Johns Hopkins University (Jan "Yenda" Trmal) +# License: Apache 2.0 + +# Begin configuration section. +# End configuration section +. ./utils/parse_options.sh + +. ./path.sh + +set -e -o pipefail +set -o nounset # Treat unset variables as an error + + +# The parts of the output of this that will be needed are +# [in data/local/dict/ ] +# lexicon.txt +# extra_questions.txt +# nonsilence_phones.txt +# optional_silence.txt +# silence_phones.txt + + +# check existing directories +[ $# != 0 ] && echo "Usage: $0" && exit 1; + +dir=data/local/dict + +mkdir -p $dir +echo "$0: Getting CMU dictionary" +if [ ! -f $dir/cmudict.done ]; then + [ -d $dir/cmudict ] && rm -rf $dir/cmudict + svn co https://svn.code.sf.net/p/cmusphinx/code/trunk/cmudict $dir/cmudict + touch $dir/cmudict.done +fi + +# silence phones, one per line. +for w in sil spn inaudible laughs noise; do + echo $w; +done > $dir/silence_phones.txt +echo sil > $dir/optional_silence.txt + +# For this setup we're discarding stress. +cat $dir/cmudict/cmudict-0.7b.symbols | \ + perl -ne 's:[0-9]::g; s:\r::; print lc($_)' | \ + sort -u > $dir/nonsilence_phones.txt + +# An extra question will be added by including the silence phones in one class. 
+paste -d ' ' -s $dir/silence_phones.txt > $dir/extra_questions.txt + +grep -v ';;;' $dir/cmudict/cmudict-0.7b |\ + uconv -f latin1 -t utf-8 -x Any-Lower |\ + perl -ne 's:(\S+)\(\d+\) :$1 :; s: : :; print;' |\ + perl -ne '@F = split " ",$_,2; $F[1] =~ s/[0-9]//g; print "$F[0] $F[1]";' \ + > $dir/lexicon1_raw_nosil.txt || exit 1; + +# Add prons for laughter, noise, oov +for w in `grep -v sil $dir/silence_phones.txt`; do + echo "[$w] $w" +done | cat - $dir/lexicon1_raw_nosil.txt > $dir/lexicon2_raw.txt || exit 1; + +# we keep all words from the cmudict in the lexicon +# might reduce OOV rate on dev and eval +cat $dir/lexicon2_raw.txt \ + <( echo "mm m" + echo " spn" + echo "cuz k aa z" + echo "cuz k ah z" + echo "cuz k ao z" + echo "mmm m"; \ + echo "hmm hh m"; \ + ) | sort -u | sed 's/[\t ]/\t/' > $dir/iv_lexicon.txt + + +cat data/train*/text | \ + awk '{for (n=2;n<=NF;n++){ count[$n]++; } } END { for(n in count) { print count[n], n; }}' | \ + sort -nr > $dir/word_counts + +cat $dir/word_counts | awk '{print $2}' > $dir/word_list + +awk '{print $1}' $dir/iv_lexicon.txt | \ + perl -e '($word_counts)=@ARGV; + open(W, "<$word_counts")||die "opening word-counts $word_counts"; + while() { chop; $seen{$_}=1; } + while() { + ($c,$w) = split; + if (!defined $seen{$w}) { print; } + } ' $dir/word_counts > $dir/oov_counts.txt + +echo "*Highest-count OOVs (including fragments) are:" +head -n 10 $dir/oov_counts.txt +echo "*Highest-count OOVs (excluding fragments) are:" +grep -v -E '^-|-$' $dir/oov_counts.txt | head -n 10 || true + +echo "*Training a G2P and generating missing pronunciations" +mkdir -p $dir/g2p/ +phonetisaurus-align --input=$dir/iv_lexicon.txt --ofile=$dir/g2p/aligned_lexicon.corpus +ngram-count -order 4 -kn-modify-counts-at-end -ukndiscount\ + -gt1min 0 -gt2min 0 -gt3min 0 -gt4min 0 \ + -text $dir/g2p/aligned_lexicon.corpus -lm $dir/g2p/aligned_lexicon.arpa +phonetisaurus-arpa2wfst --lm=$dir/g2p/aligned_lexicon.arpa --ofile=$dir/g2p/g2p.fst +awk '{print $2}' $dir/oov_counts.txt > $dir/oov_words.txt +phonetisaurus-apply --nbest 2 --model $dir/g2p/g2p.fst --thresh 5 --accumulate \ + --word_list $dir/oov_words.txt > $dir/oov_lexicon.txt + +## The next section is again just for debug purposes +## to show words for which the G2P failed +cat $dir/oov_lexicon.txt $dir/iv_lexicon.txt | sort -u > $dir/lexicon.txt +rm -f $dir/lexiconp.txt 2>/dev/null; # can confuse later script if this exists. 
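+# (i.e. list every word from the training text, with its count, that is still
+# missing from the final lexicon; words left in oov_counts.g2p.txt received no
+# pronunciation from the G2P step above)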
+awk '{print $1}' $dir/lexicon.txt | \ + perl -e '($word_counts)=@ARGV; + open(W, "<$word_counts")||die "opening word-counts $word_counts"; + while() { chop; $seen{$_}=1; } + while() { + ($c,$w) = split; + if (!defined $seen{$w}) { print; } + } ' $dir/word_counts > $dir/oov_counts.g2p.txt + +echo "*Highest-count OOVs (including fragments) after G2P are:" +head -n 10 $dir/oov_counts.g2p.txt + +utils/validate_dict_dir.pl $dir +exit 0; + diff --git a/egs/chime6/s5_track1/local/replace_uttid.py b/egs/chime6/s5_track1/local/replace_uttid.py new file mode 100755 index 00000000000..372b5b2e93e --- /dev/null +++ b/egs/chime6/s5_track1/local/replace_uttid.py @@ -0,0 +1,20 @@ +#!/usr/bin/env python3 + +import sys, io +output = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8') + +def load_uttid_location(f): + locations = {} + for line in f: + parts=line.strip().split(' ') + uttid, loc = parts[0], parts[1] + locations[uttid] = loc + return locations + +locations = load_uttid_location(open(sys.argv[1],'r', encoding='utf8')) + +for line in open(sys.argv[2],'r', encoding='utf8'): + uttid, res = line.split(None, 1) + location = locations[uttid] + location_uttid = location +'_'+ str(uttid) + output.write(location_uttid + ' ' + res) diff --git a/egs/chime6/s5_track1/local/reverberate_lat_dir.sh b/egs/chime6/s5_track1/local/reverberate_lat_dir.sh new file mode 100755 index 00000000000..f601a37c0e1 --- /dev/null +++ b/egs/chime6/s5_track1/local/reverberate_lat_dir.sh @@ -0,0 +1,93 @@ +#!/bin/bash + +# Copyright 2018 Vimal Manohar +# Apache 2.0 + +num_data_reps=1 +cmd=run.pl +nj=20 +include_clean=false + +. utils/parse_options.sh +. ./path.sh + +if [ $# -ne 4 ]; then + echo "Usage: $0 " + exit 1 +fi + +train_data_dir=$1 +noisy_latdir=$2 +clean_latdir=$3 +dir=$4 + +clean_nj=$(cat $clean_latdir/num_jobs) + +$cmd JOB=1:$clean_nj $dir/copy_clean_lattices.JOB.log \ + lattice-copy "ark:gunzip -c $clean_latdir/lat.JOB.gz |" \ + ark,scp:$dir/lats_clean.JOB.ark,$dir/lats_clean.JOB.scp || exit 1 + +for n in $(seq $clean_nj); do + cat $dir/lats_clean.$n.scp +done > $dir/lats_clean.scp + +for i in $(seq $num_data_reps); do + cat $dir/lats_clean.scp | awk -vi=$i '{print "rev"i"_"$0}' +done > $dir/lats_rvb.scp + +noisy_nj=$(cat $noisy_latdir/num_jobs) +$cmd JOB=1:$noisy_nj $dir/copy_noisy_lattices.JOB>log \ + lattice-copy "ark:gunzip -c $noisy_latdir/lat.JOB.gz |" \ + ark,scp:$dir/lats_noisy.JOB.ark,$dir/lats_noisy.JOB.scp || exit 1 + +optional_clean= +if $include_clean; then + optional_clean=$dir/lats_clean.scp +fi + +for n in $(seq $noisy_nj); do + cat $dir/lats_noisy.$n.scp +done | cat - $dir/lats_rvb.scp ${optional_clean} | sort -k1,1 > $dir/lats.scp + +utils/split_data.sh $train_data_dir $nj +$cmd JOB=1:$nj $dir/copy_lattices.JOB.log \ + lattice-copy "scp:utils/filter_scp.pl $train_data_dir/split$nj/JOB/utt2spk $dir/lats.scp |" \ + "ark:|gzip -c >$dir/lat.JOB.gz" || exit 1 + +echo $nj > $dir/num_jobs + +if [ -f $clean_latdir/ali.1.gz ]; then + $cmd JOB=1:$clean_nj $dir/copy_clean_alignments.JOB.log \ + copy-int-vector "ark:gunzip -c $clean_latdir/ali.JOB.gz |" \ + ark,scp:$dir/ali_clean.JOB.ark,$dir/ali_clean.JOB.scp + + for n in $(seq $clean_nj); do + cat $dir/ali_clean.$n.scp + done > $dir/ali_clean.scp + + for i in $(seq $num_data_reps); do + cat $dir/ali_clean.scp | awk -vi=$i '{print "rev"i"_"$0}' + done > $dir/ali_rvb.scp + + optional_clean= + if $include_clean; then + optional_clean=$dir/ali_clean.scp + fi + + $cmd JOB=1:$noisy_nj $dir/copy_noisy_alignments.JOB.log \ + copy-int-vector "ark:gunzip -c 
$noisy_latdir/ali.JOB.gz |" \ + ark,scp:$dir/ali_noisy.JOB.ark,$dir/ali_noisy.JOB.scp + + for n in $(seq $noisy_nj); do + cat $dir/ali_noisy.$n.scp + done | cat - $dir/ali_rvb.scp $optional_clean | sort -k1,1 > $dir/ali.scp + + utils/split_data.sh $train_data_dir $nj || exit 1 + $cmd JOB=1:$nj $dir/copy_rvb_alignments.JOB.log \ + copy-int-vector "scp:utils/filter_scp.pl $train_data_dir/split$nj/JOB/utt2spk $dir/ali.scp |" \ + "ark:|gzip -c >$dir/ali.JOB.gz" || exit 1 +fi + +cp $clean_latdir/{final.*,tree,*.mat,*opts,*.txt} $dir || true + +rm $dir/lats_{clean,noisy}.*.{ark,scp} $dir/ali_{clean,noisy}.*.{ark,scp} || true # save space diff --git a/egs/chime6/s5_track1/local/run_beamformit.sh b/egs/chime6/s5_track1/local/run_beamformit.sh new file mode 100755 index 00000000000..aa3badd90d8 --- /dev/null +++ b/egs/chime6/s5_track1/local/run_beamformit.sh @@ -0,0 +1,87 @@ +#!/bin/bash + +# Copyright 2015, Mitsubishi Electric Research Laboratories, MERL (Author: Shinji Watanabe) + +. ./cmd.sh +. ./path.sh + +# Config: +cmd=run.pl +bmf="1 2 3 4" + +. utils/parse_options.sh || exit 1; + +if [ $# != 3 ]; then + echo "Wrong #arguments ($#, expected 3)" + echo "Usage: local/run_beamformit.sh [options] " + echo "main options (for others, see top of script file)" + echo " --cmd # Command to run in parallel with" + echo " --bmf \"1 2 3 4\" # microphones used for beamforming" + exit 1; +fi + +sdir=$1 +odir=$2 +array=$3 +expdir=exp/enhan/`echo $odir | awk -F '/' '{print $NF}'`_`echo $bmf | tr ' ' '_'` + +if ! command -v BeamformIt &>/dev/null ; then + echo "Missing BeamformIt, run 'cd $KALDI_ROOT/tools/; ./extras/install_beamformit.sh; cd -;'" && exit 1 +fi + +# Set bash to 'debug' mode, it will exit on : +# -e 'error', -u 'undefined variable', -o ... 'error in pipeline', -x 'print commands', +set -e +set -u +set -o pipefail + +mkdir -p $odir +mkdir -p $expdir/log + +echo "Will use the following channels: $bmf" +# number of channels +numch=`echo $bmf | tr ' ' '\n' | wc -l` +echo "the number of channels: $numch" + +# wavfiles.list can be used as the name of the output files +output_wavfiles=$expdir/wavfiles.list +find -L ${sdir} | grep -i ${array} | awk -F "/" '{print $NF}' | sed -e "s/\.CH.\.wav//" | sort | uniq > $expdir/wavfiles.list + +# this is an input file list of the microphones +# format: 1st_wav 2nd_wav ... nth_wav +input_arrays=$expdir/channels_$numch +for x in `cat $output_wavfiles`; do + echo -n "$x" + for ch in $bmf; do + echo -n " $x.CH$ch.wav" + done + echo "" +done > $input_arrays + +# split the list for parallel processing +# number of jobs are set by the number of WAV files +nj=`wc -l $expdir/wavfiles.list | awk '{print $1}'` +split_wavfiles="" +for n in `seq $nj`; do + split_wavfiles="$split_wavfiles $output_wavfiles.$n" +done +utils/split_scp.pl $output_wavfiles $split_wavfiles || exit 1; + +echo -e "Beamforming\n" +# making a shell script for each job +for n in `seq $nj`; do +cat << EOF > $expdir/log/beamform.$n.sh +while read line; do + $BEAMFORMIT/BeamformIt -s \$line -c $input_arrays \ + --config_file `pwd`/conf/beamformit.cfg \ + --source_dir $sdir \ + --result_dir $odir +done < $output_wavfiles.$n +EOF +done + +chmod a+x $expdir/log/beamform.*.sh +$cmd JOB=1:$nj $expdir/log/beamform.JOB.log \ + $expdir/log/beamform.JOB.sh + +echo "`basename $0` Done." 
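+# Example invocation (hypothetical paths, for illustration only): enhance the
+# dev-set recordings of array U01 using all four channels:
+#   local/run_beamformit.sh --cmd "$train_cmd" --bmf "1 2 3 4" \
+#     /path/to/CHiME5/audio/dev enhan/dev_beamformit_u01 u01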
diff --git a/egs/chime6/s5_track1/local/run_gss.sh b/egs/chime6/s5_track1/local/run_gss.sh new file mode 100755 index 00000000000..92444bdbcd0 --- /dev/null +++ b/egs/chime6/s5_track1/local/run_gss.sh @@ -0,0 +1,54 @@ +#!/bin/bash + +# Copyright 2015, Mitsubishi Electric Research Laboratories, MERL (Author: Shinji Watanabe) + +. ./cmd.sh +if [ -f ./path.sh ]; then . ./path.sh; fi + +# Config: +cmd=run.pl +nj=4 +use_multiarray=false + +. utils/parse_options.sh || exit 1; + +if [ $# != 3 ]; then + echo "Wrong #arguments ($#, expected 3)" + echo "Usage: local/run_gss.sh [options] " + echo "main options (for others, see top of script file)" + echo " --cmd # Command to run in parallel with" + echo " --bmf \"1 2 3 4\" # microphones used for beamforming" + exit 1; +fi + +session_id=$1 +log_dir=$2 +enhanced_dir=$3 + +if [ ! -d pb_chime5/ ]; then + echo "Missing pb_chime5, run 'local/install_pb_chime5'" + exit 1 +fi + +miniconda_dir=$HOME/miniconda3/ +if [ ! -d $miniconda_dir/ ]; then + echo "$miniconda_dir/ does not exist. Please run '../../../tools/extras/install_miniconda.sh'" + exit 1 +fi + +enhanced_dir=$(utils/make_absolute.sh $enhanced_dir) || \ + { echo "Could not make absolute '$enhanced_dir'" && exit 1; } + +if $use_multiarray; then + multiarray=True +else + multiarray=False +fi + +$cmd JOB=1:$nj $log_dir/log/enhance_${session_id}.JOB.log \ + cd pb_chime5/ '&&' \ + $miniconda_dir/bin/python -m pb_chime5.scripts.kaldi_run with \ + storage_dir=$enhanced_dir \ + session_id=$session_id \ + job_id=JOB number_of_jobs=$nj \ + multiarray=$multiarray || exit 1 diff --git a/egs/chime6/s5_track1/local/run_recog.sh b/egs/chime6/s5_track1/local/run_recog.sh new file mode 100755 index 00000000000..9eaa870e8b8 --- /dev/null +++ b/egs/chime6/s5_track1/local/run_recog.sh @@ -0,0 +1,192 @@ +#!/bin/bash +# +# Based mostly on the TED-LIUM and Switchboard recipe +# +# Copyright 2017 Johns Hopkins University (Author: Shinji Watanabe and Yenda Trmal) +# Apache 2.0 +# +# This is a subset of run.sh to only perform recognition experiments with evaluation data + +# Begin configuration section. +decode_nj=20 +stage=0 +use_multiarray=false +enhancement=gss # for a new enhancement method, + # change this variable and stage 4 +# End configuration section +. ./utils/parse_options.sh + +. ./cmd.sh +. ./path.sh + + +set -e # exit on error + +# chime5 main directory path +# please change the path accordingly +chime5_corpus=/export/corpora4/CHiME5 +json_dir=${chime5_corpus}/transcriptions +audio_dir=${chime5_corpus}/audio + +# training and test data +train_set=train_worn_simu_u400k +test_sets="dev_${enhancement} eval_${enhancement}" + +# This script also needs the phonetisaurus g2p, srilm, beamformit +./local/check_tools.sh || exit 1 + +enhanced_dir=enhanced +if $use_multiarray; then + enhanced_dir=${enhanced_dir}_multiarray + enhancement=${enhancement}_multiarray +fi + +enhanced_dir=$(utils/make_absolute.sh $enhanced_dir) || exit 1 + +if [ $stage -le 1 ]; then + # Guided Source Separation (GSS) from Paderbon Univerisity + # http://spandh.dcs.shef.ac.uk/chime_workshop/papers/CHiME_2018_paper_boeddecker.pdf + # @Article{PB2018CHiME5, + # author = {Boeddeker, Christoph and Heitkaemper, Jens and Schmalenstroeer, Joerg and Drude, Lukas and Heymann, Jahn and Haeb-Umbach, Reinhold}, + # title = {{Front-End Processing for the CHiME-5 Dinner Party Scenario}}, + # year = {2018}, + # booktitle = {CHiME5 Workshop}, + # } + + echo "$0: enhance data..." + if [ ! -d pb_chime5/ ]; then + local/install_pb_chime5.sh + fi + + if [ ! 
-f pb_chime5/cache/chime5.json ]; then + ( + cd pb_chime5 + miniconda_dir=$HOME/miniconda3/ + export PATH=$miniconda_dir/bin:$PATH + export CHIME5_DIR=$chime5_corpus + make cache/chime5.json + ) + fi + + for dset in dev eval; do + local/run_gss.sh \ + --cmd "$train_cmd --max-jobs-run 30" --nj 160 \ + --use-multiarray $use_multiarray \ + ${dset} \ + ${enhanced_dir} \ + ${enhanced_dir} || exit 1 + done + + + echo "$0: Prepare data..." + for dset in dev eval; do + local/prepare_data.sh --mictype gss ${enhanced_dir}/audio/${dset} \ + ${json_dir}/${dset} data/${dset}_${enhancement} || exit 1 + done + +fi + + +if [ $stage -le 2 ]; then + # fix speaker ID issue (thanks to Dr. Naoyuki Kanda) + # add array ID to the speaker ID to avoid the use of other array information to meet regulations + # Before this fix + # $ head -n 2 data/eval_beamformit_ref_nosplit/utt2spk + # P01_S01_U02_KITCHEN.ENH-0000192-0001278 P01 + # P01_S01_U02_KITCHEN.ENH-0001421-0001481 P01 + # After this fix + # $ head -n 2 data/eval_beamformit_ref_nosplit_fix/utt2spk + # P01_S01_U02_KITCHEN.ENH-0000192-0001278 P01_U02 + # P01_S01_U02_KITCHEN.ENH-0001421-0001481 P01_U02 + echo "$0: fix data..." + for dset in ${test_sets}; do + utils/copy_data_dir.sh data/${dset} data/${dset}_nosplit + mkdir -p data/${dset}_nosplit_fix + for f in segments text wav.scp; do + if [ -f data/${dset}_nosplit/$f ]; then + cp data/${dset}_nosplit/$f data/${dset}_nosplit_fix + fi + done + awk -F "_" '{print $0 "_" $3}' data/${dset}_nosplit/utt2spk > data/${dset}_nosplit_fix/utt2spk + utils/utt2spk_to_spk2utt.pl data/${dset}_nosplit_fix/utt2spk > data/${dset}_nosplit_fix/spk2utt + done + + # Split speakers up into 3-minute chunks. This doesn't hurt adaptation, and + # lets us use more jobs for decoding etc. + for dset in ${test_sets}; do + utils/data/modify_speaker_info.sh --seconds-per-spk-max 180 data/${dset}_nosplit_fix data/${dset} + done +fi + + +if [ $stage -le 3 ]; then + # Now make MFCC features. + # mfccdir should be some place with a largish disk where you + # want to store MFCC features. + echo "$0: make features..." + mfccdir=mfcc + for x in ${test_sets}; do + steps/make_mfcc.sh --nj 20 --cmd "$train_cmd" \ + data/$x exp/make_mfcc/$x $mfccdir + steps/compute_cmvn_stats.sh data/$x exp/make_mfcc/$x $mfccdir + utils/fix_data_dir.sh data/$x + done +fi + + +nnet3_affix=_${train_set}_cleaned_rvb +lm_suffix= + +if [ $stage -le 4 ]; then + # First the options that are passed through to run_ivector_common.sh + # (some of which are also used in this script directly). + + # The rest are configs specific to this script. Most of the parameters + # are just hardcoded at this level, in the commands below. + echo "$0: decode data..." + affix=1b # affix for the TDNN directory name + tree_affix= + tree_dir=exp/chain${nnet3_affix}/tree_sp${tree_affix:+_$tree_affix} + dir=exp/chain${nnet3_affix}/tdnn${affix}_sp + + # training options + # training chunk-options + chunk_width=140,100,160 + # we don't need extra left/right context for TDNN systems. 
+ chunk_left_context=0 + chunk_right_context=0 + + utils/mkgraph.sh \ + --self-loop-scale 1.0 data/lang${lm_suffix}/ \ + $tree_dir $tree_dir/graph${lm_suffix} || exit 1; + + frames_per_chunk=$(echo $chunk_width | cut -d, -f1) + rm $dir/.error 2>/dev/null || true + + for data in $test_sets; do + ( + local/nnet3/decode.sh --affix 2stage --pass2-decode-opts "--min-active 1000" \ + --acwt 1.0 --post-decode-acwt 10.0 \ + --frames-per-chunk 150 --nj $decode_nj \ + --ivector-dir exp/nnet3${nnet3_affix} \ + data/${data} data/lang${lm_suffix} \ + $tree_dir/graph${lm_suffix} \ + exp/chain${nnet3_affix}/tdnn${affix}_sp + ) || touch $dir/.error & + done + wait + [ -f $dir/.error ] && echo "$0: there was a problem while decoding" && exit 1 +fi + +if [ $stage -le 5 ]; then + # final scoring to get the official challenge result + # please specify both dev and eval set directories so that the search parameters + # (insertion penalty and language model weight) will be tuned using the dev set + echo "$0: score data..." + + local/get_location.py $json_dir/dev > exp/chain_${train_set}_cleaned_rvb/tdnn1b_sp/decode_dev_${enhancement}_2stage/uttid_location + local/get_location.py $json_dir/eval > exp/chain_${train_set}_cleaned_rvb/tdnn1b_sp/decode_eval_${enhancement}_2stage/uttid_location + local/score_for_submit.sh \ + --dev exp/chain${nnet3_affix}/tdnn1b_sp/decode${lm_suffix}_dev_${enhancement}_2stage \ + --eval exp/chain${nnet3_affix}/tdnn1b_sp/decode${lm_suffix}_eval_${enhancement}_2stage +fi diff --git a/egs/chime6/s5_track1/local/run_wpe.py b/egs/chime6/s5_track1/local/run_wpe.py new file mode 100755 index 00000000000..2f3818f9c42 --- /dev/null +++ b/egs/chime6/s5_track1/local/run_wpe.py @@ -0,0 +1,59 @@ +#!/usr/bin/env python +# Copyright 2018 Johns Hopkins University (Author: Aswin Shanmugam Subramanian) +# Apache 2.0 +# Works with both python2 and python3 +# This script assumes that WPE (nara_wpe) is installed locally using miniconda. +# ../../../tools/extras/install_miniconda.sh and ../../../tools/extras/install_wpe.sh +# needs to be run and this script needs to be launched run with that version of +# python. +# See local/run_wpe.sh for example. 
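+# The --files list is split in half: the first half are input wav paths and the
+# second half the corresponding output wav paths (local/run_wpe.sh passes one
+# input/output pair per call), e.g. with illustrative filenames:
+#   python local/run_wpe.py --files in.CH1.wav out.CH1.wav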
+ +import numpy as np +import soundfile as sf +import time +import os, errno +from tqdm import tqdm +import argparse + +from nara_wpe.wpe import wpe +from nara_wpe.utils import stft, istft +from nara_wpe import project_root + +parser = argparse.ArgumentParser() +parser.add_argument('--files', '-f', nargs='+') +args = parser.parse_args() + +input_files = args.files[:len(args.files)//2] +output_files = args.files[len(args.files)//2:] +out_dir = os.path.dirname(output_files[0]) +try: + os.makedirs(out_dir) +except OSError as e: + if e.errno != errno.EEXIST: + raise + +stft_options = dict( + size=512, + shift=128, + window_length=None, + fading=True, + pad=True, + symmetric_window=False +) + +sampling_rate = 16000 +delay = 3 +iterations = 5 +taps = 10 + +signal_list = [ + sf.read(f)[0] + for f in input_files +] +y = np.stack(signal_list, axis=0) +Y = stft(y, **stft_options).transpose(2, 0, 1) +Z = wpe(Y, iterations=iterations, statistics_mode='full').transpose(1, 2, 0) +z = istft(Z, size=stft_options['size'], shift=stft_options['shift']) + +for d in range(len(signal_list)): + sf.write(output_files[d], z[d,:], sampling_rate) diff --git a/egs/chime6/s5_track1/local/run_wpe.sh b/egs/chime6/s5_track1/local/run_wpe.sh new file mode 100755 index 00000000000..ed512e69aae --- /dev/null +++ b/egs/chime6/s5_track1/local/run_wpe.sh @@ -0,0 +1,86 @@ +#!/bin/bash +# Copyright 2018 Johns Hopkins University (Author: Aswin Shanmugam Subramanian) +# Apache 2.0 + +. ./cmd.sh +. ./path.sh + +# Config: +nj=4 +cmd=run.pl + +. utils/parse_options.sh || exit 1; + +if [ $# != 3 ]; then + echo "Wrong #arguments ($#, expected 3)" + echo "Usage: local/run_wpe.sh [options] " + echo "main options (for others, see top of script file)" + echo " --cmd # Command to run in parallel with" + echo " --nj 50 # number of jobs for parallel processing" + exit 1; +fi + +sdir=$1 +odir=$2 +array=$3 +task=`basename $sdir` +expdir=exp/wpe/${task}_${array} +# Set bash to 'debug' mode, it will exit on : +# -e 'error', -u 'undefined variable', -o ... 'error in pipeline', -x 'print commands', +set -e +set -u +set -o pipefail + +miniconda_dir=$HOME/miniconda3/ +if [ ! -d $miniconda_dir ]; then + echo "$miniconda_dir does not exist. Please run '$KALDI_ROOT/tools/extras/install_miniconda.sh'." + exit 1 +fi + +# check if WPE is installed +result=`$miniconda_dir/bin/python -c "\ +try: + import nara_wpe + print('1') +except ImportError: + print('0')"` + +if [ "$result" == "1" ]; then + echo "WPE is installed" +else + echo "WPE is not installed. 
Please run ../../../tools/extras/install_wpe.sh" + exit 1 +fi + +mkdir -p $odir +mkdir -p $expdir/log + +# wavfiles.list can be used as the name of the output files +output_wavfiles=$expdir/wavfiles.list +find -L ${sdir} | grep -i ${array} > $expdir/channels_input +cat $expdir/channels_input | awk -F '/' '{print $NF}' | sed "s@S@$odir\/S@g" > $expdir/channels_output +paste -d" " $expdir/channels_input $expdir/channels_output > $output_wavfiles + +# split the list for parallel processing +split_wavfiles="" +for n in `seq $nj`; do + split_wavfiles="$split_wavfiles $output_wavfiles.$n" +done +utils/split_scp.pl $output_wavfiles $split_wavfiles || exit 1; + +echo -e "Dereverberation - $task - $array\n" +# making a shell script for each job +for n in `seq $nj`; do +cat <<-EOF > $expdir/log/wpe.$n.sh +while read line; do + $miniconda_dir/bin/python local/run_wpe.py \ + --file \$line +done < $output_wavfiles.$n +EOF +done + +chmod a+x $expdir/log/wpe.*.sh +$cmd JOB=1:$nj $expdir/log/wpe.JOB.log \ + $expdir/log/wpe.JOB.sh + +echo "`basename $0` Done." diff --git a/egs/chime6/s5_track1/local/score.sh b/egs/chime6/s5_track1/local/score.sh new file mode 120000 index 00000000000..6a200b42ed3 --- /dev/null +++ b/egs/chime6/s5_track1/local/score.sh @@ -0,0 +1 @@ +../steps/scoring/score_kaldi_wer.sh \ No newline at end of file diff --git a/egs/chime6/s5_track1/local/score_for_submit.sh b/egs/chime6/s5_track1/local/score_for_submit.sh new file mode 100755 index 00000000000..e6376074c6e --- /dev/null +++ b/egs/chime6/s5_track1/local/score_for_submit.sh @@ -0,0 +1,123 @@ +#!/bin/bash +# Copyright 2012-2014 Johns Hopkins University (Author: Daniel Povey, Yenda Trmal) +# Apache 2.0 +# +# This script provides official CHiME-5 challenge submission scores per room and session. +# It first calculates the best search parameter configurations by using the dev set +# and also create the transcriptions for dev and eval sets to be submitted. +# The default setup does not calculate scores of the evaluation set since +# the evaluation transcription is not distributed (July 9 2018) + +cmd=run.pl +dev=exp/chain_train_worn_u100k_cleaned/tdnn1a_sp/decode_dev_beamformit_ref +eval=exp/chain_train_worn_u100k_cleaned/tdnn1a_sp/decode_eval_beamformit_ref +do_eval=true + +echo "$0 $@" # Print the command line for logging +[ -f ./path.sh ] && . ./path.sh +. parse_options.sh || exit 1; + +if [ $# -ne 0 ]; then + echo "Usage: $0 [--cmd (run.pl|queue.pl...)]" + echo "This script provides official CHiME-5 challenge submission scores" + echo " Options:" + echo " --cmd (run.pl|queue.pl...) # specify how to run the sub-processes." 
+ echo " --dev # dev set decoding directory" + echo " --eval # eval set decoding directory" + exit 1; +fi + +# get language model weight and word insertion penalty from the dev set +best_lmwt=`cat $dev/scoring_kaldi/wer_details/lmwt` +best_wip=`cat $dev/scoring_kaldi/wer_details/wip` + +echo "best LM weight: $best_lmwt" +echo "insertion penalty weight: $best_wip" + +echo "==== development set ====" +# development set +# get the scoring result per utterance +local/replace_uttid.py $dev/uttid_location $dev/scoring_kaldi/wer_details/per_utt > $dev/scoring_kaldi/wer_details/per_utt_loc +score_result=$dev/scoring_kaldi/wer_details/per_utt_loc + +for session in S02 S09; do + for room in dining kitchen living; do + # get nerror + nerr=`grep "\#csid" $score_result | grep $room | grep $session | awk '{sum+=$4+$5+$6} END {print sum}'` + # get nwords from references (NF-2 means to exclude utterance id and " ref ") + nwrd=`grep "\#csid" $score_result | grep $room | grep $session | awk '{sum+=$3+$4+$6} END {print sum}'` + # compute wer with scale=2 + wer=`echo "scale=2; 100 * $nerr / $nwrd" | bc` + + # report the results + echo -n "session $session " + echo -n "room $room: " + echo -n "#words $nwrd, " + echo -n "#errors $nerr, " + echo "wer $wer %" + done +done +echo -n "overall: " +# get nerror +nerr=`grep "\#csid" $score_result | awk '{sum+=$4+$5+$6} END {print sum}'` +# get nwords from references (NF-2 means to exclude utterance id and " ref ") +nwrd=`grep "\#csid" $score_result | awk '{sum+=$3+$4+$6} END {print sum}'` +# compute wer with scale=2 +wer=`echo "scale=2; 100 * $nerr / $nwrd" | bc` +echo -n "#words $nwrd, " +echo -n "#errors $nerr, " +echo "wer $wer %" + +echo "==== evaluation set ====" +# evaluation set +# get the scoring result per utterance. Copied from local/score.sh +mkdir -p $eval/scoring_kaldi/wer_details_devbest +$cmd $eval/scoring_kaldi/log/stats1.log \ + cat $eval/scoring_kaldi/penalty_$best_wip/$best_lmwt.txt \| \ + align-text --special-symbol="'***'" ark:$eval/scoring_kaldi/test_filt.txt ark:- ark,t:- \| \ + utils/scoring/wer_per_utt_details.pl --special-symbol "'***'" \> $eval/scoring_kaldi/wer_details_devbest/per_utt + +local/replace_uttid.py $eval/uttid_location $eval/scoring_kaldi/wer_details_devbest/per_utt > $eval/scoring_kaldi/wer_details_devbest/per_utt_loc +score_result=$eval/scoring_kaldi/wer_details_devbest/per_utt_loc +for session in S01 S21; do + for room in dining kitchen living; do + if $do_eval; then + # get nerror + nerr=`grep "\#csid" $score_result | grep $room | grep $session | awk '{sum+=$4+$5+$6} END {print sum}'` + # get nwords from references (NF-2 means to exclude utterance id and " ref ") + nwrd=`grep "\#csid" $score_result | grep $room | grep $session | awk '{sum+=$3+$4+$6} END {print sum}'` + # compute wer with scale=2 + wer=`echo "scale=2; 100 * $nerr / $nwrd" | bc` + + # report the results + echo -n "session $session " + echo -n "room $room: " + echo -n "#words $nwrd, " + echo -n "#errors $nerr, " + echo "wer $wer %" + fi + done +done +if $do_eval; then + # get nerror + nerr=`grep "\#csid" $score_result | awk '{sum+=$4+$5+$6} END {print sum}'` + # get nwords from references (NF-2 means to exclude utterance id and " ref ") + nwrd=`grep "\#csid" $score_result | awk '{sum+=$3+$4+$6} END {print sum}'` + # compute wer with scale=2 + wer=`echo "scale=2; 100 * $nerr / $nwrd" | bc` + echo -n "overall: " + echo -n "#words $nwrd, " + echo -n "#errors $nerr, " + echo "wer $wer %" +else + echo "skip evaluation scoring" + echo "" + echo "==== when you submit 
your result to the CHiME-5 challenge ====" + echo "Please rename your recognition results of " + echo "$dev/scoring_kaldi/penalty_$best_wip/$best_lmwt.txt" + echo "$eval/scoring_kaldi/penalty_$best_wip/$best_lmwt.txt" + echo "with {dev,eval}__.txt, e.g., dev_watanabe_jhu.txt and eval_watanabe_jhu.txt, " + echo "and submit both of them as your final challenge result" + echo "==================================================================" +fi + diff --git a/egs/chime6/s5_track1/local/train_lms_srilm.sh b/egs/chime6/s5_track1/local/train_lms_srilm.sh new file mode 100755 index 00000000000..5a1d56d24b3 --- /dev/null +++ b/egs/chime6/s5_track1/local/train_lms_srilm.sh @@ -0,0 +1,261 @@ +#!/bin/bash +# Copyright (c) 2017 Johns Hopkins University (Author: Yenda Trmal, Shinji Watanabe) +# Apache 2.0 + +export LC_ALL=C + +# Begin configuration section. +words_file= +train_text= +dev_text= +oov_symbol="" +# End configuration section + +echo "$0 $@" + +[ -f path.sh ] && . ./path.sh +. ./utils/parse_options.sh || exit 1 + +echo "-------------------------------------" +echo "Building an SRILM language model " +echo "-------------------------------------" + +if [ $# -ne 2 ] ; then + echo "Incorrect number of parameters. " + echo "Script has to be called like this:" + echo " $0 [switches] " + echo "For example: " + echo " $0 data data/srilm" + echo "The allowed switches are: " + echo " words_file= word list file -- data/lang/words.txt by default" + echo " train_text= data/train/text is used in case when not specified" + echo " dev_text= last 10 % of the train text is used by default" + echo " oov_symbol=> symbol to use for oov modeling -- by default" + exit 1 +fi + +datadir=$1 +tgtdir=$2 + +##End of configuration +loc=`which ngram-count`; +if [ -z $loc ]; then + echo >&2 "You appear to not have SRILM tools installed, either on your path," + echo >&2 "Use the script \$KALDI_ROOT/tools/install_srilm.sh to install it." + exit 1 +fi + +# Prepare the destination directory +mkdir -p $tgtdir + +for f in $words_file $train_text $dev_text; do + [ ! -s $f ] && echo "No such file $f" && exit 1; +done + +[ -z $words_file ] && words_file=$datadir/lang/words.txt +if [ ! -z "$train_text" ] && [ -z "$dev_text" ] ; then + nr=`cat $train_text | wc -l` + nr_dev=$(($nr / 10 )) + nr_train=$(( $nr - $nr_dev )) + orig_train_text=$train_text + head -n $nr_train $train_text > $tgtdir/train_text + tail -n $nr_dev $train_text > $tgtdir/dev_text + + train_text=$tgtdir/train_text + dev_text=$tgtdir/dev_text + echo "Using words file: $words_file" + echo "Using train text: 9/10 of $orig_train_text" + echo "Using dev text : 1/10 of $orig_train_text" +elif [ ! -z "$train_text" ] && [ ! -z "$dev_text" ] ; then + echo "Using words file: $words_file" + echo "Using train text: $train_text" + echo "Using dev text : $dev_text" + train_text=$train_text + dev_text=$dev_text +else + train_text=$datadir/train/text + dev_text=$datadir/dev2h/text + echo "Using words file: $words_file" + echo "Using train text: $train_text" + echo "Using dev text : $dev_text" + +fi + +[ ! -f $words_file ] && echo >&2 "File $words_file must exist!" && exit 1 +[ ! -f $train_text ] && echo >&2 "File $train_text must exist!" && exit 1 +[ ! -f $dev_text ] && echo >&2 "File $dev_text must exist!" 
&& exit 1 + + +# Extract the word list from the training dictionary; exclude special symbols +sort $words_file | awk '{print $1}' | grep -v '\#0' | grep -v '' | grep -v -F "$oov_symbol" > $tgtdir/vocab +if (($?)); then + echo "Failed to create vocab from $words_file" + exit 1 +else + # wc vocab # doesn't work due to some encoding issues + echo vocab contains `cat $tgtdir/vocab | perl -ne 'BEGIN{$l=$w=0;}{split; $w+=$#_; $w++; $l++;}END{print "$l lines, $w words\n";}'` +fi + +# Kaldi transcript files contain Utterance_ID as the first word; remove it +# We also have to avoid skewing the LM by incorporating the same sentences +# from different channels +sed -e "s/\.CH.//" -e "s/_.\-./_/" -e "s/NOLOCATION\(\.[LR]\)*-//" -e "s/U[0-9][0-9]_//" $train_text | sort -u | \ + perl -ane 'print join(" ", @F[1..$#F]) . "\n" if @F > 1' > $tgtdir/train.txt +if (($?)); then + echo "Failed to create $tgtdir/train.txt from $train_text" + exit 1 +else + echo "Removed first word (uid) from every line of $train_text" + # wc text.train train.txt # doesn't work due to some encoding issues + echo $train_text contains `cat $train_text | perl -ane 'BEGIN{$w=$s=0;}{$w+=@F; $w--; $s++;}END{print "$w words, $s sentences\n";}'` + echo train.txt contains `cat $tgtdir/train.txt | perl -ane 'BEGIN{$w=$s=0;}{$w+=@F; $s++;}END{print "$w words, $s sentences\n";}'` +fi + +# Kaldi transcript files contain Utterance_ID as the first word; remove it +sed -e "s/\.CH.//" -e "s/_.\-./_/" $dev_text | sort -u | \ + perl -ane 'print join(" ", @F[1..$#F]) . "\n" if @F > 1' > $tgtdir/dev.txt +if (($?)); then + echo "Failed to create $tgtdir/dev.txt from $dev_text" + exit 1 +else + echo "Removed first word (uid) from every line of $dev_text" + # wc text.train train.txt # doesn't work due to some encoding issues + echo $dev_text contains `cat $dev_text | perl -ane 'BEGIN{$w=$s=0;}{$w+=@F; $w--; $s++;}END{print "$w words, $s sentences\n";}'` + echo $tgtdir/dev.txt contains `cat $tgtdir/dev.txt | perl -ane 'BEGIN{$w=$s=0;}{$w+=@F; $s++;}END{print "$w words, $s sentences\n";}'` +fi + + +echo "-------------------" +echo "Good-Turing 3grams" +echo "-------------------" +ngram-count -lm $tgtdir/3gram.gt011.gz -gt1min 0 -gt2min 1 -gt3min 1 -order 3 \ + -text $tgtdir/train.txt -vocab $tgtdir/vocab -unk -sort -map-unk "$oov_symbol" +ngram-count -lm $tgtdir/3gram.gt012.gz -gt1min 0 -gt2min 1 -gt3min 2 -order 3 \ + -text $tgtdir/train.txt -vocab $tgtdir/vocab -unk -sort -map-unk "$oov_symbol" +ngram-count -lm $tgtdir/3gram.gt022.gz -gt1min 0 -gt2min 2 -gt3min 2 -order 3 \ + -text $tgtdir/train.txt -vocab $tgtdir/vocab -unk -sort -map-unk "$oov_symbol" +ngram-count -lm $tgtdir/3gram.gt023.gz -gt1min 0 -gt2min 2 -gt3min 3 -order 3 \ + -text $tgtdir/train.txt -vocab $tgtdir/vocab -unk -sort -map-unk "$oov_symbol" + +echo "-------------------" +echo "Kneser-Ney 3grams" +echo "-------------------" +ngram-count -lm $tgtdir/3gram.kn011.gz -kndiscount1 -gt1min 0 \ + -kndiscount2 -gt2min 1 -kndiscount3 -gt3min 1 -order 3 -interpolate \ + -text $tgtdir/train.txt -vocab $tgtdir/vocab -unk -sort -map-unk "$oov_symbol" +ngram-count -lm $tgtdir/3gram.kn012.gz -kndiscount1 -gt1min 0 \ + -kndiscount2 -gt2min 1 -kndiscount3 -gt3min 2 -order 3 -interpolate \ + -text $tgtdir/train.txt -vocab $tgtdir/vocab -unk -sort -map-unk "$oov_symbol" +ngram-count -lm $tgtdir/3gram.kn022.gz -kndiscount1 -gt1min 0 \ + -kndiscount2 -gt2min 2 -kndiscount3 -gt3min 2 -order 3 -interpolate \ + -text $tgtdir/train.txt -vocab $tgtdir/vocab -unk -sort -map-unk "$oov_symbol" +ngram-count -lm 
$tgtdir/3gram.kn023.gz -kndiscount1 -gt1min 0 \ + -kndiscount2 -gt2min 2 -kndiscount3 -gt3min 3 -order 3 -interpolate \ + -text $tgtdir/train.txt -vocab $tgtdir/vocab -unk -sort -map-unk "$oov_symbol" +ngram-count -lm $tgtdir/3gram.kn111.gz -kndiscount1 -gt1min 1 \ + -kndiscount2 -gt2min 1 -kndiscount3 -gt3min 1 -order 3 -interpolate \ + -text $tgtdir/train.txt -vocab $tgtdir/vocab -unk -sort -map-unk "$oov_symbol" +ngram-count -lm $tgtdir/3gram.kn112.gz -kndiscount1 -gt1min 1 \ + -kndiscount2 -gt2min 1 -kndiscount3 -gt3min 2 -order 3 -interpolate \ + -text $tgtdir/train.txt -vocab $tgtdir/vocab -unk -sort -map-unk "$oov_symbol" +ngram-count -lm $tgtdir/3gram.kn122.gz -kndiscount1 -gt1min 1 \ + -kndiscount2 -gt2min 2 -kndiscount3 -gt3min 2 -order 3 -interpolate \ + -text $tgtdir/train.txt -vocab $tgtdir/vocab -unk -sort -map-unk "$oov_symbol" +ngram-count -lm $tgtdir/3gram.kn123.gz -kndiscount1 -gt1min 1 \ + -kndiscount2 -gt2min 2 -kndiscount3 -gt3min 3 -order 3 -interpolate \ + -text $tgtdir/train.txt -vocab $tgtdir/vocab -unk -sort -map-unk "$oov_symbol" + + +echo "-------------------" +echo "Good-Turing 4grams" +echo "-------------------" +ngram-count -lm $tgtdir/4gram.gt0111.gz \ + -gt1min 0 -gt2min 1 -gt3min 1 -gt4min 1 -order 4 \ + -text $tgtdir/train.txt -vocab $tgtdir/vocab -unk -sort -map-unk "$oov_symbol" +ngram-count -lm $tgtdir/4gram.gt0112.gz \ + -gt1min 0 -gt2min 1 -gt3min 1 -gt4min 2 -order 4 \ + -text $tgtdir/train.txt -vocab $tgtdir/vocab -unk -sort -map-unk "$oov_symbol" +ngram-count -lm $tgtdir/4gram.gt0122.gz \ + -gt1min 0 -gt2min 1 -gt3min 2 -gt4min 2 -order 4 \ + -text $tgtdir/train.txt -vocab $tgtdir/vocab -unk -sort -map-unk "$oov_symbol" +ngram-count -lm $tgtdir/4gram.gt0123.gz \ + -gt1min 0 -gt2min 1 -gt3min 2 -gt4min 3 -order 4 \ + -text $tgtdir/train.txt -vocab $tgtdir/vocab -unk -sort -map-unk "$oov_symbol" +ngram-count -lm $tgtdir/4gram.gt0113.gz \ + -gt1min 0 -gt2min 1 -gt3min 1 -gt4min 3 -order 4 \ + -text $tgtdir/train.txt -vocab $tgtdir/vocab -unk -sort -map-unk "$oov_symbol" +ngram-count -lm $tgtdir/4gram.gt0222.gz \ + -gt1min 0 -gt2min 2 -gt3min 2 -gt4min 2 -order 4 \ + -text $tgtdir/train.txt -vocab $tgtdir/vocab -unk -sort -map-unk "$oov_symbol" +ngram-count -lm $tgtdir/4gram.gt0223.gz \ + -gt1min 0 -gt2min 2 -gt3min 2 -gt4min 3 -order 4 \ + -text $tgtdir/train.txt -vocab $tgtdir/vocab -unk -sort -map-unk "$oov_symbol" + +echo "-------------------" +echo "Kneser-Ney 4grams" +echo "-------------------" +ngram-count -lm $tgtdir/4gram.kn0111.gz \ + -kndiscount1 -gt1min 0 -kndiscount2 -gt2min 1 -kndiscount3 -gt3min 1 -kndiscount4 -gt4min 1 -order 4 \ + -text $tgtdir/train.txt -vocab $tgtdir/vocab -unk -sort -map-unk "$oov_symbol" +ngram-count -lm $tgtdir/4gram.kn0112.gz \ + -kndiscount1 -gt1min 0 -kndiscount2 -gt2min 1 -kndiscount3 -gt3min 1 -kndiscount4 -gt4min 2 -order 4 \ + -text $tgtdir/train.txt -vocab $tgtdir/vocab -unk -sort -map-unk "$oov_symbol" +ngram-count -lm $tgtdir/4gram.kn0113.gz \ + -kndiscount1 -gt1min 0 -kndiscount2 -gt2min 1 -kndiscount3 -gt3min 1 -kndiscount4 -gt4min 3 -order 4 \ + -text $tgtdir/train.txt -vocab $tgtdir/vocab -unk -sort -map-unk "$oov_symbol" +ngram-count -lm $tgtdir/4gram.kn0122.gz \ + -kndiscount1 -gt1min 0 -kndiscount2 -gt2min 1 -kndiscount3 -gt3min 2 -kndiscount4 -gt4min 2 -order 4 \ + -text $tgtdir/train.txt -vocab $tgtdir/vocab -unk -sort -map-unk "$oov_symbol" +ngram-count -lm $tgtdir/4gram.kn0123.gz \ + -kndiscount1 -gt1min 0 -kndiscount2 -gt2min 1 -kndiscount3 -gt3min 2 -kndiscount4 -gt4min 3 -order 4 \ + -text 
$tgtdir/train.txt -vocab $tgtdir/vocab -unk -sort -map-unk "$oov_symbol" +ngram-count -lm $tgtdir/4gram.kn0222.gz \ + -kndiscount1 -gt1min 0 -kndiscount2 -gt2min 2 -kndiscount3 -gt3min 2 -kndiscount4 -gt4min 2 -order 4 \ + -text $tgtdir/train.txt -vocab $tgtdir/vocab -unk -sort -map-unk "$oov_symbol" +ngram-count -lm $tgtdir/4gram.kn0223.gz \ + -kndiscount1 -gt1min 0 -kndiscount2 -gt2min 2 -kndiscount3 -gt3min 2 -kndiscount4 -gt4min 3 -order 4 \ + -text $tgtdir/train.txt -vocab $tgtdir/vocab -unk -sort -map-unk "$oov_symbol" + +if [ ! -z ${LIBLBFGS} ]; then + #please note that if the switch -map-unk "$oov_symbol" is used with -maxent-convert-to-arpa, ngram-count will segfault + #instead of that, we simply output the model in the maxent format and convert it using the "ngram" + echo "-------------------" + echo "Maxent 3grams" + echo "-------------------" + sed 's/'${oov_symbol}'/<unk>/g' $tgtdir/train.txt | \ + ngram-count -lm - -order 3 -text - -vocab $tgtdir/vocab -unk -sort -maxent -maxent-convert-to-arpa|\ + ngram -lm - -order 3 -unk -map-unk "$oov_symbol" -prune-lowprobs -write-lm - |\ + sed 's/<unk>/'${oov_symbol}'/g' | gzip -c > $tgtdir/3gram.me.gz || exit 1 + + echo "-------------------" + echo "Maxent 4grams" + echo "-------------------" + sed 's/'${oov_symbol}'/<unk>/g' $tgtdir/train.txt | \ + ngram-count -lm - -order 4 -text - -vocab $tgtdir/vocab -unk -sort -maxent -maxent-convert-to-arpa|\ + ngram -lm - -order 4 -unk -map-unk "$oov_symbol" -prune-lowprobs -write-lm - |\ + sed 's/<unk>/'${oov_symbol}'/g' | gzip -c > $tgtdir/4gram.me.gz || exit 1 +else + echo >&2 "SRILM is not compiled with the support of MaxEnt models." + echo >&2 "You should use the script in \$KALDI_ROOT/tools/install_srilm.sh" + echo >&2 "which will take care of compiling the SRILM with MaxEnt support" + exit 1; +fi + + +echo "--------------------" +echo "Computing perplexity" +echo "--------------------" +( + for f in $tgtdir/3gram* ; do ( echo $f; ngram -order 3 -lm $f -unk -map-unk "$oov_symbol" -prune-lowprobs -ppl $tgtdir/dev.txt ) | paste -s -d ' ' ; done + for f in $tgtdir/4gram* ; do ( echo $f; ngram -order 4 -lm $f -unk -map-unk "$oov_symbol" -prune-lowprobs -ppl $tgtdir/dev.txt ) | paste -s -d ' ' ; done +) | sort -r -n -k 15,15g | column -t | tee $tgtdir/perplexities.txt + +echo "The perplexity scores report is stored in $tgtdir/perplexities.txt " +echo "" + +for best_ngram in {3,4}gram ; do + outlm=best_${best_ngram}.gz + lmfilename=$(grep "${best_ngram}" $tgtdir/perplexities.txt | head -n 1 | cut -f 1 -d ' ') + echo "$outlm -> $lmfilename" + (cd $tgtdir; rm -f $outlm; ln -sf $(basename $lmfilename) $outlm ) +done diff --git a/egs/chime6/s5_track1/local/wer_output_filter b/egs/chime6/s5_track1/local/wer_output_filter new file mode 100755 index 00000000000..6f4b6400716 --- /dev/null +++ b/egs/chime6/s5_track1/local/wer_output_filter @@ -0,0 +1,25 @@ +#!/bin/bash +# Copyright (c) 2017 Johns Hopkins University (Author: Yenda Trmal) +# Apache 2.0 + + +## Filter for scoring of the STT results. Convert everything to lowercase +## and add some ad-hoc fixes for the hesitations + +perl -e ' + while(<>) { + @A = split(" ", $_); + $id = shift @A; print "$id "; + foreach $a (@A) { + print lc($a) . 
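+      # Illustrative example (hypothetical utterance, added here as a descriptive comment): for a
+      # hypothesis line "P05_S02-0001234-0001456 [noise] OKAY Sounds Good", the utterance ID is
+      # printed unchanged, the bracketed tag is skipped by the /\[.*\]/ check that follows, and the
+      # remaining words are lower-cased, giving "P05_S02-0001234-0001456 okay sounds good".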
" " unless $a =~ /\[.*\]/; + } + print "\n"; + }' | \ +sed -e ' + s/\/hmm/g; + s/\/hmm/g; + s/\/hmm/g; +' + +#| uconv -f utf-8 -t utf-8 -x Latin-ASCII + diff --git a/egs/chime6/s5_track1/local/worn_audio_list b/egs/chime6/s5_track1/local/worn_audio_list new file mode 100644 index 00000000000..fc7a44ad77d --- /dev/null +++ b/egs/chime6/s5_track1/local/worn_audio_list @@ -0,0 +1,64 @@ +/export/corpora4/CHiME5/audio/train/S03_P09.wav +/export/corpora4/CHiME5/audio/train/S03_P10.wav +/export/corpora4/CHiME5/audio/train/S03_P11.wav +/export/corpora4/CHiME5/audio/train/S03_P12.wav +/export/corpora4/CHiME5/audio/train/S04_P09.wav +/export/corpora4/CHiME5/audio/train/S04_P10.wav +/export/corpora4/CHiME5/audio/train/S04_P11.wav +/export/corpora4/CHiME5/audio/train/S04_P12.wav +/export/corpora4/CHiME5/audio/train/S05_P13.wav +/export/corpora4/CHiME5/audio/train/S05_P14.wav +/export/corpora4/CHiME5/audio/train/S05_P15.wav +/export/corpora4/CHiME5/audio/train/S05_P16.wav +/export/corpora4/CHiME5/audio/train/S06_P13.wav +/export/corpora4/CHiME5/audio/train/S06_P14.wav +/export/corpora4/CHiME5/audio/train/S06_P15.wav +/export/corpora4/CHiME5/audio/train/S06_P16.wav +/export/corpora4/CHiME5/audio/train/S07_P17.wav +/export/corpora4/CHiME5/audio/train/S07_P18.wav +/export/corpora4/CHiME5/audio/train/S07_P19.wav +/export/corpora4/CHiME5/audio/train/S07_P20.wav +/export/corpora4/CHiME5/audio/train/S08_P21.wav +/export/corpora4/CHiME5/audio/train/S08_P22.wav +/export/corpora4/CHiME5/audio/train/S08_P23.wav +/export/corpora4/CHiME5/audio/train/S08_P24.wav +/export/corpora4/CHiME5/audio/train/S12_P33.wav +/export/corpora4/CHiME5/audio/train/S12_P34.wav +/export/corpora4/CHiME5/audio/train/S12_P35.wav +/export/corpora4/CHiME5/audio/train/S12_P36.wav +/export/corpora4/CHiME5/audio/train/S13_P33.wav +/export/corpora4/CHiME5/audio/train/S13_P34.wav +/export/corpora4/CHiME5/audio/train/S13_P35.wav +/export/corpora4/CHiME5/audio/train/S13_P36.wav +/export/corpora4/CHiME5/audio/train/S16_P21.wav +/export/corpora4/CHiME5/audio/train/S16_P22.wav +/export/corpora4/CHiME5/audio/train/S16_P23.wav +/export/corpora4/CHiME5/audio/train/S16_P24.wav +/export/corpora4/CHiME5/audio/train/S17_P17.wav +/export/corpora4/CHiME5/audio/train/S17_P18.wav +/export/corpora4/CHiME5/audio/train/S17_P19.wav +/export/corpora4/CHiME5/audio/train/S17_P20.wav +/export/corpora4/CHiME5/audio/train/S18_P41.wav +/export/corpora4/CHiME5/audio/train/S18_P42.wav +/export/corpora4/CHiME5/audio/train/S18_P43.wav +/export/corpora4/CHiME5/audio/train/S18_P44.wav +/export/corpora4/CHiME5/audio/train/S19_P49.wav +/export/corpora4/CHiME5/audio/train/S19_P50.wav +/export/corpora4/CHiME5/audio/train/S19_P51.wav +/export/corpora4/CHiME5/audio/train/S19_P52.wav +/export/corpora4/CHiME5/audio/train/S20_P49.wav +/export/corpora4/CHiME5/audio/train/S20_P50.wav +/export/corpora4/CHiME5/audio/train/S20_P51.wav +/export/corpora4/CHiME5/audio/train/S20_P52.wav +/export/corpora4/CHiME5/audio/train/S22_P41.wav +/export/corpora4/CHiME5/audio/train/S22_P42.wav +/export/corpora4/CHiME5/audio/train/S22_P43.wav +/export/corpora4/CHiME5/audio/train/S22_P44.wav +/export/corpora4/CHiME5/audio/train/S23_P53.wav +/export/corpora4/CHiME5/audio/train/S23_P54.wav +/export/corpora4/CHiME5/audio/train/S23_P55.wav +/export/corpora4/CHiME5/audio/train/S23_P56.wav +/export/corpora4/CHiME5/audio/train/S24_P53.wav +/export/corpora4/CHiME5/audio/train/S24_P54.wav +/export/corpora4/CHiME5/audio/train/S24_P55.wav +/export/corpora4/CHiME5/audio/train/S24_P56.wav diff --git 
a/egs/chime6/s5_track1/path.sh b/egs/chime6/s5_track1/path.sh new file mode 100644 index 00000000000..fb1c0489386 --- /dev/null +++ b/egs/chime6/s5_track1/path.sh @@ -0,0 +1,7 @@ +export KALDI_ROOT=`pwd`/../../.. +[ -f $KALDI_ROOT/tools/env.sh ] && . $KALDI_ROOT/tools/env.sh +export PATH=$PWD/utils/:$KALDI_ROOT/tools/openfst/bin:$PWD:$PATH +[ ! -f $KALDI_ROOT/tools/config/common_path.sh ] && echo >&2 "The standard file $KALDI_ROOT/tools/config/common_path.sh is not present -> Exit!" && exit 1 +. $KALDI_ROOT/tools/config/common_path.sh +export LC_ALL=C + diff --git a/egs/chime6/s5_track1/run.sh b/egs/chime6/s5_track1/run.sh new file mode 100755 index 00000000000..b25e7a2101f --- /dev/null +++ b/egs/chime6/s5_track1/run.sh @@ -0,0 +1,329 @@ +#!/bin/bash +# +# Based mostly on the TED-LIUM and Switchboard recipe +# +# Copyright 2017 Johns Hopkins University (Author: Shinji Watanabe and Yenda Trmal) +# Apache 2.0 +# + +# Begin configuration section. +nj=96 +decode_nj=20 +stage=0 +nnet_stage=-10 +num_data_reps=4 +snrs="20:10:15:5:0" +foreground_snrs="20:10:15:5:0" +background_snrs="20:10:15:5:0" +use_multiarray=false +enhancement=gss # for a new enhancement method, + # change this variable and stage 4 +# End configuration section +. ./utils/parse_options.sh + +. ./cmd.sh +. ./path.sh + + +set -e # exit on error + +# chime5 main directory path +# please change the path accordingly +chime5_corpus=/export/corpora4/CHiME5 +json_dir=${chime5_corpus}/transcriptions +audio_dir=${chime5_corpus}/audio + +# training and test data +train_set=train_worn_simu_u400k +test_sets="dev_${enhancement}" #"dev_worn dev_addition_dereverb_ref" +#test_sets="dev_${enhancement}_ref" #"dev_worn dev_addition_dereverb_ref" + +# This script also needs the phonetisaurus g2p, srilm, beamformit +./local/check_tools.sh || exit 1 + +if [ $stage -le 1 ]; then + echo "$0: prepare data..." + # skip u03 as they are missing + for mictype in worn u01 u02 u04 u05 u06; do + local/prepare_data.sh --mictype ${mictype} \ + ${audio_dir}/train ${json_dir}/train data/train_${mictype} + done + for dataset in dev; do + for mictype in worn; do + local/prepare_data.sh --mictype ${mictype} \ + ${audio_dir}/${dataset} ${json_dir}/${dataset} \ + data/${dataset}_${mictype} + done + done +fi + +if [ $stage -le 2 ]; then + echo "$0: train lm ..." + local/prepare_dict.sh + + utils/prepare_lang.sh \ + data/local/dict "<unk>" data/local/lang data/lang + + local/train_lms_srilm.sh \ + --train-text data/train_worn/text --dev-text data/dev_worn/text \ + --oov-symbol "<unk>" --words-file data/lang/words.txt \ + data/ data/srilm +fi + +LM=data/srilm/best_3gram.gz +if [ $stage -le 3 ]; then + # Compiles G for chime5 trigram LM + echo "$0: prepare lang..." + utils/format_lm.sh \ + data/lang $LM data/local/dict/lexicon.txt data/lang + +fi + +enhanced_dir=enhanced +if $use_multiarray; then + enhanced_dir=${enhanced_dir}_multiarray + enhancement=${enhancement}_multiarray +fi + +enhanced_dir=$(utils/make_absolute.sh $enhanced_dir) || exit 1 + +if [ $stage -le 4 ]; then + echo "$0: enhance data..." + # Guided Source Separation (GSS) from Paderborn University + # http://spandh.dcs.shef.ac.uk/chime_workshop/papers/CHiME_2018_paper_boeddecker.pdf + # @Article{PB2018CHiME5, + # author = {Boeddeker, Christoph and Heitkaemper, Jens and Schmalenstroeer, Joerg and Drude, Lukas and Heymann, Jahn and Haeb-Umbach, Reinhold}, + # title = {{Front-End Processing for the CHiME-5 Dinner Party Scenario}}, + # year = {2018}, + # booktitle = {CHiME5 Workshop}, + # } + + if [ ! 
-d pb_chime5/ ]; then + local/install_pb_chime5.sh + fi + + if [ ! -f pb_chime5/cache/chime5.json ]; then + ( + cd pb_chime5 + miniconda_dir=$HOME/miniconda3/ + export PATH=$miniconda_dir/bin:$PATH + export CHIME5_DIR=$chime5_corpus + make cache/chime5.json + ) + fi + + for dset in dev eval; do + local/run_gss.sh \ + --cmd "$train_cmd --max-jobs-run 30" --nj 160 \ + --use-multiarray $use_multiarray \ + ${dset} \ + ${enhanced_dir} \ + ${enhanced_dir} || exit 1 + done + + for dset in dev eval; do + local/prepare_data.sh --mictype gss ${enhanced_dir}/audio/${dset} \ + ${json_dir}/${dset} data/${dset}_${enhancement} || exit 1 + done +fi + +if [ $stage -le 5 ]; then + # remove possibly bad sessions (P11_S03, P52_S19, P53_S24, P54_S24) + # see http://spandh.dcs.shef.ac.uk/chime_challenge/data.html for more details + utils/copy_data_dir.sh data/train_worn data/train_worn_org # back up + grep -v -e "^P11_S03" -e "^P52_S19" -e "^P53_S24" -e "^P54_S24" data/train_worn_org/text > data/train_worn/text + utils/fix_data_dir.sh data/train_worn +fi + +if [ $stage -le 6 ]; then + local/extract_noises.py $chime5_corpus/audio/train $chime5_corpus/transcriptions/train \ + local/distant_audio_list distant_noises + local/make_noise_list.py distant_noises > distant_noise_list + + noise_list=distant_noise_list + + if [ ! -d RIRS_NOISES/ ]; then + # Download the package that includes the real RIRs, simulated RIRs, isotropic noises and point-source noises + wget --no-check-certificate http://www.openslr.org/resources/28/rirs_noises.zip + unzip rirs_noises.zip + fi + + # This is the config for the system using simulated RIRs and point-source noises + rvb_opts+=(--rir-set-parameters "0.5, RIRS_NOISES/simulated_rirs/smallroom/rir_list") + rvb_opts+=(--rir-set-parameters "0.5, RIRS_NOISES/simulated_rirs/mediumroom/rir_list") + rvb_opts+=(--noise-set-parameters $noise_list) + + steps/data/reverberate_data_dir.py \ + "${rvb_opts[@]}" \ + --prefix "rev" \ + --foreground-snrs $foreground_snrs \ + --background-snrs $background_snrs \ + --speech-rvb-probability 1 \ + --pointsource-noise-addition-probability 1 \ + --isotropic-noise-addition-probability 1 \ + --num-replications $num_data_reps \ + --max-noises-per-minute 1 \ + --source-sampling-rate 16000 \ + data/train_worn data/train_worn_rvb +fi + +if [ $stage -le 7 ]; then + # combine mix array and worn mics + # randomly extract first 400k utterances from all mics + # if you want to include more training data, you can increase the number of array mic utterances + utils/combine_data.sh data/train_uall data/train_u01 data/train_u02 data/train_u04 data/train_u05 data/train_u06 + utils/subset_data_dir.sh data/train_uall 400000 data/train_u400k + utils/combine_data.sh data/${train_set} data/train_worn data/train_worn_rvb data/train_u400k + + # only use left channel for worn mic recognition + # you can use both left and right channels for training + for dset in train dev; do + utils/copy_data_dir.sh data/${dset}_worn data/${dset}_worn_stereo + grep "\.L-" data/${dset}_worn_stereo/text > data/${dset}_worn/text + utils/fix_data_dir.sh data/${dset}_worn + done +fi + +if [ $stage -le 8 ]; then + # fix speaker ID issue (thanks to Dr. 
Naoyuki Kanda) + # add array ID to the speaker ID to avoid the use of other array information to meet regulations + # Before this fix + # $ head -n 2 data/eval_beamformit_ref_nosplit/utt2spk + # P01_S01_U02_KITCHEN.ENH-0000192-0001278 P01 + # P01_S01_U02_KITCHEN.ENH-0001421-0001481 P01 + # After this fix + # $ head -n 2 data/eval_beamformit_ref_nosplit_fix/utt2spk + # P01_S01_U02_KITCHEN.ENH-0000192-0001278 P01_U02 + # P01_S01_U02_KITCHEN.ENH-0001421-0001481 P01_U02 + for dset in ${test_sets}; do + utils/copy_data_dir.sh data/${dset} data/${dset}_nosplit + mkdir -p data/${dset}_nosplit_fix + for f in segments text wav.scp; do + if [ -f data/${dset}_nosplit/$f ]; then + cp data/${dset}_nosplit/$f data/${dset}_nosplit_fix + fi + done + awk -F "_" '{print $0 "_" $3}' data/${dset}_nosplit/utt2spk > data/${dset}_nosplit_fix/utt2spk + utils/utt2spk_to_spk2utt.pl data/${dset}_nosplit_fix/utt2spk > data/${dset}_nosplit_fix/spk2utt + done + + # Split speakers up into 3-minute chunks. This doesn't hurt adaptation, and + # lets us use more jobs for decoding etc. + for dset in ${train_set} dev_worn; do + utils/copy_data_dir.sh data/${dset} data/${dset}_nosplit + utils/data/modify_speaker_info.sh --seconds-per-spk-max 180 data/${dset}_nosplit data/${dset} + done + for dset in ${test_sets}; do + utils/data/modify_speaker_info.sh --seconds-per-spk-max 180 data/${dset}_nosplit_fix data/${dset} + done +fi + +if [ $stage -le 9 ]; then + # Now make MFCC features. + # mfccdir should be some place with a largish disk where you + # want to store MFCC features. + echo "$0: make features..." + mfccdir=mfcc + for x in ${train_set} ${test_sets}; do + steps/make_mfcc.sh --nj 20 --cmd "$train_cmd" \ + data/$x exp/make_mfcc/$x $mfccdir + steps/compute_cmvn_stats.sh data/$x exp/make_mfcc/$x $mfccdir + utils/fix_data_dir.sh data/$x + done +fi + +if [ $stage -le 10 ]; then + # make a subset for monophone training + utils/subset_data_dir.sh --shortest data/${train_set} 100000 data/${train_set}_100kshort + utils/subset_data_dir.sh data/${train_set}_100kshort 30000 data/${train_set}_30kshort +fi + +if [ $stage -le 11 ]; then + # Starting basic training on MFCC features + steps/train_mono.sh --nj $nj --cmd "$train_cmd" \ + data/${train_set}_30kshort data/lang exp/mono +fi + +if [ $stage -le 12 ]; then + steps/align_si.sh --nj $nj --cmd "$train_cmd" \ + data/${train_set} data/lang exp/mono exp/mono_ali + + steps/train_deltas.sh --cmd "$train_cmd" \ + 2500 30000 data/${train_set} data/lang exp/mono_ali exp/tri1 +fi + +if [ $stage -le 13 ]; then + steps/align_si.sh --nj $nj --cmd "$train_cmd" \ + data/${train_set} data/lang exp/tri1 exp/tri1_ali + + steps/train_lda_mllt.sh --cmd "$train_cmd" \ + 4000 50000 data/${train_set} data/lang exp/tri1_ali exp/tri2 +fi + +if [ $stage -le 14 ]; then + utils/mkgraph.sh data/lang exp/tri2 exp/tri2/graph + for dset in ${test_sets}; do + steps/decode.sh --nj $decode_nj --cmd "$decode_cmd" --num-threads 4 \ + exp/tri2/graph data/${dset} exp/tri2/decode_${dset} & + done + wait +fi + +if [ $stage -le 15 ]; then + steps/align_si.sh --nj $nj --cmd "$train_cmd" \ + data/${train_set} data/lang exp/tri2 exp/tri2_ali + + steps/train_sat.sh --cmd "$train_cmd" \ + 5000 100000 data/${train_set} data/lang exp/tri2_ali exp/tri3 +fi + +if [ $stage -le 16 ]; then + utils/mkgraph.sh data/lang exp/tri3 exp/tri3/graph + for dset in ${test_sets}; do + steps/decode_fmllr.sh --nj $decode_nj --cmd "$decode_cmd" --num-threads 4 \ + exp/tri3/graph data/${dset} exp/tri3/decode_${dset} & + done + wait +fi + +if [ 
$stage -le 17 ]; then + # The following script cleans the data and produces cleaned data + steps/cleanup/clean_and_segment_data.sh --nj ${nj} --cmd "$train_cmd" \ + --segmentation-opts "--min-segment-length 0.3 --min-new-segment-length 0.6" \ + data/${train_set} data/lang exp/tri3 exp/tri3_cleaned data/${train_set}_cleaned +fi + +if [ $stage -le 18 ]; then + # chain TDNN + local/chain/tuning/run_tdnn_1b.sh --nj ${nj} \ + --stage $nnet_stage \ + --train-set ${train_set}_cleaned \ + --test-sets "$test_sets" \ + --gmm tri3_cleaned --nnet3-affix _${train_set}_cleaned_rvb +fi + +if [ $stage -le 19 ]; then + # 2-stage decoding + for test_set in $test_sets; do + local/nnet3/decode.sh --affix 2stage --pass2-decode-opts "--min-active 1000" \ + --acwt 1.0 --post-decode-acwt 10.0 \ + --frames-per-chunk 150 --nj $decode_nj \ + --ivector-dir exp/nnet3_${train_set}_cleaned_rvb \ + data/${test_set} data/lang_chain \ + exp/chain_${train_set}_cleaned_rvb/tree_sp/graph \ + exp/chain_${train_set}_cleaned_rvb/tdnn1b_sp + done +fi + +if [ $stage -le 20 ]; then + # final scoring to get the official challenge result + # please specify both dev and eval set directories so that the search parameters + # (insertion penalty and language model weight) will be tuned using the dev set + + local/get_location.py $json_dir/dev > exp/chain_${train_set}_cleaned_rvb/tdnn1b_sp/decode_dev_${enhancement}_2stage/uttid_location + local/get_location.py $json_dir/eval > exp/chain_${train_set}_cleaned_rvb/tdnn1b_sp/decode_eval_${enhancement}_2stage/uttid_location + + local/score_for_submit.sh \ + --dev exp/chain_${train_set}_cleaned_rvb/tdnn1b_sp/decode_dev_${enhancement}_2stage \ + --eval exp/chain_${train_set}_cleaned_rvb/tdnn1b_sp/decode_eval_${enhancement}_2stage +fi diff --git a/egs/chime6/s5_track1/steps b/egs/chime6/s5_track1/steps new file mode 120000 index 00000000000..1b186770dd1 --- /dev/null +++ b/egs/chime6/s5_track1/steps @@ -0,0 +1 @@ +../../wsj/s5/steps/ \ No newline at end of file diff --git a/egs/chime6/s5_track1/utils b/egs/chime6/s5_track1/utils new file mode 120000 index 00000000000..a3279dc8679 --- /dev/null +++ b/egs/chime6/s5_track1/utils @@ -0,0 +1 @@ +../../wsj/s5/utils/ \ No newline at end of file From 30f3556851921eb2d8e955d09a319a7998b229fb Mon Sep 17 00:00:00 2001 From: aarora8 Date: Mon, 18 Nov 2019 12:11:37 -0500 Subject: [PATCH 2/2] modification from review: adding comments, removing multicondition scripts, worn_audio_list --- .../local/chain/multi_condition/run_tdnn.sh | 1 - .../chain/multi_condition/run_tdnn_lstm.sh | 1 - .../tuning/run_cnn_tdnn_lstm_1a.sh | 329 ------------------ .../multi_condition/tuning/run_tdnn_1a.sh | 286 --------------- .../multi_condition/tuning/run_tdnn_1b.sh | 311 ----------------- .../tuning/run_tdnn_lstm_1a.sh | 323 ----------------- egs/chime6/s5_track1/local/chain/run_tdnn.sh | 2 +- .../chain/tuning/run_cnn_tdnn_lstm_1a.sh | 304 ---------------- .../local/chain/tuning/run_tdnn_lstm_1a.sh | 297 ---------------- .../multi_condition/run_ivector_common.sh | 194 ----------- egs/chime6/s5_track1/local/worn_audio_list | 64 ---- egs/chime6/s5_track1/run.sh | 37 ++ 12 files changed, 38 insertions(+), 2111 deletions(-) delete mode 120000 egs/chime6/s5_track1/local/chain/multi_condition/run_tdnn.sh delete mode 120000 egs/chime6/s5_track1/local/chain/multi_condition/run_tdnn_lstm.sh delete mode 100755 egs/chime6/s5_track1/local/chain/multi_condition/tuning/run_cnn_tdnn_lstm_1a.sh delete mode 100755 egs/chime6/s5_track1/local/chain/multi_condition/tuning/run_tdnn_1a.sh delete mode 
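A minimal usage sketch for the new top-level recipe added above (illustrative only, not part of the patch; the option values are hypothetical placeholders). Any variable declared in run.sh's "Begin configuration section" can be overridden from the command line the same way, since the script sources utils/parse_options.sh right after those declarations:

  # e.g. re-run from the GSS enhancement stage with fewer decoding jobs
  cd egs/chime6/s5_track1
  ./run.sh --stage 4 --decode-nj 8 --enhancement gss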
100755 egs/chime6/s5_track1/local/chain/multi_condition/tuning/run_tdnn_1b.sh delete mode 100755 egs/chime6/s5_track1/local/chain/multi_condition/tuning/run_tdnn_lstm_1a.sh delete mode 100755 egs/chime6/s5_track1/local/chain/tuning/run_cnn_tdnn_lstm_1a.sh delete mode 100755 egs/chime6/s5_track1/local/chain/tuning/run_tdnn_lstm_1a.sh delete mode 100755 egs/chime6/s5_track1/local/nnet3/multi_condition/run_ivector_common.sh delete mode 100644 egs/chime6/s5_track1/local/worn_audio_list diff --git a/egs/chime6/s5_track1/local/chain/multi_condition/run_tdnn.sh b/egs/chime6/s5_track1/local/chain/multi_condition/run_tdnn.sh deleted file mode 120000 index 61f8f499182..00000000000 --- a/egs/chime6/s5_track1/local/chain/multi_condition/run_tdnn.sh +++ /dev/null @@ -1 +0,0 @@ -tuning/run_tdnn_1b.sh \ No newline at end of file diff --git a/egs/chime6/s5_track1/local/chain/multi_condition/run_tdnn_lstm.sh b/egs/chime6/s5_track1/local/chain/multi_condition/run_tdnn_lstm.sh deleted file mode 120000 index 8e647598556..00000000000 --- a/egs/chime6/s5_track1/local/chain/multi_condition/run_tdnn_lstm.sh +++ /dev/null @@ -1 +0,0 @@ -tuning/run_tdnn_lstm_1a.sh \ No newline at end of file diff --git a/egs/chime6/s5_track1/local/chain/multi_condition/tuning/run_cnn_tdnn_lstm_1a.sh b/egs/chime6/s5_track1/local/chain/multi_condition/tuning/run_cnn_tdnn_lstm_1a.sh deleted file mode 100755 index 65d7caed24c..00000000000 --- a/egs/chime6/s5_track1/local/chain/multi_condition/tuning/run_cnn_tdnn_lstm_1a.sh +++ /dev/null @@ -1,329 +0,0 @@ -#!/bin/bash - -# Set -e here so that we catch if any executable fails immediately -set -euo pipefail - -# First the options that are passed through to run_ivector_common.sh -# (some of which are also used in this script directly). -stage=0 -nj=96 -decode_nj=40 -train_set_clean=train_worn_cleaned -train_set_noisy=train_u400k_cleaned -combined_train_set=train_worn_u400k_cleaned -test_sets="dev_worn" -gmm=tri3_cleaned -nnet3_affix=_train_worn_u400k_cleaned_rvb -lm_suffix= -noise_list= - -# The rest are configs specific to this script. Most of the parameters -# are just hardcoded at this level, in the commands below. -affix=_rvb_1a # affix for the TDNN directory name -tree_affix= -train_stage=-10 -get_egs_stage=-10 -decode_iter= - -common_egs_dir= - -hidden_dim=1024 -cell_dim=1024 -projection_dim=256 - -# training options -num_epochs=2 # 2 works better than 4 -chunk_width=140,100,160 -chunk_left_context=40 -chunk_right_context=0 -dropout_schedule='0,0@0.20,0.3@0.50,0' -xent_regularize=0.025 -label_delay=5 - -# decode options -extra_left_context=50 -extra_right_context=0 - -# training options -srand=0 -remove_egs=true - -#decode options -test_online_decoding=false # if true, it will run the last decoding stage. - - -# End configuration section. -echo "$0 $@" # Print the command line for logging - -. ./cmd.sh -. ./path.sh -. ./utils/parse_options.sh - -if ! cuda-compiled; then - cat <$lang/topo - fi -fi - -lat_dir_clean=exp/chain${nnet3_affix}/${gmm}_${train_set_clean}_sp_lats -lat_dir_noisy=exp/chain${nnet3_affix}/${gmm}_${train_set_noisy}_sp_lats -lat_dir=exp/chain${nnet3_affix}/${gmm}_${combined_train_set}_sp_rvb_lats - -if [ $stage -le 11 ]; then - steps/align_fmllr_lats.sh --nj ${nj} --cmd "$train_cmd" --generate-ali-from-lats true \ - data/${train_set_noisy}_sp data/lang $gmm_dir $lat_dir_noisy || exit 1 - rm $lat_dir_noisy/fsts.*.gz -fi - -if [ $stage -le 12 ]; then - # Get the alignments as lattices (gives the chain training more freedom). 
- # use the same num-jobs as the alignments - steps/align_fmllr_lats.sh --nj ${nj} --cmd "$train_cmd" --generate-ali-from-lats true \ - data/${train_set_clean}_sp data/lang $gmm_dir $lat_dir_clean || exit 1 - rm $lat_dir_clean/fsts.*.gz -fi - -if [ $stage -le 13 ]; then - local/reverberate_lat_dir.sh --cmd "$train_cmd" --num-data-reps 2 \ - data/${combined_train_set}_sp_rvb_hires $lat_dir_noisy \ - $lat_dir_clean $lat_dir || exit 1 -fi - -if [ $stage -le 14 ]; then - # Build a tree using our new topology. We know we have alignments for the - # speed-perturbed data (local/nnet3/run_ivector_common.sh made them), so use - # those. The num-leaves is always somewhat less than the num-leaves from - # the GMM baseline. - if [ -f $tree_dir/final.mdl ]; then - echo "$0: $tree_dir/final.mdl already exists, refusing to overwrite it." - exit 1; - fi - steps/nnet3/chain/build_tree.sh \ - --frame-subsampling-factor 3 \ - --context-opts "--context-width=2 --central-position=1" \ - --cmd "$train_cmd" 3500 data/${train_set_clean}_sp \ - $lang $lat_dir_clean $tree_dir -fi - -if [ $stage -le 15 ]; then - mkdir -p $dir - echo "$0: creating neural net configs using the xconfig parser"; - - num_targets=$(tree-info $tree_dir/tree |grep num-pdfs|awk '{print $2}') - learning_rate_factor=$(echo "print 0.5/$xent_regularize" | python) - - lstm_opts="decay-time=40" - - mkdir -p $dir/configs - cat < $dir/configs/network.xconfig - input dim=100 name=ivector - input dim=40 name=input - - # please note that it is important to have input layer with the name=input - # as the layer immediately preceding the fixed-affine-layer to enable - # the use of short notation for the descriptor - fixed-affine-layer name=lda input=Append(-1,0,1,ReplaceIndex(ivector, t, 0)) affine-transform-file=$dir/configs/lda.mat - idct-layer name=idct input=input dim=40 cepstral-lifter=22 affine-transform-file=$dir/configs/idct.mat - - conv-relu-batchnorm-layer name=cnn1 input=idct height-in=40 height-out=20 height-subsample-out=2 time-offsets=-1,0,1 height-offsets=-1,0,1 num-filters-out=256 learning-rate-factor=0.333 max-change=0.25 - conv-relu-batchnorm-layer name=cnn2 input=cnn1 height-in=20 height-out=20 time-offsets=-1,0,1 height-offsets=-1,0,1 num-filters-out=128 - - relu-batchnorm-layer name=affine1 input=lda dim=512 - - # the first splicing is moved before the lda layer, so no splicing here - relu-batchnorm-layer name=tdnn1 input=cnn2 dim=1024 - relu-batchnorm-layer name=tdnn2 input=Append(-1,0,1,affine1) dim=1024 - relu-batchnorm-layer name=tdnn3 input=Append(-1,0,1) dim=1024 - - # check steps/libs/nnet3/xconfig/lstm.py for the other options and defaults - fast-lstmp-layer name=lstm1 cell-dim=1024 recurrent-projection-dim=256 non-recurrent-projection-dim=256 delay=-3 dropout-proportion=0.0 $lstm_opts - relu-batchnorm-layer name=tdnn4 input=Append(-3,0,3) dim=1024 - relu-batchnorm-layer name=tdnn5 input=Append(-3,0,3) dim=1024 - relu-batchnorm-layer name=tdnn6 input=Append(-3,0,3) dim=1024 - fast-lstmp-layer name=lstm2 cell-dim=1024 recurrent-projection-dim=256 non-recurrent-projection-dim=256 delay=-3 dropout-proportion=0.0 $lstm_opts - relu-batchnorm-layer name=tdnn7 input=Append(-3,0,3) dim=1024 - relu-batchnorm-layer name=tdnn8 input=Append(-3,0,3) dim=1024 - relu-batchnorm-layer name=tdnn9 input=Append(-3,0,3) dim=1024 - fast-lstmp-layer name=lstm3 cell-dim=1024 recurrent-projection-dim=256 non-recurrent-projection-dim=256 delay=-3 dropout-proportion=0.0 $lstm_opts - - ## adding the layers for chain branch - output-layer name=output 
input=lstm3 output-delay=$label_delay include-log-softmax=false dim=$num_targets max-change=1.5 - - # adding the layers for xent branch - # This block prints the configs for a separate output that will be - # trained with a cross-entropy objective in the 'chain' models... this - # has the effect of regularizing the hidden parts of the model. we use - # 0.5 / args.xent_regularize as the learning rate factor- the factor of - # 0.5 / args.xent_regularize is suitable as it means the xent - # final-layer learns at a rate independent of the regularization - # constant; and the 0.5 was tuned so as to make the relative progress - # similar in the xent and regular final layers. - output-layer name=output-xent input=lstm3 output-delay=$label_delay dim=$num_targets learning-rate-factor=$learning_rate_factor max-change=1.5 - -EOF - steps/nnet3/xconfig_to_configs.py --xconfig-file $dir/configs/network.xconfig --config-dir $dir/configs/ -fi - -if [ $stage -le 16 ]; then - if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! -d $dir/egs/storage ]; then - utils/create_split_dir.pl \ - /export/b0{3,4,5,6}/$USER/kaldi-data/egs/chime5-$(date +'%m_%d_%H_%M')/s5/$dir/egs/storage $dir/egs/storage - fi - - mkdir -p $dir/egs - touch $dir/egs/.nodelete # keep egs around when that run dies. - - steps/nnet3/chain/train.py --stage=$train_stage \ - --cmd="$train_cmd --mem 4G" \ - --feat.online-ivector-dir=$train_ivector_dir \ - --feat.cmvn-opts="--norm-means=false --norm-vars=false" \ - --chain.xent-regularize $xent_regularize \ - --chain.leaky-hmm-coefficient=0.1 \ - --chain.l2-regularize=0.00005 \ - --chain.apply-deriv-weights=false \ - --chain.lm-opts="--num-extra-lm-states=2000" \ - --trainer.dropout-schedule $dropout_schedule \ - --trainer.num-chunk-per-minibatch 32 \ - --trainer.frames-per-iter 1500000 \ - --trainer.max-param-change 2.0 \ - --trainer.num-epochs $num_epochs \ - --trainer.srand=$srand \ - --trainer.optimization.shrink-value 0.99 \ - --trainer.optimization.num-jobs-initial=3 \ - --trainer.optimization.num-jobs-final=16 \ - --trainer.optimization.initial-effective-lrate=0.001 \ - --trainer.optimization.final-effective-lrate=0.0001 \ - --trainer.optimization.momentum=0.0 \ - --trainer.deriv-truncate-margin 8 \ - --egs.stage $get_egs_stage \ - --egs.opts="--frames-overlap-per-eg 0" \ - --egs.chunk-width=$chunk_width \ - --egs.chunk-left-context=$chunk_left_context \ - --egs.chunk-right-context=$chunk_right_context \ - --egs.chunk-left-context-initial=0 \ - --egs.chunk-right-context-final=0 \ - --egs.dir="$common_egs_dir" \ - --cleanup.remove-egs=$remove_egs \ - --feat-dir=$train_data_dir \ - --tree-dir=$tree_dir \ - --lat-dir=$lat_dir \ - --dir=$dir || exit 1; -fi - -if [ $stage -le 17 ]; then - # Note: it's not important to give mkgraph.sh the lang directory with the - # matched topology (since it gets the topology file from the model). 
- utils/mkgraph.sh \ - --self-loop-scale 1.0 data/lang${lm_suffix}/ \ - $tree_dir $tree_dir/graph${lm_suffix} || exit 1; -fi - -if [ -z "$extra_left_context" ]; then - extra_left_context=$chunk_left_context -fi -if [ -z "$extra_right_context" ]; then - extra_right_context=$chunk_right_context -fi - -if [ $stage -le 18 ]; then - frames_per_chunk=$(echo $chunk_width | cut -d, -f1) - rm $dir/.error 2>/dev/null || true - - for data in $test_sets; do - ( - steps/nnet3/decode.sh \ - --acwt 1.0 --post-decode-acwt 10.0 \ - --extra-left-context $extra_left_context \ - --extra-right-context $extra_right_context \ - --extra-left-context-initial 0 \ - --extra-right-context-final 0 \ - --frames-per-chunk $frames_per_chunk \ - --nj $decode_nj --cmd "$decode_cmd" --num-threads 4 \ - --online-ivector-dir exp/nnet3${nnet3_affix}/ivectors_${data}_hires \ - $tree_dir/graph${lm_suffix} data/${data}_hires ${dir}/decode${lm_suffix}_${data} || exit 1 - ) || touch $dir/.error & - done - wait - [ -f $dir/.error ] && echo "$0: there was a problem while decoding" && exit 1 -fi - -# Not testing the 'looped' decoding separately, because for -# TDNN systems it would give exactly the same results as the -# normal decoding. - -if $test_online_decoding && [ $stage -le 19 ]; then - # note: if the features change (e.g. you add pitch features), you will have to - # change the options of the following command line. - steps/online/nnet3/prepare_online_decoding.sh \ - --mfcc-config conf/mfcc_hires.conf \ - $lang exp/nnet3${nnet3_affix}/extractor ${dir} ${dir}_online - - rm $dir/.error 2>/dev/null || true - - for data in $test_sets; do - ( - nspk=$(wc -l $lang/topo - fi -fi - -lat_dir_clean=exp/chain${nnet3_affix}/${gmm}_${train_set_clean}_sp_lats -lat_dir_noisy=exp/chain${nnet3_affix}/${gmm}_${train_set_noisy}_sp_lats -lat_dir=exp/chain${nnet3_affix}/${gmm}_${combined_train_set}_sp_rvb_lats - -if [ $stage -le 11 ]; then - steps/align_fmllr_lats.sh --nj ${nj} --cmd "$train_cmd" --generate-ali-from-lats true \ - data/${train_set_noisy}_sp data/lang $gmm_dir $lat_dir_noisy || exit 1 - rm $lat_dir_noisy/fsts.*.gz -fi - -if [ $stage -le 12 ]; then - # Get the alignments as lattices (gives the chain training more freedom). - # use the same num-jobs as the alignments - steps/align_fmllr_lats.sh --nj ${nj} --cmd "$train_cmd" --generate-ali-from-lats true \ - data/${train_set_clean}_sp data/lang $gmm_dir $lat_dir_clean || exit 1 - rm $lat_dir_clean/fsts.*.gz -fi - -if [ $stage -le 13 ]; then - local/reverberate_lat_dir.sh --cmd "$train_cmd" --num-data-reps 2 \ - data/${combined_train_set}_sp_rvb_hires $lat_dir_noisy \ - $lat_dir_clean $lat_dir || exit 1 -fi - -if [ $stage -le 14 ]; then - # Build a tree using our new topology. We know we have alignments for the - # speed-perturbed data (local/nnet3/run_ivector_common.sh made them), so use - # those. The num-leaves is always somewhat less than the num-leaves from - # the GMM baseline. - if [ -f $tree_dir/final.mdl ]; then - echo "$0: $tree_dir/final.mdl already exists, refusing to overwrite it." 
- exit 1; - fi - steps/nnet3/chain/build_tree.sh \ - --frame-subsampling-factor 3 \ - --cmd "$train_cmd" 3500 data/${train_set_clean}_sp \ - $lang $lat_dir_clean $tree_dir -fi - -if [ $stage -le 15 ]; then - mkdir -p $dir - echo "$0: creating neural net configs using the xconfig parser"; - - num_targets=$(tree-info $tree_dir/tree |grep num-pdfs|awk '{print $2}') - learning_rate_factor=$(echo "print 0.5/$xent_regularize" | python) - opts="l2-regularize=0.05" - output_opts="l2-regularize=0.01 bottleneck-dim=320" - - mkdir -p $dir/configs - cat < $dir/configs/network.xconfig - input dim=100 name=ivector - input dim=40 name=input - - # please note that it is important to have input layer with the name=input - # as the layer immediately preceding the fixed-affine-layer to enable - # the use of short notation for the descriptor - fixed-affine-layer name=lda input=Append(-2,-1,0,1,2,ReplaceIndex(ivector, t, 0)) affine-transform-file=$dir/configs/lda.mat - - # the first splicing is moved before the lda layer, so no splicing here - relu-batchnorm-layer name=tdnn1 $opts dim=$hidden_dim - relu-batchnorm-layer name=tdnn2 $opts dim=$hidden_dim input=Append(-1,0,1) - relu-batchnorm-layer name=tdnn3 $opts dim=$hidden_dim - relu-batchnorm-layer name=tdnn4 $opts dim=$hidden_dim input=Append(-1,0,1) - relu-batchnorm-layer name=tdnn5 $opts dim=$hidden_dim - relu-batchnorm-layer name=tdnn6 $opts dim=$hidden_dim input=Append(-3,0,3) - relu-batchnorm-layer name=tdnn7 $opts dim=$hidden_dim input=Append(-3,0,3) - relu-batchnorm-layer name=tdnn8 $opts dim=$hidden_dim input=Append(-6,-3,0) - - ## adding the layers for chain branch - relu-batchnorm-layer name=prefinal-chain $opts dim=$hidden_dim target-rms=0.5 - output-layer name=output include-log-softmax=false $output_opts dim=$num_targets max-change=1.5 - - # adding the layers for xent branch - # This block prints the configs for a separate output that will be - # trained with a cross-entropy objective in the 'chain' models... this - # has the effect of regularizing the hidden parts of the model. we use - # 0.5 / args.xent_regularize as the learning rate factor- the factor of - # 0.5 / args.xent_regularize is suitable as it means the xent - # final-layer learns at a rate independent of the regularization - # constant; and the 0.5 was tuned so as to make the relative progress - # similar in the xent and regular final layers. - relu-batchnorm-layer name=prefinal-xent input=tdnn8 $opts dim=$hidden_dim target-rms=0.5 - output-layer name=output-xent $output_opts dim=$num_targets learning-rate-factor=$learning_rate_factor max-change=1.5 -EOF - steps/nnet3/xconfig_to_configs.py --xconfig-file $dir/configs/network.xconfig --config-dir $dir/configs/ -fi - -if [ $stage -le 16 ]; then - if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! -d $dir/egs/storage ]; then - utils/create_split_dir.pl \ - /export/b0{3,4,5,6}/$USER/kaldi-data/egs/chime5-$(date +'%m_%d_%H_%M')/s5/$dir/egs/storage $dir/egs/storage - fi - - mkdir -p $dir/egs - touch $dir/egs/.nodelete # keep egs around when that run dies. 
- - steps/nnet3/chain/train.py --stage=$train_stage \ - --cmd="$train_cmd --mem 4G" \ - --feat.online-ivector-dir=$train_ivector_dir \ - --feat.cmvn-opts="--norm-means=false --norm-vars=false" \ - --chain.xent-regularize $xent_regularize \ - --chain.leaky-hmm-coefficient=0.1 \ - --chain.l2-regularize=0.00005 \ - --chain.apply-deriv-weights=false \ - --chain.lm-opts="--num-extra-lm-states=2000" \ - --trainer.num-chunk-per-minibatch 256,128,64 \ - --trainer.frames-per-iter 1500000 \ - --trainer.max-param-change 2.0 \ - --trainer.num-epochs $num_epochs \ - --trainer.srand=$srand \ - --trainer.optimization.num-jobs-initial=3 \ - --trainer.optimization.num-jobs-final=16 \ - --trainer.optimization.initial-effective-lrate=0.001 \ - --trainer.optimization.final-effective-lrate=0.0001 \ - --trainer.optimization.momentum=0.0 \ - --egs.stage $get_egs_stage \ - --egs.opts="--frames-overlap-per-eg 0" \ - --egs.chunk-width=$chunk_width \ - --egs.dir="$common_egs_dir" \ - --cleanup.remove-egs=$remove_egs \ - --feat-dir=$train_data_dir \ - --tree-dir=$tree_dir \ - --lat-dir=$lat_dir \ - --dir=$dir || exit 1; -fi - -if [ $stage -le 17 ]; then - # Note: it's not important to give mkgraph.sh the lang directory with the - # matched topology (since it gets the topology file from the model). - utils/mkgraph.sh \ - --self-loop-scale 1.0 data/lang${lm_suffix}/ \ - $tree_dir $tree_dir/graph${lm_suffix} || exit 1; -fi - -if [ $stage -le 18 ]; then - frames_per_chunk=$(echo $chunk_width | cut -d, -f1) - rm $dir/.error 2>/dev/null || true - - for data in $test_sets; do - ( - steps/nnet3/decode.sh \ - --acwt 1.0 --post-decode-acwt 10.0 \ - --frames-per-chunk $frames_per_chunk \ - --nj $decode_nj --cmd "$decode_cmd" --num-threads 4 \ - --online-ivector-dir exp/nnet3${nnet3_affix}/ivectors_${data}_hires \ - $tree_dir/graph${lm_suffix} data/${data}_hires ${dir}/decode${lm_suffix}_${data} || exit 1 - ) || touch $dir/.error & - done - wait - [ -f $dir/.error ] && echo "$0: there was a problem while decoding" && exit 1 -fi - -# Not testing the 'looped' decoding separately, because for -# TDNN systems it would give exactly the same results as the -# normal decoding. - -if $test_online_decoding && [ $stage -le 19 ]; then - # note: if the features change (e.g. you add pitch features), you will have to - # change the options of the following command line. - steps/online/nnet3/prepare_online_decoding.sh \ - --mfcc-config conf/mfcc_hires.conf \ - $lang exp/nnet3${nnet3_affix}/extractor ${dir} ${dir}_online - - rm $dir/.error 2>/dev/null || true - - for data in $test_sets; do - ( - nspk=$(wc -l 6034 combine=-0.058->-0.057 (over 8) xent:train/valid[261,393,final]=(-1.20,-0.897,-0.894/-1.20,-0.919,-0.911) logprob:train/valid[261,393,final]=(-0.090,-0.059,-0.058/-0.098,-0.073,-0.073) - -set -e - -# configs for 'chain' -stage=0 -nj=96 -decode_nj=40 -train_set_clean=train_worn_cleaned -train_set_noisy=train_u400k_cleaned -combined_train_set=train_worn_u400k_cleaned -test_sets="dev_worn dev_beamformit_ref" -gmm=tri3_cleaned -nnet3_affix=_train_worn_u400k_cleaned_rvb -lm_suffix= -noise_list= - -# The rest are configs specific to this script. Most of the parameters -# are just hardcoded at this level, in the commands below. 
-affix=_rvb_1b # affix for the TDNN directory name -tree_affix= -train_stage=-10 -get_egs_stage=-10 -decode_iter= - -num_epochs=4 -common_egs_dir= -# training options -# training chunk-options -chunk_width=140,100,160 -xent_regularize=0.1 -dropout_schedule='0,0@0.20,0.5@0.50,0' - -# training options -srand=0 -remove_egs=true - -#decode options -test_online_decoding=false # if true, it will run the last decoding stage. - -# End configuration section. -echo "$0 $@" # Print the command line for logging - -. ./cmd.sh -. ./path.sh -. ./utils/parse_options.sh - -if ! cuda-compiled; then - cat <$lang/topo - fi -fi - -lat_dir_clean=exp/chain${nnet3_affix}/${gmm}_${train_set_clean}_sp_lats -lat_dir_noisy=exp/chain${nnet3_affix}/${gmm}_${train_set_noisy}_sp_lats -lat_dir=exp/chain${nnet3_affix}/${gmm}_${combined_train_set}_sp_rvb_lats - -if [ $stage -le 11 ]; then - steps/align_fmllr_lats.sh --nj ${nj} --cmd "$train_cmd" --generate-ali-from-lats true \ - data/${train_set_noisy}_sp data/lang $gmm_dir $lat_dir_noisy || exit 1 - rm $lat_dir_noisy/fsts.*.gz -fi - -if [ $stage -le 12 ]; then - # Get the alignments as lattices (gives the chain training more freedom). - # use the same num-jobs as the alignments - steps/align_fmllr_lats.sh --nj ${nj} --cmd "$train_cmd" --generate-ali-from-lats true \ - data/${train_set_clean}_sp data/lang $gmm_dir $lat_dir_clean || exit 1 - rm $lat_dir_clean/fsts.*.gz -fi - -if [ $stage -le 13 ]; then - local/reverberate_lat_dir.sh --cmd "$train_cmd" --num-data-reps 2 \ - data/${combined_train_set}_sp_rvb_hires $lat_dir_noisy \ - $lat_dir_clean $lat_dir || exit 1 -fi - -if [ $stage -le 14 ]; then - # Build a tree using our new topology. We know we have alignments for the - # speed-perturbed data (local/nnet3/run_ivector_common.sh made them), so use - # those. The num-leaves is always somewhat less than the num-leaves from - # the GMM baseline. - if [ -f $tree_dir/final.mdl ]; then - echo "$0: $tree_dir/final.mdl already exists, refusing to overwrite it." 
- exit 1; - fi - steps/nnet3/chain/build_tree.sh \ - --frame-subsampling-factor 3 \ - --cmd "$train_cmd" 3500 data/${train_set_clean}_sp \ - $lang $lat_dir_clean $tree_dir -fi - -if [ $stage -le 15 ]; then - mkdir -p $dir - echo "$0: creating neural net configs using the xconfig parser"; - - num_targets=$(tree-info $tree_dir/tree |grep num-pdfs|awk '{print $2}') - learning_rate_factor=$(echo "print 0.5/$xent_regularize" | python) - affine_opts="l2-regularize=0.01 dropout-proportion=0.0 dropout-per-dim=true dropout-per-dim-continuous=true" - tdnnf_opts="l2-regularize=0.01 dropout-proportion=0.0 bypass-scale=0.66" - linear_opts="l2-regularize=0.01 orthonormal-constraint=-1.0" - prefinal_opts="l2-regularize=0.01" - output_opts="l2-regularize=0.002" - - mkdir -p $dir/configs - cat < $dir/configs/network.xconfig - input dim=100 name=ivector - input dim=40 name=input - - # please note that it is important to have input layer with the name=input - # as the layer immediately preceding the fixed-affine-layer to enable - # the use of short notation for the descriptor - fixed-affine-layer name=lda input=Append(-1,0,1,ReplaceIndex(ivector, t, 0)) affine-transform-file=$dir/configs/lda.mat - - # the first splicing is moved before the lda layer, so no splicing here - relu-batchnorm-dropout-layer name=tdnn1 $affine_opts dim=1536 - tdnnf-layer name=tdnnf2 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=1 - tdnnf-layer name=tdnnf3 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=1 - tdnnf-layer name=tdnnf4 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=1 - tdnnf-layer name=tdnnf5 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=0 - tdnnf-layer name=tdnnf6 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3 - tdnnf-layer name=tdnnf7 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3 - tdnnf-layer name=tdnnf8 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3 - tdnnf-layer name=tdnnf9 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3 - tdnnf-layer name=tdnnf10 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3 - tdnnf-layer name=tdnnf11 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3 - tdnnf-layer name=tdnnf12 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3 - tdnnf-layer name=tdnnf13 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3 - tdnnf-layer name=tdnnf14 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3 - tdnnf-layer name=tdnnf15 $tdnnf_opts dim=1536 bottleneck-dim=160 time-stride=3 - linear-component name=prefinal-l dim=256 $linear_opts - - prefinal-layer name=prefinal-chain input=prefinal-l $prefinal_opts big-dim=1536 small-dim=256 - output-layer name=output include-log-softmax=false dim=$num_targets $output_opts - - prefinal-layer name=prefinal-xent input=prefinal-l $prefinal_opts big-dim=1536 small-dim=256 - output-layer name=output-xent dim=$num_targets learning-rate-factor=$learning_rate_factor $output_opts -EOF - steps/nnet3/xconfig_to_configs.py --xconfig-file $dir/configs/network.xconfig --config-dir $dir/configs/ -fi - -if [ $stage -le 16 ]; then - if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! -d $dir/egs/storage ]; then - utils/create_split_dir.pl \ - /export/b0{3,4,5,6}/$USER/kaldi-data/egs/chime5-$(date +'%m_%d_%H_%M')/s5/$dir/egs/storage $dir/egs/storage - fi - - mkdir -p $dir/egs - touch $dir/egs/.nodelete # keep egs around when that run dies. 
- - steps/nnet3/chain/train.py --stage=$train_stage \ - --cmd="$train_cmd --mem 4G" \ - --feat.online-ivector-dir=$train_ivector_dir \ - --feat.cmvn-opts="--norm-means=false --norm-vars=false" \ - --chain.xent-regularize $xent_regularize \ - --chain.leaky-hmm-coefficient 0.1 \ - --chain.l2-regularize 0.0 \ - --chain.apply-deriv-weights false \ - --chain.lm-opts="--num-extra-lm-states=2000" \ - --trainer.dropout-schedule "$dropout_schedule" \ - --trainer.add-option="--optimization.memory-compression-level=2" \ - --trainer.num-chunk-per-minibatch 64 \ - --trainer.frames-per-iter 1500000 \ - --trainer.max-param-change 2.0 \ - --trainer.num-epochs $num_epochs \ - --trainer.optimization.num-jobs-initial 3 \ - --trainer.optimization.num-jobs-final 16 \ - --trainer.optimization.initial-effective-lrate 0.00025 \ - --trainer.optimization.final-effective-lrate 0.000025 \ - --egs.stage $get_egs_stage \ - --egs.opts "--frames-overlap-per-eg 0" \ - --egs.chunk-width $chunk_width \ - --egs.dir "$common_egs_dir" \ - --cleanup.remove-egs $remove_egs \ - --feat-dir=$train_data_dir \ - --tree-dir=$tree_dir \ - --lat-dir=$lat_dir \ - --dir=$dir || exit 1; -fi - -if [ $stage -le 17 ]; then - # Note: it's not important to give mkgraph.sh the lang directory with the - # matched topology (since it gets the topology file from the model). - utils/mkgraph.sh \ - --self-loop-scale 1.0 data/lang${lm_suffix}/ \ - $tree_dir $tree_dir/graph${lm_suffix} || exit 1; -fi - -if [ $stage -le 18 ]; then - frames_per_chunk=$(echo $chunk_width | cut -d, -f1) - rm $dir/.error 2>/dev/null || true - - for data in $test_sets; do - ( - steps/nnet3/decode.sh \ - --acwt 1.0 --post-decode-acwt 10.0 \ - --frames-per-chunk $frames_per_chunk \ - --nj $decode_nj --cmd "$decode_cmd" --num-threads 4 \ - --online-ivector-dir exp/nnet3${nnet3_affix}/ivectors_${data}_hires \ - $tree_dir/graph${lm_suffix} data/${data}_hires ${dir}/decode${lm_suffix}_${data} || exit 1 - ) || touch $dir/.error & - done - wait - [ -f $dir/.error ] && echo "$0: there was a problem while decoding" && exit 1 -fi - -# Not testing the 'looped' decoding separately, because for -# TDNN systems it would give exactly the same results as the -# normal decoding. - -if $test_online_decoding && [ $stage -le 19 ]; then - # note: if the features change (e.g. you add pitch features), you will have to - # change the options of the following command line. - steps/online/nnet3/prepare_online_decoding.sh \ - --mfcc-config conf/mfcc_hires.conf \ - $lang exp/nnet3${nnet3_affix}/extractor ${dir} ${dir}_online - - rm $dir/.error 2>/dev/null || true - - for data in $test_sets; do - ( - nspk=$(wc -l $lang/topo - fi -fi - -lat_dir_clean=exp/chain${nnet3_affix}/${gmm}_${train_set_clean}_sp_lats -lat_dir_noisy=exp/chain${nnet3_affix}/${gmm}_${train_set_noisy}_sp_lats -lat_dir=exp/chain${nnet3_affix}/${gmm}_${combined_train_set}_sp_rvb_lats - -if [ $stage -le 11 ]; then - steps/align_fmllr_lats.sh --nj ${nj} --cmd "$train_cmd" --generate-ali-from-lats true \ - data/${train_set_noisy}_sp data/lang $gmm_dir $lat_dir_noisy || exit 1 - rm $lat_dir_noisy/fsts.*.gz -fi - -if [ $stage -le 12 ]; then - # Get the alignments as lattices (gives the chain training more freedom). 
- # use the same num-jobs as the alignments - steps/align_fmllr_lats.sh --nj ${nj} --cmd "$train_cmd" --generate-ali-from-lats true \ - data/${train_set_clean}_sp data/lang $gmm_dir $lat_dir_clean || exit 1 - rm $lat_dir_clean/fsts.*.gz -fi - -if [ $stage -le 13 ]; then - local/reverberate_lat_dir.sh --cmd "$train_cmd" --num-data-reps 2 \ - data/${combined_train_set}_sp_rvb_hires $lat_dir_noisy \ - $lat_dir_clean $lat_dir || exit 1 -fi - -if [ $stage -le 14 ]; then - # Build a tree using our new topology. We know we have alignments for the - # speed-perturbed data (local/nnet3/run_ivector_common.sh made them), so use - # those. The num-leaves is always somewhat less than the num-leaves from - # the GMM baseline. - if [ -f $tree_dir/final.mdl ]; then - echo "$0: $tree_dir/final.mdl already exists, refusing to overwrite it." - exit 1; - fi - steps/nnet3/chain/build_tree.sh \ - --frame-subsampling-factor 3 \ - --context-opts "--context-width=2 --central-position=1" \ - --cmd "$train_cmd" 3500 data/${train_set_clean}_sp \ - $lang $lat_dir_clean $tree_dir -fi - -if [ $stage -le 15 ]; then - mkdir -p $dir - echo "$0: creating neural net configs using the xconfig parser"; - - num_targets=$(tree-info $tree_dir/tree |grep num-pdfs|awk '{print $2}') - learning_rate_factor=$(echo "print 0.5/$xent_regularize" | python) - - lstm_opts="decay-time=40" - - mkdir -p $dir/configs - cat < $dir/configs/network.xconfig - input dim=100 name=ivector - input dim=40 name=input - - # please note that it is important to have input layer with the name=input - # as the layer immediately preceding the fixed-affine-layer to enable - # the use of short notation for the descriptor - fixed-affine-layer name=lda input=Append(-2,-1,0,1,2,ReplaceIndex(ivector, t, 0)) affine-transform-file=$dir/configs/lda.mat - - # the first splicing is moved before the lda layer, so no splicing here - relu-batchnorm-layer name=tdnn1 dim=$hidden_dim - relu-batchnorm-layer name=tdnn2 input=Append(-1,0,1) dim=$hidden_dim - relu-batchnorm-layer name=tdnn3 input=Append(-1,0,1) dim=$hidden_dim - - fast-lstmp-layer name=lstm1 cell-dim=$cell_dim recurrent-projection-dim=$projection_dim non-recurrent-projection-dim=$projection_dim delay=-3 dropout-proportion=0.0 $lstm_opts - relu-batchnorm-layer name=tdnn4 input=Append(-3,0,3) dim=$hidden_dim - relu-batchnorm-layer name=tdnn5 input=Append(-3,0,3) dim=$hidden_dim - fast-lstmp-layer name=lstm2 cell-dim=$cell_dim recurrent-projection-dim=$projection_dim non-recurrent-projection-dim=$projection_dim delay=-3 dropout-proportion=0.0 $lstm_opts - relu-batchnorm-layer name=tdnn6 input=Append(-3,0,3) dim=$hidden_dim - relu-batchnorm-layer name=tdnn7 input=Append(-3,0,3) dim=$hidden_dim - fast-lstmp-layer name=lstm3 cell-dim=$cell_dim recurrent-projection-dim=$projection_dim non-recurrent-projection-dim=$projection_dim delay=-3 dropout-proportion=0.0 $lstm_opts - relu-batchnorm-layer name=tdnn8 input=Append(-3,0,3) dim=$hidden_dim - relu-batchnorm-layer name=tdnn9 input=Append(-3,0,3) dim=$hidden_dim - fast-lstmp-layer name=lstm4 cell-dim=$cell_dim recurrent-projection-dim=$projection_dim non-recurrent-projection-dim=$projection_dim delay=-3 dropout-proportion=0.0 $lstm_opts - - ## adding the layers for chain branch - output-layer name=output input=lstm4 output-delay=$label_delay include-log-softmax=false dim=$num_targets max-change=1.5 - - # adding the layers for xent branch - # This block prints the configs for a separate output that will be - # trained with a cross-entropy objective in the 'chain' models... 
this - # has the effect of regularizing the hidden parts of the model. we use - # 0.5 / args.xent_regularize as the learning rate factor- the factor of - # 0.5 / args.xent_regularize is suitable as it means the xent - # final-layer learns at a rate independent of the regularization - # constant; and the 0.5 was tuned so as to make the relative progress - # similar in the xent and regular final layers. - output-layer name=output-xent input=lstm4 output-delay=$label_delay dim=$num_targets learning-rate-factor=$learning_rate_factor max-change=1.5 - -EOF - steps/nnet3/xconfig_to_configs.py --xconfig-file $dir/configs/network.xconfig --config-dir $dir/configs/ -fi - -if [ $stage -le 16 ]; then - if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! -d $dir/egs/storage ]; then - utils/create_split_dir.pl \ - /export/b0{3,4,5,6}/$USER/kaldi-data/egs/chime5-$(date +'%m_%d_%H_%M')/s5/$dir/egs/storage $dir/egs/storage - fi - - mkdir -p $dir/egs - touch $dir/egs/.nodelete # keep egs around when that run dies. - - steps/nnet3/chain/train.py --stage=$train_stage \ - --cmd="$train_cmd --mem 4G" \ - --feat.online-ivector-dir=$train_ivector_dir \ - --feat.cmvn-opts="--norm-means=false --norm-vars=false" \ - --chain.xent-regularize $xent_regularize \ - --chain.leaky-hmm-coefficient=0.1 \ - --chain.l2-regularize=0.00005 \ - --chain.apply-deriv-weights=false \ - --chain.lm-opts="--num-extra-lm-states=2000" \ - --trainer.dropout-schedule $dropout_schedule \ - --trainer.num-chunk-per-minibatch 64,32 \ - --trainer.frames-per-iter 1500000 \ - --trainer.max-param-change 2.0 \ - --trainer.num-epochs $num_epochs \ - --trainer.srand=$srand \ - --trainer.optimization.shrink-value 0.99 \ - --trainer.optimization.num-jobs-initial=3 \ - --trainer.optimization.num-jobs-final=16 \ - --trainer.optimization.initial-effective-lrate=0.001 \ - --trainer.optimization.final-effective-lrate=0.0001 \ - --trainer.optimization.momentum=0.0 \ - --trainer.deriv-truncate-margin 8 \ - --egs.stage $get_egs_stage \ - --egs.opts="--frames-overlap-per-eg 0" \ - --egs.chunk-width=$chunk_width \ - --egs.chunk-left-context=$chunk_left_context \ - --egs.chunk-right-context=$chunk_right_context \ - --egs.chunk-left-context-initial=0 \ - --egs.chunk-right-context-final=0 \ - --egs.dir="$common_egs_dir" \ - --cleanup.remove-egs=$remove_egs \ - --feat-dir=$train_data_dir \ - --tree-dir=$tree_dir \ - --lat-dir=$lat_dir \ - --dir=$dir || exit 1; -fi - -if [ $stage -le 17 ]; then - # Note: it's not important to give mkgraph.sh the lang directory with the - # matched topology (since it gets the topology file from the model). 
- utils/mkgraph.sh \ - --self-loop-scale 1.0 data/lang${lm_suffix}/ \ - $tree_dir $tree_dir/graph${lm_suffix} || exit 1; -fi - -if [ -z "$extra_left_context" ]; then - extra_left_context=$chunk_left_context -fi -if [ -z "$extra_right_context" ]; then - extra_right_context=$chunk_right_context -fi - -if [ $stage -le 18 ]; then - frames_per_chunk=$(echo $chunk_width | cut -d, -f1) - rm $dir/.error 2>/dev/null || true - - for data in $test_sets; do - ( - steps/nnet3/decode.sh \ - --acwt 1.0 --post-decode-acwt 10.0 \ - --extra-left-context $extra_left_context \ - --extra-right-context $extra_right_context \ - --extra-left-context-initial 0 \ - --extra-right-context-final 0 \ - --frames-per-chunk $frames_per_chunk \ - --nj $decode_nj --cmd "$decode_cmd" --num-threads 4 \ - --online-ivector-dir exp/nnet3${nnet3_affix}/ivectors_${data}_hires \ - $tree_dir/graph${lm_suffix} data/${data}_hires ${dir}/decode${lm_suffix}_${data} || exit 1 - ) || touch $dir/.error & - done - wait - [ -f $dir/.error ] && echo "$0: there was a problem while decoding" && exit 1 -fi - -# Not testing the 'looped' decoding separately, because for -# TDNN systems it would give exactly the same results as the -# normal decoding. - -if $test_online_decoding && [ $stage -le 19 ]; then - # note: if the features change (e.g. you add pitch features), you will have to - # change the options of the following command line. - steps/online/nnet3/prepare_online_decoding.sh \ - --mfcc-config conf/mfcc_hires.conf \ - $lang exp/nnet3${nnet3_affix}/extractor ${dir} ${dir}_online - - rm $dir/.error 2>/dev/null || true - - for data in $test_sets; do - ( - nspk=$(wc -l $lang/topo - fi -fi - -if [ $stage -le 11 ]; then - # Get the alignments as lattices (gives the chain training more freedom). - # use the same num-jobs as the alignments - steps/align_fmllr_lats.sh --nj ${nj} --cmd "$train_cmd" ${lores_train_data_dir} \ - data/lang $gmm_dir $lat_dir - rm $lat_dir/fsts.*.gz # save space -fi - -if [ $stage -le 12 ]; then - # Build a tree using our new topology. We know we have alignments for the - # speed-perturbed data (local/nnet3/run_ivector_common.sh made them), so use - # those. The num-leaves is always somewhat less than the num-leaves from - # the GMM baseline. - if [ -f $tree_dir/final.mdl ]; then - echo "$0: $tree_dir/final.mdl already exists, refusing to overwrite it." 
-    exit 1;
-  fi
-  steps/nnet3/chain/build_tree.sh \
-    --frame-subsampling-factor 3 \
-    --context-opts "--context-width=2 --central-position=1" \
-    --cmd "$train_cmd" 3500 ${lores_train_data_dir} \
-    $lang $ali_dir $tree_dir
-fi
-
-if [ $stage -le 13 ]; then
-  mkdir -p $dir
-  echo "$0: creating neural net configs using the xconfig parser";
-
-  num_targets=$(tree-info $tree_dir/tree |grep num-pdfs|awk '{print $2}')
-  learning_rate_factor=$(echo "print 0.5/$xent_regularize" | python)
-
-  lstm_opts="decay-time=40"
-
-  mkdir -p $dir/configs
-  cat <<EOF > $dir/configs/network.xconfig
-  input dim=100 name=ivector
-  input dim=40 name=input
-
-  # please note that it is important to have input layer with the name=input
-  # as the layer immediately preceding the fixed-affine-layer to enable
-  # the use of short notation for the descriptor
-  fixed-affine-layer name=lda input=Append(-1,0,1,ReplaceIndex(ivector, t, 0)) affine-transform-file=$dir/configs/lda.mat
-  idct-layer name=idct input=input dim=40 cepstral-lifter=22 affine-transform-file=$dir/configs/idct.mat
-
-  conv-relu-batchnorm-layer name=cnn1 input=idct height-in=40 height-out=20 height-subsample-out=2 time-offsets=-1,0,1 height-offsets=-1,0,1 num-filters-out=256 learning-rate-factor=0.333 max-change=0.25
-  conv-relu-batchnorm-layer name=cnn2 input=cnn1 height-in=20 height-out=20 time-offsets=-1,0,1 height-offsets=-1,0,1 num-filters-out=128
-
-  relu-batchnorm-layer name=affine1 input=lda dim=512
-
-  # the first splicing is moved before the lda layer, so no splicing here
-  relu-batchnorm-layer name=tdnn1 input=cnn2 dim=1024
-  relu-batchnorm-layer name=tdnn2 input=Append(-1,0,1,affine1) dim=1024
-  relu-batchnorm-layer name=tdnn3 input=Append(-1,0,1) dim=1024
-
-  # check steps/libs/nnet3/xconfig/lstm.py for the other options and defaults
-  fast-lstmp-layer name=lstm1 cell-dim=1024 recurrent-projection-dim=256 non-recurrent-projection-dim=256 delay=-3 dropout-proportion=0.0 $lstm_opts
-  relu-batchnorm-layer name=tdnn4 input=Append(-3,0,3) dim=1024
-  relu-batchnorm-layer name=tdnn5 input=Append(-3,0,3) dim=1024
-  relu-batchnorm-layer name=tdnn6 input=Append(-3,0,3) dim=1024
-  fast-lstmp-layer name=lstm2 cell-dim=1024 recurrent-projection-dim=256 non-recurrent-projection-dim=256 delay=-3 dropout-proportion=0.0 $lstm_opts
-  relu-batchnorm-layer name=tdnn7 input=Append(-3,0,3) dim=1024
-  relu-batchnorm-layer name=tdnn8 input=Append(-3,0,3) dim=1024
-  relu-batchnorm-layer name=tdnn9 input=Append(-3,0,3) dim=1024
-  fast-lstmp-layer name=lstm3 cell-dim=1024 recurrent-projection-dim=256 non-recurrent-projection-dim=256 delay=-3 dropout-proportion=0.0 $lstm_opts
-
-  ## adding the layers for chain branch
-  output-layer name=output input=lstm3 output-delay=$label_delay include-log-softmax=false dim=$num_targets max-change=1.5
-
-  # adding the layers for xent branch
-  # This block prints the configs for a separate output that will be
-  # trained with a cross-entropy objective in the 'chain' models... this
-  # has the effect of regularizing the hidden parts of the model.  we use
-  # 0.5 / args.xent_regularize as the learning rate factor- the factor of
-  # 0.5 / args.xent_regularize is suitable as it means the xent
-  # final-layer learns at a rate independent of the regularization
-  # constant; and the 0.5 was tuned so as to make the relative progress
-  # similar in the xent and regular final layers.
-  output-layer name=output-xent input=lstm3 output-delay=$label_delay dim=$num_targets learning-rate-factor=$learning_rate_factor max-change=1.5
-
-EOF
-
-  steps/nnet3/xconfig_to_configs.py --xconfig-file $dir/configs/network.xconfig --config-dir $dir/configs/
-fi
-
-if [ $stage -le 14 ]; then
-  if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! -d $dir/egs/storage ]; then
-    utils/create_split_dir.pl \
-      /export/b0{3,4,5,6}/$USER/kaldi-data/egs/chime5-$(date +'%m_%d_%H_%M')/s5/$dir/egs/storage $dir/egs/storage
-  fi
-
-  mkdir -p $dir/egs
-  touch $dir/egs/.nodelete # keep egs around when that run dies.
-
-  steps/nnet3/chain/train.py --stage=$train_stage \
-    --cmd="$train_cmd --mem 4G" \
-    --feat.online-ivector-dir=$train_ivector_dir \
-    --feat.cmvn-opts="--norm-means=false --norm-vars=false" \
-    --chain.xent-regularize $xent_regularize \
-    --chain.leaky-hmm-coefficient=0.1 \
-    --chain.l2-regularize=0.00005 \
-    --chain.apply-deriv-weights=false \
-    --chain.lm-opts="--num-extra-lm-states=2000" \
-    --trainer.dropout-schedule $dropout_schedule \
-    --trainer.num-chunk-per-minibatch 64,32 \
-    --trainer.frames-per-iter 1500000 \
-    --trainer.max-param-change 2.0 \
-    --trainer.num-epochs $num_epochs \
-    --trainer.srand=$srand \
-    --trainer.optimization.shrink-value 0.99 \
-    --trainer.optimization.num-jobs-initial=3 \
-    --trainer.optimization.num-jobs-final=16 \
-    --trainer.optimization.initial-effective-lrate=0.001 \
-    --trainer.optimization.final-effective-lrate=0.0001 \
-    --trainer.optimization.momentum=0.0 \
-    --trainer.deriv-truncate-margin 8 \
-    --egs.stage $get_egs_stage \
-    --egs.opts="--frames-overlap-per-eg 0" \
-    --egs.chunk-width=$chunk_width \
-    --egs.chunk-left-context=$chunk_left_context \
-    --egs.chunk-right-context=$chunk_right_context \
-    --egs.chunk-left-context-initial=0 \
-    --egs.chunk-right-context-final=0 \
-    --egs.dir="$common_egs_dir" \
-    --cleanup.remove-egs=$remove_egs \
-    --feat-dir=$train_data_dir \
-    --tree-dir=$tree_dir \
-    --lat-dir=$lat_dir \
-    --dir=$dir || exit 1;
-fi
-
-if [ $stage -le 15 ]; then
-  # Note: it's not important to give mkgraph.sh the lang directory with the
-  # matched topology (since it gets the topology file from the model).
-  utils/mkgraph.sh \
-    --self-loop-scale 1.0 data/lang${lm_suffix}/ \
-    $tree_dir $tree_dir/graph${lm_suffix} || exit 1;
-fi
-
-if [ $stage -le 16 ]; then
-  frames_per_chunk=$(echo $chunk_width | cut -d, -f1)
-  rm $dir/.error 2>/dev/null || true
-
-  for data in $test_sets; do
-    (
-      steps/nnet3/decode.sh \
-        --acwt 1.0 --post-decode-acwt 10.0 \
-        --extra-left-context $chunk_left_context \
-        --extra-right-context $chunk_right_context \
-        --extra-left-context-initial 0 \
-        --extra-right-context-final 0 \
-        --frames-per-chunk $frames_per_chunk \
-        --nj 8 --cmd "$decode_cmd" --num-threads 4 \
-        --online-ivector-dir exp/nnet3${nnet3_affix}/ivectors_${data}_hires \
-        $tree_dir/graph${lm_suffix} data/${data}_hires ${dir}/decode${lm_suffix}_${data} || exit 1
-    ) || touch $dir/.error &
-  done
-  wait
-  [ -f $dir/.error ] && echo "$0: there was a problem while decoding" && exit 1
-fi
-
-# Not testing the 'looped' decoding separately, because for
-# TDNN systems it would give exactly the same results as the
-# normal decoding.
-
-if $test_online_decoding && [ $stage -le 17 ]; then
-  # note: if the features change (e.g. you add pitch features), you will have to
-  # change the options of the following command line.
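# [Illustrative sketch, not part of the patch.]  The decode stages above all use
# the same error-handling pattern: each test set is decoded in a backgrounded
# subshell, any failure touches a sentinel file, and the parent waits and then
# checks for it.  A minimal standalone version of that pattern; decode_one_set
# and the test-set names are hypothetical stand-ins for steps/nnet3/decode.sh:
decode_one_set() { echo "decoding $1"; }   # hypothetical placeholder
sentinel=./.decode_error
rm -f $sentinel
for data in dev_worn dev_beamformit_ref; do   # hypothetical test-set names
  ( decode_one_set "$data" || touch $sentinel ) &
done
wait
[ -f $sentinel ] && echo "$0: there was a problem while decoding" && exit 1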
-  steps/online/nnet3/prepare_online_decoding.sh \
-    --mfcc-config conf/mfcc_hires.conf \
-    $lang exp/nnet3${nnet3_affix}/extractor ${dir} ${dir}_online
-
-  rm $dir/.error 2>/dev/null || true
-
-  for data in $test_sets; do
-    (
-      nspk=$(wc -l
$lang/topo
-  fi
-fi
-
-if [ $stage -le 11 ]; then
-  # Get the alignments as lattices (gives the chain training more freedom).
-  # use the same num-jobs as the alignments
-  steps/align_fmllr_lats.sh --nj ${nj} --cmd "$train_cmd" ${lores_train_data_dir} \
-    data/lang $gmm_dir $lat_dir
-  rm $lat_dir/fsts.*.gz # save space
-fi
-
-if [ $stage -le 12 ]; then
-  # Build a tree using our new topology. We know we have alignments for the
-  # speed-perturbed data (local/nnet3/run_ivector_common.sh made them), so use
-  # those. The num-leaves is always somewhat less than the num-leaves from
-  # the GMM baseline.
-  if [ -f $tree_dir/final.mdl ]; then
-    echo "$0: $tree_dir/final.mdl already exists, refusing to overwrite it."
-    exit 1;
-  fi
-  steps/nnet3/chain/build_tree.sh \
-    --frame-subsampling-factor 3 \
-    --context-opts "--context-width=2 --central-position=1" \
-    --cmd "$train_cmd" 3500 ${lores_train_data_dir} \
-    $lang $ali_dir $tree_dir
-fi
-
-if [ $stage -le 13 ]; then
-  mkdir -p $dir
-  echo "$0: creating neural net configs using the xconfig parser";
-
-  num_targets=$(tree-info $tree_dir/tree |grep num-pdfs|awk '{print $2}')
-  learning_rate_factor=$(echo "print 0.5/$xent_regularize" | python)
-
-  lstm_opts="decay-time=40"
-
-  mkdir -p $dir/configs
-  cat <<EOF > $dir/configs/network.xconfig
-  input dim=100 name=ivector
-  input dim=40 name=input
-
-  # please note that it is important to have input layer with the name=input
-  # as the layer immediately preceding the fixed-affine-layer to enable
-  # the use of short notation for the descriptor
-  fixed-affine-layer name=lda input=Append(-2,-1,0,1,2,ReplaceIndex(ivector, t, 0)) affine-transform-file=$dir/configs/lda.mat
-
-  # the first splicing is moved before the lda layer, so no splicing here
-  relu-batchnorm-layer name=tdnn1 dim=$hidden_dim
-  relu-batchnorm-layer name=tdnn2 input=Append(-1,0,1) dim=$hidden_dim
-  relu-batchnorm-layer name=tdnn3 input=Append(-1,0,1) dim=$hidden_dim
-
-  fast-lstmp-layer name=lstm1 cell-dim=$cell_dim recurrent-projection-dim=$projection_dim non-recurrent-projection-dim=$projection_dim delay=-3 dropout-proportion=0.0 $lstm_opts
-  relu-batchnorm-layer name=tdnn4 input=Append(-3,0,3) dim=$hidden_dim
-  relu-batchnorm-layer name=tdnn5 input=Append(-3,0,3) dim=$hidden_dim
-  fast-lstmp-layer name=lstm2 cell-dim=$cell_dim recurrent-projection-dim=$projection_dim non-recurrent-projection-dim=$projection_dim delay=-3 dropout-proportion=0.0 $lstm_opts
-  relu-batchnorm-layer name=tdnn6 input=Append(-3,0,3) dim=$hidden_dim
-  relu-batchnorm-layer name=tdnn7 input=Append(-3,0,3) dim=$hidden_dim
-  fast-lstmp-layer name=lstm3 cell-dim=$cell_dim recurrent-projection-dim=$projection_dim non-recurrent-projection-dim=$projection_dim delay=-3 dropout-proportion=0.0 $lstm_opts
-  relu-batchnorm-layer name=tdnn8 input=Append(-3,0,3) dim=$hidden_dim
-  relu-batchnorm-layer name=tdnn9 input=Append(-3,0,3) dim=$hidden_dim
-  fast-lstmp-layer name=lstm4 cell-dim=$cell_dim recurrent-projection-dim=$projection_dim non-recurrent-projection-dim=$projection_dim delay=-3 dropout-proportion=0.0 $lstm_opts
-
-  ## adding the layers for chain branch
-  output-layer name=output input=lstm4 output-delay=$label_delay include-log-softmax=false dim=$num_targets max-change=1.5
-
-  # adding the layers for xent branch
-  # This block
prints the configs for a separate output that will be
-  # trained with a cross-entropy objective in the 'chain' models... this
-  # has the effect of regularizing the hidden parts of the model.  we use
-  # 0.5 / args.xent_regularize as the learning rate factor- the factor of
-  # 0.5 / args.xent_regularize is suitable as it means the xent
-  # final-layer learns at a rate independent of the regularization
-  # constant; and the 0.5 was tuned so as to make the relative progress
-  # similar in the xent and regular final layers.
-  output-layer name=output-xent input=lstm4 output-delay=$label_delay dim=$num_targets learning-rate-factor=$learning_rate_factor max-change=1.5
-
-EOF
-  steps/nnet3/xconfig_to_configs.py --xconfig-file $dir/configs/network.xconfig --config-dir $dir/configs/
-fi
-
-if [ $stage -le 14 ]; then
-  if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! -d $dir/egs/storage ]; then
-    utils/create_split_dir.pl \
-      /export/b0{3,4,5,6}/$USER/kaldi-data/egs/chime5-$(date +'%m_%d_%H_%M')/s5/$dir/egs/storage $dir/egs/storage
-  fi
-
-  mkdir -p $dir/egs
-  touch $dir/egs/.nodelete # keep egs around when that run dies.
-
-  steps/nnet3/chain/train.py --stage=$train_stage \
-    --cmd="$train_cmd --mem 4G" \
-    --feat.online-ivector-dir=$train_ivector_dir \
-    --feat.cmvn-opts="--norm-means=false --norm-vars=false" \
-    --chain.xent-regularize $xent_regularize \
-    --chain.leaky-hmm-coefficient=0.1 \
-    --chain.l2-regularize=0.00005 \
-    --chain.apply-deriv-weights=false \
-    --chain.lm-opts="--num-extra-lm-states=2000" \
-    --trainer.dropout-schedule $dropout_schedule \
-    --trainer.num-chunk-per-minibatch 64,32 \
-    --trainer.frames-per-iter 1500000 \
-    --trainer.max-param-change 2.0 \
-    --trainer.num-epochs $num_epochs \
-    --trainer.srand=$srand \
-    --trainer.optimization.shrink-value 0.99 \
-    --trainer.optimization.num-jobs-initial=3 \
-    --trainer.optimization.num-jobs-final=16 \
-    --trainer.optimization.initial-effective-lrate=0.001 \
-    --trainer.optimization.final-effective-lrate=0.0001 \
-    --trainer.optimization.momentum=0.0 \
-    --trainer.deriv-truncate-margin 8 \
-    --egs.stage $get_egs_stage \
-    --egs.opts="--frames-overlap-per-eg 0" \
-    --egs.chunk-width=$chunk_width \
-    --egs.chunk-left-context=$chunk_left_context \
-    --egs.chunk-right-context=$chunk_right_context \
-    --egs.chunk-left-context-initial=0 \
-    --egs.chunk-right-context-final=0 \
-    --egs.dir="$common_egs_dir" \
-    --cleanup.remove-egs=$remove_egs \
-    --feat-dir=$train_data_dir \
-    --tree-dir=$tree_dir \
-    --lat-dir=$lat_dir \
-    --dir=$dir || exit 1;
-fi
-
-if [ $stage -le 15 ]; then
-  # Note: it's not important to give mkgraph.sh the lang directory with the
-  # matched topology (since it gets the topology file from the model).
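# [Illustrative sketch, not part of the patch.]  The decode stage further below
# takes the first field of the comma-separated chunk_width as its
# frames-per-chunk.  The value shown here is a typical one for these TDNN-LSTM
# setups and is assumed for illustration only:
chunk_width=140,100,160
frames_per_chunk=$(echo "$chunk_width" | cut -d, -f1)
echo "$frames_per_chunk"   # -> 140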
-  utils/mkgraph.sh \
-    --self-loop-scale 1.0 data/lang${lm_suffix}/ \
-    $tree_dir $tree_dir/graph${lm_suffix} || exit 1;
-fi
-
-if [ $stage -le 16 ]; then
-  frames_per_chunk=$(echo $chunk_width | cut -d, -f1)
-  rm $dir/.error 2>/dev/null || true
-
-  for data in $test_sets; do
-    (
-      steps/nnet3/decode.sh \
-        --acwt 1.0 --post-decode-acwt 10.0 \
-        --extra-left-context $chunk_left_context \
-        --extra-right-context $chunk_right_context \
-        --extra-left-context-initial 0 \
-        --extra-right-context-final 0 \
-        --frames-per-chunk $frames_per_chunk \
-        --nj 8 --cmd "$decode_cmd" --num-threads 4 \
-        --online-ivector-dir exp/nnet3${nnet3_affix}/ivectors_${data}_hires \
-        $tree_dir/graph${lm_suffix} data/${data}_hires ${dir}/decode${lm_suffix}_${data} || exit 1
-    ) || touch $dir/.error &
-  done
-  wait
-  [ -f $dir/.error ] && echo "$0: there was a problem while decoding" && exit 1
-fi
-
-# Not testing the 'looped' decoding separately, because for
-# TDNN systems it would give exactly the same results as the
-# normal decoding.
-
-if $test_online_decoding && [ $stage -le 17 ]; then
-  # note: if the features change (e.g. you add pitch features), you will have to
-  # change the options of the following command line.
-  steps/online/nnet3/prepare_online_decoding.sh \
-    --mfcc-config conf/mfcc_hires.conf \
-    $lang exp/nnet3${nnet3_affix}/extractor ${dir} ${dir}_online
-
-  rm $dir/.error 2>/dev/null || true
-
-  for data in $test_sets; do
-    (
-      nspk=$(wc -l
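# [Illustrative sketch, not part of the patch.]  In comparable Kaldi
# online-decoding stages, the per-set job count is typically taken from the
# data directory's spk2utt; a hedged sketch of that pattern, using the
# surrounding script's variables ($decode_cmd, $tree_dir, $lm_suffix, $dir)
# and a hypothetical test-set name:
data=dev_worn
nspk=$(wc -l <data/${data}_hires/spk2utt)
steps/online/nnet3/decode.sh \
  --acwt 1.0 --post-decode-acwt 10.0 \
  --nj $nspk --cmd "$decode_cmd" \
  $tree_dir/graph${lm_suffix} data/${data}_hires ${dir}_online/decode${lm_suffix}_${data} || exit 1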