* xconfig: a concise representation of nnet3 architectures that lets complex neural networks be specified without the usual verbosity. It achieves this compactness by (1) using the abstraction of layers, and (2) using a compact descriptor notation, e.g. `Append(Offset(prev_layer, -3), prev_layer, Offset(prev_layer, 3))` becomes `Append(-3, 0, 3)`. (For more details see #1124.) Basic layer types and an LSTM layer are currently supported. Example recipes have been added in egs/swbd/s5c/local/chain/*.sh. Another major change is the elimination of layer-wise discriminative pretraining in the new recipes, as it was not found to be beneficial.
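To make the compactness concrete, here is a minimal sketch of a small TDNN in xconfig notation (the layer names and dimensions are hypothetical, not taken from this commit):

  input dim=40 name=input
  relu-renorm-layer name=tdnn1 input=Append(-2,-1,0,1,2) dim=512
  relu-renorm-layer name=tdnn2 input=Append(-3,0,3) dim=512
  output-layer name=output dim=3000 max-change=1.5

When the final nnet3 configs are generated, each Append(-3,0,3) expands into the full Offset()-based descriptor over the preceding layer's output, e.g. Append(Offset(tdnn1, -3), tdnn1, Offset(tdnn1, 3)) for tdnn2's input.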
Commit 07a5d51 (1 parent: ac1f932)
Showing 28 changed files with 3,463 additions and 84 deletions.
One file in this commit was deleted; its contents are not shown.
@@ -0,0 +1,61 @@
#!/bin/bash

echo -n "System                  "
for x in $*; do printf "% 10s" $x; done
echo

echo -n "WER on train_dev(tg)    "
for x in $*; do
  wer=$(grep WER exp/chain/${x}_sp/decode_train_dev_sw1_tg/wer_* | utils/best_wer.sh | awk '{print $2}')
  printf "% 10s" $wer
done
echo

echo -n "WER on train_dev(fg)    "
for x in $*; do
  wer=$(grep WER exp/chain/${x}_sp/decode_train_dev_sw1_fsh_fg/wer_* | utils/best_wer.sh | awk '{print $2}')
  printf "% 10s" $wer
done
echo

echo -n "WER on eval2000(tg)     "
for x in $*; do
  wer=$(grep Sum exp/chain/${x}_sp/decode_eval2000_sw1_tg/score*/*ys | grep -v swbd | utils/best_wer.sh | awk '{print $2}')
  printf "% 10s" $wer
done
echo

echo -n "WER on eval2000(fg)     "
for x in $*; do
  wer=$(grep Sum exp/chain/${x}_sp/decode_eval2000_sw1_fsh_fg/score*/*ys | grep -v swbd | utils/best_wer.sh | awk '{print $2}')
  printf "% 10s" $wer
done
echo

echo -n "Final train prob        "
for x in $*; do
  prob=$(grep Overall exp/chain/${x}_sp/log/compute_prob_train.final.log | grep -v xent | awk '{print $8}')
  printf "% 10s" $prob
done
echo

echo -n "Final valid prob        "
for x in $*; do
  prob=$(grep Overall exp/chain/${x}_sp/log/compute_prob_valid.final.log | grep -v xent | awk '{print $8}')
  printf "% 10s" $prob
done
echo

echo -n "Final train prob (xent) "
for x in $*; do
  prob=$(grep Overall exp/chain/${x}_sp/log/compute_prob_train.final.log | grep -w xent | awk '{print $8}')
  printf "% 10s" $prob
done
echo

echo -n "Final valid prob (xent) "
for x in $*; do
  prob=$(grep Overall exp/chain/${x}_sp/log/compute_prob_valid.final.log | grep -w xent | awk '{print $8}')
  printf "% 10s" $prob
done
echo
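A hypothetical invocation, assuming two already-trained experiments exp/chain/blstm_6i_sp and exp/chain/blstm_6j_sp with their decode directories in place:

  local/chain/compare_wer_general.sh blstm_6i blstm_6j

Each argument names one model (the script appends the _sp suffix itself), and the output is a table with one column per model: WER on train_dev and eval2000 under the trigram (tg) and Fisher fourgram (fsh_fg) language models, plus the final train/valid probabilities for the 'chain' and cross-entropy (xent) objectives.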
@@ -0,0 +1,6 @@
#!/bin/bash

models=""
for x in $*; do models="$models tdnn_${x}"; done

local/chain/compare_wer_general.sh $models
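This wrapper just prefixes each argument with tdnn_, so an invocation such as the following (the wrapper's own filename is not shown in this view, so the path here is a guess):

  local/chain/compare_wer.sh 7f 7h

would be equivalent to local/chain/compare_wer_general.sh tdnn_7f tdnn_7h.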
@@ -1 +1 @@
-tuning/run_blstm_6i.sh
+tuning/run_blstm_6j.sh
@@ -1 +1 @@
-tuning/run_lstm_6i.sh
+tuning/run_lstm_6j.sh
@@ -1 +1 @@
-tuning/run_tdnn_7f.sh
+tuning/run_tdnn_7h.sh
@@ -0,0 +1,228 @@
#!/bin/bash

# 6j is the same as 6i but uses the xconfig format of network specification.
# Also, the model is trained without layer-wise discriminative pretraining.
# Another minor change is that the final affine component is initialized with
# param-stddev=0 and bias-stddev=0.
# This run also accounts for changes in training due to the BackpropTruncationComponent.

set -e

# configs for 'chain'
stage=12
train_stage=-10
get_egs_stage=-10
speed_perturb=true
dir=exp/chain/blstm_6j # Note: _sp will get added to this if $speed_perturb == true.
decode_iter=
decode_dir_affix=

# training options
leftmost_questions_truncate=-1
chunk_width=150
chunk_left_context=40
chunk_right_context=40
xent_regularize=0.025
self_repair_scale=0.00001
label_delay=0

# decode options
extra_left_context=50
extra_right_context=50
frames_per_chunk=
has_fisher=true  # enables the Fisher fourgram LM rescoring in the decode stage

remove_egs=false
common_egs_dir=

affix=
# End configuration section.
echo "$0 $@" # Print the command line for logging

. ./cmd.sh
. ./path.sh
. ./utils/parse_options.sh

if ! cuda-compiled; then
  cat <<EOF && exit 1
This script is intended to be used with GPUs, but you have not compiled Kaldi with CUDA.
If you want to use GPUs (and have them), go to src/, and configure and make on a machine
where "nvcc" is installed.
EOF
fi

# The iVector-extraction and feature-dumping parts are the same as the standard
# nnet3 setup, and you can skip them by setting "--stage 8" if you have already
# run those things.

suffix=
if [ "$speed_perturb" == "true" ]; then
  suffix=_sp
fi

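# The experiment directory name accumulates an optional affix, a label-delay
# marker, and the speed-perturbation suffix, e.g. exp/chain/blstm_6j_sp.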
dir=$dir${affix:+_$affix}
if [ $label_delay -gt 0 ]; then dir=${dir}_ld$label_delay; fi
dir=${dir}$suffix
train_set=train_nodup$suffix
ali_dir=exp/tri4_ali_nodup$suffix
treedir=exp/chain/tri5_7d_tree$suffix
lang=data/lang_chain_2y


# if we are using the speed-perturbed data, we need to generate
# alignments for it.
local/nnet3/run_ivector_common.sh --stage $stage \
  --speed-perturb $speed_perturb \
  --generate-alignments $speed_perturb || exit 1;


if [ $stage -le 9 ]; then
  # Get the alignments as lattices (gives the 'chain' training more freedom).
  # Use the same num-jobs as the alignments.
  nj=$(cat exp/tri4_ali_nodup$suffix/num_jobs) || exit 1;
  steps/align_fmllr_lats.sh --nj $nj --cmd "$train_cmd" data/$train_set \
    data/lang exp/tri4 exp/tri4_lats_nodup$suffix
  rm exp/tri4_lats_nodup$suffix/fsts.*.gz # save space
fi


if [ $stage -le 10 ]; then
  # Create a version of the lang/ directory that has one state per phone in the
  # topo file. [Note: it really has two states; the first one is only repeated
  # once, the second has zero or more repeats.]
  rm -rf $lang
  cp -r data/lang $lang
  silphonelist=$(cat $lang/phones/silence.csl) || exit 1;
  nonsilphonelist=$(cat $lang/phones/nonsilence.csl) || exit 1;
  # Use our special topology... note that later on we may have to tune this
  # topology.
  steps/nnet3/chain/gen_topo.py $nonsilphonelist $silphonelist >$lang/topo
fi

if [ $stage -le 11 ]; then
  # Build a tree using our new topology.
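  # The frame-subsampling-factor of 3 matches the reduced (one-third) output
  # frame rate at which 'chain' models are trained and decoded.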
  steps/nnet3/chain/build_tree.sh --frame-subsampling-factor 3 \
    --leftmost-questions-truncate $leftmost_questions_truncate \
    --context-opts "--context-width=2 --central-position=1" \
    --cmd "$train_cmd" 7000 data/$train_set $lang $ali_dir $treedir
fi

if [ $stage -le 12 ]; then
  echo "$0: creating neural net configs using the xconfig parser";

  num_targets=$(tree-info $treedir/tree | grep num-pdfs | awk '{print $2}')
  learning_rate_factor=$(echo "print 0.5/$xent_regularize" | python)
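  # e.g. with the default xent_regularize=0.025 above, this gives
  # learning_rate_factor = 0.5 / 0.025 = 20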

  mkdir -p $dir/configs
  cat <<EOF > $dir/configs/network.xconfig
input dim=100 name=ivector
input dim=40 name=input
# please note that it is important to have the input layer with name=input,
# as the layer immediately preceding the fixed-affine-layer, to enable
# the use of the short notation for the descriptor
fixed-affine-layer name=lda input=Append(-2,-1,0,1,2,ReplaceIndex(ivector, t, 0)) affine-transform-file=$dir/configs/lda.mat
# check steps/libs/nnet3/xconfig/lstm.py for the other options and defaults
lstmp-layer name=blstm1-forward input=lda cell-dim=1024 recurrent-projection-dim=256 non-recurrent-projection-dim=256 delay=-3
lstmp-layer name=blstm1-backward input=lda cell-dim=1024 recurrent-projection-dim=256 non-recurrent-projection-dim=256 delay=3
lstmp-layer name=blstm2-forward input=Append(blstm1-forward, blstm1-backward) cell-dim=1024 recurrent-projection-dim=256 non-recurrent-projection-dim=256 delay=-3
lstmp-layer name=blstm2-backward input=Append(blstm1-forward, blstm1-backward) cell-dim=1024 recurrent-projection-dim=256 non-recurrent-projection-dim=256 delay=3
lstmp-layer name=blstm3-forward input=Append(blstm2-forward, blstm2-backward) cell-dim=1024 recurrent-projection-dim=256 non-recurrent-projection-dim=256 delay=-3
lstmp-layer name=blstm3-backward input=Append(blstm2-forward, blstm2-backward) cell-dim=1024 recurrent-projection-dim=256 non-recurrent-projection-dim=256 delay=3
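# note: each BLSTM 'layer' above is a pair of lstmp layers, one running forward
# in time (delay=-3) and one backward (delay=3); Append() splices their output
# streams together to form the input of the next pair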
## adding the layers for the chain branch
output-layer name=output input=Append(blstm3-forward, blstm3-backward) output-delay=$label_delay include-log-softmax=false dim=$num_targets max-change=1.5
# adding the layers for the xent branch
# This block prints the configs for a separate output that will be
# trained with a cross-entropy objective in the 'chain' models... this
# has the effect of regularizing the hidden parts of the model. We use
# 0.5 / args.xent_regularize as the learning rate factor; this factor
# is suitable because it means the xent final layer learns at a rate
# independent of the regularization constant, and the 0.5 was tuned so
# as to make the relative progress similar in the xent and regular
# final layers.
output-layer name=output-xent input=Append(blstm3-forward, blstm3-backward) output-delay=$label_delay dim=$num_targets learning-rate-factor=$learning_rate_factor max-change=1.5
EOF
  steps/nnet3/xconfig_to_configs.py --xconfig-file $dir/configs/network.xconfig --config-dir $dir/configs/
fi

if [ $stage -le 13 ]; then
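  # This block only takes effect on the CLSP grid at JHU: it spreads the large
  # egs data over several disks and links it into $dir/egs/storage.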
  if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! -d $dir/egs/storage ]; then
    utils/create_split_dir.pl \
      /export/b0{5,6,7,8}/$USER/kaldi-data/egs/swbd-$(date +'%m_%d_%H_%M')/s5c/$dir/egs/storage $dir/egs/storage
  fi

  steps/nnet3/chain/train.py --stage $train_stage \
    --cmd "$decode_cmd" \
    --feat.online-ivector-dir exp/nnet3/ivectors_${train_set} \
    --feat.cmvn-opts "--norm-means=false --norm-vars=false" \
    --chain.xent-regularize $xent_regularize \
    --chain.leaky-hmm-coefficient 0.1 \
    --chain.l2-regularize 0.00005 \
    --chain.apply-deriv-weights false \
    --chain.lm-opts="--num-extra-lm-states=2000" \
    --chain.left-deriv-truncate 0 \
    --trainer.num-chunk-per-minibatch 64 \
    --trainer.frames-per-iter 1200000 \
    --trainer.max-param-change 2.0 \
    --trainer.num-epochs 4 \
    --trainer.optimization.shrink-value 0.99 \
    --trainer.optimization.num-jobs-initial 3 \
    --trainer.optimization.num-jobs-final 16 \
    --trainer.optimization.initial-effective-lrate 0.001 \
    --trainer.optimization.final-effective-lrate 0.0001 \
    --trainer.optimization.momentum 0.0 \
    --egs.stage $get_egs_stage \
    --egs.opts "--frames-overlap-per-eg 0" \
    --egs.chunk-width $chunk_width \
    --egs.chunk-left-context $chunk_left_context \
    --egs.chunk-right-context $chunk_right_context \
    --egs.dir "$common_egs_dir" \
    --cleanup.remove-egs $remove_egs \
    --feat-dir data/${train_set}_hires \
    --tree-dir $treedir \
    --lat-dir exp/tri4_lats_nodup$suffix \
    --dir $dir || exit 1;
fi

if [ $stage -le 14 ]; then
  # Note: it might appear that this $lang directory is mismatched, and it is as
  # far as the 'topo' is concerned, but this script doesn't read the 'topo' from
  # the lang directory.
  utils/mkgraph.sh --left-biphone --self-loop-scale 1.0 data/lang_sw1_tg $dir $dir/graph_sw1_tg
fi

decode_suff=sw1_tg
graph_dir=$dir/graph_sw1_tg
if [ $stage -le 15 ]; then
  [ -z $extra_left_context ] && extra_left_context=$chunk_left_context;
  [ -z $extra_right_context ] && extra_right_context=$chunk_right_context;
  [ -z $frames_per_chunk ] && frames_per_chunk=$chunk_width;
  iter_opts=
  if [ ! -z $decode_iter ]; then
    iter_opts=" --iter $decode_iter "
  fi
  for decode_set in train_dev eval2000; do
    (
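      # 'chain' models are decoded with an acoustic scale of 1.0; the
      # --post-decode-acwt 10.0 option scales up the acoustic scores in the
      # output lattices so that downstream scripts expecting the conventional
      # ~0.1 acoustic weight (e.g. the LM rescoring below) work unchanged.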
      steps/nnet3/decode.sh --acwt 1.0 --post-decode-acwt 10.0 \
        --nj 50 --cmd "$decode_cmd" $iter_opts \
        --extra-left-context $extra_left_context \
        --extra-right-context $extra_right_context \
        --frames-per-chunk "$frames_per_chunk" \
        --online-ivector-dir exp/nnet3/ivectors_${decode_set} \
        $graph_dir data/${decode_set}_hires \
        $dir/decode_${decode_set}${decode_dir_affix:+_$decode_dir_affix}_${decode_suff} || exit 1;
      if $has_fisher; then
        steps/lmrescore_const_arpa.sh --cmd "$decode_cmd" \
          data/lang_sw1_{tg,fsh_fg} data/${decode_set}_hires \
          $dir/decode_${decode_set}${decode_dir_affix:+_$decode_dir_affix}_sw1_{tg,fsh_fg} || exit 1;
      fi
    ) &
  done
fi
wait;
exit 0;
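A hypothetical end-to-end use of this recipe from egs/swbd/s5c (the default stage=12 assumes stages 9-11, i.e. the lattices, topology, and tree, already exist from an earlier 'chain' experiment; pass a lower --stage to build them):

  local/chain/tuning/run_blstm_6j.sh
  local/chain/compare_wer_general.sh blstm_6i blstm_6j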