#!/bin/bash
# Copyright 2012  Microsoft Corporation  Johns Hopkins University (Author: Daniel Povey)
# Apache 2.0

# This script takes data prepared in a corpus-dependent way
# in data/local/, and converts it into the "canonical" form,
# in various subdirectories of data/, e.g. data/lang, data/lang_test_ug,
# data/train_si284, data/train_si84, etc.

# Don't bother doing train_si84 separately (although we have the file lists
# in data/local/) because it's just the first 7138 utterances in train_si284.
# We'll create train_si84 after doing the feature extraction.

. ./path.sh || exit 1;

echo "Preparing train and test data"
srcdir=data/local/data
lmdir=data/local/nist_lm
tmpdir=data/local/lm_tmp
lexicon=data/local/lang_tmp/lexiconp.txt
mkdir -p $tmpdir

for x in train_si284 test_eval92 test_eval93 test_dev93 test_eval92_5k \
         test_eval93_5k test_dev93_5k dev_dt_05 dev_dt_20; do
  mkdir -p data/$x
  cp $srcdir/${x}_wav.scp data/$x/wav.scp || exit 1;
  cp $srcdir/$x.txt data/$x/text || exit 1;
  cp $srcdir/$x.spk2utt data/$x/spk2utt || exit 1;
  cp $srcdir/$x.utt2spk data/$x/utt2spk || exit 1;
  utils/filter_scp.pl data/$x/spk2utt $srcdir/spk2gender > data/$x/spk2gender || exit 1;
done

# Next, for each type of language model, create the corresponding FST
# and the corresponding lang_test_* directory.
echo "Preparing language models for test"

for lm_suffix in bg tgpr tg bg_5k tgpr_5k tg_5k; do
  test=data/lang_test_${lm_suffix}
  mkdir -p $test
  for f in phones.txt words.txt L.fst L_disambig.fst phones/; do
    cp -r data/lang/$f $test
  done

  gunzip -c $lmdir/lm_${lm_suffix}.arpa.gz | \
    utils/find_arpa_oovs.pl $test/words.txt > $tmpdir/oovs_${lm_suffix}.txt

  # grep -v '<s> <s>' because the LM seems to have some strange and useless
  # stuff in it with multiple <s>'s in the history.  Encountered some other
  # similar things in a LM from Geoff.  Removing all "illegal" combinations
  # of <s> and </s>, which are supposed to occur only at the beginning/end
  # of an utterance.  These can cause determinization failures of CLG
  # [it ends up with epsilon cycles].
  gunzip -c $lmdir/lm_${lm_suffix}.arpa.gz | \
    grep -v '<s> <s>' | \
    grep -v '</s> <s>' | \
    grep -v '</s> </s>' | \
    arpa2fst - | fstprint | \
    utils/remove_oovs.pl $tmpdir/oovs_${lm_suffix}.txt | \
    utils/eps2disambig.pl | utils/s2eps.pl | \
    fstcompile --isymbols=$test/words.txt --osymbols=$test/words.txt \
      --keep_isymbols=false --keep_osymbols=false | \
    fstrmepsilon > $test/G.fst

  fstisstochastic $test/G.fst
  # The output is like:
  # 9.14233e-05 -0.259833
  # We expect the first of these two numbers to be close to zero (the second
  # is nonzero because the backoff weights make the states sum to >1).
  # Because of the <s> fiasco for these particular LMs, the first number is
  # not as close to zero as it could be.

  # Everything below is only for diagnostics.
  # Checking that G has no cycles with empty words on them (e.g. <s>, </s>);
  # this might cause determinization failure of CLG.
  # #0 is treated as an empty word.
  mkdir -p $tmpdir/g
  awk '{if(NF==1){ printf("0 0 %s %s\n", $1, $1); }} END{print "0 0 #0 #0"; print "0";}' \
    < "$lexicon" > $tmpdir/g/select_empty.fst.txt
  fstcompile --isymbols=$test/words.txt --osymbols=$test/words.txt \
    $tmpdir/g/select_empty.fst.txt | \
    fstarcsort --sort_type=olabel | fstcompose - $test/G.fst \
    > $tmpdir/g/empty_words.fst
  fstinfo $tmpdir/g/empty_words.fst | grep cyclic | grep -w 'y' \
    && echo "Language model has cycles with empty words" && exit 1
  rm -r $tmpdir/g
done

echo "Succeeded in formatting data."
rm -r $tmpdir
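
# For reference, a sketch of the canonical per-set file formats that the first
# loop above copies into data/$x (the utterance/speaker IDs and paths below
# are illustrative, not taken from any actual run):
#
#   wav.scp:     <utterance-id> <wav path, or a command producing wav on stdout>
#     011c0201 sph2pipe -f wav /path/to/wsj/011c0201.wv1 |
#   text:        <utterance-id> <word transcription>
#     011c0201 THE SALE OF THE HOTELS IS PART OF A RESTRUCTURING
#   utt2spk:     <utterance-id> <speaker-id>
#     011c0201 011
#   spk2utt:     <speaker-id> <utterance-id-1> <utterance-id-2> ...
#     011 011c0201 011c0202 011c0203
#   spk2gender:  <speaker-id> <m or f>
#     011 f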
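
# To make the grep -v filtering in the LM loop concrete: the ARPA file's
# n-gram sections can contain entries like the hypothetical lines below
# (log-probabilities invented for illustration), which encode impossible
# histories and are discarded before arpa2fst:
#
#   \2-grams:
#   -1.8328  <s> <s>       -0.3010
#   -2.4019  </s> <s>
#   \3-grams:
#   -0.5163  A </s> </s>
#
# Only n-grams in which <s> starts the history and </s> ends it survive.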
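
# fstisstochastic exits nonzero when the FST is not stochastic to within a
# tolerance, so a stricter variant of the check above could look like the
# sketch below.  We deliberately do not fail on it here, since the <s> issue
# makes some deviation expected for these LMs:
#
#   fstisstochastic $test/G.fst || echo "Warning: G.fst is not stochastic"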
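
# The FST text format written to select_empty.fst.txt above is one arc per
# line, "<src-state> <dst-state> <input-label> <output-label>", with a bare
# state number marking a final state.  If the lexicon contained a
# hypothetical word "<unk>" with an empty pronunciation (a line with only the
# word on it), the file would read:
#
#   0 0 <unk> <unk>
#   0 0 #0 #0
#   0
#
# i.e. a single-state acceptor over the "empty" words plus the #0 backoff
# symbol; composing it with G.fst and grepping fstinfo for cyclicity flags
# the epsilon-like cycles that would break CLG determinization.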