This is the complete list of members for Adapt_Segmenter, including all inherited members.
Adapt_Segmenter(FeaturePool *fp, int sID_train, int sID_decode, char *name, int sysID) | Adapt_Segmenter | |
adaptAMs(Vector *v, int time) | LexicalTree | |
adaptedModel | Adapt_Segmenter | [protected] |
adaptModel(TrainPhoneModel *model) | Adapt_Segmenter | [protected] |
addForcedAlignOOV(LexicalNode *oovNode) | LexicalTree | |
addNodeToList(LexicalNode *node) | LexicalTree | [protected] |
addWordStringToAlignment(LexicalNode *word) | LexicalTree | [protected] |
alignmentIsEmpty() | LexicalTree | |
alignParallel | LexicalTree | [protected] |
analyse_deletions | LexicalTree | [protected] |
analyse_insertions | LexicalTree | [protected] |
analyse_substitutions | LexicalTree | [protected] |
analyse_totRefWords | LexicalTree | [protected] |
analysisSettings | LexicalTree | [protected] |
bestL | LexicalTree | [protected] |
bestRecPath | LexicalTree | [protected] |
bestToken | LexicalTree | [protected] |
bic_meanoffset | Train_Segmenter | [protected] |
biggestNodeDepth | LexicalTree | [protected] |
blameAssingment | LexicalTree | [protected] |
borrowPhoneString(const char *string) | LexicalTree | |
borrowWordList | LexicalTree | [protected] |
calcErrorRegionStats(WLRType *wlr, WLRType *lastCorrect, int firstRef, int lastRef, int *nrWordHyp, float *scoreLM) | LexicalTree | [protected] |
cheatSpk1 | Train_Segmenter | [protected] |
cheatSpk2 | Train_Segmenter | [protected] |
checkAMs(int nrM, PhoneModel **models, bool outXML) | LexicalTree | |
checkTreeRobustness(LexicalNode *node) | LexicalTree | [protected] |
clusterScore | Train_Segmenter | [protected] |
clusterSize | Train_Segmenter | [protected] |
compareClusters | Train_Segmenter | [protected] |
compareLMHistory(int *lmHistory1, int *lmHistory2) | LexicalTree | [protected] |
compareModels | Adapt_Segmenter | [protected] |
copyLMHistory(int *lmHistory1, int *lmHistory2) | LexicalTree | [protected] |
countLatticePaths(LatticeNode *l, int sentenceID=0) | LexicalTree | [protected] |
createActiveNodesList() | LexicalTree | [protected] |
createFixedModels(int nrSpkrs, int fixedSeg) | Adapt_Segmenter | |
createInitialModels(int segAmount, char *outputName=NULL) | Train_Segmenter | [protected] |
createLattice() | LexicalTree | [protected] |
createLatticeLMRescoring(LatticeNode *l, NBestList *scoreList, float amTot, float lmTot, int nrWords, int noSilWords, int *lmHist, int sentenceID=0) | LexicalTree | [protected] |
createLatticeNodeGroups(WLRType *w, double lmScore, int wordID) | LexicalTree | [protected] |
createLexicalTree(int minNumberOfFrames, int *forEachFrames1=NULL, double *forEachFrames2=NULL) | Segmenter | [protected] |
createLMLAs(LMLAGlobalListType *lmlaGlobal, float *allLMP) | LexicalTree | [protected] |
createOverlapTree(int min) | Train_Segmenter | [protected] |
createSegments(int segID, SegmentationAdmin *admin) | Segmenter | [protected] |
createWordTree() | LexicalTree | |
currentlyAligning | LexicalTree | [protected] |
deleteLatticeAdmin() | LexicalTree | [protected] |
deleteLookAheadList() | LexicalTree | [protected] |
deleteNodes() | LexicalTree | [protected] |
deleteTree(LexicalNode *node, bool isPar) | LexicalTree | [protected] |
detectOverlap | Train_Segmenter | [protected] |
discrTrain | Segmenter | [protected] |
discrTrainLen | Segmenter | [protected] |
discrTrainMask | Segmenter | [protected] |
distanceVect | Train_Segmenter | [protected] |
endNode | LexicalTree | [protected] |
endOfSentenceWord | LexicalTree | [protected] |
errorAnalysis(WLRType *wlr, int depth, bool outputXML) | LexicalTree | [protected] |
fastCompressedTree | LexicalTree | [protected] |
fastMerge | Train_Segmenter | [protected] |
featurePool | Segmenter | [protected] |
findBestToken(bool addEndOfSentence, bool complete, bool outputXML, bool notFinishedAllowed=false) | LexicalTree | [protected] |
findCorrectNode(PLRType *pt, int wordID) | LexicalTree | [protected] |
findLatticeNodes(WLRType *wlr) | LexicalTree | [protected] |
fixedSpeakers | Adapt_Segmenter | [protected] |
getAlignmentString() | LexicalTree | |
getBestGrammarEndToken(TokenType **bestT, bool notFinishedAllowed) | LexicalTree | [protected] |
getBestIDSequence(int *idList, int maxLength, bool showSil, bool notFinishedAllowed) | LexicalTree | |
getBestPath(WLRType *wlr, bool outputXML, bool complete) | LexicalTree | [protected] |
getBestRecognition(bool addEndOfSentence, bool outputXML, bool complete, const char *label=NULL, int milliSec=0, int totLength=0, const char *beginTime=NULL, const char *endTime=NULL) | LexicalTree | |
getBestRecognitionScore() | LexicalTree | |
getLastModelForContext(LexicalNode *node, int wordID) | LexicalTree | [protected] |
getLMLAHashKey(int *lmHistory) | LexicalTree | [protected] |
getLMLATable(int *lmHistory, bool onlyPrepare) | LexicalTree | [protected] |
getLogging(const char *string) | LexicalTree | |
getMatrixScore(int a, int model1, int model2, int data1, int data2, int seg) | Adapt_Segmenter | [protected] |
getMergeModelScore(int model1, int model2, int method) | Adapt_Segmenter | [protected] |
Train_Segmenter::getMergeModelScore(int model1, int model2, double *mergeScore) | Train_Segmenter | [protected, virtual] |
getNumberOfModels() const | Segmenter | [inline] |
getNumberOfWords() | LexicalTree | |
getOverlap(FILE *file) | Train_Segmenter | [protected] |
getPhoneAlignment(const char *prefix, PLRType *pt, int lastFrame, bool confident) | LexicalTree | [protected] |
getPhoneString(LexicalNode *node, int wordID) | LexicalTree | [protected] |
getSegmentation() const | Train_Segmenter | [inline] |
getWord(int wordID) | LexicalTree | |
getWordFromWLR(WLRType *wlr) | LexicalTree | [protected] |
getWordID(const char *word) | LexicalTree | |
globalStats | LexicalTree | [protected] |
grammarStart | LexicalTree | [protected] |
grammarStartContext | LexicalTree | [protected] |
helpID | Adapt_Segmenter | [protected] |
initialiseNode(LexicalNode *node) | LexicalTree | [protected] |
initialiseSystem() | LexicalTree | [protected] |
initialiseTree(int startTime=0) | LexicalTree | [virtual] |
initialLMHist | LexicalTree | [protected] |
intervalTimer | LexicalTree | [protected] |
label | Adapt_Segmenter | [protected] |
languageModel | LexicalTree | [protected] |
lattice_copyNonSilArcs(LatticeNode *source, LatticeNode *dest, int loopID) | LexicalTree | [protected] |
lattice_removeDoubleArcs() | LexicalTree | [protected] |
latticeAdmin | LexicalTree | [protected] |
latticeBaumWelch_backward(double *latticeLikelihood, LexicalNode *node, double *latticeBaumWelchBeta, int time, int numberOfStates, double normFactor, double *resArray) | LexicalTree | |
latticeBaumWelch_calculatePosteriors(double *latticeLikelihood, LexicalNode *node, double incomingScore, double *latticeBaumWelchAlfa, double *latticeBaumWelchBeta, double *posteriors, int time, int numberOfStates) | LexicalTree | |
latticeBaumWelch_forward(double *latticeLikelihood, LexicalNode *node, double incomingScore, double *latticeBaumWelchAlfa, int time, int numberOfStates) | LexicalTree | |
latticeBaumWelch_initBackward(double *latticeBaumWelchBeta, int offset) | LexicalTree | |
latticeBaumWelch_initForward(double *latticeBaumWelchAlfa) | LexicalTree | |
latticeBaumWelch_mmi_accumulatorsPosteriors(LexicalNode *node, double *posteriors, int numberOfStates, Vector *observation) | LexicalTree | |
latticeBaumWelch_numberNodes(LexicalNode *node, int number, bool clear) | LexicalTree | |
latticeBaumWelch_printPosteriors(LexicalNode *node, double *posteriors, int time, int numberOfStates, int timeOffset) | LexicalTree | |
latticeBaumWelch_setLikelihoods(LexicalNode *node, Vector *t, int time, int numberOfStates, double *latticeLikelihood) | LexicalTree | |
latticeGeneration | LexicalTree | [protected] |
latticeL | LexicalTree | [protected] |
latticeN | LexicalTree | [protected] |
latticeWordList | LexicalTree | [protected] |
latticeWordListLength | LexicalTree | [protected] |
LexicalTree(FILE *outFile) | LexicalTree | |
LexicalTree(FILE *outFile, FILE *treeFile, bool useT=true) | LexicalTree | |
lmla_list | LexicalTree | [protected] |
loadClusters(FILE *inFile) | Train_Segmenter | |
lookAheadUnigram | LexicalTree | [protected] |
masterAdapt | Adapt_Segmenter | [protected] |
maxDataPoints | Train_Segmenter | [protected] |
maxDistance | Adapt_Segmenter | [protected] |
mergeable | Train_Segmenter | [protected] |
mergeClusters(int maxClusters, bool smallestWins, bool actuallyMerge) | Train_Segmenter | [protected] |
mergeModels(int model1, int model2) | Adapt_Segmenter | [protected, virtual] |
mergeTestMod | Adapt_Segmenter | [protected] |
mergeWinThreshold | Adapt_Segmenter | [protected] |
modelMapping | Adapt_Segmenter | [protected] |
myArtStream | LexicalTree | [protected] |
nodeArray | LexicalTree | [protected] |
nodeList | LexicalTree | [protected] |
nodeListLength | LexicalTree | [protected] |
nrCompareModels | Adapt_Segmenter | [protected] |
nrMergeIterations | Train_Segmenter | [protected] |
nrNonMergeModels | Train_Segmenter | [protected] |
nrOfTokens | LexicalTree | [protected] |
nrScoreClusters | Train_Segmenter | [protected] |
numberOfClusters | Segmenter | [protected] |
numberOfCompressedNodes | LexicalTree | [protected] |
numberOfMerges | Train_Segmenter | [protected] |
numberOfPhones | LexicalTree | [protected] |
numberOfWords | LexicalTree | [protected] |
oldcreateLMLAs(LMLAGlobalListType *lmlaGlobal, LexicalNode *node, float *allLMP) | LexicalTree | [protected] |
outputFile | LexicalTree | [protected] |
outputFileName | Train_Segmenter | [protected] |
overwritePrunePars(bool doHist, bool doBeam, bool doEndBeam, double beam, double state_beam, double endstate_beam, bool lmla, int histState, int hist) | LexicalTree | |
overwriteWeightPars(double lmScale, double transPenalty, double silPenalty) | LexicalTree | |
pdfUpdateList | LexicalTree | [protected] |
phoneLoopConfidence | LexicalTree | [protected] |
phoneLoopConfidenceOffset | LexicalTree | [protected] |
phoneModels | LexicalTree | [protected] |
prepareForASR | Train_Segmenter | [protected] |
prepareLMLACreation(LexicalNode *node) | LexicalTree | [protected] |
printErrorString(int errorID) | LexicalTree | [protected] |
printFinalSettings(bool outXML, int totMilliSec, int totTime) | LexicalTree | |
printInitialSettings(const char *amName, const char *dctName, const char *backName, const char *lmName, bool outXML) | LexicalTree | |
printLattice(FILE *latFile, const char *label, int timeEnd) | LexicalTree | |
printLMParStats(bool outputXML) | LexicalTree | [protected] |
printNBestList(FILE *nbestFile=NULL, LatticeNode *node=NULL) | LexicalTree | |
printTokenDistribution() | LexicalTree | [protected] |
printWordPronunciation(LexicalNode *node, int wordID) | LexicalTree | [protected] |
proceedMerge(int model1, int model2, int method) | Adapt_Segmenter | [protected, virtual] |
processNode(LexicalNode *node, Vector *v) | LexicalTree | |
processNodeOutput(LexicalNode *node) | LexicalTree | [protected, virtual] |
processVector(Vector **v, int time) | LexicalTree | |
processVector_administrationCleanup() | LexicalTree | [protected] |
processVector_grammar() | LexicalTree | [protected] |
processVector_LMLAReordering() | LexicalTree | [protected] |
processVector_LMLAReordering_prepare() | LexicalTree | [protected] |
processVector_processNodes(Vector **v) | LexicalTree | [protected] |
processVector_prune_processNodesOutput() | LexicalTree | [protected] |
processVector_pruneLM() | LexicalTree | [protected] |
processWord(int wordID, TokenType *token, char isSil, LexicalNode *resultNode) | LexicalTree | [protected] |
pruneLMLA() | LexicalTree | [protected] |
pruneToken(TokenType **token, float minLikelihood, float binSize=0.0, int *bins=NULL) | LexicalTree | [protected] |
pruneWithMinBeam(LexicalNode *node, float minLikelihood_0) | LexicalTree | [protected] |
pTable1 | Adapt_Segmenter | [protected] |
pTable2 | Adapt_Segmenter | [protected] |
pTlen | Adapt_Segmenter | [protected] |
readTree(LexicalNode *node, FILE *treeFile, int length) | LexicalTree | [protected] |
sadID_decode | Train_Segmenter | [protected] |
sadID_train | Train_Segmenter | [protected] |
safeBestRecognition(bool addEndOfSentence) | LexicalTree | |
scoreCluster | Train_Segmenter | [protected] |
segAmount | Adapt_Segmenter | [protected] |
segID | Train_Segmenter | [protected] |
segID_cheat | Train_Segmenter | [protected] |
segID_decode | Train_Segmenter | [protected] |
segInitID | Adapt_Segmenter | [protected] |
Segmenter() | Segmenter | |
Segmenter(FILE *inFile) | Segmenter | |
segmentFeaturePool(int inputSegID, int inputLabelID, int outputSegID) | Segmenter | |
sentenceStats | LexicalTree | [protected] |
setAlignParallel() | LexicalTree | |
setAMs(PhoneModel **models) | LexicalTree | |
setDepthLevel(LexicalNode *node, int phone, int depth) | LexicalTree | [protected] |
setFeaturePool(FeaturePool *fp) | Segmenter | |
setForcedAlign(const char *string) | LexicalTree | |
setInitialLMHistory(const char *word1, const char *word2) | LexicalTree | |
setLattice(FILE *latFile) | LexicalTree | |
setLatticeGeneration(bool setting) | LexicalTree | |
setLM(LanguageModel *lm) | LexicalTree | |
setNodeContext(LexicalNode *node, int leftContext) | LexicalTree | [protected] |
setNodeLocationPars(LexicalNode *node, bool fromParallel) | LexicalTree | [protected] |
setPhoneLoop(int nrP, PhoneModel **models) | LexicalTree | |
setPhoneLoopConfidence(float *phoneConf, int offset=0) | LexicalTree | |
settings | LexicalTree | [protected] |
setTokenDistributionFile(FILE *tdFile) | LexicalTree | |
setTreeStartEndMatrix() | LexicalTree | [protected] |
setWlrNBest(WLRType *wlr) | LexicalTree | [protected] |
sortLatticePaths(LatticeNode *l) | LexicalTree | [protected] |
startMergeIteration() | Adapt_Segmenter | [protected, virtual] |
startOfSentenceWord | LexicalTree | [protected] |
startTime | LexicalTree | [protected] |
storeClusters(FILE *outFile, char *outputName) | Train_Segmenter | |
storePLConfidence(int time) | LexicalTree | |
systemID | Adapt_Segmenter | [protected] |
tdFile | LexicalTree | [protected] |
testArticulatory(ArticulatoryStream *s) | LexicalTree | |
threadsRunning | LexicalTree | [protected] |
timeStamp | LexicalTree | [protected] |
tokenDepthAdmin | LexicalTree | [protected] |
totCategories | LexicalTree | [protected] |
touchWLRpath(WLRType *w) | LexicalTree | [protected] |
touchWLRs(TokenType *token) | LexicalTree | [protected] |
train(int maxClusters, char *label, char *tempStr, char *feaPosteriors, int segID_cheat=-1) | Train_Segmenter | |
Train_Segmenter(FeaturePool *fp, int sID_train, int sID_decode, const char *name, int nrMerges, int minClusters, int maxClusters, bool widen, const char *nonMergeableModels) | Train_Segmenter | |
trainCluster | Train_Segmenter | [protected] |
trainClusters(int nrG=-1) | Train_Segmenter | [protected] |
trainIteration(int nrIterations) | Train_Segmenter | [protected] |
trainModel(int model, int nrG) | Adapt_Segmenter | [protected] |
Train_Segmenter::trainModel(int model, int nrG, double *trainPRes) | Train_Segmenter | [protected, virtual] |
transitionPenalty | LexicalTree | [protected] |
trcl | Train_Segmenter | [protected] |
treeEnd | LexicalTree | [protected] |
treeStart | LexicalTree | [protected] |
ubm_word_amount | Adapt_Segmenter | [protected] |
updateGlobalStats() | LexicalTree | |
updateStats() | LexicalTree | [protected] |
vList | LexicalTree | [protected] |
vocabulary | LexicalTree | [protected] |
vtlnModel | Train_Segmenter | [protected] |
winnerM1 | Adapt_Segmenter | [protected] |
winnerM2 | Adapt_Segmenter | [protected] |
wlrNBest | LexicalTree | [protected] |
wlrStart | LexicalTree | [protected] |
wordLength | LexicalTree | [protected] |
writePosteriors(char *fileName) | Train_Segmenter | [protected] |
~Adapt_Segmenter() | Adapt_Segmenter | |
~LexicalTree() | LexicalTree | |
~Segmenter() | Segmenter | |
~Train_Segmenter() | Train_Segmenter | [virtual] |
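The constructor and methods listed above outline the segmentation workflow: construct the segmenter on a feature pool, run train() to cluster the data, and write the result with storeClusters(). The following is a minimal usage sketch based only on the signatures in this member list; it assumes the toolkit headers declaring FeaturePool and Adapt_Segmenter are available, and the feature-pool setup, segmentation IDs, cluster counts, and file names are illustrative placeholders rather than documented values.

#include <cstdio>

// Hypothetical driver; only the Adapt_Segmenter / Train_Segmenter calls below
// correspond to signatures in this member list. Obtaining the FeaturePool and
// the segmentation IDs (sID_train, sID_decode) is assumed to happen elsewhere.
int runSegmentation(FeaturePool *pool, int sID_train, int sID_decode)
{
  char name[]  = "SPK";       // cluster-name prefix (assumed meaning)
  char label[] = "session01"; // label passed to train() (assumed meaning)
  char temp[]  = "/tmp/adapt";

  // Constructor as listed: Adapt_Segmenter(FeaturePool*, int, int, char*, int)
  Adapt_Segmenter seg(pool, sID_train, sID_decode, name, /*sysID=*/0);

  // Cluster the data; segID_cheat keeps its default of -1 and no posterior
  // feature file is written (feaPosteriors = NULL).
  seg.train(/*maxClusters=*/16, label, temp, /*feaPosteriors=*/NULL);

  // Persist the resulting clusters (inherited from Train_Segmenter).
  FILE *out = std::fopen("clusters.bin", "wb");
  if (out != NULL) {
    seg.storeClusters(out, name);
    std::fclose(out);
  }
  return 0;
}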