op.display();
op.tlpParams.display();
// set up training/test treebanks and tree transforms
Treebank trainTreebank = op.tlpParams.memoryTreebank();
MemoryTreebank testTreebank = op.tlpParams.testMemoryTreebank();
// Treebank blippTreebank = ((EnglishTreebankParserParams) tlpParams).diskTreebank();
// String blippPath = "/afs/ir.stanford.edu/data/linguistic-data/BLLIP-WSJ/";
// blippTreebank.loadPath(blippPath, "", true);
Timing.startTime();
System.err.print("Reading trees...");
testTreebank.loadPath(path, new NumberRangeFileFilter(testLow, testHigh, true));
if (op.testOptions.increasingLength) {
Collections.sort(testTreebank, new TreeLengthComparator());
}
trainTreebank.loadPath(path, new NumberRangeFileFilter(trainLow, trainHigh, true));
Timing.tick("done.");
System.err.print("Binarizing trees...");
TreeAnnotatorAndBinarizer binarizer;
if (!op.trainOptions.leftToRight) {
binarizer = new TreeAnnotatorAndBinarizer(op.tlpParams, op.forceCNF, !op.trainOptions.outsideFactor(), true, op);
} else {
binarizer = new TreeAnnotatorAndBinarizer(op.tlpParams.headFinder(), new LeftHeadFinder(), op.tlpParams, op.forceCNF, !op.trainOptions.outsideFactor(), true, op);
}
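// CollinsPuncTransformer applies Collins-style punctuation preprocessing to each tree.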
CollinsPuncTransformer collinsPuncTransformer = null;
if (op.trainOptions.collinsPunc) {
collinsPuncTransformer = new CollinsPuncTransformer(tlp);
}
TreeTransformer debinarizer = new Debinarizer(op.forceCNF);
List<Tree> binaryTrainTrees = new ArrayList<Tree>();
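// Selective parent annotation: compute the set of categories to split from
// training-set statistics, honoring the cutoffs, then drop any splitters
// explicitly listed in deleteSplitters.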
if (op.trainOptions.selectiveSplit) {
op.trainOptions.splitters = ParentAnnotationStats.getSplitCategories(trainTreebank, op.trainOptions.tagSelectiveSplit, 0, op.trainOptions.selectiveSplitCutOff, op.trainOptions.tagSelectiveSplitCutOff, op.tlpParams.treebankLanguagePack());
if (op.trainOptions.deleteSplitters != null) {
List<String> deleted = new ArrayList<String>();
for (String del : op.trainOptions.deleteSplitters) {
String baseDel = tlp.basicCategory(del);
boolean checkBasic = del.equals(baseDel);
for (Iterator<String> it = op.trainOptions.splitters.iterator(); it.hasNext(); ) {
String elem = it.next();
String baseElem = tlp.basicCategory(elem);
boolean delStr = (checkBasic && baseElem.equals(baseDel)) || elem.equals(del);
if (delStr) {
it.remove();
deleted.add(elem);
}
}
}
System.err.println("Removed from vertical splitters: " + deleted);
}
}
if (op.trainOptions.selectivePostSplit) {
TreeTransformer myTransformer = new TreeAnnotator(op.tlpParams.headFinder(), op.tlpParams, op);
Treebank annotatedTB = trainTreebank.transform(myTransformer);
op.trainOptions.postSplitters = ParentAnnotationStats.getSplitCategories(annotatedTB, true, 0, op.trainOptions.selectivePostSplitCutOff, op.trainOptions.tagSelectivePostSplitCutOff, op.tlpParams.treebankLanguagePack());
}
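// hSelSplit: make a first, throwaway binarization pass so the binarizer can
// collect the counts it needs to choose horizontal splits; the real pass follows below.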
if (op.trainOptions.hSelSplit) {
binarizer.setDoSelectiveSplit(false);
for (Tree tree : trainTreebank) {
if (op.trainOptions.collinsPunc) {
tree = collinsPuncTransformer.transformTree(tree);
}
//tree.pennPrint(tlpParams.pw());
tree = binarizer.transformTree(tree);
//binaryTrainTrees.add(tree);
}
binarizer.setDoSelectiveSplit(true);
}
for (Tree tree : trainTreebank) {
if (op.trainOptions.collinsPunc) {
tree = collinsPuncTransformer.transformTree(tree);
}
tree = binarizer.transformTree(tree);
binaryTrainTrees.add(tree);
}
if (op.testOptions.verbose) {
binarizer.dumpStats();
}
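// Apply the same transforms to the test trees; the binarized test trees are
// used below for cheatPCFG, dependency tuning, and evaluation.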
List<Tree> binaryTestTrees = new ArrayList<Tree>();
for (Tree tree : testTreebank) {
if (op.trainOptions.collinsPunc) {
tree = collinsPuncTransformer.transformTree(tree);
}
tree = binarizer.transformTree(tree);
binaryTestTrees.add(tree);
}
Timing.tick("done."); // binarization
BinaryGrammar bg = null;
UnaryGrammar ug = null;
DependencyGrammar dg = null;
// DependencyGrammar dgBLIPP = null;
Lexicon lex = null;
Index<String> stateIndex = new HashIndex<String>();
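// stateIndex maps annotated state (category) names to dense integer ids
// shared by the grammars and the parsers built below.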
// extract grammars
Extractor<Pair<UnaryGrammar,BinaryGrammar>> bgExtractor = new BinaryGrammarExtractor(op, stateIndex);
//Extractor bgExtractor = new SmoothedBinaryGrammarExtractor();//new BinaryGrammarExtractor();
// Extractor lexExtractor = new LexiconExtractor();
//Extractor dgExtractor = new DependencyMemGrammarExtractor();
if (op.doPCFG) {
System.err.print("Extracting PCFG...");
Pair<UnaryGrammar, BinaryGrammar> bgug = null;
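// cheatPCFG is an oracle-style setting: the PCFG is trained on the training
// plus test trees, useful only for debugging and upper-bound experiments.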
if (op.trainOptions.cheatPCFG) {
List<Tree> allTrees = new ArrayList<Tree>(binaryTrainTrees);
allTrees.addAll(binaryTestTrees);
bgug = bgExtractor.extract(allTrees);
} else {
bgug = bgExtractor.extract(binaryTrainTrees);
}
bg = bgug.second;
bg.splitRules();
ug = bgug.first;
ug.purgeRules();
Timing.tick("done.");
}
System.err.print("Extracting Lexicon...");
Index<String> wordIndex = new HashIndex<String>();
Index<String> tagIndex = new HashIndex<String>();
lex = op.tlpParams.lex(op, wordIndex, tagIndex);
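// Lexicon training is a three-phase protocol: initializeTraining(numTrees),
// train(trees), finishTraining().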
lex.initializeTraining(binaryTrainTrees.size());
lex.train(binaryTrainTrees);
lex.finishTraining();
Timing.tick("done.");
if (op.doDep) {
System.err.print("Extracting Dependencies...");
Extractor<DependencyGrammar> dgExtractor = new MLEDependencyGrammarExtractor(op, wordIndex, tagIndex);
// dgBLIPP = (DependencyGrammar) dgExtractor.extract(new ConcatenationIterator(trainTreebank.iterator(),blippTreebank.iterator()),new TransformTreeDependency(tlpParams,true));
// DependencyGrammar dg1 = dgExtractor.extract(trainTreebank.iterator(), new TransformTreeDependency(op.tlpParams, true));
//dgBLIPP=(DependencyGrammar)dgExtractor.extract(blippTreebank.iterator(),new TransformTreeDependency(tlpParams));
//dg = (DependencyGrammar) dgExtractor.extract(new ConcatenationIterator(trainTreebank.iterator(),blippTreebank.iterator()),new TransformTreeDependency(tlpParams));
// dg=new DependencyGrammarCombination(dg1,dgBLIPP,2);
dg = dgExtractor.extract(binaryTrainTrees); // uses whether each word is known; unknown words are discarded
binaryTrainTrees.clear(); // the binarized training trees are no longer needed; free the memory
Timing.tick("done.");
//System.out.print("Extracting Unknown Word Model...");
//UnknownWordModel uwm = (UnknownWordModel)uwmExtractor.extract(binaryTrainTrees);
//Timing.tick("done.");
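// Tune the dependency model's smoothing parameters; note that the binarized
// test trees serve as the tuning set here.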
System.out.print("Tuning Dependency Model...");
dg.tune(binaryTestTrees);
//System.out.println("TUNE DEPS: "+tuneDeps);
Timing.tick("done.");
}
BinaryGrammar boundBG = bg;
UnaryGrammar boundUG = ug;
GrammarProjection gp = new NullGrammarProjection(bg, ug);
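// NullGrammarProjection is the identity projection, so the ProjectionScorer
// below scores edges with the full (unprojected) PCFG.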
// serialization
if (serializeFile != null) {
System.err.print("Serializing parser...");
LexicalizedParser parser = new LexicalizedParser(lex, bg, ug, dg, stateIndex, wordIndex, tagIndex, op);
parser.saveParserToSerialized(serializeFile);
Timing.tick("done.");
}
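// The saved model can later be reloaded, e.g. via
// LexicalizedParser.loadModel(serializeFile) in current releases.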
// test: pcfg-parse and output
ExhaustivePCFGParser parser = null;
if (op.doPCFG) {
parser = new ExhaustivePCFGParser(boundBG, boundUG, lex, op, stateIndex, wordIndex, tagIndex);
}
ExhaustiveDependencyParser dparser = ((op.doDep && ! op.testOptions.useFastFactored) ? new ExhaustiveDependencyParser(dg, lex, op, wordIndex, tagIndex) : null);
Scorer scorer = (op.doPCFG ? new TwinScorer(new ProjectionScorer(parser, gp, op), dparser) : null);
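// Factored model: TwinScorer combines the projected PCFG score with the
// dependency score when both components are in use.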
//Scorer scorer = parser;
BiLexPCFGParser bparser = null;
if (op.doPCFG && op.doDep) {
bparser = (op.testOptions.useN5) ? new BiLexPCFGParser.N5BiLexPCFGParser(scorer, parser, dparser, bg, ug, dg, lex, op, gp, stateIndex, wordIndex, tagIndex) : new BiLexPCFGParser(scorer, parser, dparser, bg, ug, dg, lex, op, gp, stateIndex, wordIndex, tagIndex);
}
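// BiLexPCFGParser searches the combined PCFG-plus-dependency space over the
// factored model; useN5 selects the O(n^5) search variant.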
Evalb pcfgPE = new Evalb("pcfg PE", true);
Evalb comboPE = new Evalb("combo PE", true);
AbstractEval pcfgCB = new Evalb.CBEval("pcfg CB", true);
AbstractEval pcfgTE = new TaggingEval("pcfg TE");
AbstractEval comboTE = new TaggingEval("combo TE");
AbstractEval pcfgTEnoPunct = new TaggingEval("pcfg nopunct TE");
AbstractEval comboTEnoPunct = new TaggingEval("combo nopunct TE");
AbstractEval depTE = new TaggingEval("depnd TE");
AbstractEval depDE = new UnlabeledAttachmentEval("depnd DE", true, null, tlp.punctuationWordRejectFilter());
AbstractEval comboDE = new UnlabeledAttachmentEval("combo DE", true, null, tlp.punctuationWordRejectFilter());
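// Evaluation suite: Evalb = labeled bracketing P/R/F1, CBEval = crossing
// brackets, TaggingEval = POS tagging accuracy, UnlabeledAttachmentEval =
// unlabeled dependency accuracy with punctuation filtered out.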
if (op.testOptions.evalb) {
EvalbFormatWriter.initEVALBfiles(op.tlpParams);
}
// int[] countByLength = new int[op.testOptions.maxLength+1];
// Use a reflection ruse, so one can run this without needing the
// tagger. Using a function rather than a MaxentTagger means we
// can distribute a version of the parser that doesn't include the
// entire tagger.
Function<List<? extends HasWord>,ArrayList<TaggedWord>> tagger = null;
if (op.testOptions.preTag) {
try {
Class<?>[] argsClass = { String.class };
Object[] arguments = new Object[]{op.testOptions.taggerSerializedFile};
tagger = (Function<List<? extends HasWord>,ArrayList<TaggedWord>>) Class.forName("edu.stanford.nlp.tagger.maxent.MaxentTagger").getConstructor(argsClass).newInstance(arguments);
} catch (Exception e) {
System.err.println(e);
System.err.println("Warning: No pretagging of sentences will be done.");
}
}
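// Parse and evaluate each test sentence, skipping any longer than testOptions.maxLength.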
for (int tNum = 0, ttSize = testTreebank.size(); tNum < ttSize; tNum++) {
Tree tree = testTreebank.get(tNum);
int testTreeLen = tree.yield().size();
if (testTreeLen > op.testOptions.maxLength) {
continue;
}
Tree binaryTree = binaryTestTrees.get(tNum);