Package: edu.stanford.nlp.ling

Examples of edu.stanford.nlp.ling.IndexedWord


    // tabu list.  If it already is, we emit the edge and the target vertice, as
    // we will not be continuing in that vertex, but we wish to record the relation.
    // If we will proceed down that node, add parens if it will continue recursing down.
    for (SemanticGraphEdge edge : edgeIter) {
      seenEdges.add(edge);
      IndexedWord tgtVert = edge.getDependent();
      boolean applyParens =
        sg.outDegree(tgtVert) > 0 && !tabu.contains(tgtVert);
      buf.append(" >");
      buf.append(edge.getRelation().toString());
      if (nameEdges) {
        buf.append("=E");
        buf.write(String.valueOf(seenEdges.size()));
      }
      buf.append(" ");
      if (applyParens)
        buf.append("(");
      if (tabu.contains(tgtVert)) {
        buf.append("{tag:"); buf.append(tgtVert.tag()); buf.append("}");
        if (useWordAsLabel) {
          buf.append("=");
          buf.append(tgtVert.word());
          buf.append(" ");
        }
      } else {
        buf.append(semgrexFromGraphHelper(tgtVert, sg, tabu, seenEdges, useWordAsLabel, nameEdges,
            wildcardNodes, useTag, useWord, nodeNameMap, orderedNodes));


    return buf.toString();
  }
 
  public static String semgrexFromGraphOrderedNodes(SemanticGraph sg, Collection<IndexedWord> wildcardNodes,
      boolean useTag, boolean useWord, Map<IndexedWord, String> nodeNameMap) throws Exception {
    IndexedWord patternRoot = sg.getFirstRoot();
    StringWriter buf = new StringWriter();
    Set<IndexedWord> tabu = Generics.newHashSet();
    Set<SemanticGraphEdge> seenEdges = Generics.newHashSet();

    buf.append(semgrexFromGraphHelper(patternRoot, sg, tabu, seenEdges, true, true, wildcardNodes,

    List<IndexedWord> prevRoots = new ArrayList<IndexedWord>(newGraph.getRoots());
    List<IndexedWord> newRoots = new ArrayList<IndexedWord>();
    // TODO: we are using vertexListSorted here because we're changing
    // vertices while iterating.  Perhaps there is a better way to do it.
    for (IndexedWord node : newGraph.vertexListSorted()) {
      IndexedWord newWord = new IndexedWord(node);
      newWord.setSentIndex(newSentIndex);
      SemanticGraphUtils.replaceNode(newWord, node, newGraph);
      if (prevRoots.contains(node))
        newRoots.add(newWord);
    }
    newGraph.setRoots(newRoots);

        nextMatch = nodeMatchCandidateIterator.next();
        // System.err.println("going to next match: " + nextMatch.word() + " " +
        // myNode.descString + " " + myNode.isLink);
        if (myNode.descString.equals("{}") && myNode.isLink) {
          IndexedWord otherNode = namesToNodes.get(myNode.name);
          if (otherNode != null) {
            if (otherNode.equals(nextMatch)) {
              if (myNode.negDesc) {
                continue;
              } else {
                finished = false;
                break;

    // Now that a terminals to terminals map has been generated, account for the
    // tree non-terminals.
    for (Tree nonTerm : tree) {
      if (!nonTerm.isLeaf()) {
        IndexedWord bestNode = null;
        int bestScore = 99999;
        for (Tree curr : nonTerm) {
          IndexedWord equivNode = map.get(new PositionedTree(curr, tree));
          if ((equivNode == null) || !depthMap.containsKey(equivNode)) continue;
          int currScore = depthMap.get(equivNode);
          if (currScore < bestScore) {
            bestScore = currScore;
            bestNode = equivNode;

      // sentences such as "I went over the river and through the woods" have
      // copys for "went" in the collapsed dependencies
      TwoDimensionalMap<Integer, Integer, IndexedWord> nodeMap = TwoDimensionalMap.hashMap();
      for (IntermediateNode in: nodes){
        CoreLabel token = sentence.get(in.index - 1); // index starts at 1!
        IndexedWord word;
        if (in.copyAnnotation > 0) {
          // TODO: if we make a copy wrapper CoreLabel, use it here instead
          word = new IndexedWord(new CoreLabel(token));
          word.setCopyCount(in.copyAnnotation);
        } else {
          word = new IndexedWord(token);
        }
       
        // for backwards compatibility - new annotations should have
        // these fields set, but annotations older than August 2014 might not
        if (word.docID() == null && in.docId != null) {
          word.setDocID(in.docId);
        }
        if (word.sentIndex() < 0 && in.sentIndex >= 0) {
          word.setSentIndex(in.sentIndex);
        }
        if (word.index() < 0 && in.index >= 0) {
          word.setIndex(in.index);
        }     
       
        nodeMap.put(word.index(), word.copyCount(), word);
        graph.addVertex(word);
        if (in.isRoot) {
          graph.addRoot(word);
        }
      }
     
      // add all edges to the actual graph
      for(IntermediateEdge ie: edges){
        IndexedWord source = nodeMap.get(ie.source, ie.sourceCopy);
        if (source == null) {
          throw new RuntimeIOException("Failed to find node " + ie.source + "-" + ie.sourceCopy);
        }
        IndexedWord target = nodeMap.get(ie.target, ie.targetCopy);
        if (target == null) {
          throw new RuntimeIOException("Failed to find node " + ie.target + "-" + ie.targetCopy);
        }
        assert(target != null);
        synchronized (LOCK) {

  }

  protected static void crawl(IndexedWord vertex, SemanticGraph sg, Set<IndexedWord> seenVerts) {
    seenVerts.add(vertex);
    for (SemanticGraphEdge edge : sg.incomingEdgeIterable(vertex)) {
      IndexedWord gov = edge.getGovernor();
      if (!seenVerts.contains(gov)) {
        crawl(gov, sg, seenVerts);
      }
    }

    for (SemanticGraphEdge edge : sg.outgoingEdgeIterable(vertex)) {
      IndexedWord dep = edge.getDependent();
      if (!seenVerts.contains(dep)) {
        crawl(dep, sg, seenVerts);
      }
    }
  }

  }


  @Override
  public void evaluate(SemanticGraph sg, SemgrexMatcher sm) {
    IndexedWord seedNode = getNamedNode(destroyNodeName, sm);
    // TODO: do not execute if seedNode if not in graph (or just error?)
    if (sg.containsVertex(seedNode)) {
      Set<IndexedWord> nodesToDestroy = crawl(seedNode, sg);
      for (IndexedWord node : nodesToDestroy) {
        sg.removeVertex(node);

      max = in.getIndex() > max ? in.getIndex() : max;
    }
    TwoDimensionalMap<Integer, Integer, IndexedWord> nodes = TwoDimensionalMap.hashMap();
    for(CoreNLPProtos.DependencyGraph.Node in: proto.getNodeList()){
      CoreLabel token = sentence.get(in.getIndex() - 1); // index starts at 1!
      IndexedWord word;
      if (in.hasCopyAnnotation() && in.getCopyAnnotation() > 0) {
        // TODO: if we make a copy wrapper CoreLabel, use it here instead
        word = new IndexedWord(new CoreLabel(token));
        word.set(CopyAnnotation.class, in.getCopyAnnotation());
      } else {
        word = new IndexedWord(token);
      }

      // for backwards compatibility - new annotations should have
      // these fields set, but annotations older than August 2014 might not
      if (word.docID() == null && docid != null) {
        word.setDocID(docid);
      }
      if (word.sentIndex() < 0 && in.getSentenceIndex() >= 0) {
        word.setSentIndex(in.getSentenceIndex());
      }
      if (word.index() < 0 && in.getIndex() >= 0) {
        word.setIndex(in.getIndex());
      }     

      assert in.getIndex() == word.index();
      nodes.put(in.getIndex(), in.getCopyAnnotation(), word);
      graph.addVertex(word);
    }

    // add all edges to the actual graph
    for(CoreNLPProtos.DependencyGraph.Edge ie: proto.getEdgeList()){
      IndexedWord source = nodes.get(ie.getSource(), ie.getSourceCopy());
      assert(source != null);
      IndexedWord target = nodes.get(ie.getTarget(), ie.getTargetCopy());
      assert(target != null);
      synchronized (globalLock) {
        // this is not thread-safe: there are static fields in GrammaticalRelation
        assert ie.hasDep();
        GrammaticalRelation rel = GrammaticalRelation.valueOf(ie.getDep(), fromProto(ie.getLanguage()));

  }

  public static Map<IndexedWord,List<TypedDependency>> govToDepMap(List<TypedDependency> deps) {
    Map<IndexedWord,List<TypedDependency>> govToDepMap = Generics.newHashMap();
    for (TypedDependency dep : deps) {
      IndexedWord gov = dep.gov();

      List<TypedDependency> depList = govToDepMap.get(gov);
      if (depList == null) {
        depList = new ArrayList<TypedDependency>();
        govToDepMap.put(gov, depList);

TOP

Related Classes of edu.stanford.nlp.ling.IndexedWord

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc. and is owned by Oracle, Inc. Contact: coftware#gmail.com.