Package edu.umd.cloud9.util.map

Examples of edu.umd.cloud9.util.map.HMapID
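Judging from the test snippets below, HMapID is a hash map specialized for primitive int keys and double values (MapID is its interface, assumed here to live in the same package), supporting put/get/containsKey/remove/size as well as vector-style operations such as plus, dot, length, and normalize. A minimal usage sketch:

import edu.umd.cloud9.util.map.HMapID;
import edu.umd.cloud9.util.map.MapID;

public class HMapIDSketch {
  public static void main(String[] args) {
    MapID map = new HMapID();
    map.put(1, 5.0);                         // primitive int key, double value
    map.put(2, 22.0);

    System.out.println(map.get(1));          // 5.0
    System.out.println(map.containsKey(3));  // false

    map.remove(1);                           // returns the removed value
    System.out.println(map.size());          // 1
  }
}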


      int e = entry.getRightElement();
      String eTerm = eVocab_f2e.get(e);

      //      LOG.info("Pr("+eTerm+"|"+token+")="+probEF);

      if (probEF > 0 && e > 0 && !docLangTokenizer.isStopWord(eTerm)
          && (translateOnly == null || !translateOnly.equals("indri")
              || indriPuncPattern.matcher(eTerm).matches())
          && (pairsInSCFG == null || pairsInSCFG.contains(new PairOfStrings(token, eTerm)))) {
        // Assuming our bilingual dictionary is learned from normally segmented text, but we want to
        // use a bigram tokenizer for CLIR purposes, we need to convert the translations of each
        // source token into a sequence of bigrams and distribute the translation probability
        // equally to each bigram.
        if (bigramSegment) {
          String[] eTokens = docLangTokenizer.processContent(eTerm);
View Full Code Here
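As a self-contained illustration of that equal split, here is a small sketch; the bigrams and the probability are made-up values, and a plain HashMap stands in for the HMapSFW used elsewhere in this code:

import java.util.HashMap;
import java.util.Map;

public class BigramSplitSketch {
  public static void main(String[] args) {
    // Suppose the source token translates to "abc" with probability 0.6,
    // and the bigram tokenizer segments "abc" into {"ab", "bc"}.
    String[] eTokens = { "ab", "bc" };
    float probEF = 0.6f;

    // Distribute the translation probability equally across the bigrams.
    Map<String, Float> bigram2Prob = new HashMap<>();
    float share = probEF / eTokens.length;
    for (String bigram : eTokens) {
      bigram2Prob.merge(bigram, share, Float::sum);  // accumulate if a bigram repeats
    }
    System.out.println(bigram2Prob);                 // {ab=0.3, bc=0.3} (order not guaranteed)
  }
}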


    }

    // In an SCFG rule such as "a b X1 X2 c --> X1 d e X2 f", we want to find the src/trg tokens that
    // are aligned to some trg/src token, ignoring the X variables. We can then decide whether to
    // include them as a multi-token phrase in our query representation based on various heuristics
    // (e.g., only include the phrase if there is no X in between the tokens).
    String fPhrase = "";
    ArrayListOfInts sourceTokenIds = new ArrayListOfInts();     
    ArrayListOfInts targetTokenIds = new ArrayListOfInts();
    int f=0;
    for (; f < lhs.length; f++) {
      String fTerm = lhs[f];
      if (queryLangTokenizer.isStopWord(fTerm) || fTerm.matches("\\[X,\\d+\\]") || fTerm.matches("<s>") || fTerm.matches("</s>")) {
        continue;
      }

      srcTokenCnt.increment(fTerm);
      sourceTokenIds.add(f);

      ArrayListOfInts ids;
      if (isPassThrough) {
        ids = new ArrayListOfInts();
        ids.add(0);
      } else {
        ids = one2manyAlign.get(f);
      }

      if (ids == null || (isOne2Many == 0 && ids.size() > 1)) {
        continue;
      }

      // find phrase in LHS and match to phrase in RHS
      if (isMany2Many) {
        fPhrase += fTerm + " ";
        targetTokenIds = targetTokenIds.mergeNoDuplicates(ids);       
      }

      String eTerm = null;
      for (int e : ids) {
        eTerm = rhs[e];

        // Assumption: if this is a pass-through rule, re-stem the token in the doc language.
        if (isPassThrough || (unknownWords != null && unknownWords.contains(fTerm))) {
          eTerm = stemmed2Stemmed.get(eTerm);
        }

        if (eTerm == null || docLangTokenizer.isStopWord(eTerm)) {
          //          LOG.info("Skipped trg token " + eTerm);
          eTerm = null;
          continue;     
        }
        bagOfTargetTokens.add(eTerm);
        if (isOne2Many <= 1) {
          if (probDist.containsKey(fTerm)) {
            HMapSFW eToken2Prob = probDist.get(fTerm);
            eToken2Prob.increment(eTerm, weight);
          } else {
            HMapSFW eToken2Prob = new HMapSFW();
            eToken2Prob.put(eTerm, weight);
            probDist.put(fTerm, eToken2Prob);
          }
        }
      }

      if (isOne2Many == 2) {
        // If ids.size() > 1, eTerm is a multi-token expression. Even though eTerm gets
        // overwritten here, we still needed the loop above to update bagOfTargetTokens.
        if (ids.size() > 1) {
          eTerm = isConsecutiveWithStopwords(ids, rhs, docLangTokenizer);     // <---- heuristic
        }

        // No proper translation on the target side (e.g., a stopword or a non-consecutive
        // multi-word translation), so skip this phrase.
        if (eTerm == null) {
View Full Code Here
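The heuristic above relies on isConsecutiveWithStopwords, which also tolerates stopwords inside the span. For illustration only, the hypothetical joinIfConsecutive below shows the core idea: accept the aligned target ids as a phrase only if they form a consecutive span of the rule's right-hand side.

public class ConsecutiveSpanSketch {
  // Return the joined target phrase if the (already sorted) aligned ids form a
  // consecutive span of the rule's right-hand side, otherwise null.
  static String joinIfConsecutive(int[] sortedIds, String[] rhs) {
    for (int i = 1; i < sortedIds.length; i++) {
      if (sortedIds[i] != sortedIds[i - 1] + 1) {
        return null;                               // gap in the span: skip this phrase
      }
    }
    StringBuilder sb = new StringBuilder();
    for (int id : sortedIds) {
      sb.append(rhs[id]).append(' ');
    }
    return sb.toString().trim();
  }

  public static void main(String[] args) {
    String[] rhs = { "X1", "d", "e", "X2", "f" };
    System.out.println(joinIfConsecutive(new int[] { 1, 2 }, rhs));  // "d e"
    System.out.println(joinIfConsecutive(new int[] { 1, 3 }, rhs));  // null (gap at index 2)
  }
}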

      String[] alPair = alignment.split("-");
      int f = Integer.parseInt(alPair[0]);
      int e = Integer.parseInt(alPair[1]);

      if (!one2manyAlign.containsKey(f)) {
        one2manyAlign.put(f, new ArrayListOfInts());
      }
      one2manyAlign.get(f).add(e);
    }

    // for each source token id, sort ids of its translations in ascending order
    for (Integer f : one2manyAlign.keySet()) {
      ArrayListOfInts lst = one2manyAlign.get(f);
      lst.sort();
      one2manyAlign.put(f, lst);
    }

    return one2manyAlign;
  }
View Full Code Here
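To make the effect concrete: for a hypothetical alignment string such as "0-1 0-3 1-2", the returned map would be {0 -> [1, 3], 1 -> [2]}, i.e. each source token id points to the ascending list of target token ids it is aligned to.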

      // Remember, token positions are numbered starting from one...
      if (positions.containsKey(term)) {
        positions.get(term).add(i + 1);
      } else {
        ArrayListOfInts l = new ArrayListOfInts();
        l.add(i + 1);
        positions.put(term, l);
      }
    }

    int doclength = 0;
    Iterator<Map.Entry<String, ArrayListOfInts>> it = positions.entrySet().iterator();
    Map.Entry<String, ArrayListOfInts> e;
    ArrayListOfInts positionsList;
    while (it.hasNext()) {
      e = it.next();
      positionsList = e.getValue();

      // We're storing tfs as shorts, so check for overflow...
      if (positionsList.size() >= TF_CUT) {
        // There are a few ways to handle this... If we're getting such a high tf, then it most
        // likely means that this is a junk doc.
        LOG.warn("Error: tf of " + e.getValue()
            + " will overflow max short value. docno=" + doc.getDocid() + ", term="
            + e.getKey());
        it.remove();
      } else {
        positionsList.trimToSize();
        doclength += positionsList.size();
      }
    }

    if (positions.size() == 0) {
      return positions;
    }

    positions.put("", new ArrayListOfInts(new int[] { doclength }));
    return positions;
  }
View Full Code Here
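To make the returned layout concrete: for a hypothetical three-token document "a b a" (with no stopwords and TF_CUT not reached), the map would be {"a" -> [1, 3], "b" -> [2], "" -> [3]}, i.e. one-based positions per term, plus the document length stored under the empty-string key.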

  public void testBasic1() {
    int size = 100000;
    Random r = new Random();
    double[] doubles = new double[size];

    MapID map = new HMapID();
    for (int i = 0; i < size; i++) {
      int k = r.nextInt(size);
      map.put(i, k + 0.1);
      doubles[i] = k + 0.1;
    }

    for (int i = 0; i < size; i++) {
      double v = map.get(i);

      assertEquals(doubles[i], v, 0.0);
      assertTrue(map.containsKey(i));
    }
  }
View Full Code Here

  public void testUpdate() {
    int size = 100000;
    Random r = new Random();
    double[] doubles = new double[size];

    MapID map = new HMapID();
    for (int i = 0; i < size; i++) {
      int k = r.nextInt(size);
      map.put(i, k + 0.1);
      doubles[i] = k + 0.1;
    }

    assertEquals(size, map.size());

    for (int i = 0; i < size; i++) {
      map.put(i, doubles[i] + 1.0);
    }

    assertEquals(size, map.size());

    for (int i = 0; i < size; i++) {
      double v = map.get(i);

      assertEquals(doubles[i] + 1.0, v, 0.0);
      assertTrue(map.containsKey(i));
    }
  }
View Full Code Here

    }
  }

  @Test
  public void testBasic() throws IOException {
    HMapID m = new HMapID();

    m.put(1, 5.0);
    m.put(2, 22.0);

    double value;

    assertEquals(m.size(), 2);

    value = m.get(1);
    assertTrue(value == 5.0);

    value = m.remove(1);
    assertEquals(m.size(), 1);

    value = m.get(2);
    assertTrue(value == 22.0);
  }
View Full Code Here


  @Test
  public void testPlus() throws IOException {
    HMapID m1 = new HMapID();

    m1.put(1, 5.0);
    m1.put(2, 22.0);

    HMapID m2 = new HMapID();

    m2.put(1, 4.0);
    m2.put(3, 5.0);

    m1.plus(m2);

    assertEquals(m1.size(), 3);
    assertTrue(m1.get(1) == 9);
View Full Code Here

    assertTrue(m1.get(3) == 5);
  }
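In other words, plus() adds the values of keys present in both maps (key 1: 5.0 + 4.0 = 9.0) and copies keys that appear only in the argument (key 3: 5.0), which is why the merged map ends up with three entries.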

  @Test
  public void testDot() throws IOException {
    HMapID m1 = new HMapID();

    m1.put(1, 2.3);
    m1.put(2, 1.9);
    m1.put(3, 3.0);

    HMapID m2 = new HMapID();

    m2.put(1, 1.2);
    m2.put(2, 4.3);
    m2.put(4, 5.0);

    double s = m1.dot(m2);

    assertTrue(s == 10.93);
  }
View Full Code Here
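Only keys present in both maps contribute to the dot product: 2.3 × 1.2 + 1.9 × 4.3 = 2.76 + 8.17 = 10.93; keys 3 and 4 have no counterpart and are ignored.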


  @Test
  public void testLengthAndNormalize() throws IOException {
    HMapID m1 = new HMapID();

    m1.put(1, 2.3);
    m1.put(2, 1.9);
    m1.put(3, 3.0);

    assertEquals(m1.length(), 4.2308393, 10E-6);

    m1.normalize();

    assertEquals(m1.get(1), 0.5436274, 10E-6);
    assertEquals(m1.get(2), 0.44908348, 10E-6);
    assertEquals(m1.get(3), 0.70907915, 10E-6);
    assertEquals(m1.length(), 1, 10E-6);

    HMapID m2 = new HMapID();

    m2.put(1, 1.2);
    m2.put(2, 4.3);
    m2.put(3, 5.0);

    assertEquals(m2.length(), 6.7029843, 10E-6);

    m2.normalize();

    assertEquals(m2.get(1), 0.17902474, 10E-6);
    assertEquals(m2.get(2), 0.64150536, 10E-6);
    assertEquals(m2.get(3), 0.7459364, 10E-6);
    assertEquals(m2.length(), 1, 10E-6);
  }
View Full Code Here
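Here length() is the Euclidean norm, e.g. for m1: sqrt(2.3² + 1.9² + 3.0²) = sqrt(17.90) ≈ 4.2308393, and normalize() divides every value by that length (2.3 / 4.2308393 ≈ 0.5436274, etc.), so the normalized map has length 1.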
