Package edu.umd.cloud9.io.map

Examples of edu.umd.cloud9.io.map.HMapSFW
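
HMapSFW is a map from String keys to float values that implements Hadoop's Writable interface (the name encodes String key, Float value, Writable); the snippets on this page use it as a weighted term/document vector. A minimal sketch of the basic calls that recur below (put, get, keySet, isEmpty):

import edu.umd.cloud9.io.map.HMapSFW;

public class HMapSFWDemo {
  public static void main(String[] args) {
    HMapSFW vector = new HMapSFW();
    vector.put("hello", 0.5f);   // term -> weight
    vector.put("world", 1.5f);

    for (String term : vector.keySet()) {
      System.out.println(term + " -> " + vector.get(term));
    }
    System.out.println("empty? " + vector.isEmpty());  // false
  }
}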


  // ... (tail of a counting helper; the preceding lines are truncated in this snippet)
    }
    return cnt;
  }

  private static HMapSFW array2Map(String[] array) {
    HMapSFW map = new HMapSFW();
    for (int i = 0; i < array.length; i += 2) {
      map.put(array[i], Float.parseFloat(array[i+1]));
    }
    return map;
  }
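A quick illustration of array2Map with hypothetical input, where the array alternates between a term and its weight:

    // Hypothetical input: alternating term/weight strings.
    String[] pairs = { "hello", "0.5", "world", "1.5" };
    HMapSFW map = array2Map(pairs);
    System.out.println(map.get("world"));  // prints 1.5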


    public void map(IntWritable docno, HMapSFW docvector,
        OutputCollector<IntWritable, PairOfFloatInt> output, Reporter reporter) throws IOException {
      for (int i = 0; i < vectors.size(); i++) {
        reporter.incrCounter(Pairs.Total, 1);
        IntWritable sampleDocno = (IntWritable) vectors.get(i).getLeftElement();
        HMapSFW fromSample = (HMapSFW) vectors.get(i).getRightElement();

        float cs = CLIRUtils.cosine(docvector, fromSample);      
        if (cs >= threshold) {
          sLogger.debug(sampleDocno + "," + fromSample + "\n" + fromSample.length());
          sLogger.debug(docno + "," + docvector + "\n" + docvector.length());
          sLogger.debug(cs);
          reporter.incrCounter(Pairs.Emitted, 1);
          output.collect(new IntWritable(sampleDocno.get()), new PairOfFloatInt(cs, docno.get()));
        }
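The mapper above compares an incoming document vector against a sample of vectors and emits pairs whose cosine similarity clears a threshold. For reference, a cosine over two HMapSFW vectors can be sketched as follows; this is an illustrative stand-in, not the actual CLIRUtils.cosine implementation:

import edu.umd.cloud9.io.map.HMapSFW;

public class CosineSketch {
  // Illustrative cosine similarity: dot product over shared terms,
  // divided by the product of the two vectors' L2 norms.
  public static float cosine(HMapSFW a, HMapSFW b) {
    float dot = 0f, normA = 0f, normB = 0f;
    for (String term : a.keySet()) {
      float wa = a.get(term);
      normA += wa * wa;
      if (b.containsKey(term)) {
        dot += wa * b.get(term);
      }
    }
    for (String term : b.keySet()) {
      float wb = b.get(term);
      normB += wb * wb;
    }
    if (normA == 0f || normB == 0f) {
      return 0f;  // avoid division by zero for empty vectors
    }
    return (float) (dot / (Math.sqrt(normA) * Math.sqrt(normB)));
  }
}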

      int tf = entry.getValue();
      // transTermTf won't be updated if fTerm is not in the vocabulary
      transTermTf = CLIRUtils.updateTFsByTerm(fTerm, tf, transTermTf, eVocabSrc, eVocabTrg, fVocabSrc, fVocabTrg, e2f_Probs, f2e_Probs, eTok, sLogger);
    }

    HMapSFW weightedVector = CLIRUtils.createTermDocVector(terms.length, transTermTf, eVocabTrg, fScoreFn, dict, dfTable, true, sLogger);

    // don't count numbers toward the min #terms constraint, since Wikipedia has
    // "sentences" full of numbers that don't make any sense
    int numNonNumbers = 0;
    for (String term : weightedVector.keySet()) {
      if (!term.matches("\\d+")) {
        numNonNumbers++;
      }
    }
    if (numNonNumbers < MinVectorTerms) {
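The filter relies on String.matches, which anchors the pattern to the whole term, so "\d+" is true only for terms made up entirely of digits:

public class NumberFilterDemo {
  public static void main(String[] args) {
    System.out.println("1984".matches("\\d+"));  // true  -> filtered out
    System.out.println("x86".matches("\\d+"));   // false -> counted
    System.out.println("3.14".matches("\\d+"));  // false -> counted ('.' is not a digit)
  }
}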

  public HMapSFW createEDocVector(String sentence) {
    return createEDocVector(sentence, new HMapSIW());
  }

  public HMapSFW createEDocVector(String sentence, HMapSIW term2Tf) {
    HMapSFW weightedVector = new HMapSFW();
    String[] terms = eTok.processContent(sentence);

    for (String term : terms) {
      term2Tf.increment(term);
    }

    weightedVector = CLIRUtils.createTermDocVector(terms.length, term2Tf, eScoreFn, dict, dfTable, true, sLogger);
    // don't count numbers toward the min #terms constraint, since Wikipedia has
    // "sentences" full of numbers that don't make any sense
    int numNonNumbers = 0;
    for (String term : weightedVector.keySet()) {
      if (!term.matches("\\d+")) {
        numNonNumbers++;
      }
    }
    if (numNonNumbers < MinVectorTerms) {
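A hypothetical call site for the two-argument overload, where builder stands in for an instance of the enclosing class; the passed-in HMapSIW is filled with the raw term frequencies as a side effect, while the returned HMapSFW holds the weighted vector:

    // 'builder' is a hypothetical instance of the enclosing class.
    HMapSIW termFreqs = new HMapSIW();
    HMapSFW vector = builder.createEDocVector("a sample english sentence", termFreqs);
    // vector: weighted scores per term; termFreqs: raw counts from tokenization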

          if (sent.contains("date:") || sent.contains("jpg") || sent.contains("png") || sent.contains("gif") || sent.contains("fontsize:") || sent.contains("category:")) {
            continue;
          }
          int length = eTok.getNumberTokens(sent);
          if (length >= MinSentenceLength) {
            HMapSFW vector = createEDocVector(sent.toString());
            if (vector != null) {
              vectors.add(vector);
              sentences.add(new Text(sent));
              if (sentLengths != null) sentLengths.add(length);
            }

          if (sent.contains("datei:") || sent.contains("jpg") || sent.contains("png") || sent.contains("fontsize:") || sent.contains("kategorie:")) {
            continue;
          }
          int length = fTok.getNumberTokens(sent);
          if (length >= MinSentenceLength) {
            HMapSFW vector = createFDocVector(sent);
            if (vector != null) {
              vectors.add(vector);
              sentences.add(new Text(sent));
              sLogger.debug("added="+vector);
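The two blocks above are the English and German variants of the same pre-filter: skip sentences that look like leftover wiki/image markup, then keep only sentences long enough to yield a useful vector. A sketch of the shared shape, with the marker list taken from the English snippet:

import java.util.Arrays;
import java.util.List;

public class MarkupFilter {
  // Language-specific marker list: the German variant uses "datei:" and
  // "kategorie:" in place of "date:" and "category:".
  private static final List<String> MARKERS =
      Arrays.asList("date:", "jpg", "png", "gif", "fontsize:", "category:");

  public static boolean looksLikeMarkup(String sent) {
    for (String marker : MARKERS) {
      if (sent.contains(marker)) {
        return true;
      }
    }
    return false;
  }
}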

        docLen = CLIRUtils.translateTFs(deDoc, tfS, eVocabSrc, eVocabTrg, fVocabSrc,
            fVocabTrg, e2f_Probs, f2e_Probs, tokenizer, null);   // tokenizer is used only for its stopword list
      } catch (IOException e) {
        e.printStackTrace();
      }
      HMapSFW v = CLIRUtils.createTermDocVector(docLen, tfS, eVocabTrg, mModel, dfE, true, null);
      // System.out.println("f"+(n++)+" : " + v);

      transDocs.add(v);
    }

    mModel.setAvgDocLength(avgLen);

    // tf-idf computation
    List<HMapSFW> docVectors = new ArrayList<HMapSFW>();
    for (HMapSIW enDoc : term2tfVectors) {
      HMapSFW v = new HMapSFW();
      int docLen = 0;
      for (Entry<String> item : enDoc.entrySet()) {
        int tf = item.getValue();
        docLen += tf;
      }
      float sum2 = 0;
      for (Entry<String> item : enDoc.entrySet()) {
        String term = item.getKey();
        int tf = item.getValue();
        int df = dfTable.get(term);
        mModel.setDF(df);
        float score = mModel.computeDocumentWeight(tf, docLen);
        if (score > 0) {
          v.put(term, score);
          sum2 += score * score;
        }
      }

      // normalize
      sum2 = (float) Math.sqrt(sum2);
      for (edu.umd.cloud9.util.map.MapKF.Entry<String> e : v.entrySet()) {
        float score = v.get(e.getKey());
        v.put(e.getKey(), score / sum2);
      }

      docVectors.add(v);
    }

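Because each score is divided by sqrt(sum2), every vector leaving this loop has unit L2 norm, so a later cosine similarity between two of these vectors reduces to a plain dot product over their shared terms. A small helper to check that invariant, using only the calls seen above:

import edu.umd.cloud9.io.map.HMapSFW;

public class NormCheck {
  // Returns the squared L2 norm; ~1.0f for a normalized vector,
  // up to float rounding.
  public static float squaredNorm(HMapSFW v) {
    float sum = 0f;
    for (edu.umd.cloud9.util.map.MapKF.Entry<String> e : v.entrySet()) {
      float w = v.get(e.getKey());
      sum += w * w;
    }
    return sum;
  }
}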

    int cnt = 0;
    String label;
    long time = System.currentTimeMillis();

    for (int i = 0; i < transVectors.size(); i++) {
      HMapSFW transVector = transVectors.get(i);
      HMapSIW fTfMap = fTfs.get(i);
      String fSent = fSents.get(i);
      for (int j = 0; j < eVectors.size(); j++) {
        HMapSFW eVector = eVectors.get(j);
        HMapSIW eTfMap = eTfs.get(j);
        String eSent = eSents.get(j);
        if (parallelPairs.get(i) == j) {
          label = "parallel";
        } else {

      // We simply use the source-language doc length since the ratio of doc length to average doc
      // length is unlikely to change significantly (not worth complicating the pipeline)
      int docLen = CLIRUtils.translateTFs(doc, tfS, eVocabSrc, eVocabTrg, fVocabSrc, fVocabTrg,
          e2f_Probs, f2e_Probs, tokenizer, LOG);

      HMapSFW v = CLIRUtils.createTermDocVector(docLen, tfS, eVocabTrg, model, dict, dfTable,
          isNormalize, LOG);

      // If no translation of any word is in the target vocab, drop the document,
      // i.e., our model wasn't capable of translating it.
      if (v.isEmpty()) {
        reporter.incrCounter(Docs.ZERO, 1);
      } else {
        reporter.incrCounter(Docs.Total, 1);
        output.collect(docno, v);
      }
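The mapper above emits (IntWritable docno, HMapSFW vector) pairs, which matches the input signature of the similarity mapper shown earlier. Assuming the job writes a plain SequenceFile, the output can be inspected with the old Hadoop API (consistent with the OutputCollector/Reporter style of these snippets; the path is hypothetical):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.SequenceFile;

import edu.umd.cloud9.io.map.HMapSFW;

public class ReadVectors {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // Hypothetical output path of the vector-building job.
    Path path = new Path("docvectors/part-00000");

    SequenceFile.Reader reader = new SequenceFile.Reader(fs, path, conf);
    IntWritable docno = new IntWritable();
    HMapSFW vector = new HMapSFW();
    while (reader.next(docno, vector)) {
      System.out.println(docno.get() + " -> " + vector);
    }
    reader.close();
  }
}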
