Package gnu.trove

Examples of gnu.trove.TIntIntHashMap

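The snippets below are excerpted from MALLET and exercise Trove's primitive int-to-int map. As a quick orientation, here is a minimal, self-contained sketch of the TIntIntHashMap operations they rely on; it assumes a Trove 2.x jar on the classpath, and note that in this Trove version get() on a missing key returns 0 (setDefaultValue is no longer supported, as comments in the snippets below mention).

    import gnu.trove.TIntIntHashMap;

    public class TroveIntIntDemo {
      public static void main (String[] args) {
        TIntIntHashMap counts = new TIntIntHashMap ();

        counts.put (7, 1);                  // insert key 7 -> 1
        counts.adjustOrPutValue (7, 1, 1);  // key present: adjust by +1 (now 2)
        counts.adjustOrPutValue (9, 1, 1);  // key absent: insert initial value 1

        System.out.println (counts.get (7));          // 2
        System.out.println (counts.get (42));         // 0 -- missing keys read as 0
        System.out.println (counts.containsKey (42)); // false; use this when 0 is a real value

        counts.adjustValue (7, -1);  // decrement an existing entry
        counts.remove (9);           // drop the entry entirely

        for (int key : counts.keys ()) {  // keys() snapshots the keys into an int[]
          System.out.println (key + " -> " + counts.get (key));
        }
      }
    }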

    for (int i = 0; i < n; i++) { factorsByVar[i] = new ArrayList (); }
    vertexPots = new List [n];
    my2global = new int [n];

    if (projectionMap == null) {
      projectionMap = new TIntIntHashMap (n);
      // projectionMap.setDefaultValue (-1);
    } else {
      projectionMap.ensureCapacity (n);
    }
   
View Full Code Here


  {
    Set survivors = new THashSet (variablesSet ());
    survivors.remove (victim);

    int vi = 0;
    TIntIntHashMap dict = new TIntIntHashMap (survivors.size ());
    // dict.setDefaultValue (-1): no longer supported in this Trove version; getIndex() is written to avoid needing it.
    my2global = new int[survivors.size ()];

    for (Iterator it = survivors.iterator (); it.hasNext();) {
      Variable var = (Variable) it.next ();
      int gvi = var.getIndex ();
      dict.put (gvi, vi);
      my2global [vi] = gvi;
      vi++;  // advance the local index for the next survivor
    }

    projectionMap = dict;
    numNodes--;  // do this at end b/c it affects getVertexSet()
View Full Code Here
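The snippet above pairs a TIntIntHashMap (global variable index -> local index) with a plain int[] (local -> global). Because this Trove version no longer supports setDefaultValue(-1), get() alone cannot distinguish "not projected" from a mapped value of 0. A small hypothetical helper (not part of the MALLET source) makes the lookup pattern explicit:

    // Hypothetical helper, for illustration only: containsKey() stands in
    // for the unsupported setDefaultValue(-1) sentinel.
    int localIndexOf (TIntIntHashMap dict, int globalIndex) {
      if (!dict.containsKey (globalIndex)) {
        return -1;  // not in the projection, formerly signalled by the map's default value
      }
      return dict.get (globalIndex);
    }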

  private TIntIntHashMap constraintIndices;
  private boolean normalize;
 
  public MaxEntL2FLPRConstraints(int numFeatures, int numLabels, boolean useValues, boolean normalize) {
    super(numFeatures, numLabels, useValues);
    this.constraintIndices = new TIntIntHashMap();
    this.normalize = normalize;
  }
View Full Code Here

  private void sampleTopicsForOneTestDocAll(FeatureSequence tokenSequence,
      LabelSequence topicSequence) {
    int[] oneDocTopics = topicSequence.getFeatures();

    TIntIntHashMap currentTypeTopicCounts;
    int type, oldTopic, newTopic;
    double tw;
    double[] topicWeights = new double[numTopics];
    double topicWeightsSum;
    int docLength = tokenSequence.getLength();

    // populate topic counts (a new int[] is already zero-initialized)
    int[] localTopicCounts = new int[numTopics];
    for (int position = 0; position < docLength; position++) {
      localTopicCounts[oneDocTopics[position]] ++;
    }

    // Iterate over the positions (words) in the document
    for (int si = 0; si < docLength; si++) {
      type = tokenSequence.getIndexAtPosition(si);
      oldTopic = oneDocTopics[si];

      // Remove this token from all counts
      localTopicCounts[oldTopic] --;

      currentTypeTopicCounts = typeTopicCounts[type];
      assert(currentTypeTopicCounts.get(oldTopic) > 0);  // the token is currently counted under oldTopic

      if (currentTypeTopicCounts.get(oldTopic) == 1) {
        currentTypeTopicCounts.remove(oldTopic);
      }
      else {
        currentTypeTopicCounts.adjustValue(oldTopic, -1);
      }
      tokensPerTopic[oldTopic]--;

      // Build a distribution over topics for this token
      Arrays.fill (topicWeights, 0.0);
      topicWeightsSum = 0;

      for (int ti = 0; ti < numTopics; ti++) {
        tw = ((currentTypeTopicCounts.get(ti) + beta) / (tokensPerTopic[ti] + betaSum))
              * (localTopicCounts[ti] + alpha[ti]); // denominator (docLength - 1 + tAlpha) omitted: constant across topics
        topicWeightsSum += tw;
        topicWeights[ti] = tw;
      }
      // Sample a topic assignment from this distribution
      newTopic = random.nextDiscrete (topicWeights, topicWeightsSum);

      // Put that new topic into the counts
      oneDocTopics[si] = newTopic;
      currentTypeTopicCounts.adjustOrPutValue(newTopic, 1, 1);
      localTopicCounts[newTopic] ++;
      tokensPerTopic[newTopic]++;
    }
  }
View Full Code Here
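These samplers all update typeTopicCounts with the same sparse-count discipline: an entry is removed outright when its count would reach zero, so each map only ever holds strictly positive counts and keys() reflects the sparse support exactly. Factored out as hypothetical helpers (for illustration; the samplers inline this logic):

    // Decrement a count, dropping the entry instead of storing a zero.
    void decrementCount (TIntIntHashMap counts, int key) {
      if (counts.get (key) == 1) {
        counts.remove (key);           // would become 0: remove the entry
      } else {
        counts.adjustValue (key, -1);  // stays positive: decrement in place
      }
    }

    // Increment a count, inserting 1 if the key is absent.
    void incrementCount (TIntIntHashMap counts, int key) {
      counts.adjustOrPutValue (key, 1, 1);
    }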

  private void sampleTopicsForOneTestDoc(FeatureSequence tokenSequence,
      LabelSequence topicSequence) {
    int[] oneDocTopics = topicSequence.getFeatures();

    TIntIntHashMap currentTypeTopicCounts;
    int type, oldTopic, newTopic;
    double tw;
    double[] topicWeights = new double[numTopics];
    double topicWeightsSum;
    int docLength = tokenSequence.getLength();

    // populate topic counts (a new int[] is already zero-initialized)
    int[] localTopicCounts = new int[numTopics];
    for (int position = 0; position < docLength; position++) {
      if(oneDocTopics[position] != -1) {
        localTopicCounts[oneDocTopics[position]] ++;
      }
    }

    // Iterate over the positions (words) in the document
    for (int si = 0; si < docLength; si++) {
      type = tokenSequence.getIndexAtPosition(si);
      oldTopic = oneDocTopics[si];
      if(oldTopic == -1) {
        continue;
      }

      // Remove this token from all counts
      localTopicCounts[oldTopic] --;
      currentTypeTopicCounts = typeTopicCounts[type];
      assert(currentTypeTopicCounts.get(oldTopic) > 0);  // the token is currently counted under oldTopic

      if (currentTypeTopicCounts.get(oldTopic) == 1) {
        currentTypeTopicCounts.remove(oldTopic);
      }
      else {
        currentTypeTopicCounts.adjustValue(oldTopic, -1);
      }
      tokensPerTopic[oldTopic]--;

      // Build a distribution over topics for this token
      Arrays.fill (topicWeights, 0.0);
      topicWeightsSum = 0;

      for (int ti = 0; ti < numTopics; ti++) {
        tw = ((currentTypeTopicCounts.get(ti) + beta) / (tokensPerTopic[ti] + betaSum))
              * (localTopicCounts[ti] + alpha[ti]); // denominator (docLength - 1 + tAlpha) omitted: constant across topics
        topicWeightsSum += tw;
        topicWeights[ti] = tw;
      }
      // Sample a topic assignment from this distribution
      newTopic = random.nextDiscrete (topicWeights, topicWeightsSum);

      // Put that new topic into the counts
      oneDocTopics[si] = newTopic;
      currentTypeTopicCounts.adjustOrPutValue(newTopic, 1, 1);
      localTopicCounts[newTopic] ++;
      tokensPerTopic[newTopic]++;
    }
  }
View Full Code Here
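random.nextDiscrete(topicWeights, topicWeightsSum) is MALLET's helper for drawing an index from an unnormalized discrete distribution. A minimal sketch of the same technique, using java.util.Random in place of MALLET's Randoms class:

    import java.util.Random;

    // Draw index i with probability weights[i] / sum.
    static int nextDiscrete (Random rng, double[] weights, double sum) {
      double sample = rng.nextDouble () * sum;  // uniform point in [0, sum)
      for (int i = 0; i < weights.length; i++) {
        sample -= weights[i];
        if (sample < 0) {
          return i;  // the point landed inside weight i's interval
        }
      }
      return weights.length - 1;  // guard against floating-point round-off
    }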

  private void sampleTopicsForOneDocWithTheta(FeatureSequence tokenSequence,
      LabelSequence topicSequence, double[] topicDistribution) {
    int[] oneDocTopics = topicSequence.getFeatures();

    TIntIntHashMap currentTypeTopicCounts;
    int type, oldTopic, newTopic;
    double tw;
    double[] topicWeights = new double[numTopics];
    double topicWeightsSum;
    int docLength = tokenSequence.getLength();
   
    // Iterate over the positions (words) in the document
    for (int si = 0; si < docLength; si++) {
      type = tokenSequence.getIndexAtPosition(si);
      oldTopic = oneDocTopics[si];
      if(oldTopic == -1) {
        continue;
      }

      currentTypeTopicCounts = typeTopicCounts[type];
      assert(currentTypeTopicCounts.get(oldTopic) > 0);  // the token is currently counted under oldTopic

      if (currentTypeTopicCounts.get(oldTopic) == 1) {
        currentTypeTopicCounts.remove(oldTopic);
      }
      else {
        currentTypeTopicCounts.adjustValue(oldTopic, -1);
      }
      tokensPerTopic[oldTopic]--;

      // Build a distribution over topics for this token
      Arrays.fill (topicWeights, 0.0);
      topicWeightsSum = 0;

      for (int ti = 0; ti < numTopics; ti++) {
        tw = ((currentTypeTopicCounts.get(ti) + beta) / (tokensPerTopic[ti] + betaSum))
              * topicDistribution[ti]; // document-topic weight supplied directly as theta
        topicWeightsSum += tw;
        topicWeights[ti] = tw;
      }
      // Sample a topic assignment from this distribution
      newTopic = random.nextDiscrete (topicWeights, topicWeightsSum);

      // Put that new topic into the counts
      oneDocTopics[si] = newTopic;
      currentTypeTopicCounts.adjustOrPutValue(newTopic, 1, 1);
      tokensPerTopic[newTopic]++;
    }
  }
View Full Code Here

  // provided in preprocess call
  protected TIntArrayList cache;

  public OneLabelL2PRConstraints(boolean normalized) {
    this.constraints = new TIntObjectHashMap<OneLabelPRConstraint>();
    this.constraintIndices = new TIntIntHashMap();
    this.cache = new TIntArrayList();
    this.normalized = normalized;
  }
View Full Code Here

    if (instances == null) { throw new IllegalStateException("You must load instances before you can count features"); }

    double[][] result = new double[ numTypes ][ 2 ];

    TIntIntHashMap docCounts = new TIntIntHashMap();
   
    for (Instance instance: instances) {
      FeatureSequence features = (FeatureSequence) instance.getData();
           
      for (int i=0; i<features.getLength(); i++) {
        docCounts.adjustOrPutValue(features.getIndexAtPosition(i), 1, 1);
      }
           
      int[] keys = docCounts.keys();
      for (int i = 0; i < keys.length; i++) {
        int feature = keys[i];
        result[feature][0] += docCounts.get(feature);
        result[feature][1]++;
      }
           
      docCounts.clear();  // reset the per-document counts for the next instance
           
    }
   
    return result;
  }
View Full Code Here
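The per-document pass above walks the map via keys(); Trove also supports a callback-style traversal with forEachEntry, which avoids materializing the key array. A sketch of the equivalent aggregation (assuming Trove's TIntIntProcedure interface):

    import gnu.trove.TIntIntHashMap;
    import gnu.trove.TIntIntProcedure;

    // Equivalent to the keys() loop above; returning true keeps the traversal going.
    static void accumulate (TIntIntHashMap docCounts, final double[][] result) {
      docCounts.forEachEntry (new TIntIntProcedure () {
        public boolean execute (int feature, int count) {
          result[feature][0] += count;  // total occurrences across documents
          result[feature][1]++;         // number of documents containing the feature
          return true;
        }
      });
    }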

    if (this.alphabet == null) {
      this.alphabet = alphabet;
      this.numTypes = alphabet.size();
      this.typeTopicCounts = new TIntIntHashMap[numTypes];
      for (int fi = 0; fi < numTypes; fi++)
        typeTopicCounts[fi] = new TIntIntHashMap();
      this.betaSum = beta * numTypes;
    } else if (alphabet != this.alphabet) {
      throw new IllegalArgumentException ("Cannot change Alphabet.");
    } else if (alphabet.size() != this.numTypes) {
      this.numTypes = alphabet.size();
      TIntIntHashMap[] newTypeTopicCounts = new TIntIntHashMap[numTypes];
      for (int i = 0; i < typeTopicCounts.length; i++)
        newTypeTopicCounts[i] = typeTopicCounts[i];
      for (int i = typeTopicCounts.length; i < numTypes; i++)
        newTypeTopicCounts[i] = new TIntIntHashMap();
      // Assign the grown array; without this the resize above has no effect.
      this.typeTopicCounts = newTypeTopicCounts;
      this.betaSum = beta * numTypes;
    }
    // else: nothing changed, nothing to be done
  }
View Full Code Here

  private void initializeTypeTopicCounts () {
    TIntIntHashMap[] newTypeTopicCounts = new TIntIntHashMap[numTypes];
    for (int i = 0; i < typeTopicCounts.length; i++)
      newTypeTopicCounts[i] = typeTopicCounts[i];
    for (int i = typeTopicCounts.length; i < numTypes; i++)
      newTypeTopicCounts[i] = new TIntIntHashMap();
    this.typeTopicCounts = newTypeTopicCounts;
  }
View Full Code Here
