Package de.lmu.ifi.dbs.elki.logging.progress

Examples of de.lmu.ifi.dbs.elki.logging.progress.FiniteProgress

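FiniteProgress is the progress logger used throughout ELKI for tasks whose total number of steps is known in advance; its counterpart IndefiniteProgress covers tasks where the total is unknown. All examples below follow the same idiom: the progress object is constructed only when verbose logging is enabled, advanced once per completed unit of work via incrementProcessed(...) or setProcessed(...), and closed with ensureCompleted(...) so that the final state is always logged. A minimal sketch of the idiom (runTask and its parameters are hypothetical, for illustration only; the progress and logging calls are the ones used in the excerpts below):

  // Minimal usage sketch; "runTask" and its body are hypothetical.
  void runTask(Logging logger, int totalSteps) {
    // Construct the progress object only in verbose mode.
    FiniteProgress prog = logger.isVerbose() ? new FiniteProgress("Hypothetical task", totalSteps, logger) : null;
    for(int i = 0; i < totalSteps; i++) {
      // ... perform one unit of work ...
      if(prog != null) {
        prog.incrementProcessed(logger); // or: prog.setProcessed(i + 1, logger)
      }
    }
    if(prog != null) {
      prog.ensureCompleted(logger); // make sure the completed state is logged
    }
  }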

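The first excerpt materializes approximate k nearest neighbors from a per-object random sample; one FiniteProgress step is logged per processed object: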
  @Override
  protected void preprocess() {
    DistanceQuery<O, D> distanceQuery = relation.getDatabase().getDistanceQuery(relation, distanceFunction);
    storage = DataStoreUtil.makeStorage(relation.getDBIDs(), DataStoreFactory.HINT_STATIC, KNNResult.class);
    FiniteProgress progress = getLogger().isVerbose() ? new FiniteProgress("Materializing random-sample k nearest neighbors (k=" + k + ")", relation.size(), getLogger()) : null;

    final ArrayDBIDs ids = DBIDUtil.ensureArray(relation.getDBIDs());
    final int samplesize = (int) (ids.size() * share);
    final long iseed = (seed != null) ? seed : (new Random()).nextLong();

    int i = 0;
    for(DBID id : ids) {
      KNNHeap<D> kNN = new KNNHeap<D>(k, distanceQuery.infiniteDistance());

      long rseed = i * 0x7FFFFFFFFFFFFFE7L + iseed;
      DBIDs rsamp = DBIDUtil.randomSample(ids, samplesize, rseed);
      for(DBID oid : rsamp) {
        D dist = distanceQuery.distance(id, oid);
        kNN.add(new GenericDistanceResultPair<D>(dist, oid));
      }

      storage.put(id, kNN.toKNNList());
      i++; // advance the per-object seed offset; without this, every object draws the same sample
      if(progress != null) {
        progress.incrementProcessed(getLogger());
      }
    }

    if(progress != null) {
      progress.ensureCompleted(getLogger());
    }
  }


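This preprocessor materializes exact k-nearest-neighbor lists, preferring a bulk query where available and falling back to per-object queries; both paths advance the same progress object: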
  @Override
  protected void preprocess() {
    createStorage();

    ArrayDBIDs ids = DBIDUtil.ensureArray(relation.getDBIDs());
    FiniteProgress progress = getLogger().isVerbose() ? new FiniteProgress("Materializing k nearest neighbors (k=" + k + ")", ids.size(), getLogger()) : null;

    // Try a bulk k nearest neighbor query first, if enabled. The bulk API
    // may return null when the underlying index does not support it.
    List<KNNResult<D>> kNNList = usebulk ? knnQuery.getKNNForBulkDBIDs(ids, k) : null;
    if(kNNList != null) {
      for(int i = 0; i < ids.size(); i++) {
        DBID id = ids.get(i);
        storage.put(id, kNNList.get(i));
        if(progress != null) {
          progress.incrementProcessed(getLogger());
        }
      }
    }
    else {
      // Fall back to one query per object (also when the bulk query failed).
      for(DBID id : ids) {
        KNNResult<D> knn = knnQuery.getKNNForDBID(id, k);
        storage.put(id, knn);
        if(progress != null) {
          progress.incrementProcessed(getLogger());
        }
      }
    }

    if(progress != null) {
      progress.ensureCompleted(getLogger());
    }
  }

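The following k-nearest-neighbor join excerpt drives several progress objects at once: a FiniteProgress over the pairwise leaf MBR comparisons, a second one over the resulting task queue, an IndefiniteProgress for the full page comparisons (whose number is not known in advance), and a final FiniteProgress over the data pages: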
    // Build priority queue
    final int sqsize = ps_candidates.size() * (ps_candidates.size() - 1) / 2;
    if(logger.isDebuggingFine()) {
      logger.debugFine("Number of leaves: " + ps_candidates.size() + ", thus " + sqsize + " MBR comparisons.");
    }
    FiniteProgress mprogress = logger.isVerbose() ? new FiniteProgress("Comparing leaf MBRs", sqsize, logger) : null;
    for(int i = 0; i < ps_candidates.size(); i++) {
      E pr_entry = ps_candidates.get(i);
      List<KNNHeap<D>> pr_heaps = heaps.get(i);
      D pr_knn_distance = computeStopDistance(pr_heaps);

      for(int j = i + 1; j < ps_candidates.size(); j++) {
        E ps_entry = ps_candidates.get(j);
        List<KNNHeap<D>> ps_heaps = heaps.get(j);
        D ps_knn_distance = computeStopDistance(ps_heaps);
        D minDist = distFunction.minDist(pr_entry, ps_entry);
        // Resolve immediately:
        if(minDist.isNullDistance()) {
          N pr = index.getNode(ps_candidates.get(i));
          N ps = index.getNode(ps_candidates.get(j));
          processDataPagesOptimize(distFunction, doubleOptimize, pr_heaps, ps_heaps, pr, ps);
        }
        else if(minDist.compareTo(pr_knn_distance) <= 0 || minDist.compareTo(ps_knn_distance) <= 0) {
          pq.add(new Task(minDist, i, j));
        }
        if(mprogress != null) {
          mprogress.incrementProcessed(logger);
        }
      }
    }
    if(mprogress != null) {
      mprogress.ensureCompleted(logger);
    }

    // Process the queue
    FiniteProgress qprogress = logger.isVerbose() ? new FiniteProgress("Processing queue", pq.size(), logger) : null;
    IndefiniteProgress fprogress = logger.isVerbose() ? new IndefiniteProgress("Full comparisons", logger) : null;
    while(!pq.isEmpty()) {
      Task task = pq.poll();
      List<KNNHeap<D>> pr_heaps = heaps.get(task.i);
      List<KNNHeap<D>> ps_heaps = heaps.get(task.j);
      D pr_knn_distance = computeStopDistance(pr_heaps);
      D ps_knn_distance = computeStopDistance(ps_heaps);
      boolean dor = task.mindist.compareTo(pr_knn_distance) <= 0; // the pr-side heaps may still improve
      boolean dos = task.mindist.compareTo(ps_knn_distance) <= 0; // the ps-side heaps may still improve
      if(dor || dos) {
        N pr = index.getNode(ps_candidates.get(task.i));
        N ps = index.getNode(ps_candidates.get(task.j));
        if(dor && dos) {
          processDataPagesOptimize(distFunction, doubleOptimize, pr_heaps, ps_heaps, pr, ps);
        }
        else {
          if(dor) {
            processDataPagesOptimize(distFunction, doubleOptimize, pr_heaps, null, pr, ps);
          }
          else /* dos */ {
            processDataPagesOptimize(distFunction, doubleOptimize, ps_heaps, null, ps, pr);
          }
        }
        if(fprogress != null) {
          fprogress.incrementProcessed(logger);
        }
      }
      if(qprogress != null) {
        qprogress.incrementProcessed(logger);
      }
    }
    if(qprogress != null) {
      qprogress.ensureCompleted(logger);
    }
    if(fprogress != null) {
      fprogress.setCompleted(logger);
    }

    WritableDataStore<KNNList<D>> knnLists = DataStoreUtil.makeStorage(ids, DataStoreFactory.HINT_STATIC, KNNList.class);
    FiniteProgress pageprog = logger.isVerbose() ? new FiniteProgress("Number of processed data pages", ps_candidates.size(), logger) : null;
    for(int i = 0; i < ps_candidates.size(); i++) {
      N pr = index.getNode(ps_candidates.get(i));
      List<KNNHeap<D>> pr_heaps = heaps.get(i);

      // Finalize lists
      for(int j = 0; j < pr.getNumEntries(); j++) {
        knnLists.put(((LeafEntry) pr.getEntry(j)).getDBID(), pr_heaps.get(j).toKNNList());
      }
      // Release the heaps of this page; they are no longer needed.
      heaps.set(i, null);

      if(pageprog != null) {
        pageprog.incrementProcessed(logger);
      }
    }
    if(pageprog != null) {
      pageprog.ensureCompleted(logger);
    }
    return knnLists;
  }

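Here the relation is partitioned by the correlation dimensionality of each object's local projection; an explicit counter is passed to setProcessed(...) instead of calling incrementProcessed(...):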
    partitionDistanceQuery = (FilteredLocalPCABasedDistanceFunction.Instance<V, LocalProjectionIndex<V, ?>, D>) partitionDistanceFunction.instantiate(relation);
    LocalProjectionIndex<V, ?> preprocin = partitionDistanceQuery.getIndex();

    // partitioning
    Map<Integer, ModifiableDBIDs> partitionMap = new HashMap<Integer, ModifiableDBIDs>();
    FiniteProgress partitionProgress = logger.isVerbose() ? new FiniteProgress("Partitioning", relation.size(), logger) : null;
    int processed = 1; // counter for setProcessed: after the first object, 1 is reported

    for(DBID id : relation.iterDBIDs()) {
      Integer corrdim = preprocin.getLocalProjection(id).getCorrelationDimension();

      if(!partitionMap.containsKey(corrdim)) {
        partitionMap.put(corrdim, DBIDUtil.newArray());
      }

      partitionMap.get(corrdim).add(id);
      if(partitionProgress != null) {
        partitionProgress.setProcessed(processed++, logger);
      }
    }

    if(partitionProgress != null) {
      partitionProgress.ensureCompleted(logger);
    }
    if(logger.isVerbose()) {
      for(Integer corrDim : partitionMap.keySet()) {
        ModifiableDBIDs list = partitionMap.get(corrDim);
        logger.verbose("Partition [corrDim = " + corrDim + "]: " + list.size() + " objects.");

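The LMCLUS run method pairs a FiniteProgress over the clustered objects with an IndefiniteProgress counting the clusters found; because whole clusters are removed at once, the object counter is advanced with setProcessed(...):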
   * @return Clustering result
   * @throws de.lmu.ifi.dbs.elki.utilities.UnableToComplyException
   */
  public Clustering<Model> run(Database database, Relation<NumberVector<?, ?>> relation) throws UnableToComplyException {
    Clustering<Model> ret = new Clustering<Model>("LMCLUS Clustering", "lmclus-clustering");
    FiniteProgress progress = logger.isVerbose() ? new FiniteProgress("Clustered objects", relation.size(), logger) : null;
    IndefiniteProgress cprogress = logger.isVerbose() ? new IndefiniteProgress("Clusters found", logger) : null;
    ModifiableDBIDs unclustered = DBIDUtil.newHashSet(relation.getDBIDs());

    final int maxdim = Math.min(maxLMDim, DatabaseUtil.dimensionality(relation));
    int cnum = 0;
    while(unclustered.size() > minsize) {
      DBIDs current = unclustered;
      int lmDim = 1;
      for(int k = 1; k <= maxdim; k++) {
        // Implementation note: this while loop is taken from the original
        // publication and the published LMCLUS source code. It does not quite
        // make sense: it has no stop criterion other than "cluster is too
        // small" or "cluster is inseparable", and there is no good criterion
        // for stopping at the appropriate dimensionality either.
        while(true) {
          Separation separation = findSeparation(relation, current, k);
          if(separation.goodness <= sensitivityThreshold) {
            break;
          }
          ModifiableDBIDs subset = DBIDUtil.newArray(current.size());
          for(DBID id : current) {
            if(deviation(relation.get(id).getColumnVector().minusEquals(separation.originV), separation.basis) < separation.threshold) {
              subset.add(id);
            }
          }
          if(subset.size() < minsize) {
            break;
          }
          current = subset;
          lmDim = k;
        }
      }
      // No more clusters found
      if(current.size() < minsize || current == unclustered) {
        break;
      }
      // New cluster found
      // TODO: annotate cluster with dimensionality
      final Cluster<Model> cluster = new Cluster<Model>(current);
      cluster.setName("Cluster_" + lmDim + "d_" + cnum);
      cnum++;
      ret.addCluster(cluster);
      // Remove from main working set.
      unclustered.removeDBIDs(current);
      if(progress != null) {
        progress.setProcessed(relation.size() - unclustered.size(), logger);
      }
      if(cprogress != null) {
        cprogress.setProcessed(cnum, logger);
      }
    }
    // Remaining objects are noise
    if(unclustered.size() > 0) {
      ret.addCluster(new Cluster<Model>(unclustered, true));
    }
    if(progress != null) {
      progress.setProcessed(relation.size(), logger);
      progress.ensureCompleted(logger);
    }
    if(cprogress != null) {
      cprogress.setCompleted(logger);
    }
    return ret;

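This cluster-order extraction (based on preference-vector correlation distances) logs one progress step per cluster-order entry: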
   * @param distFunc the distance function
   * @param clusterOrder the cluster order to extract the clusters from
   * @return the extracted clusters
   */
  private Map<BitSet, List<Pair<BitSet, ArrayModifiableDBIDs>>> extractClusters(Relation<V> database, DiSHDistanceFunction.Instance<V> distFunc, ClusterOrderResult<PreferenceVectorBasedCorrelationDistance> clusterOrder) {
    FiniteProgress progress = logger.isVerbose() ? new FiniteProgress("Extract Clusters", database.size(), logger) : null;
    int processed = 0;
    Map<BitSet, List<Pair<BitSet, ArrayModifiableDBIDs>>> clustersMap = new HashMap<BitSet, List<Pair<BitSet, ArrayModifiableDBIDs>>>();
    Map<DBID, ClusterOrderEntry<PreferenceVectorBasedCorrelationDistance>> entryMap = new HashMap<DBID, ClusterOrderEntry<PreferenceVectorBasedCorrelationDistance>>();
    Map<DBID, Pair<BitSet, ArrayModifiableDBIDs>> entryToClusterMap = new HashMap<DBID, Pair<BitSet, ArrayModifiableDBIDs>>();
    for(Iterator<ClusterOrderEntry<PreferenceVectorBasedCorrelationDistance>> it = clusterOrder.iterator(); it.hasNext();) {
      ClusterOrderEntry<PreferenceVectorBasedCorrelationDistance> entry = it.next();
      entryMap.put(entry.getID(), entry);

      V object = database.get(entry.getID());
      BitSet preferenceVector = entry.getReachability().getCommonPreferenceVector();

      // get the list of (parallel) clusters for the preference vector
      List<Pair<BitSet, ArrayModifiableDBIDs>> parallelClusters = clustersMap.get(preferenceVector);
      if(parallelClusters == null) {
        parallelClusters = new ArrayList<Pair<BitSet, ArrayModifiableDBIDs>>();
        clustersMap.put(preferenceVector, parallelClusters);
      }

      // look for the proper cluster
      Pair<BitSet, ArrayModifiableDBIDs> cluster = null;
      for(Pair<BitSet, ArrayModifiableDBIDs> c : parallelClusters) {
        V c_centroid = DatabaseUtil.centroid(database, c.second, c.first);
        PreferenceVectorBasedCorrelationDistance dist = distFunc.correlationDistance(object, c_centroid, preferenceVector, preferenceVector);
        if(dist.getCorrelationValue() == entry.getReachability().getCorrelationValue()) {
          double d = distFunc.weightedDistance(object, c_centroid, dist.getCommonPreferenceVector());
          if(d <= 2 * epsilon) {
            cluster = c;
            break;
          }
        }
      }
      if(cluster == null) {
        cluster = new Pair<BitSet, ArrayModifiableDBIDs>(preferenceVector, DBIDUtil.newArray());
        parallelClusters.add(cluster);
      }
      cluster.second.add(entry.getID());
      entryToClusterMap.put(entry.getID(), cluster);

      if(progress != null) {
        progress.setProcessed(++processed, logger);
      }
    }
    if(progress != null) {
      progress.ensureCompleted(logger);
    }

    if(logger.isDebuggingFiner()) {
      StringBuffer msg = new StringBuffer("Step 0");
      for(List<Pair<BitSet, ArrayModifiableDBIDs>> clusterList : clustersMap.values()) {

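DeLiClu reports progress by the number of handled objects, using setProcessed(...) with an explicit counter, since expandable node pairs taken from the heap do not handle an object: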
    if(logger.isVerbose()) {
      logger.verbose("knnJoin...");
    }
    DataStore<KNNList<D>> knns = knnJoin.run(database, relation);

    FiniteProgress progress = logger.isVerbose() ? new FiniteProgress("DeLiClu", relation.size(), logger) : null;
    final int size = relation.size();

    ClusterOrderResult<D> clusterOrder = new ClusterOrderResult<D>("DeLiClu Clustering", "deliclu-clustering");
    heap = new UpdatableHeap<SpatialObjectPair>();

    // add start object to cluster order and (root, root) to priority queue
    DBID startID = getStartObject(relation);
    clusterOrder.add(startID, null, distFunction.getDistanceFactory().infiniteDistance());
    int numHandled = 1;
    index.setHandled(startID, relation.get(startID));
    SpatialDirectoryEntry rootEntry = (SpatialDirectoryEntry) index.getRootEntry();
    SpatialObjectPair spatialObjectPair = new SpatialObjectPair(distFunction.getDistanceFactory().nullDistance(), rootEntry, rootEntry, true);
    heap.add(spatialObjectPair);

    while(numHandled < size) {
      if(heap.isEmpty()) {
        throw new AbortException("DeLiClu heap was empty when it shouldn't have been.");
      }
      SpatialObjectPair dataPair = heap.poll();

      // pair of nodes
      if(dataPair.isExpandable) {
        expandNodes(index, distFunction, dataPair, knns);
      }
      // pair of objects
      else {
        // set handled
        LeafEntry e1 = (LeafEntry) dataPair.entry1;
        LeafEntry e2 = (LeafEntry) dataPair.entry2;
        final DBID e1id = e1.getDBID();
        List<TreeIndexPathComponent<DeLiCluEntry>> path = index.setHandled(e1id, relation.get(e1id));
        if(path == null) {
          throw new RuntimeException("Should not happen: parent(" + e1id + ") = null!");
        }
        // add to cluster order
        clusterOrder.add(e1id, e2.getDBID(), dataPair.distance);
        numHandled++;
        // reinsert expanded leafs
        reinsertExpanded(distFunction, index, path, knns);

        if(progress != null) {
          progress.setProcessed(numHandled, logger);
        }
      }
    }
    if(progress != null) {
      progress.ensureCompleted(logger);
    }
    return clusterOrder;
  }

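OPTICS creates a single FiniteProgress up front, hands it to the cluster-order expansion routines, and calls ensureCompleted(...) only after all objects have been processed: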
      epsilon = getDistanceFunction().getDistanceFactory().infiniteDistance();
    }
    RangeQuery<O, D> rangeQuery = QueryUtil.getRangeQuery(relation, getDistanceFunction(), epsilon);

    int size = relation.size();
    final FiniteProgress progress = logger.isVerbose() ? new FiniteProgress("OPTICS", size, logger) : null;

    processedIDs = DBIDUtil.newHashSet(size);
    ClusterOrderResult<D> clusterOrder = new ClusterOrderResult<D>("OPTICS Clusterorder", "optics-clusterorder");

    if(getDistanceFunction() instanceof PrimitiveDoubleDistanceFunction && DoubleDistance.class.isInstance(epsilon)) {
      // Optimized codepath for double-based distances. Avoids Java
      // boxing/unboxing.
      for(DBID id : relation.iterDBIDs()) {
        if(!processedIDs.contains(id)) {
          // We need to do some ugly casts to be able to run the optimized version, unfortunately.
          @SuppressWarnings("unchecked")
          final ClusterOrderResult<DoubleDistance> doubleClusterOrder = ClusterOrderResult.class.cast(clusterOrder);
          @SuppressWarnings("unchecked")
          final RangeQuery<O, DoubleDistance> doubleRangeQuery = RangeQuery.class.cast(rangeQuery);
          final DoubleDistance depsilon = DoubleDistance.class.cast(epsilon);
          expandClusterOrderDouble(doubleClusterOrder, database, doubleRangeQuery, id, depsilon, progress);
        }
      }
    }
    else {
      for(DBID id : relation.iterDBIDs()) {
        if(!processedIDs.contains(id)) {
          expandClusterOrder(clusterOrder, database, rangeQuery, id, epsilon, progress);
        }
      }
    }
    if(progress != null) {
      progress.ensureCompleted(logger);
    }

    return clusterOrder;
  }

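The SLINK main loop is the simplest case: one incrementProcessed(...) per object, followed by ensureCompleted(...):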
    pi = store.getStorage(0, DBID.class);
    lambda = store.getStorage(1, distCls);
    // Temporary storage for m.
    WritableDataStore<D> m = DataStoreUtil.makeStorage(relation.getDBIDs(), DataStoreFactory.HINT_HOT | DataStoreFactory.HINT_TEMP, distCls);

    FiniteProgress progress = logger.isVerbose() ? new FiniteProgress("Clustering", relation.size(), logger) : null;
    // has to be an array for monotonicity reasons!
    ModifiableDBIDs processedIDs = DBIDUtil.newArray(relation.size());

    // apply the algorithm
    for(DBID id : relation.iterDBIDs()) {
      step1(id);
      step2(id, processedIDs, distQuery, m);
      step3(id, processedIDs, m);
      step4(id, processedIDs);

      processedIDs.add(id);

      if(progress != null) {
        progress.incrementProcessed(logger);
      }
    }

    if(progress != null) {
      progress.ensureCompleted(logger);
    }
    // We don't need m anymore.
    m.destroy();
    m = null;

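Finally, dendrogram extraction from the SLINK pointer representation (pi, lambda) again advances the progress once per object: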
   * @param minclusters Minimum number of clusters to extract
   *
   * @return Hierarchical clustering
   */
  private Clustering<DendrogramModel<D>> extractClusters(DBIDs ids, final DataStore<DBID> pi, final DataStore<D> lambda, int minclusters) {
    FiniteProgress progress = logger.isVerbose() ? new FiniteProgress("Extracting clusters", ids.size(), logger) : null;

    // stopdist
    D stopdist = null;
    // sort by lambda
    ArrayModifiableDBIDs order = DBIDUtil.newArray(ids);
    order.sort(new CompareByLambda<D>(lambda));
    int index = ids.size() - minclusters - 1;
    while(index >= 0) {
      if(lambda.get(order.get(index)).equals(lambda.get(order.get(index + 1)))) {
        index--;
      }
      else {
        stopdist = lambda.get(order.get(index));
        break;
      }
    }

    // extract the child clusters
    Map<DBID, ModifiableDBIDs> cluster_ids = new HashMap<DBID, ModifiableDBIDs>();
    Map<DBID, D> cluster_distances = new HashMap<DBID, D>();
    for(DBID id : ids) {
      DBID lastObjectInCluster = lastObjectInCluster(id, stopdist, pi, lambda);
      ModifiableDBIDs cluster = cluster_ids.get(lastObjectInCluster);
      if(cluster == null) {
        cluster = DBIDUtil.newArray();
        cluster_ids.put(lastObjectInCluster, cluster);
      }
      cluster.add(id);

      D lambda_id = lambda.get(id);
      if(stopdist != null && lambda_id.compareTo(stopdist) <= 0 && (cluster_distances.get(lastObjectInCluster) == null || lambda_id.compareTo(cluster_distances.get(lastObjectInCluster)) > 0)) {
        cluster_distances.put(lastObjectInCluster, lambda_id);
      }

      // Advance the progress counter.
      if(progress != null) {
        progress.incrementProcessed(logger);
      }
    }
    if(progress != null) {
      progress.ensureCompleted(logger);
    }

    // build hierarchy
    final Clustering<DendrogramModel<D>> dendrogram = new Clustering<DendrogramModel<D>>("Single-Link-Dendrogram", "slink-dendrogram");
    ModifiableHierarchy<Cluster<DendrogramModel<D>>> hier = new HierarchyHashmapList<Cluster<DendrogramModel<D>>>();
