Package io.druid.timeline.partition

Examples of io.druid.timeline.partition.NoneShardSpec
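NoneShardSpec is the ShardSpec Druid uses when a segment is not partitioned at all: the segment is the only chunk for its interval. A minimal sketch of the two core calls, hedged against the older API shown on this page (later Druid versions replace the public constructor with a NoneShardSpec.instance() singleton):

      // A NoneShardSpec takes no arguments: there is nothing to partition by.
      ShardSpec shardSpec = new NoneShardSpec();

      // createChunk wraps an arbitrary payload in a single PartitionChunk that
      // covers the whole partition space (see the timeline example below).
      PartitionChunk<String> chunk = shardSpec.createChunk("payload");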


          shardSpecs = Lists.newArrayList();
          for (int i = 0; i < numShards; i++) {
            shardSpecs.add(new HashBasedNumberedShardSpec(i, numShards, jsonMapper));
          }
        } else {
          // No explicit shard count: the interval gets a single unsharded segment.
          shardSpecs = ImmutableList.<ShardSpec>of(new NoneShardSpec());
        }
      }
      for (final ShardSpec shardSpec : shardSpecs) {
        final DataSegment segment = generateSegment(
            toolbox,
            // … snippet truncated


    };

    if (dimensionValueMultisets.isEmpty()) {
      // No suitable partition dimension. We'll make one big segment and hope for the best.
      log.info("No suitable partition dimension found");
      shardSpecs.add(new NoneShardSpec());
    } else {
      // Find best partition dimension (heuristic: highest cardinality).
      final Map.Entry<String, TreeMultiset<String>> partitionEntry =
          byCardinalityOrdering.max(dimensionValueMultisets.entrySet());

      final String partitionDim = partitionEntry.getKey();
      final TreeMultiset<String> partitionDimValues = partitionEntry.getValue();

      log.info(
          "Partitioning on dimension[%s] with cardinality[%d] over rows[%d]",
          partitionDim,
          partitionDimValues.elementSet().size(),
          partitionDimValues.size()
      );

      // Iterate over unique partition dimension values in sorted order
      String currentPartitionStart = null;
      int currentPartitionSize = 0;
      for (final String partitionDimValue : partitionDimValues.elementSet()) {
        currentPartitionSize += partitionDimValues.count(partitionDimValue);
        if (currentPartitionSize >= targetPartitionSize) {
          final ShardSpec shardSpec = new SingleDimensionShardSpec(
              partitionDim,
              currentPartitionStart,
              partitionDimValue,
              shardSpecs.size()
          );

          log.info("Adding shard: %s", shardSpec);
          shardSpecs.add(shardSpec);

          currentPartitionSize = partitionDimValues.count(partitionDimValue);
          currentPartitionStart = partitionDimValue;
        }
      }

      if (currentPartitionSize > 0) {
        // One last shard to go
        final ShardSpec shardSpec;

        if (shardSpecs.isEmpty()) {
          shardSpec = new NoneShardSpec();
        } else {
          shardSpec = new SingleDimensionShardSpec(
              partitionDim,
              currentPartitionStart,
              null,
              // … snippet truncated
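
To make the loop concrete, a hypothetical walkthrough (numbers invented for illustration, and assuming shardSpecs starts empty): let targetPartitionSize = 1,000,000 and the sorted per-value counts be a = 600k, b = 600k, c = 300k.

      // after "a": currentPartitionSize = 600k -> below target, keep accumulating
      // after "b": currentPartitionSize = 1.2M -> emit
      //            SingleDimensionShardSpec(dim, null, "b", 0)  // values < "b"
      //            and carry count("b") = 600k into the next partition
      // after "c": currentPartitionSize = 900k -> loop ends with rows pending, so
      //            the final shard is SingleDimensionShardSpec(dim, "b", null, 1),
      //            covering values >= "b"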

    return DataSegment.builder()
                      .dataSource(dataSource)
                      .interval(mergedInterval)
                      .version(version)
                      .binaryVersion(IndexIO.CURRENT_VERSION_ID)
                      .shardSpec(new NoneShardSpec())
                      .dimensions(Lists.newArrayList(mergedDimensions))
                      .metrics(Lists.newArrayList(mergedMetrics))
                      .build();
  }
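The builder above comes from a segment-merge path: the merged segment spans every partition of mergedInterval, so it is unsharded and is written with a NoneShardSpec.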

              new Interval(start, start.plusHours(1)),
              new DateTime().toString(),
              Maps.<String, Object>newHashMap(),
              Lists.<String>newArrayList(),
              Lists.<String>newArrayList(),
              new NoneShardSpec(),
              IndexIO.CURRENT_VERSION_ID,
              1
          )
      );
      start = start.plusHours(1);
      // … snippet truncated
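
The loop above is cut off mid-stream; here is a self-contained sketch of the same pattern with hypothetical names (makeHourlySegments, dataSource, hours) and the positional DataSegment constructor arguments labeled:

      private List<DataSegment> makeHourlySegments(String dataSource, DateTime start, int hours)
      {
        List<DataSegment> segments = Lists.newArrayList();
        for (int i = 0; i < hours; i++) {
          segments.add(
              new DataSegment(
                  dataSource,
                  new Interval(start, start.plusHours(1)),
                  new DateTime().toString(),          // version
                  Maps.<String, Object>newHashMap(),  // loadSpec
                  Lists.<String>newArrayList(),       // dimensions
                  Lists.<String>newArrayList(),       // metrics
                  new NoneShardSpec(),                // one chunk per interval
                  IndexIO.CURRENT_VERSION_ID,         // binaryVersion
                  1                                   // size
              )
          );
          start = start.plusHours(1);
        }
        return segments;
      }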

        new Interval("2012-02-01/2012-02-02"),
        new DateTime().toString(),
        Maps.<String, Object>newHashMap(),
        Lists.<String>newArrayList(),
        Lists.<String>newArrayList(),
        new NoneShardSpec(),
        1,
        0
    );

    afterParams = ruleRunner.run(
        // … snippet truncated
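
In this rule-runner fixture the two trailing positional arguments are the segment's binaryVersion (1) and size (0), as labeled in the sketch above; the NoneShardSpec keeps each test segment to a single chunk per interval.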

        new Interval("2012-02-01/2012-02-02"),
        new DateTime().toString(),
        Maps.<String, Object>newHashMap(),
        Lists.<String>newArrayList(),
        Lists.<String>newArrayList(),
        new NoneShardSpec(),
        1,
        0
    );
    List<DataSegment> longerAvailableSegments = Lists.newArrayList(availableSegments);
    longerAvailableSegments.add(overFlowSegment);

          log.info("Creating [%,d] shards", numberOfShards);

          List<HadoopyShardSpec> actualSpecs = Lists.newArrayListWithExpectedSize(numberOfShards);
          if (numberOfShards == 1) {
            actualSpecs.add(new HadoopyShardSpec(new NoneShardSpec(), shardCount++));
          } else {
            for (int i = 0; i < numberOfShards; ++i) {
              actualSpecs.add(
                  new HadoopyShardSpec(
                      new HashBasedNumberedShardSpec(
                      // … snippet truncated
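
The multi-shard branch is truncated above; a hedged completion, reusing the HashBasedNumberedShardSpec constructor from the first snippet on this page (jsonMapper assumed to be in scope):

          for (int i = 0; i < numberOfShards; ++i) {
            actualSpecs.add(
                new HadoopyShardSpec(
                    // shard i of numberOfShards, hash-partitioned
                    new HashBasedNumberedShardSpec(i, numberOfShards, jsonMapper),
                    // global, monotonically increasing shard counter
                    shardCount++
                )
            );
          }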

        new Interval(start1, start1.plusHours(1)),
        version.toString(),
        Maps.<String, Object>newHashMap(),
        Lists.<String>newArrayList(),
        Lists.<String>newArrayList(),
        new NoneShardSpec(),
        0,
        11L
    );
    segment2 = new DataSegment(
        "datasource1",
        new Interval(start2, start2.plusHours(1)),
        version.toString(),
        Maps.<String, Object>newHashMap(),
        Lists.<String>newArrayList(),
        Lists.<String>newArrayList(),
        new NoneShardSpec(),
        0,
        7L
    );
    segment3 = new DataSegment(
        "datasource2",
        new Interval(start1, start1.plusHours(1)),
        version.toString(),
        Maps.<String, Object>newHashMap(),
        Lists.<String>newArrayList(),
        Lists.<String>newArrayList(),
        new NoneShardSpec(),
        0,
        4L
    );
    segment4 = new DataSegment(
        "datasource2",
        new Interval(start2, start2.plusHours(1)),
        version.toString(),
        Maps.<String, Object>newHashMap(),
        Lists.<String>newArrayList(),
        Lists.<String>newArrayList(),
        new NoneShardSpec(),
        0,
        8L
    );

    segments = Lists.newArrayList(segment1, segment2, segment3, segment4);

        new Interval(start1, start1.plusHours(1)),
        version.toString(),
        Maps.<String, Object>newHashMap(),
        Lists.<String>newArrayList(),
        Lists.<String>newArrayList(),
        new NoneShardSpec(),
        0,
        11L
    );
    segment2 = new DataSegment(
        "datasource1",
        new Interval(start2, start2.plusHours(1)),
        version.toString(),
        Maps.<String, Object>newHashMap(),
        Lists.<String>newArrayList(),
        Lists.<String>newArrayList(),
        new NoneShardSpec(),
        0,
        7L
    );
    segment3 = new DataSegment(
        "datasource2",
        new Interval(start1, start1.plusHours(1)),
        version.toString(),
        Maps.<String, Object>newHashMap(),
        Lists.<String>newArrayList(),
        Lists.<String>newArrayList(),
        new NoneShardSpec(),
        0,
        4L
    );
    segment4 = new DataSegment(
        "datasource2",
        new Interval(start2, start2.plusHours(1)),
        version.toString(),
        Maps.<String, Object>newHashMap(),
        Lists.<String>newArrayList(),
        Lists.<String>newArrayList(),
        new NoneShardSpec(),
        0,
        8L
    );

    segments = new HashMap<String, DataSegment>();

      VersionedIntervalTimeline<Integer, File> timeline = new VersionedIntervalTimeline<Integer, File>(
          Ordering.natural().nullsFirst()
      );

      ShardSpec noneShardSpec = new NoneShardSpec();

      for (int i = 0; i < intervals.size(); i++) {
        timeline.add(intervals.get(i), i, noneShardSpec.createChunk(filesToMap.get(i)));
      }

      List<Pair<File, Interval>> intervalsToMerge = Lists.transform(
          timeline.lookup(new Interval("1000-01-01/3000-01-01")),
          new Function<TimelineObjectHolder<Integer, File>, Pair<File, Interval>>()
          // … snippet truncated
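
Because NoneShardSpec.createChunk yields a chunk that is complete on its own, each interval in the timeline resolves to exactly one file. A minimal, hypothetical usage sketch (standard Druid, Guava, and Joda imports assumed):

      VersionedIntervalTimeline<Integer, File> timeline =
          new VersionedIntervalTimeline<Integer, File>(Ordering.natural().nullsFirst());
      ShardSpec spec = new NoneShardSpec();
      timeline.add(new Interval("2012-01-01/2012-01-02"), 0, spec.createChunk(new File("/tmp/a")));
      timeline.add(new Interval("2012-01-02/2012-01-03"), 1, spec.createChunk(new File("/tmp/b")));

      for (TimelineObjectHolder<Integer, File> holder : timeline.lookup(new Interval("2012-01-01/2012-01-03"))) {
        // with NoneShardSpec each holder has a single chunk, numbered 0
        File file = holder.getObject().getChunk(0).getObject();
        System.out.println(holder.getInterval() + " -> " + file);
      }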
