Package com.metamx.common.guava

Examples of com.metamx.common.guava.Sequence
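Sequence is the lazy, resource-aware result abstraction that the fragments below produce and consume. It has two entry points, both visible in the excerpts: accumulate(initValue, accumulator) folds the whole sequence, and toYielder(initValue, yieldingAccumulator) steps through it incrementally. As a primer before the excerpts, here is a minimal self-contained sketch of the fold path (the class name SequenceExample and the sample data are illustrative, not taken from any excerpt):

import com.metamx.common.guava.Accumulator;
import com.metamx.common.guava.Sequence;
import com.metamx.common.guava.Sequences;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class SequenceExample
{
  public static void main(String[] args)
  {
    // Wrap an in-memory Iterable; nothing is iterated until the sequence is consumed.
    Sequence<Integer> seq = Sequences.simple(Arrays.asList(1, 2, 3));

    // Fold with an Accumulator, which is what Sequences.toList does internally.
    Integer sum = seq.accumulate(
        0,
        new Accumulator<Integer, Integer>()
        {
          @Override
          public Integer accumulate(Integer accumulated, Integer in)
          {
            return accumulated + in;
          }
        }
    );
    System.out.println(sum); // 6

    // Or materialize the whole sequence into a list, as the tests below do.
    List<Integer> values = Sequences.toList(seq, new ArrayList<Integer>());
    System.out.println(values); // [1, 2, 3]
  }
}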


        // Test fragment: finish building a TopN query over a five-day interval
        .intervals("2011-01-05/2011-01-10")
        .aggregators(AGGS)
        .granularity(AllGranularity.ALL);

    final AssertingClosable closable = new AssertingClosable();
    // Wrap the expected results so the test can check that the resource is
    // released only once the sequence has actually been consumed.
    final Sequence resultSeq = new ResourceClosingSequence(
        Sequences.simple(expectedRes), closable
    )
    {
      @Override
      public Yielder toYielder(Object initValue, YieldingAccumulator accumulator)
      {
        Assert.assertFalse(closable.isClosed());
        return super.toYielder(
            initValue,
            accumulator
        );
      }
    };

    Cache cache = MapCache.create(1024 * 1024);

    String segmentIdentifier = "segment";
    SegmentDescriptor segmentDescriptor = new SegmentDescriptor(new Interval("2011/2012"), "version", 0);

    TopNQueryQueryToolChest toolchest = new TopNQueryQueryToolChest(new TopNQueryConfig());
    DefaultObjectMapper objectMapper = new DefaultObjectMapper();
    CachingQueryRunner runner = new CachingQueryRunner(
        segmentIdentifier,
        segmentDescriptor,
        objectMapper,
        cache,
        toolchest,
        new QueryRunner()
        {
          @Override
          public Sequence run(Query query, Map context)
          {
            return resultSeq;
          }
        },
        new CacheConfig()
    );

    TopNQuery query = builder.build();
    CacheStrategy<Result<TopNResultValue>, Object, TopNQuery> cacheStrategy = toolchest.getCacheStrategy(query);
    Cache.NamedKey cacheKey = CacheUtil.computeSegmentCacheKey(
        segmentIdentifier,
        segmentDescriptor,
        cacheStrategy.computeCacheKey(query)
    );

    HashMap<String,Object> context = new HashMap<String, Object>();
    Sequence res = runner.run(query, context);
    // base sequence is not closed yet
    Assert.assertFalse("sequence must not be closed", closable.isClosed());
    Assert.assertNull("cache must be empty", cache.get(cacheKey));

    ArrayList results = Sequences.toList(res, new ArrayList());
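The test above wraps its base sequence in a ResourceClosingSequence so it can assert exactly when the underlying resource is released. A minimal sketch of the same pattern, with an illustrative TrackingCloseable standing in for the test's AssertingClosable:

import com.metamx.common.guava.ResourceClosingSequence;
import com.metamx.common.guava.Sequence;
import com.metamx.common.guava.Sequences;

import java.io.Closeable;
import java.util.ArrayList;
import java.util.Arrays;

public class ResourceClosingExample
{
  // Illustrative Closeable that records whether close() has run.
  static class TrackingCloseable implements Closeable
  {
    volatile boolean closed = false;

    @Override
    public void close()
    {
      closed = true;
    }
  }

  public static void main(String[] args)
  {
    TrackingCloseable resource = new TrackingCloseable();
    Sequence<String> seq = new ResourceClosingSequence<String>(
        Sequences.simple(Arrays.asList("a", "b")),
        resource
    );

    // The resource stays open until the sequence has been fully consumed.
    System.out.println(resource.closed); // false
    Sequences.toList(seq, new ArrayList<String>());
    System.out.println(resource.closed); // true
  }
}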


    // Test fragment (picks up mid-statement): open-connection accounting for a query client
    );
    serverSelector.addServer(queryableDruidServer2);

    TimeBoundaryQuery query = Druids.newTimeBoundaryQueryBuilder().dataSource("test").build();
    HashMap<String,List> context = new HashMap<String, List>();
    Sequence s1 = client1.run(query, context);
    Assert.assertEquals(1, client1.getNumOpenConnections());

    // simulate read timeout
    Sequence s2 = client1.run(query, context);
    Assert.assertEquals(2, client1.getNumOpenConnections());
    futureException.setException(new ReadTimeoutException());
    // the timed-out connection is closed, leaving one open
    Assert.assertEquals(1, client1.getNumOpenConnections());

    // subsequent connections should work
    Sequence s3 = client1.run(query, context);
    Sequence s4 = client1.run(query, context);
    Sequence s5 = client1.run(query, context);

    Assert.assertEquals(4, client1.getNumOpenConnections());

    // produce result for first connection
    futureResult.set(new ByteArrayInputStream("[{\"timestamp\":\"2014-01-01T01:02:03Z\", \"result\": 42.0}]".getBytes()));

    // Test fragment: a cancelled query should leave no connections open
    serverSelector.addServer(queryableDruidServer1);

    TimeBoundaryQuery query = Druids.newTimeBoundaryQueryBuilder().dataSource("test").build();
    HashMap<String,List> context = new HashMap<String, List>();
    // simulate a successful cancellation acknowledgement from the server
    cancellationFuture.set(new StatusResponseHolder(HttpResponseStatus.OK, new StringBuilder("cancelled")));
    Sequence results = client1.run(query, context);
    Assert.assertEquals(0, client1.getNumOpenConnections());


    QueryInterruptedException exception = null;
    try {

            // Test fragment (picks up mid-argument list): a timeseries query
            // executed through a runner that chains runner2 and runner3
            runner2,
            runner3
        )
    );
    HashMap<String,Object> context = new HashMap<String, Object>();
    final Sequence seq = chainedRunner.run(
        Druids.newTimeseriesQueryBuilder()
              .dataSource("test")
              .intervals("2014/2015")
              .aggregators(Lists.<AggregatorFactory>newArrayList(new CountAggregatorFactory("count")))
              .build(),

            // Test fragment: as above, but with an explicit timeout and queryId
            // set in the query context
            runner2,
            runner3
        )
    );
    HashMap<String,Object> context = new HashMap<String, Object>();
    final Sequence seq = chainedRunner.run(
        Druids.newTimeseriesQueryBuilder()
              .dataSource("test")
              .intervals("2014/2015")
              .aggregators(Lists.<AggregatorFactory>newArrayList(new CountAggregatorFactory("count")))
              .context(ImmutableMap.<String, Object>of("timeout", 100, "queryId", "test"))

      // Server-side fragment: log the query, run it, and stream the results
      if (log.isDebugEnabled()) {
        log.debug("Got query [%s]", query);
      }

      final Map<String, Object> responseContext = new MapMaker().makeMap();
      final Sequence res = query.run(texasRanger, responseContext);
      final Sequence results;
      // a null sequence from the runner is normalized to an empty one
      if (res == null) {
        results = Sequences.empty();
      } else {
        results = res;
      }

      final Yielder yielder = results.toYielder(
          null,
          new YieldingAccumulator()
          {
            @Override
            public Object accumulate(Object accumulated, Object in)
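The fragment above drives the result Sequence through a Yielder rather than folding it, so results can be streamed to the response incrementally. A sketch of that consumption pattern, assuming the Yielder and YieldingAccumulator signatures seen in these excerpts (yield() is called as this.yield() because bare yield is restricted in recent Java versions):

import com.metamx.common.guava.Sequence;
import com.metamx.common.guava.Sequences;
import com.metamx.common.guava.Yielder;
import com.metamx.common.guava.YieldingAccumulator;

import java.io.IOException;
import java.util.Arrays;

public class YielderExample
{
  public static void main(String[] args) throws IOException
  {
    Sequence<Integer> seq = Sequences.simple(Arrays.asList(1, 2, 3));

    // Yield after every element so each step of the Yielder exposes one value.
    Yielder<Integer> yielder = seq.toYielder(
        null,
        new YieldingAccumulator<Integer, Integer>()
        {
          @Override
          public Integer accumulate(Integer accumulated, Integer in)
          {
            this.yield(); // pause iteration; `in` becomes the yielded value
            return in;
          }
        }
    );

    try {
      while (!yielder.isDone()) {
        System.out.println(yielder.get()); // current element
        yielder = yielder.next(null);      // resume until the next yield
      }
    }
    finally {
      yielder.close(); // a Yielder is Closeable and must be closed
    }
  }
}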

        // Test fragment: a QueryRunner whose Sequence fails with
        // SegmentMissingException as soon as it is consumed
        new QueryRunner()
        {
          @Override
          public Sequence run(Query query, Map context)
          {
            return new Sequence()
            {
              @Override
              public Object accumulate(Object initValue, Accumulator accumulator)
              {
                throw new SegmentMissingException("FAILSAUCE");
              }

              @Override
              public Yielder<Object> toYielder(
                  Object initValue, YieldingAccumulator accumulator
              )
              {
                return null;
              }
            };
          }
        },
        new SpecificSegmentSpec(
            descriptor
        )
    );

    final Map<String, Object> responseContext = Maps.newHashMap();
    TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
                                  .dataSource("foo")
                                  .granularity(QueryGranularity.ALL)
                                  .intervals(ImmutableList.of(new Interval("2012-01-01T00:00:00Z/P1D")))
                                  .aggregators(
                                      ImmutableList.<AggregatorFactory>of(
                                          new CountAggregatorFactory("rows")
                                      )
                                  )
                                  .build();
    Sequence results = queryRunner.run(
        query,
        responseContext
    );
    Sequences.toList(results, Lists.newArrayList());
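The anonymous Sequence in the fragment above uses raw types. For reference, a sketch of the same failure-injecting sequence with the generic signatures spelled out (the import path for SegmentMissingException is an assumption based on the Druid codebase these excerpts come from):

import com.metamx.common.guava.Accumulator;
import com.metamx.common.guava.Sequence;
import com.metamx.common.guava.Yielder;
import com.metamx.common.guava.YieldingAccumulator;
import io.druid.segment.SegmentMissingException; // assumed import path

public class FailingSequenceSketch
{
  static Sequence<Object> failingSequence()
  {
    return new Sequence<Object>()
    {
      @Override
      public <OutType> OutType accumulate(OutType initValue, Accumulator<OutType, Object> accumulator)
      {
        // fail the moment the sequence is consumed, as the test above expects
        throw new SegmentMissingException("FAILSAUCE");
      }

      @Override
      public <OutType> Yielder<OutType> toYielder(OutType initValue, YieldingAccumulator<OutType, Object> accumulator)
      {
        return null; // the test only exercises accumulate()
      }
    };
  }
}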

    // Test fragment: same query shape as above, this time materializing the results
                                      ImmutableList.<AggregatorFactory>of(
                                          new CountAggregatorFactory("rows")
                                      )
                                  )
                                  .build();
    Sequence results = queryRunner.run(
        query,
        responseContext
    );
    List<Result<TimeseriesResultValue>> res = Sequences.toList(
        results,
