Package com.urbanairship.datacube.ops

Examples of com.urbanairship.datacube.ops.LongOp
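
LongOp is the long-valued counter op used throughout these examples: each write carries a LongOp, and the cube combines ops destined for the same cell by addition. A minimal sketch of that behavior (hedged: add() is assumed from the library's Op contract; the LongOp constructor and getLong() both appear in the snippets below):

    // Hedged sketch of LongOp counter semantics; add() combining two ops
    // is an assumption based on the Op contract, not shown in the snippets.
    LongOp a = new LongOp(3);
    LongOp b = new LongOp(4);
    LongOp sum = (LongOp) a.add(b);
    System.out.println(sum.getLong()); // 7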


    // From a Bucketer implementation: every coordinate lands in the identity
    // bucket, wrapped in a LongOp to make it serializable.
    private static final List<BucketType> bucketTypes = ImmutableList.of(BucketType.IDENTITY);

    @Override
    public SetMultimap<BucketType, CSerializable> bucketForWrite(Long coordinate) {
        return ImmutableSetMultimap.<BucketType, CSerializable>of(
                BucketType.IDENTITY, new LongOp(coordinate));
    }


        // From a test of asynchronous error handling: the harness writes to a
        // nonexistent table, so the flush fails and subsequent writes are refused.
        cube = new DataCube<LongOp>(dimensions, rollups);

        DataCubeIo<LongOp> dataCubeIo = new DataCubeIo<LongOp>(cube, dbHarness, 1, Long.MAX_VALUE,
                SyncLevel.BATCH_ASYNC);

        dataCubeIo.writeAsync(new LongOp(1), new WriteBuilder(cube)
                .at(zipcode, "97212"));

        dataCubeIo.writeAsync(new LongOp(1), new WriteBuilder(cube)
                .at(zipcode, "97212"));

        dataCubeIo.flush();

        try {
            dataCubeIo.writeAsync(new LongOp(1), new WriteBuilder(cube)
                    .at(zipcode, "97212"));
            Assert.fail("Cube should not have accepted more writes after an error!");
        } catch (AsyncException e) {
            // This exception *should* happen, because we wrote to a nonexistent table.
            // The root TableNotFoundException is wrapped several layers deep.
            Assert.assertTrue(e.getCause().getCause().getCause() instanceof TableNotFoundException);
        }

        // From a test of BATCH_SYNC flush-by-age: the batch size is effectively
        // unbounded, so writes are flushed only when the one-second batch age expires.
        DataCubeIo<LongOp> cubeIo = new DataCubeIo<LongOp>(cube, dbHarness, Integer.MAX_VALUE,
                TimeUnit.SECONDS.toMillis(1), SyncLevel.BATCH_SYNC);
       
        // Immediately after the first write, the write should be hanging out in the batch and not yet
        // written to the backing dbHarness.
        cubeIo.writeSync(new LongOp(1), new WriteBuilder(cube).at(colorDimension, Color.RED));
        Assert.assertFalse(cubeIo.get(new ReadBuilder(cube).at(colorDimension, Color.RED)).isPresent());
       
        // If we wait one second for the batch timeout to expire and write again, both writes should
        // be flushed to the backing dbHarness.
        Thread.sleep(1001);
        cubeIo.writeSync(new LongOp(1), new WriteBuilder(cube).at(colorDimension, Color.RED));
        Assert.assertEquals(2,
                cubeIo.get(new ReadBuilder(cube).at(colorDimension, Color.RED)).get().getLong());
       
        // If we do another write, it should not be flushed to the database since it's part of a new
        // batch whose timeout has not yet expired.
        cubeIo.writeSync(new LongOp(1), new WriteBuilder(cube).at(colorDimension, Color.RED));
        Assert.assertEquals(2,
                cubeIo.get(new ReadBuilder(cube).at(colorDimension, Color.RED)).get().getLong());
       
    }
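
Across these tests, the two numeric arguments ahead of the SyncLevel appear to be a batch size and a maximum batch age in milliseconds: a batch is flushed when it reaches the size limit or outlives the age limit, which is exactly what the one-second sleep above exercises. A hedged summary of the configurations seen in this section:

    // Assumed parameter meanings, inferred from the tests above (not authoritative):
    // new DataCubeIo<LongOp>(cube, harness, batchSize, maxBatchAgeMs, syncLevel)
    //   batchSize = 1, age = Long.MAX_VALUE          -> flush after every write
    //   batchSize = Integer.MAX_VALUE, age = 1000 ms -> flush only when a batch turns one second old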

        // Wrapper for the old cube, which counts events along the time dimension only.
        public OldCubeWrapper() throws Exception {
            this.dataCubeIo = makeDataCubeIo(oldCube, LIVE_CUBE_TABLE);
        }

        public void put(Event event) throws IOException, InterruptedException  {
            dataCubeIo.writeSync(new LongOp(1), new WriteBuilder(oldCube)
                    .at(timeDimension, event.time));
        }

        // Wrapper for the new cube, which adds a color dimension alongside time.
        public NewCubeWrapper(DataCubeIo<LongOp> dataCubeIo) throws IOException {
            this.dataCubeIo = dataCubeIo;
        }
       
        public void put(Event event) throws IOException, InterruptedException  {
            dataCubeIo.writeSync(new LongOp(1), new WriteBuilder(newCube)
                    .at(timeDimension, event.time)
                    .at(colorDimension, event.color));
        }
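
Taken together, the two wrappers suggest a live schema migration: the old cube counts by time only, while the new cube adds a color dimension. A hedged sketch of dual-writing during such a migration (oldCubeWrapper and newCubeWrapper are hypothetical names for instances of the wrappers above):

    // Hypothetical dual-write: feed each event to both cubes until the new
    // cube's backfill catches up, then cut reads over to the new cube.
    public void put(Event event) throws IOException, InterruptedException {
        oldCubeWrapper.put(event); // old schema: time only
        newCubeWrapper.put(event); // new schema: time + color
    }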

        // From a snapshot-and-backfill test: FULL_SYNC with batch size 1, so every
        // write goes straight through to the live HBase table.
        DataCube<LongOp> cube = new DataCube<LongOp>(dims, rollups);
        DataCubeIo<LongOp> cubeIo = new DataCubeIo<LongOp>(cube, hbaseDbHarness, 1,
                Long.MAX_VALUE, SyncLevel.FULL_SYNC);
       
        // Before doing any snapshotting/backfilling, there's one value "5" in the cube.
        cubeIo.writeSync(new LongOp(5), new WriteBuilder(cube).at(onlyDimension, "coord1"));
       
        // Snapshot the source table
        Assert.assertTrue(new HBaseSnapshotter(conf, CUBE_DATA_TABLE, CF, SNAPSHOT_DEST_TABLE,
                new Path("hdfs:///test_hfiles"), false, null, null).runWithCheckedExceptions());

        // Simulate a backfill by copying the live cube
        Assert.assertTrue(new HBaseSnapshotter(conf, CUBE_DATA_TABLE, CF, BACKFILLED_TABLE,
                new Path("hdfs:///test_hfiles"), true, null, null).runWithCheckedExceptions());
       
        // Simulate two writes to the live table that wouldn't be seen by the app as it backfills.
        // This is like a client doing a write concurrently with a backfill.
        cubeIo.writeSync(new LongOp(6), new WriteBuilder(cube).at(onlyDimension, "coord1"));
        cubeIo.writeSync(new LongOp(7), new WriteBuilder(cube).at(onlyDimension, "coord2"));
       
        HBaseBackfillMerger backfiller = new HBaseBackfillMerger(conf, ArrayUtils.EMPTY_BYTE_ARRAY,
                CUBE_DATA_TABLE, SNAPSHOT_DEST_TABLE, BACKFILLED_TABLE, CF,
                LongOp.LongOpDeserializer.class);
        Assert.assertTrue(backfiller.runWithCheckedExceptions());
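
If the merge works as the comments describe, coord1 should end up holding the backfilled baseline plus the concurrent write, and coord2 just the concurrent write. A hedged read-back check along those lines:

    // Assumed post-merge totals: 5 (baseline) + 6 (concurrent) = 11 for coord1; 7 for coord2.
    Assert.assertEquals(11L,
            cubeIo.get(new ReadBuilder(cube).at(onlyDimension, "coord1")).get().getLong());
    Assert.assertEquals(7L,
            cubeIo.get(new ReadBuilder(cube).at(onlyDimension, "coord2")).get().getLong());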

                "dh".getBytes(), DATA_CUBE_TABLE, CF, LongOp.DESERIALIZER, idService,
                DbHarness.CommitType.INCREMENT, new TestCallback(s), 1, 1 , 1, "none");
        // Do an increment of 5 for a certain time and zipcode
        DataCubeIo<LongOp> dataCubeIo = new DataCubeIo<LongOp>(dataCube, hbaseDbHarness, 1, 100000, SyncLevel.BATCH_SYNC);

        dataCubeIo.writeSync(new LongOp(5), new WriteBuilder(dataCube)
                .at(time, DateTime.now(DateTimeZone.UTC))
                .at(zipcode, "97201"));

        boolean callbackCalled = s.tryAcquire(1, TimeUnit.SECONDS);
        Assert.assertTrue(callbackCalled);

        dataCubeIo.writeAsync(new LongOp(5), new WriteBuilder(dataCube)
                .at(time, DateTime.now(DateTimeZone.UTC))
                .at(zipcode, "97202"));

        callbackCalled = s.tryAcquire(1, TimeUnit.SECONDS);
        Assert.assertTrue(callbackCalled);
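
The semaphore handshake is the interesting part here: TestCallback (constructed above but not shown) presumably releases a permit when the harness finishes flushing, and tryAcquire with a timeout lets the test wait for that without busy-looping. The pattern in isolation, using only java.util.concurrent:

    Semaphore s = new Semaphore(0);    // no permits until the callback fires
    Runnable onFlush = s::release;     // what TestCallback presumably does
    onFlush.run();                     // stand-in for the harness invoking the callback
    Assert.assertTrue(s.tryAcquire(1, TimeUnit.SECONDS));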

                "dh".getBytes(), DATA_CUBE_TABLE, CF, LongOp.DESERIALIZER, idService,
                DbHarness.CommitType.INCREMENT, 1, 1 , 1, "none");

        DataCubeIo<LongOp> dataCubeIo = new DataCubeIo<LongOp>(dataCube, hbaseDbHarness, 1, 100000, SyncLevel.BATCH_SYNC);

        dataCubeIo.writeSync(new LongOp(5), new WriteBuilder(dataCube)
                .at(time, DateTime.now(DateTimeZone.UTC))
                .at(zipcode, "97201"));
    }

    // Count one tweet along every dimension it touches, then apply the whole
    // batch of cube updates asynchronously.
    WriteBuilder writeBuilder = new WriteBuilder(dataCube)
        .at(timeDimension, tweet.time)
        .at(userDimension, tweet.username)
        .at(retweetedFromDimension, tweet.retweetedFrom.or(""))
        .at(tagsDimension, tweet.hashTags);
    Batch<LongOp> cubeUpdates = dataCube.getWrites(writeBuilder, new LongOp(1));

    dataCubeIo.writeAsync(cubeUpdates);
  }
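
getWrites expands one logical event into a batch of cube updates (roughly one per affected rollup), and writeAsync then applies the whole batch. Assuming the cube defines a rollup over userDimension, the per-user count written above could later be read back like this:

    // Hedged companion read; assumes a rollup over userDimension exists.
    long tweetsByUser = dataCubeIo.get(new ReadBuilder(dataCube)
            .at(userDimension, tweet.username))
            .or(new LongOp(0))
            .getLong();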

    /**
     * Get the total number of tweets.
     */
    public long getCount() throws InterruptedException, IOException {
        // A ReadBuilder with no dimensions reads the all-tweets rollup; default
        // to zero when nothing has been written yet.
        return dataCubeIo.get(new ReadBuilder(dataCube)).or(new LongOp(0)).getLong();
    }
