Package org.apache.hadoop.hbase.io

Examples of org.apache.hadoop.hbase.io.BatchUpdate
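
BatchUpdate is the single-row write container from the pre-0.20 HBase client API: puts and deletes are queued against one row, optionally with an explicit timestamp, and applied together when the batch is committed. A minimal sketch of the lifecycle, assuming an open HTable named "table" and a column name CONTENTS (both stand-in names for illustration):

    // Minimal sketch: queue edits against one row, then commit them as a unit.
    BatchUpdate bu = new BatchUpdate("myRow");      // all edits target this row
    bu.put(CONTENTS, Bytes.toBytes("a value"));     // queue a put
    bu.delete(CONTENTS);                            // or queue a delete
    table.commit(bu);                               // apply the whole batch

The examples below exercise the same pattern against regions, scanners, buffered client-side commits, and the META catalog table.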


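    // 1.  Insert the first half of the values (two columns per row)
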
    long startTime = System.currentTimeMillis();

    for(int k = 0; k < vals1.length / 2; k++) {
      String kLabel = String.format("%1$03d", k);

      BatchUpdate batchUpdate =
        new BatchUpdate(Bytes.toBytes("row_vals1_" + kLabel),
          System.currentTimeMillis());
      batchUpdate.put(cols[0], vals1[k].getBytes(HConstants.UTF8_ENCODING));
      batchUpdate.put(cols[1], vals1[k].getBytes(HConstants.UTF8_ENCODING));
      region.commit(batchUpdate);
      numInserted += 2;
    }

    LOG.info("Write " + (vals1.length / 2) + " elapsed time: "
        + ((System.currentTimeMillis() - startTime) / 1000.0));

    // 2.  Scan from cache
   
    startTime = System.currentTimeMillis();

    InternalScanner s =
      r.getScanner(cols, HConstants.EMPTY_START_ROW, System.currentTimeMillis(), null);
    int numFetched = 0;
    try {
      HStoreKey curKey = new HStoreKey();
      TreeMap<byte [], Cell> curVals =
        new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
      int k = 0;
      while(s.next(curKey, curVals)) {
        for(Iterator<byte []> it = curVals.keySet().iterator(); it.hasNext(); ) {
          byte [] col = it.next();
          byte [] val = curVals.get(col).getValue();
          int curval =
            Integer.parseInt(new String(val, HConstants.UTF8_ENCODING).trim());
          for(int j = 0; j < cols.length; j++) {
            if (Bytes.compareTo(col, cols[j]) == 0) {
              assertEquals("Error at:" + curKey.getRow() + "/"
                  + curKey.getTimestamp()
                  + ", Value for " + col + " should be: " + k
                  + ", but was fetched as: " + curval, k, curval);
              numFetched++;
            }
          }
        }
        curVals.clear();
        k++;
      }
    } finally {
      s.close();
    }
    assertEquals("Inserted " + numInserted + " values, but fetched " + numFetched, numInserted, numFetched);

    LOG.info("Scanned " + (vals1.length / 2)
        + " rows from cache. Elapsed time: "
        + ((System.currentTimeMillis() - startTime) / 1000.0));

    // 3.  Flush to disk
   
    startTime = System.currentTimeMillis();
   
    region.flushcache();

    LOG.info("Cache flush elapsed time: "
        + ((System.currentTimeMillis() - startTime) / 1000.0));

    // 4.  Scan from disk
   
    startTime = System.currentTimeMillis();
   
    s = r.getScanner(cols, HConstants.EMPTY_START_ROW,
      System.currentTimeMillis(), null);
    numFetched = 0;
    try {
      HStoreKey curKey = new HStoreKey();
      TreeMap<byte [], Cell> curVals =
        new TreeMap<byte [], Cell>(Bytes.BYTES_COMPARATOR);
      int k = 0;
      while(s.next(curKey, curVals)) {
        for(Iterator<byte []> it = curVals.keySet().iterator(); it.hasNext(); ) {
          byte [] col = it.next();
          byte [] val = curVals.get(col).getValue();
          int curval =
            Integer.parseInt(new String(val, HConstants.UTF8_ENCODING).trim());
          for(int j = 0; j < cols.length; j++) {
            if (Bytes.compareTo(col, cols[j]) == 0) {
              assertEquals("Error at:" + curKey.getRow() + "/"
                  + curKey.getTimestamp()
                  + ", Value for " + col + " should be: " + k
                  + ", but was fetched as: " + curval, k, curval);
              numFetched++;
            }
          }
        }
        curVals.clear();
        k++;
      }
    } finally {
      s.close();
    }
    assertEquals("Inserted " + numInserted + " values, but fetched " + numFetched, numInserted, numFetched);

    LOG.info("Scanned " + (vals1.length / 2)
        + " rows from disk. Elapsed time: "
        + ((System.currentTimeMillis() - startTime) / 1000.0));

    // 5.  Insert more values
   
    startTime = System.currentTimeMillis();

    for(int k = vals1.length/2; k < vals1.length; k++) {
      String kLabel = String.format("%1$03d", k);
     
      BatchUpdate batchUpdate =
        new BatchUpdate(Bytes.toBytes("row_vals1_" + kLabel),
          System.currentTimeMillis());
      batchUpdate.put(cols[0], vals1[k].getBytes(HConstants.UTF8_ENCODING));
      batchUpdate.put(cols[1], vals1[k].getBytes(HConstants.UTF8_ENCODING));
      region.commit(batchUpdate);
      numInserted += 2;
    }

    LOG.info("Write " + (vals1.length / 2) + " rows. Elapsed time: "
View Full Code Here


  /**
   * @throws IOException
   */
  public void testBatchUpdate() throws IOException {
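    // A put followed by a delete of the same column in one batch should leave row1 empty.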
    BatchUpdate bu = new BatchUpdate("row1");
    bu.put(CONTENTS, value);
    bu.delete(CONTENTS);
    table.commit(bu);

    bu = new BatchUpdate("row2");
    bu.put(CONTENTS, value);
    byte[][] getColumns = bu.getColumns();
    assertEquals(1, getColumns.length);
    assertTrue(Arrays.equals(getColumns[0], CONTENTS));
    assertTrue(bu.hasColumn(CONTENTS));
    assertFalse(bu.hasColumn(new byte[] {}));
    byte[] getValue = bu.get(getColumns[0]);
    assertTrue(Arrays.equals(getValue, value));
    table.commit(bu);

    byte [][] columns = { CONTENTS };
    Scanner scanner = table.getScanner(columns, HConstants.EMPTY_START_ROW);

    }
  }
 
  public void testBatchUpdateMaxLength() {
    // Commit a value that exceeds the column's maximum value length; expect failure
    BatchUpdate batchUpdate = new BatchUpdate("row1");
    batchUpdate.put(SMALLFAM, value);
    try {
      table.commit(batchUpdate);
      fail("Value is too long, should throw exception");
    } catch (IOException e) {
      // This is expected
    }
    // Verify the oversized value was not inserted
    try {
      Cell cell = table.get("row1", SMALLFAM_STR);
      assertNull(cell);
    } catch (IOException e) {
      e.printStackTrace();
      fail("Unexpected IOException while checking that the value was not inserted");
    }
    // Try to put a good value
    batchUpdate = new BatchUpdate("row1");
    batchUpdate.put(SMALLFAM, smallValue);
    try {
      table.commit(batchUpdate);
    } catch (IOException e) {
      fail("Value is long enough, should not throw exception");
    }

  }
 
  public void testRowsBatchUpdate() {
    ArrayList<BatchUpdate> rowsUpdate = new ArrayList<BatchUpdate>();
    for(int i = 0; i < NB_BATCH_ROWS; i++) {
      BatchUpdate batchUpdate = new BatchUpdate("row"+i);
      batchUpdate.put(CONTENTS, value);
      rowsUpdate.add(batchUpdate);
    }
    try {
      table.commit(rowsUpdate);

 
  public void testRowsBatchUpdateBufferedOneFlush() {
    table.setAutoFlush(false);
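    // With autoflush off, commits accumulate in the client-side write buffer
    // until flushCommits() is called or the buffer fills.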
    ArrayList<BatchUpdate> rowsUpdate = new ArrayList<BatchUpdate>();
    for(int i = 0; i < NB_BATCH_ROWS*10; i++) {
      BatchUpdate batchUpdate = new BatchUpdate("row"+i);
      batchUpdate.put(CONTENTS, value);
      rowsUpdate.add(batchUpdate);
    }
    try {
      table.commit(rowsUpdate);

  public void testRowsBatchUpdateBufferedManyManyFlushes() {
    table.setAutoFlush(false);
    table.setWriteBufferSize(10);
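    // A 10-byte write buffer forces a client-side flush on nearly every commit.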
    ArrayList<BatchUpdate> rowsUpdate = new ArrayList<BatchUpdate>();
    for(int i = 0; i < NB_BATCH_ROWS*10; i++) {
      BatchUpdate batchUpdate = new BatchUpdate("row"+i);
      batchUpdate.put(CONTENTS, value);
      rowsUpdate.add(batchUpdate);
    }
    try {
      table.commit(rowsUpdate);
     

    byte [] colA = Bytes.toBytes(Bytes.toString(COLUMNS[0]) + "a");
    byte [] colB = Bytes.toBytes(Bytes.toString(COLUMNS[0]) + "b");
    byte [] colC = Bytes.toBytes(Bytes.toString(COLUMNS[0]) + "c");
    byte [] colD = Bytes.toBytes(Bytes.toString(COLUMNS[0]));
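    // colA..colC add qualifiers "a".."c"; colD is the bare column family name.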

    BatchUpdate batchUpdate = new BatchUpdate(row, t0);
    batchUpdate.put(colA, cellData(0, flush).getBytes());
    batchUpdate.put(colB, cellData(0, flush).getBytes());
    batchUpdate.put(colC, cellData(0, flush).getBytes());     
    batchUpdate.put(colD, cellData(0, flush).getBytes());     
    region_incommon.commit(batchUpdate);

    batchUpdate = new BatchUpdate(row, t1);
    batchUpdate.put(colA, cellData(1, flush).getBytes());
    batchUpdate.put(colB, cellData(1, flush).getBytes());
    batchUpdate.put(colC, cellData(1, flush).getBytes());     
    batchUpdate.put(colD, cellData(1, flush).getBytes());     
    region_incommon.commit(batchUpdate);
   
    batchUpdate = new BatchUpdate(row, t2);
    batchUpdate.put(colA, cellData(2, flush).getBytes());
    batchUpdate.put(colB, cellData(2, flush).getBytes());
    batchUpdate.put(colC, cellData(2, flush).getBytes());     
    batchUpdate.put(colD, cellData(2, flush).getBytes());     
    region_incommon.commit(batchUpdate);

    if (flush) {region_incommon.flushcache();}

    // call delete all at a timestamp, make sure only the most recent stuff is left behind

      m_table.flushCommits();
    }

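    // RecordWriter-style write(): commits a defensive copy of the BatchUpdate to the table.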
    public void write(@SuppressWarnings("unused") ImmutableBytesWritable key,
        BatchUpdate value) throws IOException {
      m_table.commit(new BatchUpdate(value));
    }

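    // Toggle a catalog region online/offline: rewrite its HRegionInfo and
    // clear the server and startcode columns.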
    if (cell == null) {
      throw new IOException("no information for row " + row);
    }
    // Throws exception if null.
    HRegionInfo info = Writables.getHRegionInfo(cell);
    BatchUpdate b = new BatchUpdate(row);
    info.setOffline(onlineOffline);
    b.put(HConstants.COL_REGIONINFO, Writables.getBytes(info));
    b.delete(HConstants.COL_SERVER);
    b.delete(HConstants.COL_STARTCODE);
    t.commit(b);
  }

      byte [] colA = Bytes.toBytes(Bytes.toString(COLUMNS[0]) + "a");
      byte [] colB = Bytes.toBytes(Bytes.toString(COLUMNS[0]) + "b");
      byte [] colC = Bytes.toBytes(Bytes.toString(COLUMNS[0]) + "c");
      byte [] colD = Bytes.toBytes(Bytes.toString(COLUMNS[0]));

      BatchUpdate batchUpdate = new BatchUpdate(row, t0);
      batchUpdate.put(colA, cellData(0, flush).getBytes());
      batchUpdate.put(colB, cellData(0, flush).getBytes());
      batchUpdate.put(colC, cellData(0, flush).getBytes());     
      batchUpdate.put(colD, cellData(0, flush).getBytes());     
      region_incommon.commit(batchUpdate);

      batchUpdate = new BatchUpdate(row, t1);
      batchUpdate.put(colA, cellData(1, flush).getBytes());
      batchUpdate.put(colB, cellData(1, flush).getBytes());
      batchUpdate.put(colC, cellData(1, flush).getBytes());     
      batchUpdate.put(colD, cellData(1, flush).getBytes());     
      region_incommon.commit(batchUpdate);
     
      batchUpdate = new BatchUpdate(row, t2);
      batchUpdate.put(colA, cellData(2, flush).getBytes());
      batchUpdate.put(colB, cellData(2, flush).getBytes());
      batchUpdate.put(colC, cellData(2, flush).getBytes());     
      batchUpdate.put(colD, cellData(2, flush).getBytes());     
      region_incommon.commit(batchUpdate);

      if (flush) {region_incommon.flushcache();}

      // call delete the matching columns at a timestamp,
