Package org.apache.hadoop.hbase.io

Examples of org.apache.hadoop.hbase.io.BatchUpdate
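
BatchUpdate was the single-row mutation container of the old (0.19/0.20-era) HBase client API, later replaced by Put and Delete: it is constructed for one row, optionally with an explicit timestamp, accumulates put and delete operations keyed by "family:qualifier" column names, and is applied in one shot through HTable.commit() on the client side or HRegion.batchUpdate() inside the region server. The excerpts below are drawn from HBase internals such as the master's .META. bookkeeping, the Thrift gateway, and migration utilities. As a quick orientation, here is a minimal client-side sketch; the table name, column names, and values are made up, and the HBaseConfiguration/HTable constructors are assumed to match the same 0.19/0.20-era API the excerpts use.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.io.BatchUpdate;
import org.apache.hadoop.hbase.util.Bytes;

public class BatchUpdateExample {
  public static void main(String[] args) throws Exception {
    HBaseConfiguration conf = new HBaseConfiguration();
    HTable table = new HTable(conf, "mytable");   // hypothetical table name

    // All edits added to one BatchUpdate target the same row and are committed together.
    BatchUpdate bu = new BatchUpdate(Bytes.toBytes("row1"));
    bu.put(Bytes.toBytes("info:name"), Bytes.toBytes("value"));   // write a cell
    bu.delete(Bytes.toBytes("info:stale"));                       // delete a cell
    table.commit(bu);
  }
}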


    // Rewrite the region's HRegionInfo cell in catalog region r, logging the
    // old and new values when debug logging is enabled.
    if (LOG.isDebugEnabled()) {
      HRegionInfo h = Writables.getHRegionInfoOrNull(
          r.get(hri.getRegionName(), HConstants.COL_REGIONINFO, -1, -1)[0].getValue());
      LOG.debug("Old " + Bytes.toString(HConstants.COL_REGIONINFO) +
        " for " + hri.toString() + " in " + r.toString() + " is: " +
        h.toString());
    }
    BatchUpdate b = new BatchUpdate(hri.getRegionName());
    b.put(HConstants.COL_REGIONINFO, Writables.getBytes(hri));
    r.batchUpdate(b, null);
    if (LOG.isDebugEnabled()) {
      HRegionInfo h = Writables.getHRegionInfoOrNull(
          r.get(hri.getRegionName(), HConstants.COL_REGIONINFO, -1, -1)[0].getValue());
      LOG.debug("New " + Bytes.toString(HConstants.COL_REGIONINFO) +
        " for " + hri.toString() + " in " + r.toString() + " is: " +
        h.toString());
    }
View Full Code Here


    public void mutateRowTs(byte[] tableName, byte[] row,
        List<Mutation> mutations, long timestamp) throws IOError, IllegalArgument {
      HTable table = null;
      try {
        table = getTable(tableName);
        BatchUpdate batchUpdate = new BatchUpdate(row, timestamp);
        for (Mutation m : mutations) {
          if (m.isDelete) {
            batchUpdate.delete(m.column);
          } else {
            batchUpdate.put(m.column, m.value);
          }
        }
        table.commit(batchUpdate);
      } catch (IOException e) {
        throw new IOError(e.getMessage());
View Full Code Here

      List<BatchUpdate> batchUpdates = new ArrayList<BatchUpdate>();
      
      for (BatchMutation batch : rowBatches) {
        byte[] row = batch.row;
        List<Mutation> mutations = batch.mutations;
        BatchUpdate batchUpdate = new BatchUpdate(row, timestamp);
        for (Mutation m : mutations) {
          if (m.isDelete) {
            batchUpdate.delete(m.column);
          } else {
            batchUpdate.put(m.column, m.value);
          }
        }
        batchUpdates.add(batchUpdate);
      }
View Full Code Here
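
The excerpt above stops after the per-row BatchUpdate objects have been collected. In the same era of the HTable API the whole batch could then be committed in one client call; a sketch, assuming the commit(List<BatchUpdate>) overload is available and that table was resolved from tableName as in mutateRowTs above:

        // Sketch only: apply every per-row BatchUpdate built above in a single call.
        table.commit(batchUpdates);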

  void enableBlockCache(HRegion mr, HRegionInfo oldHri)
  throws IOException {
    if (!enableBlockCache(oldHri)) {
      return;
    }
    BatchUpdate b = new BatchUpdate(oldHri.getRegionName());
    b.put(HConstants.COL_REGIONINFO, Writables.getBytes(oldHri));
    mr.batchUpdate(b);
    LOG.info("Enabled blockcache on " + oldHri.getRegionNameAsString());
  }
View Full Code Here
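
The boolean enableBlockCache(HRegionInfo) helper that gates the rewrite is not shown in this excerpt. A hypothetical sketch of such a check-and-flag helper, written against the same-era descriptor API (HRegionInfo.getTableDesc(), HColumnDescriptor.setBlockCacheEnabled()) and not the original implementation:

  // Hypothetical sketch: enable the block cache on every column family of the
  // region's table descriptor, returning true only if something changed and
  // the .META. row therefore needs to be rewritten.
  boolean enableBlockCache(HRegionInfo hri) {
    boolean changed = false;
    for (HColumnDescriptor family : hri.getTableDesc().getFamilies()) {
      if (!family.isBlockCacheEnabled()) {
        family.setBlockCacheEnabled(true);
        changed = true;
      }
    }
    return changed;
  }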

  void updateVersions(HRegion mr, HRegionInfo oldHri)
  throws IOException {
    if (!updateVersions(oldHri)) {
      return;
    }
    BatchUpdate b = new BatchUpdate(oldHri.getRegionName());
    b.put(HConstants.COL_REGIONINFO, Writables.getBytes(oldHri));
    mr.batchUpdate(b);
    LOG.info("Upped versions on " + oldHri.getRegionNameAsString());
  }
View Full Code Here

    long now = System.currentTimeMillis();

    for (HStore store : super.stores.values()) {
      List<HStoreKey> keys = store.getKeys(new HStoreKey(row, timestamp),
          ALL_VERSIONS, now, null);
      BatchUpdate deleteUpdate = new BatchUpdate(row, timestamp);

      for (HStoreKey key : keys) {
        deleteUpdate.delete(key.getColumn());
      }
     
      state.addWrite(deleteUpdate);
      logManager.writeUpdateToLog(transactionId, deleteUpdate);
View Full Code Here

      master.getConfiguration());

    // 3. Insert into meta
    HRegionInfo info = region.getRegionInfo();
    byte [] regionName = region.getRegionName();
    BatchUpdate b = new BatchUpdate(regionName);
    b.put(COL_REGIONINFO, Writables.getBytes(info));
    server.batchUpdate(metaRegionName, b, -1L);
   
    // 4. Close the new region to flush it to disk.  Close its log file too.
    region.close();
    region.getLog().closeAndDelete();
View Full Code Here

          // Register the newly-available Region's location.
          LOG.info("updating row " + regionInfo.getRegionNameAsString() +
              " in region " + Bytes.toString(metaRegionName) +
              " with startcode " + Bytes.toLong(startCode) + " and server " +
              serverAddress.toString());
          BatchUpdate b = new BatchUpdate(regionInfo.getRegionName());
          b.put(COL_SERVER, Bytes.toBytes(serverAddress.toString()));
          b.put(COL_STARTCODE, startCode);
          server.batchUpdate(metaRegionName, b, -1L);
          if (!this.historian.isOnline()) {
            // This is safest place to do the onlining of the historian in
            // the master.  When we get to here, we know there is a .META.
            // for the historian to go against.
View Full Code Here

   * @throws IOException
   */
  public static void offlineRegionInMETA(final HRegionInterface srvr,
    final byte [] metaRegionName, final HRegionInfo info)
  throws IOException {
    BatchUpdate b = new BatchUpdate(info.getRegionName());
    info.setOffline(true);
    b.put(COL_REGIONINFO, Writables.getBytes(info));
    b.delete(COL_SERVER);
    b.delete(COL_STARTCODE);
    // If carrying splits, they'll be in place when we show up on new
    // server.
    srvr.batchUpdate(metaRegionName, b, -1L);
  }
View Full Code Here

   * @throws IOException
   */
  public static void cleanRegionInMETA(final HRegionInterface srvr,
    final byte [] metaRegionName, final HRegionInfo info)
  throws IOException {
    BatchUpdate b = new BatchUpdate(info.getRegionName());
    b.delete(COL_SERVER);
    b.delete(COL_STARTCODE);
    // If carrying splits, they'll be in place when we show up on new
    // server.
    srvr.batchUpdate(metaRegionName, b, LATEST_TIMESTAMP);
  }
View Full Code Here
