Package org.apache.hadoop.hbase.io

Examples of org.apache.hadoop.hbase.io.BatchUpdate
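
The fragments below are drawn from the HBase source and its tests. In this older API, all mutations for a single row are staged on a BatchUpdate keyed by that row and then committed through an HTable on the client side, or applied directly via HRegion.batchUpdate / HRegionInterface.batchUpdate in server-side code. A minimal client-side sketch of the pattern, assuming a hypothetical table "mytable" with a column family "info":

import java.io.IOException;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.io.BatchUpdate;
import org.apache.hadoop.hbase.util.Bytes;

public class BatchUpdateExample {
  public static void main(String[] args) throws IOException {
    // Table "mytable" and columns "info:data" / "info:old" are hypothetical names.
    HTable table = new HTable(new HBaseConfiguration(), "mytable");

    // A BatchUpdate is keyed by one row; puts and deletes are staged on it
    // and applied together when the update is committed.
    BatchUpdate update = new BatchUpdate(Bytes.toBytes("row_0001"));
    update.put(Bytes.toBytes("info:data"), Bytes.toBytes("value"));  // stage a put
    update.delete(Bytes.toBytes("info:old"));                        // stage a delete
    table.commit(update);
  }
}

Because each BatchUpdate targets exactly one row, committing it applies all of the staged puts and deletes to that row in a single call.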


       
        if (LOG.isDebugEnabled()) {
          LOG.debug("updating columns in row: " + i.regionName);
        }

        BatchUpdate b = new BatchUpdate(rand.nextLong());
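        // Lock-id style: startUpdate() below registers the target row and returns a
        // lock id that each subsequent put/delete on this BatchUpdate passes back.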
        lockid = b.startUpdate(i.regionName);
        updateRegionInfo(b, i);
        b.delete(lockid, COL_SERVER);
        b.delete(lockid, COL_STARTCODE);
        server.batchUpdate(m.getRegionName(), System.currentTimeMillis(), b);
        if (LOG.isDebugEnabled()) {
          LOG.debug("updated columns in row: " + i.regionName);
        }


   * @see #addRegionToMETA(HRegion, HRegion)
   */
  static void removeRegionFromMETA(final HRegionInterface srvr,
      final Text metaRegionName, final Text regionName)
  throws IOException {
    BatchUpdate b = new BatchUpdate(rand.nextLong());
    long lockid = b.startUpdate(regionName);
    for (int i = 0; i < ALL_META_COLUMNS.length; i++) {
      b.delete(lockid, ALL_META_COLUMNS[i]);
    }
    srvr.batchUpdate(metaRegionName, System.currentTimeMillis(), b);
  }

   * @see #addRegionToMETA(HRegion, HRegion)
   */
  static void offlineRegionInMETA(final HRegionInterface srvr,
      final Text metaRegionName, final HRegionInfo info)
  throws IOException {
    BatchUpdate b = new BatchUpdate(rand.nextLong());
    long lockid = b.startUpdate(info.getRegionName());
    info.setOffline(true);
    b.put(lockid, COL_REGIONINFO, Writables.getBytes(info));
    b.delete(lockid, COL_SERVER);
    b.delete(lockid, COL_STARTCODE);
    // If carrying splits, they'll be in place when we show up on new
    // server.
    srvr.batchUpdate(metaRegionName, System.currentTimeMillis(), b);
  }

          oldRegion1,
          oldRegion2
      };
      for(int r = 0; r < regionsToDelete.length; r++) {
        long lockid = Math.abs(rand.nextLong());
        BatchUpdate b = new BatchUpdate(lockid);
        lockid = b.startUpdate(regionsToDelete[r]);
        b.delete(lockid, COL_REGIONINFO);
        b.delete(lockid, COL_SERVER);
        b.delete(lockid, COL_STARTCODE);
        b.delete(lockid, COL_SPLITA);
        b.delete(lockid, COL_SPLITB);
        root.batchUpdate(System.currentTimeMillis(), b);
        lockid = -1L;

        if(LOG.isDebugEnabled()) {
          LOG.debug("updated columns in row: " + regionsToDelete[r]);
        }
      }
      HRegionInfo newInfo = newRegion.getRegionInfo();
      newInfo.setOffline(true);
      long lockid = Math.abs(rand.nextLong());
      BatchUpdate b = new BatchUpdate(lockid);
      lockid = b.startUpdate(newRegion.getRegionName());
      b.put(lockid, COL_REGIONINFO, Writables.getBytes(newInfo));
      root.batchUpdate(System.currentTimeMillis(), b);
      if(LOG.isDebugEnabled()) {
        LOG.debug("updated columns in row: " + newRegion.getRegionName());
      }
    }

    // Insert values
   
    HTable table = new HTable(conf, getName());

    for (Map.Entry<byte [], SortedMap<byte [], Cell>> row: values.entrySet()) {
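      // Stage every column value for this row on one BatchUpdate, then commit
      // the whole row in a single call.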
      BatchUpdate b = new BatchUpdate(row.getKey());
      for (Map.Entry<byte [], Cell> val: row.getValue().entrySet()) {
        b.put(val.getKey(), val.getValue().getValue());
      }
      table.commit(b);
    }

    HRegion region = null;

        /*
         * Insert data
         */
        for (int j = 0; j < rows[i].length; j++) {
          byte [] row = rows[i][j];
          BatchUpdate b = new BatchUpdate(row);
          b.put(COLUMN_NAME, new ImmutableBytesWritable(row).get());
          regions[i].batchUpdate(b);
        }
        HRegion.addRegionToMETA(meta, regions[i]);
      }
      // Close root and meta regions

    if (cell == null) {
      throw new IOException("no information for row " + row);
    }
    // Throws exception if null.
    HRegionInfo info = Writables.getHRegionInfo(cell);
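    // Rewrite the region's info column with the new online/offline state and clear
    // its server-assignment columns, all in a single commit on this row.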
    BatchUpdate b = new BatchUpdate(row);
    info.setOffline(onlineOffline);
    b.put(HConstants.COL_REGIONINFO, Writables.getBytes(info));
    b.delete(HConstants.COL_SERVER);
    b.delete(HConstants.COL_STARTCODE);
    t.commit(b);
  }

        r.get(hri.getRegionName(), HConstants.COL_REGIONINFO).getValue());
      LOG.debug("Old " + Bytes.toString(HConstants.COL_REGIONINFO) +
        " for " + hri.toString() + " in " + r.toString() + " is: " +
        h.toString());
    }
    BatchUpdate b = new BatchUpdate(hri.getRegionName());
    b.put(HConstants.COL_REGIONINFO, Writables.getBytes(hri));
    r.batchUpdate(b);
    if (LOG.isDebugEnabled()) {
      HRegionInfo h = Writables.getHRegionInfoOrNull(
          r.get(hri.getRegionName(), HConstants.COL_REGIONINFO).getValue());
        LOG.debug("New " + Bytes.toString(HConstants.COL_REGIONINFO) +

   
    HRegion region = createNewHRegion(desc, startKey, endKey);
   
    byte [] keyToWrite = startKey == null ? Bytes.toBytes("row_000") : startKey;
   
    BatchUpdate bu = new BatchUpdate(keyToWrite);
    bu.put(COLUMN_NAME, "test".getBytes());

    region.batchUpdate(bu);

    region.close();
    region.getLog().closeAndDelete();

   * Test BatchUpdate serialization
   * @throws Exception
   */
  public void testBatchUpdate() throws Exception {
    // Add row named 'testName'.
    BatchUpdate bu = new BatchUpdate(getName());
    // Add a column named same as row.
    bu.put(getName(), getName().getBytes());
    byte [] b = Writables.getBytes(bu);
    BatchUpdate bubu =
      (BatchUpdate)Writables.getWritable(b, new BatchUpdate());
    // Assert rows are same.
    assertTrue(Bytes.equals(bu.getRow(), bubu.getRow()));
    // Assert has same number of BatchOperations.
    int firstCount = 0;
    for (BatchOperation bo: bubu) {
      firstCount++;
    }
    // Now deserialize again into same instance to ensure we're not
    // accumulating BatchOperations on each deserialization.
    BatchUpdate bububu = (BatchUpdate)Writables.getWritable(b, bubu);
    // Assert rows are same again.
    assertTrue(Bytes.equals(bu.getRow(), bububu.getRow()));
    int secondCount = 0;
    for (BatchOperation bo: bububu) {
      secondCount++;
    }
    assertEquals(firstCount, secondCount);
