Package org.apache.hadoop.hbase.client

Examples of org.apache.hadoop.hbase.client.Put
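Before the excerpts below, here is a minimal, self-contained sketch of the basic Put workflow, assuming the 0.90-era client API that the excerpts use (Put.add(family, qualifier, value) against an HTable); the table name "testtable", family "fam1", and cell values are illustrative only:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        HTable table = new HTable(conf, "testtable");   // assumed existing table
        try {
          Put put = new Put(Bytes.toBytes("row1"));     // row key
          put.add(Bytes.toBytes("fam1"),                // column family
                  Bytes.toBytes("qual1"),               // qualifier
                  Bytes.toBytes("value1"));             // cell value
          table.put(put);                               // ship the mutation to the region server
        } finally {
          table.close();
        }
      }
    }

The excerpts that follow are fragments of larger methods (mostly HBase region tests), so variables such as region, fam1, and row are declared elsewhere in the full sources.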


    delete.deleteFamily(fam1);

    region.delete(delete, null, true);

    // now create data.
    Put put = new Put(rowA);
    put.add(fam2, null, value);
    region.put(put);

    put = new Put(rowB);
    put.add(fam1, null, value);
    put.add(fam2, null, value);
    region.put(put);

    Scan scan = new Scan();
    scan.addFamily(fam1).addFamily(fam2);
    InternalScanner s = region.getScanner(scan);
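The excerpt above stops right after opening the InternalScanner. A hedged sketch of how such a scanner is typically drained, assuming the InternalScanner.next(List<KeyValue>) signature of the same API era (java.util.List/ArrayList and org.apache.hadoop.hbase.KeyValue imports assumed):

    // Sketch only: continues the excerpt above; 's' is the InternalScanner.
    List<KeyValue> results = new ArrayList<KeyValue>();
    boolean moreRows;
    do {
      results.clear();
      moreRows = s.next(results);   // fills 'results' with the cells of the next row
      for (KeyValue kv : results) {
        // inspect kv.getRow(), kv.getFamily(), kv.getQualifier(), kv.getValue()
      }
    } while (moreRows);
    s.close();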


  }

  public void doTestDelete_AndPostInsert(Delete delete)
      throws IOException, InterruptedException {
    initHRegion(tableName, getName(), fam1);
    Put put = new Put(row);
    put.add(fam1, qual1, value1);
    region.put(put);

    Thread.sleep(10);
   
    // now delete the value:
    region.delete(delete, null, true);

    Thread.sleep(10);

    // ok put data:
    put = new Put(row);
    put.add(fam1, qual1, value2);
    region.put(put);

    // ok get:
    Get get = new Get(row);
    get.addColumn(fam1, qual1);

    //Setting up region
    String method = this.getName();
    initHRegion(tableName, method, fam1);
   
    //Add to memstore
    Put put = new Put(row1);
    put.add(fam1, col1, null);
    put.add(fam1, col2, null);
    put.add(fam1, col3, null);
    put.add(fam1, col4, null);
    put.add(fam1, col5, null);
    region.put(put);

    Get get = new Get(row1);
    get.addColumn(fam1, col2);
    get.addColumn(fam1, col4);
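The Get excerpts above stop before the read itself. A hedged sketch of executing the last one against the region, assuming the HRegion.get(Get, Integer lockid) signature of this era and the JUnit assert helpers already used by these tests:

    // Sketch only: continues the excerpt above.
    Result result = region.get(get, null);     // null lock id: no explicit row lock
    assertEquals(2, result.size());            // only the two requested columns come back
    byte[] v2 = result.getValue(fam1, col2);   // value bytes stored under col2
    byte[] v4 = result.getValue(fam1, col4);   // value bytes stored under col4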

    //Setting up region
    String method = this.getName();
    initHRegion(HConstants.ROOT_TABLE_NAME, method, HConstants.CATALOG_FAMILY);

    //Add to memstore
    Put put = new Put(HConstants.EMPTY_START_ROW);
    put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, null);
    region.put(put);
   
    Get get = new Get(HConstants.EMPTY_START_ROW);
    get.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);

    //Setting up region
    String method = this.getName();
    initHRegion(tableName, method, families);
   
    //Putting data in Region
    Put put = new Put(row1);
    put.add(fam1, null, null);
    put.add(fam2, null, null);
    put.add(fam3, null, null);
    put.add(fam4, null, null);
    region.put(put);
   
    Scan scan = null;
    InternalScanner is = null;
   

    //Setting up region
    String method = this.getName();
    initHRegion(tableName, method, families);
   
    //Putting data in Region
    Put put = new Put(row1);
    put.add(fam1, null, ts, null);
    put.add(fam2, null, ts, null);
    put.add(fam3, null, ts, null);
    put.add(fam4, null, ts, null);
    region.put(put);

    put = new Put(row2);
    put.add(fam1, null, ts, null);
    put.add(fam2, null, ts, null);
    put.add(fam3, null, ts, null);
    put.add(fam4, null, ts, null);
    region.put(put);
   
    Scan scan = new Scan();
    scan.addFamily(fam2);
    scan.addFamily(fam4);

    //Put
    cl = Put.class;
    expected = ClassSize.estimateBase(cl, false);
    //The actual TreeMap is not included in the above calculation
    expected += ClassSize.TREEMAP;
    Put put = new Put(Bytes.toBytes(""));
    actual = put.heapSize();
    if (expected != actual) {
      ClassSize.estimateBase(cl, true);
      assertEquals(expected, actual);
    }
   

      r = createNewHRegion(TESTTABLEDESC, null, null);
      region = new HRegionIncommon(r);
     
      // Write information to the meta table

      Put put = new Put(ROW_KEY);
      put.setTimeStamp(System.currentTimeMillis());

      ByteArrayOutputStream byteStream = new ByteArrayOutputStream();
      DataOutputStream s = new DataOutputStream(byteStream);
      REGION_INFO.write(s);
      put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER,
          byteStream.toByteArray());
      region.put(put);

      // What we just committed is in the memstore. Verify that we can get
      // it back both with scanning and get
     
      scan(false, null);
      getRegionInfo();
     
      // Close and re-open
     
      r.close();
      r = openClosedRegion(r);
      region = new HRegionIncommon(r);

      // Verify we can get the data back now that it is on disk.
     
      scan(false, null);
      getRegionInfo();
     
      // Store some new information
      HServerAddress address = new HServerAddress("foo.bar.com:1234");

      put = new Put(ROW_KEY);
      put.setTimeStamp(System.currentTimeMillis());
      put.add(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER,
          Bytes.toBytes(address.toString()));

//      put.add(HConstants.COL_STARTCODE, Bytes.toBytes(START_CODE));

      region.put(put);
     
      // Validate that we can still get the HRegionInfo, even though it is in
      // an older row on disk and there is a newer row in the memstore
     
      scan(true, address.toString());
      getRegionInfo();
     
      // flush cache

      region.flushcache();

      // Validate again
     
      scan(true, address.toString());
      getRegionInfo();

      // Close and reopen
     
      r.close();
      r = openClosedRegion(r);
      region = new HRegionIncommon(r);

      // Validate again
     
      scan(true, address.toString());
      getRegionInfo();

      // Now update the information again

      address = new HServerAddress("bar.foo.com:4321");
     
      put = new Put(ROW_KEY);
      put.setTimeStamp(System.currentTimeMillis());

      put.add(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER,
          Bytes.toBytes(address.toString()));
      region.put(put);
     
      // Validate again
     

    final byte[] val1 = Bytes.toBytes(1);
    final byte[] val2 = Bytes.toBytes(-1);
    /**
     * prime the region.
     */
    Put put1 = new Put(row1);
    put1.add(HConstants.CATALOG_FAMILY, qual1, val1);
    put1.add(HConstants.CATALOG_FAMILY, qual2, val1);
    r.put(put1);
    Put put2 = new Put(row2);
    put2.add(HConstants.CATALOG_FAMILY, qual1, val2);
    put2.add(HConstants.CATALOG_FAMILY, qual2, val2);
    r.put(put2);
    /**
     * Scan for the second row.
     */
    Scan scan = new Scan();
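The final comment says the scan targets the second row, but the excerpt ends right after the Scan is created. A hedged sketch of one way to restrict the scan to row2 under this API (setStartRow with an exclusive stop row extended by a trailing zero byte):

    // Sketch only: continues the excerpt above; 'r' is the HRegion.
    scan.setStartRow(row2);
    scan.setStopRow(Bytes.add(row2, new byte[] { 0 }));   // stop row is exclusive
    scan.addColumn(HConstants.CATALOG_FAMILY, qual1);
    InternalScanner scanner = r.getScanner(scan);
    List<KeyValue> kvs = new ArrayList<KeyValue>();
    scanner.next(kvs);    // expect the val2 cell written to row2 above
    scanner.close();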

  throws IOException {
    // Puts and Deletes used to be "atomic" here.  We can use row locks if
    // we need to keep that property, or we can expand Puts and Deletes to
    // allow them to be committed at once.
    byte [] row = info.getRegionName();
    Put put = new Put(row);
    info.setOffline(true);
    put.add(CATALOG_FAMILY, REGIONINFO_QUALIFIER, Writables.getBytes(info));
    srvr.put(metaRegionName, put);
    cleanRegionInMETA(srvr, metaRegionName, info);
  }
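The comment in the last excerpt mentions row locks as a way to keep a Put and a Delete on the same row effectively atomic. A hedged sketch of that older client-side pattern, assuming the since-removed 0.90-era row-lock API (HTable.lockRow/unlockRow and the Put/Delete constructors that accept a RowLock); table, row, fam1, qual1, qual2, and newValue are illustrative:

    // Sketch only: deprecated row-lock pattern, not part of the excerpt above.
    RowLock lock = table.lockRow(row);                 // take an explicit lock on the row
    try {
      Put put = new Put(row, lock);                    // both mutations share the lock
      put.add(fam1, qual1, newValue);
      table.put(put);

      Delete delete = new Delete(row, HConstants.LATEST_TIMESTAMP, lock);
      delete.deleteColumn(fam1, qual2);
      table.delete(delete);
    } finally {
      table.unlockRow(lock);                           // always release the lock
    }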
