Package org.apache.hadoop.hbase.client

Examples of org.apache.hadoop.hbase.client.Delete
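Most of the snippets below come from the HBase tests and drive Delete through the internal HRegion API (region.put, region.delete, region.checkAndMutate). For orientation, here is a minimal sketch of the equivalent client-side usage against the old 0.9x-era HTable API; the table, family and qualifier names are illustrative assumptions, not taken from the snippets.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.util.Bytes;

public class DeleteExample {
  public static void main(String[] args) throws Exception {
    // "testtable", "fam1" and "qualifier1" are hypothetical names.
    HTable table = new HTable(HBaseConfiguration.create(), "testtable");
    try {
      Delete delete = new Delete(Bytes.toBytes("row1"));
      // deleteColumn marks only the newest version of the cell for deletion;
      // deleteColumns and deleteFamily widen the scope (see the later snippets).
      delete.deleteColumn(Bytes.toBytes("fam1"), Bytes.toBytes("qualifier1"));
      table.delete(delete);
    } finally {
      table.close();
    }
  }
}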


      boolean res = region.checkAndMutate(row1, fam1, qf1, CompareOp.EQUAL,
          new BinaryComparator(val1), put, lockId, true);
      assertEquals(true, res);

      //checkAndDelete with correct value
      Delete delete = new Delete(row1);
      delete.deleteColumn(fam1, qf1);
      res = region.checkAndMutate(row1, fam1, qf1, CompareOp.EQUAL,
          new BinaryComparator(val1), delete, lockId, true);
      assertEquals(true, res);
    } finally {
      HRegion.closeHRegion(this.region);
View Full Code Here


      put.add(fam2, qf3, val1);
      put.add(fam1, qf3, val1);
      region.put(put);

      //Multi-column delete
      Delete delete = new Delete(row1);
      delete.deleteColumn(fam1, qf1);
      delete.deleteColumn(fam2, qf1);
      delete.deleteColumn(fam1, qf3);
      boolean res = region.checkAndMutate(row1, fam1, qf1, CompareOp.EQUAL,
          new BinaryComparator(val2), delete, lockId, true);
      assertEquals(true, res);

      Get get = new Get(row1);
      get.addColumn(fam1, qf1);
      get.addColumn(fam1, qf3);
      get.addColumn(fam2, qf2);
      Result r = region.get(get, null);
      assertEquals(2, r.size());
      assertEquals(val1, r.getValue(fam1, qf1));
      assertEquals(val2, r.getValue(fam2, qf2));

      //Family delete
      delete = new Delete(row1);
      delete.deleteFamily(fam2);
      res = region.checkAndMutate(row1, fam2, qf1, CompareOp.EQUAL,
          new BinaryComparator(emptyVal), delete, lockId, true);
      assertEquals(true, res);

      get = new Get(row1);
      r = region.get(get, null);
      assertEquals(1, r.size());
      assertEquals(val1, r.getValue(fam1, qf1));

      //Row delete
      delete = new Delete(row1);
      res = region.checkAndMutate(row1, fam1, qf1, CompareOp.EQUAL,
          new BinaryComparator(val1), delete, lockId, true);
      assertEquals(true, res);
      get = new Get(row1);
      r = region.get(get, null);
View Full Code Here
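
The region-level checkAndMutate calls above are the server-side path behind the client's checkAndDelete. A hedged sketch of the same multi-column case through the old HTableInterface API, reusing the byte[] constants from the snippet (the table handle and helper method are assumptions):

import java.io.IOException;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.HTableInterface;

// Sketch only: row1, fam1, fam2, qf1, qf3 and val2 are the constants used above.
static boolean checkAndDeleteColumns(HTableInterface table, byte[] row1,
    byte[] fam1, byte[] fam2, byte[] qf1, byte[] qf3, byte[] val2)
    throws IOException {
  Delete delete = new Delete(row1);
  delete.deleteColumn(fam1, qf1);
  delete.deleteColumn(fam2, qf1);
  delete.deleteColumn(fam1, qf3);
  // The delete is applied only if fam1:qf1 currently holds val2.
  return table.checkAndDelete(row1, fam1, qf1, val2, delete);
}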

    this.region = initHRegion(tableName, method, conf, fam1);
    try {
      region.put(put);

      // We do support deleting more than 1 'latest' version
      Delete delete = new Delete(row1);
      delete.deleteColumn(fam1, qual);
      delete.deleteColumn(fam1, qual);
      region.delete(delete, null, false);

      Get get = new Get(row1);
      get.addFamily(fam1);
      Result r = region.get(get, null);
View Full Code Here

      put = new Put(row);
      put.add(fam, serverinfo, Bytes.toBytes("ip_address"));
      region.put(put);

      // ok now delete a split:
      Delete delete = new Delete(row);
      delete.deleteColumns(fam, splitA);
      region.delete(delete, null, true);

      // assert some things:
      Get get = new Get(row).addColumn(fam, serverinfo);
      Result result = region.get(get, null);
      assertEquals(1, result.size());

      get = new Get(row).addColumn(fam, splitA);
      result = region.get(get, null);
      assertEquals(0, result.size());

      get = new Get(row).addColumn(fam, splitB);
      result = region.get(get, null);
      assertEquals(1, result.size());

      // Assert that after a delete, I can put.
      put = new Put(row);
      put.add(fam, splitA, Bytes.toBytes("reference_A"));
      region.put(put);
      get = new Get(row);
      result = region.get(get, null);
      assertEquals(3, result.size());

      // Now delete all... then test I can add stuff back
      delete = new Delete(row);
      region.delete(delete, null, false);
      assertEquals(0, region.get(get, null).size());

      region.put(new Put(row).add(fam, splitA, Bytes.toBytes("reference_A")));
      result = region.get(get, null);
View Full Code Here

      Put put = new Put(row);
      put.add(fam, serverinfo, HConstants.LATEST_TIMESTAMP - 5, Bytes.toBytes("value"));
      region.put(put);

      // now delete something in the present
      Delete delete = new Delete(row);
      region.delete(delete, null, true);

      // make sure we still see our data
      Get get = new Get(row).addColumn(fam, serverinfo);
      Result result = region.get(get, null);
      assertEquals(1, result.size());

      // delete the future row
      delete = new Delete(row, HConstants.LATEST_TIMESTAMP - 3, null);
      region.delete(delete, null, true);

      // make sure it is gone
      get = new Get(row).addColumn(fam, serverinfo);
      result = region.get(get, null);
View Full Code Here
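
A plain new Delete(row), as in the first delete above, is stamped with the current time when it is applied, which is why the cell written at HConstants.LATEST_TIMESTAMP - 5 (a far-future timestamp) survives it. The three-argument constructor pins the delete to an explicit timestamp and covers every cell at or below that value. A hedged client-side sketch of the second, timestamp-bounded delete (the table handle is an assumption):

// Sketch: remove all cells in the row with timestamp <= LATEST_TIMESTAMP - 3,
// which covers the "future" cell written at LATEST_TIMESTAMP - 5 above.
// "table" is an assumed open HTable for the same table; null is the RowLock.
Delete futureDelete = new Delete(row, HConstants.LATEST_TIMESTAMP - 3, null);
table.delete(futureDelete);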

      byte [] rowA = Bytes.toBytes("rowA");
      byte [] rowB = Bytes.toBytes("rowB");

      byte [] value = Bytes.toBytes("value");

      Delete delete = new Delete(rowA);
      delete.deleteFamily(fam1);

      region.delete(delete, null, true);

      // now create data.
      Put put = new Put(rowA);
View Full Code Here

   * @param writeToWAL
   * @throws IOException
   */
  void delete(Map<byte[], List<KeyValue>> familyMap, UUID clusterId,
      boolean writeToWAL) throws IOException {
    Delete delete = new Delete();
    delete.setFamilyMap(familyMap);
    delete.setClusterId(clusterId);
    delete.setWriteToWAL(writeToWAL);
    internalDelete(delete, clusterId, writeToWAL);
  }
View Full Code Here

    }
  }

  public void testDeleteColumns_PostInsert() throws IOException,
      InterruptedException {
    Delete delete = new Delete(row);
    delete.deleteColumns(fam1, qual1);
    doTestDelete_AndPostInsert(delete);
  }
View Full Code Here

    delete.deleteColumns(fam1, qual1);
    doTestDelete_AndPostInsert(delete);
  }

  public void testDeleteFamily_PostInsert() throws IOException, InterruptedException {
    Delete delete = new Delete(row);
    delete.deleteFamily(fam1);
    doTestDelete_AndPostInsert(delete);
  }
View Full Code Here
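
These two tests differ only in how the Delete is scoped. As a hedged summary of the scoping calls used throughout these snippets (old 0.9x client API):

Delete wholeRow = new Delete(row);        // nothing else added: deletes the entire row
Delete newestCell = new Delete(row);
newestCell.deleteColumn(fam1, qual1);     // only the most recent version of fam1:qual1
Delete allVersions = new Delete(row);
allVersions.deleteColumns(fam1, qual1);   // every version of fam1:qual1
Delete wholeFamily = new Delete(row);
wholeFamily.deleteFamily(fam1);           // every column under family fam1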

          // originating cluster. A slave cluster receives the result as a Put
          // or Delete
          if (isPut) {
            internalPut(((Put) w), HConstants.DEFAULT_CLUSTER_ID, writeToWAL);
          } else {
            Delete d = (Delete)w;
            prepareDelete(d);
            internalDelete(d, HConstants.DEFAULT_CLUSTER_ID, writeToWAL);
          }
          return true;
        }
View Full Code Here
