Package org.apache.hadoop.hbase.client

Examples of org.apache.hadoop.hbase.client.Delete
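
The snippets below are extracted from larger classes, so most of them start mid-method. For orientation, here is a minimal, self-contained sketch of the typical client-side pattern; it assumes a 0.94-era HBase client on the classpath and a hypothetical table named "mytable" with column families "cf" and "old_cf":

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.util.Bytes;

public class DeleteExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    HTable table = new HTable(conf, "mytable");   // hypothetical table name
    try {
      // Deleting a whole row: a Delete built from just the row key removes every cell in it.
      table.delete(new Delete(Bytes.toBytes("row-1")));

      // Deleting selectively: narrow the Delete before submitting it.
      Delete delete = new Delete(Bytes.toBytes("row-2"));
      delete.deleteColumn(Bytes.toBytes("cf"), Bytes.toBytes("qual"));  // latest version of one cell
      delete.deleteFamily(Bytes.toBytes("old_cf"));                     // every cell in that family
      table.delete(delete);
    } finally {
      table.close();
    }
  }
}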


      Put put = new Put(Bytes.toBytes(1L));
      put.add(family, qual1, 1L, Bytes.toBytes(1L));
      region.put(put);

      region.flushcache();

      // Row-wide delete at timestamp 1L; the third argument is an optional row lock.
      Delete delete = new Delete(Bytes.toBytes(1L), 1L, null);
      //delete.deleteColumn(family, qual1);
      region.delete(delete, null, true);

      put = new Put(Bytes.toBytes(2L));
      put.add(family, qual1, 2L, Bytes.toBytes(2L));


            if (coprocessorHost.prePut((Put) m, walEdit, m.getWriteToWAL())) {
              // bypass everything
              return;
            }
          } else if (m instanceof Delete) {
            Delete d = (Delete) m;
            prepareDelete(d);
            if (coprocessorHost.preDelete(d, walEdit, d.getWriteToWAL())) {
              // bypass everything
              return;
            }
          }
        }
      }

      long txid = 0;
      boolean walSyncSuccessful = false;
      boolean locked = false;

      // 2. acquire the row lock(s)
      acquiredLocks = new ArrayList<Integer>(rowsToLock.size());
      for (byte[] row : rowsToLock) {
        // attempt to lock all involved rows, fail if one lock times out
        Integer lid = getLock(null, row, true);
        if (lid == null) {
          throw new IOException("Failed to acquire lock on "
              + Bytes.toStringBinary(row));
        }
        acquiredLocks.add(lid);
      }

      // 3. acquire the region lock
      lock(this.updatesLock.readLock(), acquiredLocks.size());
      locked = true;

      // 4. Get a mvcc write number
      MultiVersionConsistencyControl.WriteEntry w = mvcc.beginMemstoreInsert();

      long now = EnvironmentEdgeManager.currentTimeMillis();
      byte[] byteNow = Bytes.toBytes(now);
      try {
        // 5. Check mutations and apply edits to a single WALEdit
        for (Mutation m : mutations) {
          if (m instanceof Put) {
            Map<byte[], List<KeyValue>> familyMap = m.getFamilyMap();
            checkFamilies(familyMap.keySet());
            checkTimestamps(familyMap, now);
            updateKVTimestamps(familyMap.values(), byteNow);
          } else if (m instanceof Delete) {
            Delete d = (Delete) m;
            prepareDelete(d);
            prepareDeleteTimestamps(d.getFamilyMap(), byteNow);
          } else {
            throw new DoNotRetryIOException(
                "Action must be Put or Delete. But was: "
                    + m.getClass().getName());
          }
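The fragment above is server-side code: it runs coprocessor pre-hooks, acquires the row locks and the region updates lock, and then applies a batch of Puts and Deletes under a single MVCC write number. From the client side, an atomic Put-plus-Delete on one row can be expressed with RowMutations. A minimal sketch, assuming the same 0.94-era client API and a hypothetical open HTable named table:

      byte[] row = Bytes.toBytes("row-1");

      Put put = new Put(row);
      put.add(Bytes.toBytes("cf"), Bytes.toBytes("new-col"), Bytes.toBytes("value"));

      Delete delete = new Delete(row);
      delete.deleteColumns(Bytes.toBytes("cf"), Bytes.toBytes("old-col"));

      RowMutations mutations = new RowMutations(row);
      mutations.add(put);           // both mutations must target the same row
      mutations.add(delete);
      table.mutateRow(mutations);   // the Put and the Delete are applied atomically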

      Put put = new Put(row);
      put.add(familyName, col, 1, Bytes.toBytes("SomeRandomValue"));
      region.put(put);
      region.flushcache();

      Delete del = new Delete(row);
      region.delete(del, null, true);
      region.flushcache();

      // Get remaining rows (should have none)
      Get get = new Get(row);

                            ByteBuffer row,
                            ByteBuffer column,
        long timestamp, Map<ByteBuffer, ByteBuffer> attributes) throws IOError {
      try {
        HTable table = getTable(tableName);
        Delete delete = new Delete(getBytes(row));
        addAttributes(delete, attributes);
        byte [][] famAndQf = KeyValue.parseColumn(getBytes(column));
        if (famAndQf.length == 1) {
          delete.deleteFamily(famAndQf[0], timestamp);
        } else {
          delete.deleteColumns(famAndQf[0], famAndQf[1], timestamp);
        }
        table.delete(delete);

      } catch (IOException e) {
        LOG.warn(e.getMessage(), e);

    public void deleteAllRowTs(
        ByteBuffer tableName, ByteBuffer row, long timestamp,
        Map<ByteBuffer, ByteBuffer> attributes) throws IOError {
      try {
        HTable table = getTable(tableName);
        Delete delete = new Delete(getBytes(row), timestamp, null);
        addAttributes(delete, attributes);
        table.delete(delete);
      } catch (IOException e) {
        LOG.warn(e.getMessage(), e);
        throw new IOError(e.getMessage());

      try {
        table = getTable(tableName);
        Put put = new Put(getBytes(row), timestamp, null);
        addAttributes(put, attributes);

        Delete delete = new Delete(getBytes(row));
        addAttributes(delete, attributes);
        if (metrics != null) {
          metrics.incNumRowKeysInBatchMutate(mutations.size());
        }

        // I apologize for all this mess :)
        for (Mutation m : mutations) {
          byte[][] famAndQf = KeyValue.parseColumn(getBytes(m.column));
          if (m.isDelete) {
            if (famAndQf.length == 1) {
              delete.deleteFamily(famAndQf[0], timestamp);
            } else {
              delete.deleteColumns(famAndQf[0], famAndQf[1], timestamp);
            }
            delete.setWriteToWAL(m.writeToWAL);
          } else {
            if (famAndQf.length == 1) {
              put.add(famAndQf[0], HConstants.EMPTY_BYTE_ARRAY,
                  m.value != null ? getBytes(m.value)
                      : HConstants.EMPTY_BYTE_ARRAY);
            } else {
              put.add(famAndQf[0], famAndQf[1],
                  m.value != null ? getBytes(m.value)
                      : HConstants.EMPTY_BYTE_ARRAY);
            }
            put.setWriteToWAL(m.writeToWAL);
          }
        }
        if (!delete.isEmpty())
          table.delete(delete);
        if (!put.isEmpty())
          table.put(put);
      } catch (IOException e) {
        LOG.warn(e.getMessage(), e);

      List<Delete> deletes = new ArrayList<Delete>();

      for (BatchMutation batch : rowBatches) {
        byte[] row = getBytes(batch.row);
        List<Mutation> mutations = batch.mutations;
        Delete delete = new Delete(row);
        addAttributes(delete, attributes);
        Put put = new Put(row, timestamp, null);
        addAttributes(put, attributes);
        for (Mutation m : mutations) {
          byte[][] famAndQf = KeyValue.parseColumn(getBytes(m.column));
          if (m.isDelete) {
            // no qualifier, family only.
            if (famAndQf.length == 1) {
              delete.deleteFamily(famAndQf[0], timestamp);
            } else {
              delete.deleteColumns(famAndQf[0], famAndQf[1], timestamp);
            }
            delete.setWriteToWAL(m.writeToWAL);
          } else {
            if (famAndQf.length == 1) {
              put.add(famAndQf[0], HConstants.EMPTY_BYTE_ARRAY,
                  m.value != null ? getBytes(m.value)
                      : HConstants.EMPTY_BYTE_ARRAY);
            } else {
              put.add(famAndQf[0], famAndQf[1],
                  m.value != null ? getBytes(m.value)
                      : HConstants.EMPTY_BYTE_ARRAY);
            }
            put.setWriteToWAL(m.writeToWAL);
          }
        }
        if (!delete.isEmpty())
          deletes.add(delete);
        if (!put.isEmpty())
          puts.add(put);
      }

  public HTable truncateTable(byte [] tableName) throws IOException {
    HTable table = new HTable(getConfiguration(), tableName);
    Scan scan = new Scan();
    ResultScanner resScan = table.getScanner(scan);
    try {
      for (Result res : resScan) {
        Delete del = new Delete(res.getRow());
        table.delete(del);
      }
    } finally {
      // close the scanner once every row has been deleted
      resScan.close();
    }
    return table;
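truncateTable above issues one delete RPC per row. HTable also accepts a List<Delete>, which batches the round trips. A minimal sketch under the same assumptions (an open HTable named table as in the snippet above, plus java.util imports):

      List<Delete> deletes = new ArrayList<Delete>();
      ResultScanner scanner = table.getScanner(new Scan());
      try {
        for (Result res : scanner) {
          deletes.add(new Delete(res.getRow()));
        }
      } finally {
        scanner.close();
      }
      table.delete(deletes);   // issues the accumulated deletes as a batch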

    }
    // see comment above, remove "old" (or previous) single region
    for (byte[] row : rows) {
      LOG.info("createMultiRegions: deleting meta row -> " +
        Bytes.toStringBinary(row));
      meta.delete(new Delete(row));
    }
    if (cleanupFS) {
      // see HBASE-7417 - this confused TestReplication
      // remove the "old" region from FS
      Path tableDir = new Path(getDefaultRootDirPath().toString()

      for (int iRow = 0; iRow < numRowsPerFlush; ++iRow) {
        final byte[] row = Bytes.toBytes(String.format(keyFormat,
            actualStartKey + rand.nextInt(actualEndKey - actualStartKey)));

        Put put = new Put(row);
        Delete del = new Delete(row);
        for (int iCol = 0; iCol < numColsPerRow; ++iCol) {
          final byte[] cf = cfBytes[rand.nextInt(numCF)];
          final long ts = rand.nextInt();
          final byte[] qual = Bytes.toBytes("col" + iCol);
          if (rand.nextBoolean()) {
            final byte[] value = Bytes.toBytes("value_for_row_" + iRow +
                "_cf_" + Bytes.toStringBinary(cf) + "_col_" + iCol + "_ts_" +
                ts + "_random_" + rand.nextLong());
            put.add(cf, qual, ts, value);
          } else if (rand.nextDouble() < 0.8) {
            del.deleteColumn(cf, qual, ts);
          } else {
            del.deleteColumns(cf, qual, ts);
          }
        }

        if (!put.isEmpty()) {
          table.put(put);
        }

        if (!del.isEmpty()) {
          table.delete(del);
        }
      }
      LOG.info("Initiating flush #" + iFlush + " for table " + tableName);
      table.flushCommits();
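The load-generator fragment above picks between deleteColumn and deleteColumns at random; the two calls have different version semantics. A short illustration, assuming a hypothetical family "cf" and qualifier "col0":

      byte[] cf = Bytes.toBytes("cf");       // hypothetical column family
      byte[] qual = Bytes.toBytes("col0");   // hypothetical qualifier
      Delete delete = new Delete(Bytes.toBytes("row-1"));

      // deleteColumn targets a single version of the cell.
      delete.deleteColumn(cf, qual);         // only the most recent version
      delete.deleteColumn(cf, qual, 1234L);  // only the version at timestamp 1234

      // deleteColumns targets every version up to a timestamp.
      delete.deleteColumns(cf, qual);        // all versions of the cell
      delete.deleteColumns(cf, qual, 1234L); // all versions with timestamp <= 1234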
