Package org.apache.hadoop.hbase.client

Examples of org.apache.hadoop.hbase.client.Append
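Append performs an atomic, server-side append of bytes to the current value of a cell, so the client avoids a read-modify-write round trip. Before the project-specific excerpts below, here is a minimal, self-contained sketch of the basic flow; the table name, row, family, and qualifier are placeholders, and the pre-1.0 HTable API that these excerpts use is assumed:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public class AppendExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HTable table = new HTable(conf, "exampleTable"); // hypothetical table
    try {
      Append append = new Append(Bytes.toBytes("row1"));
      // appends "suffix" to whatever f:q already holds (creates the cell if absent)
      append.add(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("suffix"));
      Result result = table.append(append);
      // the returned Result carries the new, post-append cell value
      System.out.println(Bytes.toString(result.getValue(Bytes.toBytes("f"), Bytes.toBytes("q"))));
    } finally {
      table.close();
    }
  }
}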


    // From Apache Phoenix: builds an Append that returns unused, cached sequence
    // values to the SEQUENCE table. The OPERATION_ATTRIB attribute tells the
    // SequenceRegionObserver coprocessor which metadata operation this is.
    private Append newReturn(SequenceValue value) {
        byte[] key = SchemaUtil.getSequenceKey(this.key.getTenantId(), this.key.getSchemaName(), this.key.getSequenceName());
        Append append = new Append(key);
        byte[] opBuf = new byte[] {(byte)MetaOp.RETURN_SEQUENCE.ordinal()};
        append.setAttribute(SequenceRegionObserver.OPERATION_ATTRIB, opBuf);
        append.setAttribute(SequenceRegionObserver.CURRENT_VALUE_ATTRIB, PDataType.LONG.toBytes(value.nextValue));
        Map<byte[], List<KeyValue>> familyMap = append.getFamilyMap();
        familyMap.put(PhoenixDatabaseMetaData.SEQUENCE_FAMILY_BYTES, Arrays.<KeyValue>asList(
            KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.CURRENT_VALUE_BYTES, value.timestamp, PDataType.LONG.toBytes(value.currentValue)),
            // set LIMIT_REACHED flag to false since we are returning unused sequence values
            KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.LIMIT_REACHED_FLAG_BYTES, value.timestamp, PDataType.FALSE_BYTES)));
        return append;
    }


    // From Apache Phoenix: builds the Append that creates a sequence row. When an
    // explicit timestamp is given, it travels in MAX_TIMERANGE_ATTRIB so the
    // server-side observer can bound the operation's time range.
    public Append createSequence(long startWith, long incrementBy, long cacheSize, long timestamp, long minValue, long maxValue, boolean cycle) {
        byte[] key = SchemaUtil.getSequenceKey(this.key.getTenantId(), this.key.getSchemaName(), this.key.getSequenceName());
        Append append = new Append(key);
        append.setAttribute(SequenceRegionObserver.OPERATION_ATTRIB, new byte[] {(byte)MetaOp.CREATE_SEQUENCE.ordinal()});
        if (timestamp != HConstants.LATEST_TIMESTAMP) {
            append.setAttribute(SequenceRegionObserver.MAX_TIMERANGE_ATTRIB, Bytes.toBytes(timestamp));
        }
        Map<byte[], List<KeyValue>> familyMap = append.getFamilyMap();
        byte[] startWithBuf = PDataType.LONG.toBytes(startWith);
        familyMap.put(PhoenixDatabaseMetaData.SEQUENCE_FAMILY_BYTES, Arrays.<KeyValue>asList(
                KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SEQUENCE_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, timestamp, ByteUtil.EMPTY_BYTE_ARRAY),
                KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.CURRENT_VALUE_BYTES, timestamp, startWithBuf),
                KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SEQUENCE_FAMILY_BYTES, PhoenixDatabaseMetaData.START_WITH_BYTES, timestamp, startWithBuf)
                // ... KeyValues for the remaining sequence metadata columns (increment by,
                // cache size, min/max value, cycle flag) are elided in this excerpt
                ));
        return append;
    }

    // From Apache Phoenix: builds the Append that drops a sequence row. Only the
    // empty marker column is written; the server-side observer performs the delete.
    public Append dropSequence(long timestamp) {
        byte[] key = SchemaUtil.getSequenceKey(this.key.getTenantId(), this.key.getSchemaName(), this.key.getSequenceName());
        Append append = new Append(key);
        append.setAttribute(SequenceRegionObserver.OPERATION_ATTRIB, new byte[] {(byte)MetaOp.DROP_SEQUENCE.ordinal()});
        if (timestamp != HConstants.LATEST_TIMESTAMP) {
            append.setAttribute(SequenceRegionObserver.MAX_TIMERANGE_ATTRIB, Bytes.toBytes(timestamp));
        }
        Map<byte[], List<KeyValue>> familyMap = append.getFamilyMap();
        familyMap.put(PhoenixDatabaseMetaData.SEQUENCE_FAMILY_BYTES, Arrays.<KeyValue>asList(
                KeyValueUtil.newKeyValue(key, PhoenixDatabaseMetaData.SEQUENCE_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, timestamp, ByteUtil.EMPTY_BYTE_ARRAY)));
        return append;
    }
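All three Phoenix excerpts rely on Append.setAttribute to carry an operation code to a coprocessor running in the region server. The sketch below shows only the server-side getAttribute round trip; the observer class, attribute name, and dispatch logic are hypothetical stand-ins, not Phoenix's actual SequenceRegionObserver:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;

// Hypothetical observer: shows how an attribute set on the client's Append
// can be recovered server-side before the append is applied.
public class OperationTagObserver extends BaseRegionObserver {
  public static final String OPERATION_ATTRIB = "SEQUENCE_OPERATION"; // illustrative name

  @Override
  public Result preAppend(ObserverContext<RegionCoprocessorEnvironment> ctx, Append append)
      throws IOException {
    byte[] op = append.getAttribute(OPERATION_ATTRIB);
    if (op != null) {
      // dispatch on op[0], e.g. the ordinal of a MetaOp-style enum
    }
    return null; // null means "proceed with the normal append"
  }
}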

    // From an HBase atomicity test: a worker thread that repeatedly appends a
    // single character to the same row through the internal HRegion API.
    @Override
    public void run() {
      int count = 0;
      while (count < appendCounter) {
        Append app = new Append(appendRow);
        app.add(family, qualifier, CHAR);
        count++;
        try {
          region.append(app, null, true);
        } catch (IOException e) {
          e.printStackTrace();
        }
      }
    }

  // From an HBase coprocessor-wrapper test: appends to a row, then verifies the
  // Result handed back by append() reflects the written value. Note the test
  // reuses qualifierCol1 as both the row key and the qualifier.
  private void checkAppend() throws IOException {
    final byte[] appendValue = Bytes.toBytes("append");
    Append append = new Append(qualifierCol1).add(TEST_FAMILY, qualifierCol1, appendValue);
    Result appendResult = hTableInterface.append(append);
    byte[] appendedRow = appendResult.getRow();
    checkRowValue(appendedRow, appendValue);
  }
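Because append() returns the post-append cell values in a Result, concatenation can be verified without a follow-up Get. A short sketch under the same assumptions as the excerpt above (hTableInterface and TEST_FAMILY), with a hypothetical row and qualifier:

// Sketch: two consecutive appends to the same cell; the second Result should
// show the concatenation "foobar".
byte[] row = Bytes.toBytes("concatRow");          // hypothetical row
byte[] qual = Bytes.toBytes("q");                 // hypothetical qualifier
hTableInterface.append(new Append(row).add(TEST_FAMILY, qual, Bytes.toBytes("foo")));
Result r = hTableInterface.append(new Append(row).add(TEST_FAMILY, qual, Bytes.toBytes("bar")));
byte[] combined = r.getValue(TEST_FAMILY, qual);  // expected: "foobar"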

  // From an HRegion test: appending to a region opened read-only must fail.
  public void testAppendWithReadOnlyTable() throws Exception {
    byte[] TABLE = Bytes.toBytes("readOnlyTable");
    this.region = initHRegion(TABLE, getName(), conf, true, Bytes.toBytes("somefamily"));
    boolean exceptionCaught = false;
    Append append = new Append(Bytes.toBytes("somerow"));
    append.add(Bytes.toBytes("somefamily"), Bytes.toBytes("somequalifier"),
        Bytes.toBytes("somevalue"));
    try {
      region.append(append, false);
    } catch (IOException e) {
      exceptionCaught = true;
    }
    assertTrue(exceptionCaught);
  }

        // From HBase's multi-threaded update load test: for a random subset of rows,
        // builds one Increment and one Append per row, tagging each mutation in a
        // MUTATE_INFO column so a verifier can later replay what happened.
        byte[][] columnFamilies = dataGenerator.getColumnFamilies();
        while ((rowKeyBase = getNextKeyToUpdate()) < endKey) {
          if (RandomUtils.nextInt(100) < updatePercent) {
            byte[] rowKey = dataGenerator.getDeterministicUniqueKey(rowKeyBase);
            Increment inc = new Increment(rowKey);
            Append app = new Append(rowKey);
            numKeys.addAndGet(1);
            int columnCount = 0;
            for (byte[] cf : columnFamilies) {
              long cfHash = Arrays.hashCode(cf);
              inc.addColumn(cf, INCREMENT, cfHash);
              buf.setLength(0); // Clear the buffer
              buf.append("#").append(Bytes.toString(INCREMENT));
              buf.append(":").append(MutationType.INCREMENT.getNumber());
              app.add(cf, MUTATE_INFO, Bytes.toBytes(buf.toString()));
              ++columnCount;
              if (!isBatchUpdate) {
                mutate(table, inc, rowKeyBase);
                numCols.addAndGet(1);
                inc = new Increment(rowKey);
                mutate(table, app, rowKeyBase);
                numCols.addAndGet(1);
                app = new Append(rowKey);
              }
              Result result = null;
              try {
                Get get = new Get(rowKey);
                get.addFamily(cf);
                result = table.get(get);
              } catch (IOException ie) {
                LOG.warn("Failed to get the row for key = ["
                  + rowKey + "], column family = [" + Bytes.toString(cf) + "]", ie);
              }
              Map<byte[], byte[]> columnValues =
                result != null ? result.getFamilyMap(cf) : null;
              if (columnValues == null) {
                failedKeySet.add(rowKeyBase);
                LOG.error("Failed to update the row with key = ["
                  + rowKey + "], since we could not get the original row");
                // skip this column family: iterating columnValues below would NPE
                continue;
              }
              for (byte[] column : columnValues.keySet()) {
                if (Bytes.equals(column, INCREMENT)
                    || Bytes.equals(column, MUTATE_INFO)) {
                  continue;
                }
                MutationType mt = MutationType.valueOf(
                  RandomUtils.nextInt(MutationType.values().length));
                long columnHash = Arrays.hashCode(column);
                long hashCode = cfHash + columnHash;
                byte[] hashCodeBytes = Bytes.toBytes(hashCode);
                byte[] checkedValue = HConstants.EMPTY_BYTE_ARRAY;
                if (hashCode % 2 == 0) {
                  Cell kv = result.getColumnLatestCell(cf, column);
                  checkedValue = kv != null ? CellUtil.cloneValue(kv) : null;
                  Preconditions.checkNotNull(checkedValue,
                    "Column value to be checked should not be null");
                }
                buf.setLength(0); // Clear the buffer
                buf.append("#").append(Bytes.toString(column)).append(":");
                ++columnCount;
                switch (mt) {
                case PUT:
                  Put put = new Put(rowKey);
                  put.add(cf, column, hashCodeBytes);
                  mutate(table, put, rowKeyBase, rowKey, cf, column, checkedValue);
                  buf.append(MutationType.PUT.getNumber());
                  break;
                case DELETE:
                  Delete delete = new Delete(rowKey);
                  // Delete all versions since a put
                  // could be called multiple times if CM is used
                  delete.deleteColumns(cf, column);
                  mutate(table, delete, rowKeyBase, rowKey, cf, column, checkedValue);
                  buf.append(MutationType.DELETE.getNumber());
                  break;
                default:
                  buf.append(MutationType.APPEND.getNumber());
                  app.add(cf, column, hashCodeBytes);
                }
                app.add(cf, MUTATE_INFO, Bytes.toBytes(buf.toString()));
                if (!isBatchUpdate) {
                  mutate(table, app, rowKeyBase);
                  numCols.addAndGet(1);
                  app = new Append(rowKey);
                }
              }
            }
            // Completion reconstructed from context: in batch mode the accumulated
            // Increment and Append are sent once per row instead of per column.
            if (isBatchUpdate) {
              if (verbose) {
                // (the full source logs the batched mutations here)
              }
              mutate(table, inc, rowKeyBase);
              numCols.addAndGet(columnCount);
              mutate(table, app, rowKeyBase);
              numCols.addAndGet(columnCount);
            }
          }
        }

      // From an HBase access-control test: the privileged action puts and then
      // appends as a particular user, closing the table in the finally block.
      public Object run() throws Exception {
        byte[] row = Bytes.toBytes("random_row");
        byte[] qualifier = Bytes.toBytes("q");
        Put put = new Put(row);
        put.add(TEST_FAMILY, qualifier, Bytes.toBytes(1));
        Append append = new Append(row);
        append.add(TEST_FAMILY, qualifier, Bytes.toBytes(2));
        HTable t = new HTable(conf, TEST_TABLE.getTableName());
        try {
          t.put(put);
          t.append(append);
        } finally {
          t.close();
        }
        return null;
      }

    // set the default value for equal comparison
    mutateBuilder = MutationProto.newBuilder(proto);
    mutateBuilder.setDurability(MutationProto.Durability.USE_DEFAULT);

    Append append = ProtobufUtil.toAppend(proto, null);

    // an Append always uses the latest timestamp,
    // so reset the proto's timestamp to match before comparing
    mutateBuilder.setTimestamp(append.getTimeStamp());
    assertEquals(mutateBuilder.build(), ProtobufUtil.toMutation(MutationType.APPEND, append));
  }
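The two ProtobufUtil helpers exercised by that test can also be used directly to round-trip an Append through its wire format. A minimal sketch; the row, family, qualifier, and value are placeholders:

import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType;
import org.apache.hadoop.hbase.util.Bytes;

public class AppendProtoRoundTrip {
  public static Append roundTrip() throws java.io.IOException {
    Append original = new Append(Bytes.toBytes("row"));
    original.add(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
    // serialize to the wire format, then back; the cell values travel inline
    // in the proto, so no CellScanner is needed on the way back in
    MutationProto proto = ProtobufUtil.toMutation(MutationType.APPEND, original);
    return ProtobufUtil.toAppend(proto, null);
  }
}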

    // Variant of the appender thread above, using the single-argument
    // HRegion.append(Append) overload.
    @Override
    public void run() {
      int count = 0;
      while (count < appendCounter) {
        Append app = new Append(appendRow);
        app.add(family, qualifier, CHAR);
        count++;
        try {
          region.append(app);
        } catch (IOException e) {
          e.printStackTrace();
        }
      }
    }
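When the caller does not need the appended value back, Append can skip returning it, which saves bandwidth in write-heavy loops like the threads above. A small sketch under the same placeholder names as the first example:

Append append = new Append(Bytes.toBytes("row1"));
append.add(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("suffix"));
append.setReturnResults(false); // server still appends, but no cell values are shipped back
table.append(append);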
