Package org.apache.hadoop.hbase.io

Examples of org.apache.hadoop.hbase.io.BatchUpdate
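BatchUpdate was the single-row mutation type of early HBase (later deprecated in favor of Put and Delete): it collects puts and deletes against one row, and committing it through HTable applies them together. Before the excerpts from the HBase codebase below, here is a minimal sketch of the basic pattern; the table and column names are hypothetical:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.io.BatchUpdate;
    import org.apache.hadoop.hbase.util.Bytes;

    HBaseConfiguration conf = new HBaseConfiguration();
    HTable table = new HTable(conf, "my_table");  // hypothetical table

    // Every put/delete in one BatchUpdate targets the same row.
    BatchUpdate update = new BatchUpdate(Bytes.toBytes("row1"));
    update.put(Bytes.toBytes("info:name"), Bytes.toBytes("alice"));  // column is "family:qualifier"
    update.delete(Bytes.toBytes("info:stale"));
    table.commit(update);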


      } else {
        // A zero increment is a no-op read; otherwise bump the stored bytes.
        if (amount == 0) return Bytes.toLong(value);
        value = Bytes.incrementBytes(value, amount);
      }

      // Write the incremented value back while the row lock (lid) is held.
      BatchUpdate b = new BatchUpdate(row, ts);
      b.put(column, value);
      batchUpdate(b, lid, true);
      return Bytes.toLong(value);
    } finally {
      splitsAndClosesLock.readLock().unlock();
      releaseRowLock(lid);
    }
  }


  // Write a secondary-index entry for the given row into the index table.
  private void updateIndex(IndexSpecification indexSpec, byte[] row,
      SortedMap<byte[], byte[]> columnValues) throws IOException {
    BatchUpdate indexUpdate = createIndexUpdate(indexSpec, row, columnValues);
    getIndexTable(indexSpec).commit(indexUpdate);
    LOG.debug("Index [" + indexSpec.getIndexId() + "] adding new entry ["
        + Bytes.toString(indexUpdate.getRow()) + "] for row ["
        + Bytes.toString(row) + "]");
  }

  // Build the BatchUpdate for an index row: the index key is derived from the
  // base row's values, and the base row key itself is stored so it can be
  // recovered from the index entry.
  private BatchUpdate createIndexUpdate(IndexSpecification indexSpec,
      byte[] row, SortedMap<byte[], byte[]> columnValues) {
    byte[] indexRow = indexSpec.getKeyGenerator().createIndexKey(row,
        columnValues);
    BatchUpdate update = new BatchUpdate(indexRow);

    update.put(IndexedTable.INDEX_BASE_ROW_COLUMN, row);

    // Indexed columns are required; a missing value indicates a bug.
    for (byte[] col : indexSpec.getIndexedColumns()) {
      byte[] val = columnValues.get(col);
      if (val == null) {
        throw new RuntimeException("Unexpected missing column value. ["
            + Bytes.toString(col) + "]");
      }
      update.put(col, val);
    }

    // Additional columns are copied into the index row only if present.
    for (byte[] col : indexSpec.getAdditionalColumns()) {
      byte[] val = columnValues.get(col);
      if (val != null) {
        update.put(col, val);
      }
    }

    return update;
  }

      // Update the meta table: refresh region info and clear the server and
      // startcode columns so that region i becomes eligible for reassignment.
      BatchUpdate b = new BatchUpdate(i.getRegionName());
      updateRegionInfo(b, i);
      b.delete(COL_SERVER);
      b.delete(COL_STARTCODE);
      server.batchUpdate(m.getRegionName(), b, -1L);
      if (LOG.isDebugEnabled()) {
        LOG.debug("Updated columns in row: " + i.getRegionNameAsString());
      }

    if (LOG.isDebugEnabled()) {
      LOG.debug(split.getRegionNameAsString() +
        " no longer has references to " + Bytes.toString(parent));
    }

    // The daughter region no longer references the parent, so delete the
    // split column from the parent's row in the meta region.
    BatchUpdate b = new BatchUpdate(parent);
    b.delete(splitColumn);
    srvr.batchUpdate(metaRegionName, b, -1L);

    return result;
  }

      // Resolve the region holding the first row; the leading arguments of
      // this call are cut off in the excerpt, but it mirrors the lookup
      // inside the loop below.
      HRegionLocation location = getRegionLocationForRowWithRetries(tableName,
          list.get(0).getRow(), false);
      byte [] currentRegion = location.getRegionInfo().getRegionName();
      byte [] region = currentRegion;
      boolean isLastRow = false;
      for (int i = 0; i < list.size() && tries < numRetries; i++) {
        BatchUpdate batchUpdate = list.get(i);
        tempUpdates.add(batchUpdate);
        isLastRow = (i + 1) == list.size();
        // Peek at the next row's region so that consecutive updates bound for
        // the same region can be sent in a single batchUpdates() call.
        if (!isLastRow) {
          location = getRegionLocationForRowWithRetries(tableName,
            list.get(i+1).getRow(), false);
          region = location.getRegionInfo().getRegionName();
        }
        if (!Bytes.equals(currentRegion, region) || isLastRow || retryOnlyOne) {
          final BatchUpdate[] updates = tempUpdates.toArray(new BatchUpdate[0]);
          int index = getRegionServerWithRetries(new ServerCallable<Integer>(
              this, tableName, batchUpdate.getRow()) {
            public Integer call() throws IOException {
              int i = server.batchUpdates(location.getRegionInfo()
                  .getRegionName(), updates);
              return i;
            }
          });
          // index != -1 means the server did not accept the whole batch.
          if (index != -1) {
            if (tries == numRetries - 1) {
              throw new RetriesExhaustedException("Some server",
                  currentRegion, batchUpdate.getRow(),
                  tries, new ArrayList<Throwable>());
            }
            long sleepTime = getPauseTime(tries);
            if (LOG.isDebugEnabled()) {
              LOG.debug("Reloading region " + Bytes.toString(currentRegion) +
                  " location"); // message truncated here in the original excerpt
            }
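From the client's point of view, the per-region routing above is hidden behind HTable. A hedged sketch of committing a multi-row batch, assuming the era's List-taking commit overload and a hypothetical table and column:

    List<BatchUpdate> batch = new ArrayList<BatchUpdate>();
    for (int i = 0; i < 100; i++) {
      BatchUpdate u = new BatchUpdate(Bytes.toBytes("row" + i));
      u.put(Bytes.toBytes("info:n"), Bytes.toBytes(i));
      batch.add(u);
    }
    table.commit(batch);  // rows may span regions; the client groups them per region as above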

    transactionManager = new TransactionManager(conf);
    writeInitialRows();
  }

  // Seed the test table: ROW1 holds the total; ROW2 and ROW3 start at zero.
  private void writeInitialRows() throws IOException {
    BatchUpdate update = new BatchUpdate(ROW1);
    update.put(COL_A, Bytes.toBytes(TOTAL_VALUE));
    table.commit(update);
    update = new BatchUpdate(ROW2);
    update.put(COL_A, Bytes.toBytes(0));
    table.commit(update);
    update = new BatchUpdate(ROW3);
    update.put(COL_A, Bytes.toBytes(0));
    table.commit(update);
  }

    if (flushMidWay) {
      flushRegionServer();
    }

    // Writes: commit each row's new value under the same transaction.
    BatchUpdate write = new BatchUpdate(ROW1);
    write.put(COL_A, Bytes.toBytes(row1));
    table.commit(transactionState, write);

    write = new BatchUpdate(ROW2);
    write.put(COL_A, Bytes.toBytes(row2));
    table.commit(transactionState, write);

    write = new BatchUpdate(ROW3);
    write.put(COL_A, Bytes.toBytes(row3));
    table.commit(transactionState, write);

    return transactionState;
  }
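For context, the transactional excerpts above and below use the old org.apache.hadoop.hbase.client.transactional contrib module. A hedged sketch of the client-side lifecycle, assuming that module's TransactionManager API (beginTransaction/tryCommit) and a hypothetical new value:

    TransactionState tx = transactionManager.beginTransaction();

    BatchUpdate write = new BatchUpdate(ROW1);
    write.put(COL_A, Bytes.toBytes(newValue));  // newValue is hypothetical
    table.commit(tx, write);                    // buffered under the transaction, as in the excerpt

    transactionManager.tryCommit(tx);           // validates and commits; may throw CommitUnsuccessfulException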

    // Write three rows' updates under one transaction, commit it, and verify
    // that all three updates are recovered from the transactional log.
    long transactionId = 1;
    logManager.writeStartToLog(transactionId);

    BatchUpdate update1 = new BatchUpdate(row1);
    update1.put(col, val1);
    logManager.writeUpdateToLog(transactionId, update1);

    BatchUpdate update2 = new BatchUpdate(row2);
    update2.put(col, val2);
    logManager.writeUpdateToLog(transactionId, update2);

    BatchUpdate update3 = new BatchUpdate(row3);
    update3.put(col, val3);
    logManager.writeUpdateToLog(transactionId, update3);

    logManager.writeCommitToLog(transactionId);

    // log.completeCacheFlush(regionName, tableName, logSeqId);

    log.close();
    Path filename = log.computeFilename(log.getFilenum());

    Map<Long, List<BatchUpdate>> commits = logManager.getCommitsFromLog(
        filename, -1, null);

    assertEquals(1, commits.size());
    assertTrue(commits.containsKey(transactionId));
    assertEquals(3, commits.get(transactionId).size());

    List<BatchUpdate> updates = commits.get(transactionId);

    update1 = updates.get(0);
    assertTrue(Bytes.equals(row1, update1.getRow()));
    assertTrue(Bytes.equals(val1, update1.iterator().next().getValue()));

    update2 = updates.get(1);
    assertTrue(Bytes.equals(row2, update2.getRow()));
    assertTrue(Bytes.equals(val2, update2.iterator().next().getValue()));

    update3 = updates.get(2);
    assertTrue(Bytes.equals(row3, update3.getRow()));
    assertTrue(Bytes.equals(val3, update3.iterator().next().getValue()));
  }

    // (the construction of the log manager from the log, regionInfo, and
    // conf is cut off at the start of this excerpt)

    // Same pattern as the previous test, but the transaction is aborted, so
    // recovery from the log should yield none of these updates.
    long transactionId = 1;
    logManager.writeStartToLog(transactionId);

    BatchUpdate update1 = new BatchUpdate(row1);
    update1.put(col, val1);
    logManager.writeUpdateToLog(transactionId, update1);

    BatchUpdate update2 = new BatchUpdate(row2);
    update2.put(col, val2);
    logManager.writeUpdateToLog(transactionId, update2);

    BatchUpdate update3 = new BatchUpdate(row3);
    update3.put(col, val3);
    logManager.writeUpdateToLog(transactionId, update3);

    logManager.writeAbortToLog(transactionId);

    // log.completeCacheFlush(regionName, tableName, logSeqId);
