Examples of HLogKey


Examples of org.apache.hadoop.hbase.regionserver.HLogKey

    SequenceFile.Reader logReader = new SequenceFile.Reader(fileSystem,
        reconstructionLog, conf);

    try {
      HLogKey key = new HLogKey();
      HLogEdit val = new HLogEdit();
      long skippedEdits = 0;
      long totalEdits = 0;
      long startCount = 0;
      long writeCount = 0;
      long abortCount = 0;
      long commitCount = 0;
      // How many edits to apply before we send a progress report.
      int reportInterval = conf.getInt("hbase.hstore.report.interval.edits",
          2000);
      while (logReader.next(key, val)) {
        LOG.debug("Processing edit: key: " + key.toString() + " val: "
            + val.toString());
        if (key.getLogSeqNum() < maxSeqID) {
          skippedEdits++;
          continue;
        }

        // Check that this edit is a transactional entry for this region
        // (and skip WAL meta columns).
        byte[] column = val.getColumn();
        Long transactionId = val.getTransactionId();
        if (!val.isTransactionEntry() || HLog.isMetaColumn(column)
            || !Bytes.equals(key.getRegionName(), regionInfo.getRegionName())) {
          continue;
        }

        List<BatchUpdate> updates = pendingTransactionsById.get(transactionId);
        switch (val.getOperation()) {
        case START:
          if (updates != null || abortedTransactions.contains(transactionId)
              || commitedTransactionsById.containsKey(transactionId)) {
            LOG.error("Processing start for transaction: " + transactionId
                + ", but have already seen start message");
            throw new IOException("Corrupted transaction log");
          }
          updates = new LinkedList<BatchUpdate>();
          pendingTransactionsById.put(transactionId, updates);
          startCount++;
          break;

        case WRITE:
          if (updates == null) {
            LOG.error("Processing edit for transaction: " + transactionId
                + ", but have not seen start message");
            throw new IOException("Corrupted transaction log");
          }

          BatchUpdate tranUpdate = new BatchUpdate(key.getRow());
          if (val.getVal() != null) {
            tranUpdate.put(val.getColumn(), val.getVal());
          } else {
            tranUpdate.delete(val.getColumn());
          }
View Full Code Here
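
The loop above reads each reconstruction-log record into a reusable HLogKey/HLogEdit pair, skips edits whose sequence number falls below the region's highest flushed sequence id, and drops entries written for other regions. A minimal sketch of that filtering decision, using only the accessors shown above (getLogSeqNum(), getRegionName()); the helper class and its parameters are illustrative, not part of HBase:

import org.apache.hadoop.hbase.regionserver.HLogKey;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch only: mirrors the skip logic in the reconstruction loop above.
final class ReplayFilter {

  /**
   * An edit is worth replaying when its sequence number is at or past the
   * region's highest flushed sequence id and it was written for this region.
   */
  static boolean shouldReplay(HLogKey key, long maxSeqId, byte[] regionName) {
    if (key.getLogSeqNum() < maxSeqId) {
      return false; // already covered by a flush
    }
    return Bytes.equals(key.getRegionName(), regionName);
  }
}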

Examples of org.apache.hadoop.hbase.regionserver.wal.HLogKey

            this.conf.getInt("hbase.master.assignment.timeoutmonitor.timeout",
                180000) / 2);
        long lastReport = EnvironmentEdgeManager.currentTimeMillis();

        while ((entry = reader.next()) != null) {
          HLogKey key = entry.getKey();
          WALEdit val = entry.getEdit();

          if (reporter != null) {
            intervalEdits += val.size();
            if (intervalEdits >= interval) {
              // Number of edits interval reached
              intervalEdits = 0;
              long cur = EnvironmentEdgeManager.currentTimeMillis();
              if (lastReport + period <= cur) {
                status.setStatus("Replaying edits..." +
                    " skipped=" + skippedEdits +
                    " edits=" + editsCount);
                // Timeout reached
                if(!reporter.progress()) {
                  msg = "Progressable reporter failed, stopping replay";
                  LOG.warn(msg);
                  status.abort(msg);
                  throw new IOException(msg);
                }
                reported_once = true;
                lastReport = cur;
              }
            }
          }

          // Start coprocessor replay here. The coprocessor is for each WALEdit
          // instead of a KeyValue.
          if (coprocessorHost != null) {
            status.setStatus("Running pre-WAL-restore hook in coprocessors");
            if (coprocessorHost.preWALRestore(this.getRegionInfo(), key, val)) {
              // The coprocessor elected to bypass this log entry; skip it.
              continue;
            }
          }

          if (firstSeqIdInLog == -1) {
            firstSeqIdInLog = key.getLogSeqNum();
          }
          // Now, figure if we should skip this edit.
          if (key.getLogSeqNum() <= currentEditSeqId) {
            skippedEdits++;
            continue;
          }
          currentEditSeqId = key.getLogSeqNum();
          boolean flush = false;
          for (KeyValue kv: val.getKeyValues()) {
            // Check that this edit is for this region. Also guard against replaying
            // special WAL meta entries such as HBASE::CACHEFLUSH markers.
            if (kv.matchingFamily(HLog.METAFAMILY) ||
                !Bytes.equals(key.getEncodedRegionName(), this.regionInfo.getEncodedNameAsBytes())) {
              skippedEdits++;
              continue;
            }
            // Figure which store the edit is meant for.
            if (store == null || !kv.matchingFamily(store.getFamily().getName())) {
View Full Code Here
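
The replay loop above makes two separate decisions per entry: skip whole entries whose sequence number is already covered by the region's current edit sequence id, and skip individual KeyValues that are WAL meta markers or belong to another region. A compact sketch of those two checks, assuming only the accessors shown in the snippet (getLogSeqNum(), getEncodedRegionName(), KeyValue.matchingFamily(), HLog.METAFAMILY); the class and method names are illustrative:

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch only: the per-entry and per-KeyValue filters used during recovered-edits replay.
final class RecoveredEditsFilter {

  /** True when the whole entry was already persisted by an earlier flush. */
  static boolean alreadyFlushed(HLogKey key, long currentEditSeqId) {
    return key.getLogSeqNum() <= currentEditSeqId;
  }

  /** True when a KeyValue is a WAL meta marker (e.g. HBASE::CACHEFLUSH) or targets another region. */
  static boolean shouldSkip(HLogKey key, KeyValue kv, byte[] encodedRegionName) {
    return kv.matchingFamily(HLog.METAFAMILY)
        || !Bytes.equals(key.getEncodedRegionName(), encodedRegionName);
  }
}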

Examples of org.apache.hadoop.hbase.regionserver.wal.HLogKey

    while (entry != null) {
      WALEdit edit = entry.getEdit();
      this.metrics.logEditsReadRate.inc(1);
      seenEntries++;
      // Remove all KVs that should not be replicated
      HLogKey logKey = entry.getKey();
      // don't replicate if the log entries originated in the peer
      if (!logKey.getClusterId().equals(peerClusterId)) {
        removeNonReplicableEdits(edit);
        // Don't replicate catalog (ROOT/META) entries; only ship the edit if it
        // still contains something to replicate and replication is currently enabled.
        if (!(Bytes.equals(logKey.getTablename(), HConstants.ROOT_TABLE_NAME) ||
            Bytes.equals(logKey.getTablename(), HConstants.META_TABLE_NAME)) &&
            edit.size() != 0 && replicating.get()) {
          // Only set the clusterId if it is a local key.
          // This ensures that the originator sets the cluster id
          // and all replicas retain the initial cluster id.
          // This is *only* place where a cluster id other than the default is set.
          if (HConstants.DEFAULT_CLUSTER_ID == logKey.getClusterId()) {
            logKey.setClusterId(this.clusterId);
          }
          currentNbOperations += countDistinctRowKeys(edit);
          currentNbEntries++;
          currentSize += entry.getEdit().heapSize();
        } else {
View Full Code Here

Examples of org.apache.hadoop.hbase.regionserver.wal.HLogKey

    while (entry != null) {
      WALEdit edit = entry.getEdit();
      this.metrics.logEditsReadRate.inc(1);
      seenEntries++;
      // Remove all KVs that should not be replicated
      HLogKey logKey = entry.getKey();
      // don't replicate if the log entries originated in the peer
      if (!logKey.getClusterId().equals(peerClusterId)) {
        removeNonReplicableEdits(edit);
        // Don't replicate catalog (ROOT/META) entries; only ship the edit if it
        // still contains something to replicate and replication is currently enabled.
        if (!(Bytes.equals(logKey.getTablename(), HConstants.ROOT_TABLE_NAME) ||
            Bytes.equals(logKey.getTablename(), HConstants.META_TABLE_NAME)) &&
            edit.size() != 0 && replicating.get()) {
          // Only set the clusterId if it is a local key.
          // This ensures that the originator sets the cluster id
          // and all replicas retain the initial cluster id.
          // This is *only* place where a cluster id other than the default is set.
          if (HConstants.DEFAULT_CLUSTER_ID == logKey.getClusterId()) {
            logKey.setClusterId(this.clusterId);
          }
          currentNbOperations += countDistinctRowKeys(edit);
          currentNbEntries++;
        } else {
          this.metrics.logEditsFilteredRate.inc(1);
View Full Code Here
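
Both replication snippets above rely on the cluster id stored in the HLogKey to break replication loops: an entry that still carries HConstants.DEFAULT_CLUSTER_ID is local, so the source stamps its own cluster id onto it before shipping, while entries already carrying the peer's id are not re-replicated. A sketch of that stamping step, assuming getClusterId()/setClusterId() work with a java.util.UUID as in the snippets; the helper is illustrative:

import java.util.UUID;

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.regionserver.wal.HLogKey;

// Sketch only: mark the origin cluster on keys that still carry the default id,
// so downstream peers can recognize where an edit came from.
final class ReplicationOrigin {

  static void stampOriginIfLocal(HLogKey logKey, UUID localClusterId) {
    // The snippets above compare by reference because DEFAULT_CLUSTER_ID is the
    // sentinel returned for keys written without an explicit cluster id.
    if (HConstants.DEFAULT_CLUSTER_ID == logKey.getClusterId()) {
      logKey.setClusterId(localClusterId);
    }
  }
}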

Examples of org.apache.hadoop.hbase.regionserver.wal.HLogKey

        long time = System.nanoTime();
        WALEdit edit = new WALEdit();
        edit.add(new KeyValue(row, family, Bytes.toBytes(i), time, KeyValue.Type.Put, Bytes
            .toBytes(i)));
        writer.append(new HLog.Entry(new HLogKey(regionName, tableName, i, time,
            HConstants.DEFAULT_CLUSTER_ID), edit));

        writer.close();
      }
      MonitoredTask status = TaskMonitor.get().createStatus(method);
View Full Code Here
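
The test loop above builds one WAL entry per iteration: a single-KeyValue WALEdit keyed by an HLogKey whose sequence number is the loop counter and whose timestamp comes from System.nanoTime(). A self-contained sketch of that construction, using only the constructors shown in the snippet; the builder class is illustrative:

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;

// Sketch only: assemble one HLog.Entry the way the test loop above does.
final class WalEntryBuilder {

  static HLog.Entry putEntry(byte[] regionName, byte[] tableName, long seqId,
      byte[] row, byte[] family, byte[] qualifier, byte[] value) {
    long time = System.nanoTime();  // the tests use nanoTime() as the cell timestamp
    WALEdit edit = new WALEdit();
    edit.add(new KeyValue(row, family, qualifier, time, KeyValue.Type.Put, value));
    HLogKey key = new HLogKey(regionName, tableName, seqId, time,
        HConstants.DEFAULT_CLUSTER_ID);
    return new HLog.Entry(key, edit);
  }
}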

Examples of org.apache.hadoop.hbase.regionserver.wal.HLogKey

        long time = System.nanoTime();
        WALEdit edit = new WALEdit();
        edit.add(new KeyValue(row, family, Bytes.toBytes(i), time, KeyValue.Type.Put, Bytes
            .toBytes(i)));
        writer.append(new HLog.Entry(new HLogKey(regionName, tableName, i, time,
            HConstants.DEFAULT_CLUSTER_ID), edit));

        writer.close();
      }
      long recoverSeqId = 1030;
View Full Code Here

Examples of org.apache.hadoop.hbase.regionserver.wal.HLogKey

      fs.create(recoveredEdits);
      HLog.Writer writer = HLogFactory.createRecoveredEditsWriter(fs, recoveredEdits, conf);

      long time = System.nanoTime();

      writer.append(new HLog.Entry(new HLogKey(regionName, tableName, 10, time,
          HConstants.DEFAULT_CLUSTER_ID), WALEdit.createCompaction(compactionDescriptor)));
      writer.close();

      // close the region now, and reopen again
      HTableDescriptor htd = region.getTableDesc();
View Full Code Here
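
This example writes a compaction marker into a recovered-edits file rather than an ordinary put, but the writer flow is the same for any WALEdit. A sketch of that write path, using only the calls shown in the snippet (HLogFactory.createRecoveredEditsWriter, HLog.Writer.append, close); the method name and parameters are illustrative:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;

// Sketch only: append a single pre-built WALEdit to a recovered-edits file.
final class RecoveredEditsWriterSketch {

  static void writeOne(FileSystem fs, Path recoveredEdits, Configuration conf,
      byte[] regionName, byte[] tableName, long seqId, WALEdit edit) throws IOException {
    HLog.Writer writer = HLogFactory.createRecoveredEditsWriter(fs, recoveredEdits, conf);
    try {
      writer.append(new HLog.Entry(
          new HLogKey(regionName, tableName, seqId, System.nanoTime(),
              HConstants.DEFAULT_CLUSTER_ID), edit));
    } finally {
      writer.close();
    }
  }
}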

Examples of org.apache.hadoop.hbase.regionserver.wal.HLogKey

        long time = System.nanoTime();
        WALEdit edit = new WALEdit();
        edit.add(new KeyValue(row, family, Bytes.toBytes(i),
            time, KeyValue.Type.Put, Bytes.toBytes(i)));
        writer.append(new HLog.Entry(new HLogKey(regionName, tableName,
            i, time, HConstants.DEFAULT_CLUSTER_ID), edit));

        writer.close();
      }
      MonitoredTask status = TaskMonitor.get().createStatus(method);
View Full Code Here

Examples of org.apache.hadoop.hbase.regionserver.wal.HLogKey

        long time = System.nanoTime();
        WALEdit edit = new WALEdit();
        edit.add(new KeyValue(row, family, Bytes.toBytes(i),
            time, KeyValue.Type.Put, Bytes.toBytes(i)));
        writer.append(new HLog.Entry(new HLogKey(regionName, tableName,
            i, time, HConstants.DEFAULT_CLUSTER_ID), edit));

        writer.close();
      }
      long recoverSeqId = 1030;
View Full Code Here

Examples of org.apache.hadoop.hbase.regionserver.wal.HLogKey

    for (long i = 1; i < 101; i++) {
      if (i > 1 && i % 20 == 0) {
        hlog.rollWriter();
      }
      LOG.info(i);
      HLogKey key = new HLogKey(hri.getRegionName(), test, seq++,
          System.currentTimeMillis(), HConstants.DEFAULT_CLUSTER_ID);
      hlog.append(hri, key, edit, htd, true);
    }

    // Simulate a rapid burst of inserts followed by a position report
    // that is not yet fully caught up (it misses the last entry).
    LOG.info(baseline + " and " + time);
    baseline += 101;
    time = baseline;
    LOG.info(baseline + " and " + time);

    for (int i = 0; i < 3; i++) {
      HLogKey key = new HLogKey(hri.getRegionName(), test, seq++,
          System.currentTimeMillis(), HConstants.DEFAULT_CLUSTER_ID);
      hlog.append(hri, key, edit, htd, true);
    }

    assertEquals(6, manager.getHLogs().get(slaveId).size());

    hlog.rollWriter();

    manager.logPositionAndCleanOldLogs(manager.getSources().get(0).getCurrentPath(),
        "1", 0, false, false);

    HLogKey key = new HLogKey(hri.getRegionName(), test, seq++,
        System.currentTimeMillis(), HConstants.DEFAULT_CLUSTER_ID);
    hlog.append(hri, key, edit, htd, true);

    assertEquals(1, manager.getHLogs().size());
View Full Code Here
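
Every append in the test above builds a fresh HLogKey with the next sequence number before handing the edit to the log. A minimal sketch of that pattern, using the same hlog.append(hri, key, edit, htd, true) call shown in the snippet; the wrapper class is illustrative:

import java.io.IOException;
import java.util.concurrent.atomic.AtomicLong;

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;

// Sketch only: hand out a new HLogKey per append, with a monotonically increasing sequence id.
final class SequencedAppender {

  private final AtomicLong seq = new AtomicLong();

  void append(HLog hlog, HRegionInfo hri, byte[] tableName, WALEdit edit,
      HTableDescriptor htd) throws IOException {
    HLogKey key = new HLogKey(hri.getRegionName(), tableName, seq.incrementAndGet(),
        System.currentTimeMillis(), HConstants.DEFAULT_CLUSTER_ID);
    hlog.append(hri, key, edit, htd, true);
  }
}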