Package org.apache.hadoop.hbase.regionserver.wal

Examples of org.apache.hadoop.hbase.regionserver.wal.HLogKey
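
All of these examples share one pattern: construct an HLogKey from a region name, a table name, a log sequence number, a write time, and a cluster id; pair it with a WALEdit; and append the pair to the log. Below is a minimal sketch of that pattern against the 0.94-era API the snippets use. The HLog.Writer and the row, family, qualifier, and value bytes are assumed for illustration and are not taken from any one example.

    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.regionserver.wal.HLog;
    import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
    import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
    import org.apache.hadoop.hbase.util.Bytes;

    // A sketch, not a complete program: writer would come from
    // HLogFactory.createWriter(fs, path, conf), as several examples below show.
    void appendOneEdit(HLog.Writer writer, byte[] regionName, byte[] tableName,
        long seqNum) throws java.io.IOException {
      HLogKey key = new HLogKey(regionName, tableName, seqNum,
          System.currentTimeMillis(), HConstants.DEFAULT_CLUSTER_ID);
      WALEdit edit = new WALEdit();
      edit.add(new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("family"),
          Bytes.toBytes("qualifier"), Bytes.toBytes("value")));
      writer.append(new HLog.Entry(key, edit));
    }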


    for(long i = 1; i < 101; i++) {
      if(i > 1 && i % 20 == 0) {
        hlog.rollWriter();
      }
      LOG.info(i);
      HLogKey key = new HLogKey(hri.getRegionName(), test, seq++,
          System.currentTimeMillis(), HConstants.DEFAULT_CLUSTER_ID);
      hlog.append(hri, key, edit, htd, true);
    }

    // Simulate a rapid insert that is followed by a report
    // that is still incomplete (the last entry is missing)
    LOG.info(baseline + " and " + time);
    baseline += 101;
    time = baseline;
    LOG.info(baseline + " and " + time);

    for (int i = 0; i < 3; i++) {
      HLogKey key = new HLogKey(hri.getRegionName(), test, seq++,
          System.currentTimeMillis(), HConstants.DEFAULT_CLUSTER_ID);
      hlog.append(hri, key, edit, htd, true);
    }

    assertEquals(6, manager.getHLogs().get(slaveId).size());

    hlog.rollWriter();

    manager.logPositionAndCleanOldLogs(manager.getSources().get(0).getCurrentPath(),
        "1", 0, false, false);

    HLogKey key = new HLogKey(hri.getRegionName(), test, seq++,
        System.currentTimeMillis(), HConstants.DEFAULT_CLUSTER_ID);
    hlog.append(hri, key, edit, htd, true);

    assertEquals(1, manager.getHLogs().size());
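
Taken together, the snippet exercises a replication source end to end: it fills and rolls several logs, then verifies that logPositionAndCleanOldLogs discards the logs the source has fully shipped, leaving a single current log.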

    while (entry != null) {
      WALEdit edit = entry.getEdit();
      this.metrics.logEditsReadRate.inc(1);
      seenEntries++;
      // Remove all KVs that should not be replicated
      HLogKey logKey = entry.getKey();
      List<UUID> consumedClusterIds = edit.getClusterIds();
      // This cluster id is added to handle the A -> B -> A scenario where A runs an old
      // point release and B runs a newer point release that includes the HBASE-7709 fix.
      // Without the original cluster id in the set, a change made on cluster A would
      // replicate to cluster B indefinitely.
      consumedClusterIds.add(logKey.getClusterId());
      // don't replicate the entry if the peer cluster has already consumed it
      if (!consumedClusterIds.contains(peerClusterId)) {
        removeNonReplicableEdits(edit);
        // Skip catalog entries, WALEdits that no longer contain anything
        // to replicate, and everything while replication is disabled
        if (!(Bytes.equals(logKey.getTablename(), HConstants.ROOT_TABLE_NAME) ||
            Bytes.equals(logKey.getTablename(), HConstants.META_TABLE_NAME)) &&
            edit.size() != 0 && replicating.get()) {
          // Only set the clusterId if it is a local key.
          // This ensures that the originator sets the cluster id
          // and all replicas retain the initial cluster id.
          // This is the *only* place where a cluster id other than the default is set.
          if (HConstants.DEFAULT_CLUSTER_ID == logKey.getClusterId()) {
            logKey.setClusterId(this.clusterId);
          } else if (logKey.getClusterId() != this.clusterId) {
            edit.addClusterId(clusterId);
          }
          currentNbOperations += countDistinctRowKeys(edit);
          entries.add(entry);
          currentSize += entry.getEdit().heapSize();
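
The HBASE-7709 comment is the heart of this snippet. Here is a minimal sketch of the loop-prevention idea with assumed stand-in values rather than real HBase plumbing: an edit born on cluster A always carries A's id in the consumed set, so when cluster B ships it back toward A the membership test fails and the edit is dropped instead of cycling forever.

    import java.util.HashSet;
    import java.util.Set;
    import java.util.UUID;

    // Assumed stand-in ids, for illustration only.
    UUID originClusterId = UUID.randomUUID(); // cluster A, where the edit was written
    UUID peerClusterId = originClusterId;     // a source on B shipping back to A

    Set<UUID> consumedClusterIds = new HashSet<UUID>();
    // The origin adds its own id up front, just as the snippet does with
    // consumedClusterIds.add(logKey.getClusterId()), so even a peer running an
    // older release that never stamps the key cannot echo the edit back forever.
    consumedClusterIds.add(originClusterId);

    // Same test as the snippet: ship the entry only if the peer has not consumed it.
    boolean shouldReplicate = !consumedClusterIds.contains(peerClusterId); // false here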

        long time = System.nanoTime();
        WALEdit edit = new WALEdit();
        edit.add(new KeyValue(row, family, Bytes.toBytes(i), time, KeyValue.Type.Put, Bytes
            .toBytes(i)));
        writer.append(new HLog.Entry(new HLogKey(regionName, tableName, i, time,
            HConstants.DEFAULT_CLUSTER_ID), edit));

        writer.close();
      }
      MonitoredTask status = TaskMonitor.get().createStatus(method);

        long time = System.nanoTime();
        WALEdit edit = new WALEdit();
        edit.add(new KeyValue(row, family, Bytes.toBytes(i), time, KeyValue.Type.Put, Bytes
            .toBytes(i)));
        writer.append(new HLog.Entry(new HLogKey(regionName, tableName, i, time,
            HConstants.DEFAULT_CLUSTER_ID), edit));

        writer.close();
      }
      long recoverSeqId = 1030;

      fs.create(recoveredEdits);
      HLog.Writer writer = HLogFactory.createWriter(fs, recoveredEdits, conf);

      long time = System.nanoTime();

      writer.append(new HLog.Entry(new HLogKey(regionName, tableName, 10, time,
          HConstants.DEFAULT_CLUSTER_ID), WALEdit.createCompaction(compactionDescriptor)));
      writer.close();

      // close the region now, and reopen again
      HTableDescriptor htd = region.getTableDesc();
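
The replay side of this marker appears in the last example on this page: while replaying recovered edits, WALEdit.getCompaction(kv) recognizes the special entry and completeCompactionMarker(compaction) finishes the interrupted compaction.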

    while (entry != null) {
      WALEdit edit = entry.getEdit();
      this.metrics.incrLogEditsRead();
      seenEntries++;
      // Remove all KVs that should not be replicated
      HLogKey logKey = entry.getKey();
      // don't replicate the log entry if the peer cluster has already consumed it
      if (!logKey.getClusterIds().contains(peerClusterId)) {
        removeNonReplicableEdits(entry);
        // Skip catalog (hbase:meta) entries and WALEdits that no longer
        // contain anything to replicate
        if (!logKey.getTablename().equals(TableName.META_TABLE_NAME) &&
            edit.size() != 0) {
          // Mark that the current cluster has the change
          logKey.addClusterId(clusterId);
          currentNbOperations += countDistinctRowKeys(edit);
          entries.add(entry);
          currentSize += entry.getEdit().heapSize();
        } else {
          this.metrics.incrLogEditsFiltered();
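
Note how this newer variant differs from the earlier replication snippet: the set of consuming clusters now lives on the HLogKey itself (logKey.getClusterIds() and logKey.addClusterId(clusterId)) rather than as a single cluster id on the key plus a list on the WALEdit, and removeNonReplicableEdits takes the whole entry instead of just the edit.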

      for (FileStatus logStatus: FSUtils.listStatus(fs, regionEdits)) {
        HLog.Reader reader = HLog.getReader(fs, logStatus.getPath(), conf);
        try {
          HLog.Entry entry;
          while ((entry = reader.next()) != null) {
            HLogKey key = entry.getKey();
            assertArrayEquals(tableName, key.getTablename());
            assertArrayEquals(regionName, key.getEncodedRegionName());
          }
        } finally {
          reader.close();
        }
      }

        byte[] tableName = getTableName(i);
        for (int j = 0; j < 10; ++j) {
          byte[] regionName = getRegionName(tableName, j);
          for (int k = 0; k < 50; ++k) {
            byte[] rowkey = Bytes.toBytes("row-" + k);
            HLogKey key = new HLogKey(regionName, tableName, (long)k,
              System.currentTimeMillis(), HConstants.DEFAULT_CLUSTER_ID);
            WALEdit edit = new WALEdit();
            edit.add(new KeyValue(rowkey, TEST_FAMILY, TEST_QUALIFIER, rowkey));
            writer.append(new HLog.Entry(key, edit));
          }

          this.conf.getInt(AssignmentManager.ASSIGNMENT_TIMEOUT,
            AssignmentManager.DEFAULT_ASSIGNMENT_TIMEOUT_DEFAULT) / 2);
        long lastReport = EnvironmentEdgeManager.currentTimeMillis();

        while ((entry = reader.next()) != null) {
          HLogKey key = entry.getKey();
          WALEdit val = entry.getEdit();

          if (ng != null) { // some test, or nonces disabled
            ng.reportOperationFromWal(key.getNonceGroup(), key.getNonce(), key.getWriteTime());
          }

          if (reporter != null) {
            intervalEdits += val.size();
            if (intervalEdits >= interval) {
              // Number of edits interval reached
              intervalEdits = 0;
              long cur = EnvironmentEdgeManager.currentTimeMillis();
              if (lastReport + period <= cur) {
                status.setStatus("Replaying edits..." +
                    " skipped=" + skippedEdits +
                    " edits=" + editsCount);
                // Timeout reached
                if(!reporter.progress()) {
                  msg = "Progressable reporter failed, stopping replay";
                  LOG.warn(msg);
                  status.abort(msg);
                  throw new IOException(msg);
                }
                reported_once = true;
                lastReport = cur;
              }
            }
          }

          // Start coprocessor replay here. The coprocessor is for each WALEdit
          // instead of a KeyValue.
          if (coprocessorHost != null) {
            status.setStatus("Running pre-WAL-restore hook in coprocessors");
            if (coprocessorHost.preWALRestore(this.getRegionInfo(), key, val)) {
              // the coprocessor chose to bypass this log entry; skip it
              continue;
            }
          }

          if (firstSeqIdInLog == -1) {
            firstSeqIdInLog = key.getLogSeqNum();
          }
          boolean flush = false;
          for (KeyValue kv: val.getKeyValues()) {
            // Check this edit is for me. Also, guard against writing the special
            // METACOLUMN info such as HBASE::CACHEFLUSH entries
            if (kv.matchingFamily(WALEdit.METAFAMILY) ||
                !Bytes.equals(key.getEncodedRegionName(),
                  this.getRegionInfo().getEncodedNameAsBytes())) {
              // this is a special edit; we should handle it
              CompactionDescriptor compaction = WALEdit.getCompaction(kv);
              if (compaction != null) {
                //replay the compaction
                completeCompactionMarker(compaction);
              }

              skippedEdits++;
              continue;
            }
            // Figure which store the edit is meant for.
            if (store == null || !kv.matchingFamily(store.getFamily().getName())) {
              store = this.stores.get(kv.getFamily());
            }
            if (store == null) {
              // This should never happen.  Perhaps schema was changed between
              // crash and redeploy?
              LOG.warn("No family for " + kv);
              skippedEdits++;
              continue;
            }
            // Now, figure if we should skip this edit.
            if (key.getLogSeqNum() <= maxSeqIdInStores.get(store.getFamily()
                .getName())) {
              skippedEdits++;
              continue;
            }
            currentEditSeqId = key.getLogSeqNum();
            // Once we are over the limit, restoreEdit will keep returning true to
            // flush -- but don't flush until we've played all the kvs that make up
            // the WALEdit.
            flush = restoreEdit(store, kv);
            editsCount++;
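
The per-store sequence-id check near the end of this loop is what makes replay idempotent: an edit whose log sequence number is at or below the store's persisted maximum has already been flushed, so it is counted as skipped rather than applied a second time.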