Package org.apache.hadoop.hbase.regionserver.wal

Examples of org.apache.hadoop.hbase.regionserver.wal.HLogKey
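
HLogKey is the key half of a write-ahead-log record: paired with a WALEdit, it forms an HLog.Entry. As the excerpts below show, it carries the encoded region name, the table name, the log sequence number, the write time, and the UUID of the cluster that produced the edit. A minimal construction sketch, assuming the five-argument constructor used throughout the excerpts (encodedRegionName, tableName, logSeqNum, and kv are placeholder names, not from the excerpts):

    // sketch: build a key and wrap it, with an edit, into a WAL entry
    HLogKey key = new HLogKey(encodedRegionName, tableName, logSeqNum,
        System.currentTimeMillis(), HConstants.DEFAULT_CLUSTER_ID);
    WALEdit edit = new WALEdit();
    edit.add(kv);  // kv: a KeyValue destined for this region
    HLog.Entry walEntry = new HLog.Entry(key, edit);

The first excerpt, apparently from the replication transport code, serializes an HLogKey into its protobuf form (WALProtos.WALKey):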


    // Note: entryBuilder and scopeBuilder below are reusable protobuf
    // builders created earlier in the method, outside this excerpt.
    AdminProtos.ReplicateWALEntryRequest.Builder builder =
      AdminProtos.ReplicateWALEntryRequest.newBuilder();
    for (HLog.Entry entry: entries) {
      entryBuilder.clear();
      WALProtos.WALKey.Builder keyBuilder = entryBuilder.getKeyBuilder();
      HLogKey key = entry.getKey();
      keyBuilder.setEncodedRegionName(
        ByteString.copyFrom(key.getEncodedRegionName()));
      keyBuilder.setTableName(ByteString.copyFrom(key.getTablename().getName()));
      keyBuilder.setLogSequenceNumber(key.getLogSeqNum());
      keyBuilder.setWriteTime(key.getWriteTime());
      UUID clusterId = key.getClusterId();
      if (clusterId != null) {
        HBaseProtos.UUID.Builder uuidBuilder = keyBuilder.getClusterIdBuilder();
        uuidBuilder.setLeastSigBits(clusterId.getLeastSignificantBits());
        uuidBuilder.setMostSigBits(clusterId.getMostSignificantBits());
      }
      WALEdit edit = entry.getEdit();
      NavigableMap<byte[], Integer> scopes = key.getScopes();
      if (scopes != null && !scopes.isEmpty()) {
        for (Map.Entry<byte[], Integer> scope: scopes.entrySet()) {
          scopeBuilder.setFamily(ByteString.copyFrom(scope.getKey()));
          WALProtos.ScopeType scopeType =
              WALProtos.ScopeType.valueOf(scope.getValue().intValue());
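
This excerpt, apparently from snapshot log-splitting code, rewrites WAL entries for a restored table: it resolves the (possibly cloned) region name for each entry, builds a new HLogKey carrying that name, and appends the entry to a per-region writer: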


      HLog.Entry entry;
      LogWriter writer = null;
      byte[] regionName = null;
      byte[] newRegionName = null;
      while ((entry = log.next()) != null) {
        HLogKey key = entry.getKey();

        // We're interested only in the snapshot table that we're restoring
        if (!key.getTablename().equals(snapshotTableName)) continue;

        // Writer for region.
        if (!Bytes.equals(regionName, key.getEncodedRegionName())) {
          regionName = key.getEncodedRegionName().clone();

          // Get the new region name in case of clone, or use the original one
          newRegionName = regionsMap.get(regionName);
          if (newRegionName == null) newRegionName = regionName;

          writer = getOrCreateWriter(newRegionName, key.getLogSeqNum());
          LOG.debug("+ regionName=" + Bytes.toString(regionName));
        }

        // Append Entry
        key = new HLogKey(newRegionName, tableName,
                          key.getLogSeqNum(), key.getWriteTime(), key.getClusterId());
        writer.append(new HLog.Entry(key, entry.getEdit()));
      }
    } catch (IOException e) {
      LOG.warn("Something wrong during the log split", e);
    } finally {
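
The next excerpt replays recovered WAL edits into a region: it reports progress at intervals, offers each entry to the pre-WAL-restore coprocessor hook, and uses the HLogKey sequence number to skip edits that have already been persisted: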

        // (This excerpt begins mid-statement: the progress-report period is
        // derived from half of the assignment-timeout setting.)
            this.conf.getInt("hbase.master.assignment.timeoutmonitor.timeout",
                180000) / 2);
        long lastReport = EnvironmentEdgeManager.currentTimeMillis();

        while ((entry = reader.next()) != null) {
          HLogKey key = entry.getKey();
          WALEdit val = entry.getEdit();

          if (reporter != null) {
            intervalEdits += val.size();
            if (intervalEdits >= interval) {
              // Number of edits interval reached
              intervalEdits = 0;
              long cur = EnvironmentEdgeManager.currentTimeMillis();
              if (lastReport + period <= cur) {
                status.setStatus("Replaying edits..." +
                    " skipped=" + skippedEdits +
                    " edits=" + editsCount);
                // Timeout reached
                if (!reporter.progress()) {
                  msg = "Progressable reporter failed, stopping replay";
                  LOG.warn(msg);
                  status.abort(msg);
                  throw new IOException(msg);
                }
                reported_once = true;
                lastReport = cur;
              }
            }
          }

          // Run the coprocessor pre-WAL-restore hook. The hook is invoked
          // once per WALEdit rather than once per KeyValue.
          if (coprocessorHost != null) {
            status.setStatus("Running pre-WAL-restore hook in coprocessors");
            if (coprocessorHost.preWALRestore(this.getRegionInfo(), key, val)) {
              // the coprocessor chose to bypass this log entry; skip it
              continue;
            }
          }

          if (firstSeqIdInLog == -1) {
            firstSeqIdInLog = key.getLogSeqNum();
          }
          // Now, figure if we should skip this edit.
          if (key.getLogSeqNum() <= currentEditSeqId) {
            skippedEdits++;
            continue;
          }
          currentEditSeqId = key.getLogSeqNum();
          boolean flush = false;
          for (KeyValue kv: val.getKeyValues()) {
            // Check this edit is for me. Also, guard against writing the special
            // METACOLUMN info such as HBASE::CACHEFLUSH entries
            if (kv.matchingFamily(HLog.METAFAMILY) ||
                !Bytes.equals(key.getEncodedRegionName(),
                    this.regionInfo.getEncodedNameAsBytes())) {
              skippedEdits++;
              continue;
            }
            // Figure which store the edit is meant for.
            if (store == null || !kv.matchingFamily(store.getFamily().getName())) {
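
This excerpt, apparently from a replication source, filters entries before shipping them to a peer: entries that originated in the peer cluster are dropped, catalog tables and empty edits are skipped, and keys still carrying the default cluster id are stamped with the local one: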

    while (entry != null) {
      WALEdit edit = entry.getEdit();
      this.metrics.logEditsReadRate.inc(1);
      seenEntries++;
      // Remove all KVs that should not be replicated
      HLogKey logKey = entry.getKey();
      // don't replicate if the log entries originated in the peer
      if (!logKey.getClusterId().equals(peerClusterId)) {
        removeNonReplicableEdits(edit);
        // Ship the entry only if it isn't for a catalog table, the WALEdit
        // still contains something to replicate, and replication is enabled
        if (!(Bytes.equals(logKey.getTablename(), HConstants.ROOT_TABLE_NAME) ||
            Bytes.equals(logKey.getTablename(), HConstants.META_TABLE_NAME)) &&
            edit.size() != 0 && replicating.get()) {
          // Only set the clusterId if it is a local key.
          // This ensures that the originator sets the cluster id
          // and all replicas retain the initial cluster id.
          // This is the *only* place where a cluster id other than the default is set.
          if (HConstants.DEFAULT_CLUSTER_ID == logKey.getClusterId()) {
            logKey.setClusterId(this.clusterId);
          }
          currentNbOperations += countDistinctRowKeys(edit);
          currentNbEntries++;
        } else {
          this.metrics.logEditsFilteredRate.inc(1);
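
A test-style construction: a KeyValue is wrapped in a WALEdit, and the HLogKey is built with the table name standing in for the region name and the same timestamp used for both the sequence number and the write time: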

    } else if (type.getCode() == KeyValue.Type.DeleteFamily.getCode()) {
        kv = new KeyValue(rowBytes, fam, null,
            now, KeyValue.Type.DeleteFamily);
    }

    HLogKey key = new HLogKey(table, table, now, now,
        HConstants.DEFAULT_CLUSTER_ID);

    WALEdit edit = new WALEdit();
    edit.add(kv);
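
This excerpt, apparently from a replication-source-manager test, appends entries with increasing sequence numbers while rolling the writer periodically, then checks how many logs the manager retains for the slave: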

    for (long i = 1; i < 101; i++) {
      if (i > 1 && i % 20 == 0) {
        hlog.rollWriter();
      }
      LOG.info(i);
      HLogKey key = new HLogKey(hri.getRegionName(), test, seq++,
          System.currentTimeMillis(), HConstants.DEFAULT_CLUSTER_ID);
      hlog.append(hri, key, edit, htd);
    }

    // Simulate a rapid burst of inserts followed by a position report
    // that is still not fully caught up (it misses the last entry)
    LOG.info(baseline + " and " + time);
    baseline += 101;
    time = baseline;
    LOG.info(baseline + " and " + time);

    for (int i = 0; i < 3; i++) {
      HLogKey key = new HLogKey(hri.getRegionName(), test, seq++,
          System.currentTimeMillis(), HConstants.DEFAULT_CLUSTER_ID);
      hlog.append(hri, key, edit, htd);
    }

    assertEquals(6, manager.getHLogs().get(slaveId).size());

    hlog.rollWriter();

    manager.logPositionAndCleanOldLogs(manager.getSources().get(0).getCurrentPath(),
        "1", 0, false);

    HLogKey key = new HLogKey(hri.getRegionName(), test, seq++,
        System.currentTimeMillis(), HConstants.DEFAULT_CLUSTER_ID);
    hlog.append(hri, key, edit, htd);

    assertEquals(1, manager.getHLogs().size());
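
A test excerpt that writes one single-edit WAL file per sequence id; the writer is created anew on each iteration, outside the excerpt: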

      long time = System.nanoTime();
      WALEdit edit = new WALEdit();
      edit.add(new KeyValue(row, family, Bytes.toBytes(i),
          time, KeyValue.Type.Put, Bytes.toBytes(i)));
      writer.append(new HLog.Entry(new HLogKey(regionName, tableName,
          i, time, HConstants.DEFAULT_CLUSTER_ID), edit));

      writer.close();
    }
    MonitoredTask status = TaskMonitor.get().createStatus(method);
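
A near-identical variant of the previous excerpt; this test then replays the files with a recovery sequence id of 1030: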

      long time = System.nanoTime();
      WALEdit edit = new WALEdit();
      edit.add(new KeyValue(row, family, Bytes.toBytes(i),
          time, KeyValue.Type.Put, Bytes.toBytes(i)));
      writer.append(new HLog.Entry(new HLogKey(regionName, tableName,
          i, time, HConstants.DEFAULT_CLUSTER_ID), edit));

      writer.close();
    }
    long recoverSeqId = 1030;
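
A later version of the replication-source filter shown earlier; getTablename() now returns a TableName, so the catalog check uses equals() rather than Bytes.equals():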

    while (entry != null) {
      WALEdit edit = entry.getEdit();
      this.metrics.incrLogEditsRead();
      seenEntries++;
      // Remove all KVs that should not be replicated
      HLogKey logKey = entry.getKey();
      // don't replicate if the log entries originated in the peer
      if (!logKey.getClusterId().equals(peerClusterId)) {
        removeNonReplicableEdits(entry);
        // Ship the entry only if it isn't for a catalog table and the
        // WALEdit still contains something to replicate
        if (!(logKey.getTablename().equals(TableName.ROOT_TABLE_NAME) ||
            logKey.getTablename().equals(TableName.META_TABLE_NAME)) &&
            edit.size() != 0) {
          // Only set the clusterId if it is a local key.
          // This ensures that the originator sets the cluster id
          // and all replicas retain the initial cluster id.
          // This is the *only* place where a cluster id other than the default is set.
          if (HConstants.DEFAULT_CLUSTER_ID == logKey.getClusterId()) {
            logKey.setClusterId(this.clusterId);
          }
          currentNbOperations += countDistinctRowKeys(edit);
          currentNbEntries++;
          currentSize += entry.getEdit().size();
        } else {
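
A later version of the recovered-edits replay shown earlier; this variant also recognizes compaction-marker meta edits, replaying them via completeCompactionMarker(), and compares each sequence number against the per-store maximum: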

        // (This excerpt begins mid-statement: the progress-report period is
        // derived from half of the assignment-timeout setting.)
            this.conf.getInt("hbase.master.assignment.timeoutmonitor.timeout",
                180000) / 2);
        long lastReport = EnvironmentEdgeManager.currentTimeMillis();

        while ((entry = reader.next()) != null) {
          HLogKey key = entry.getKey();
          WALEdit val = entry.getEdit();

          if (reporter != null) {
            intervalEdits += val.size();
            if (intervalEdits >= interval) {
              // Number of edits interval reached
              intervalEdits = 0;
              long cur = EnvironmentEdgeManager.currentTimeMillis();
              if (lastReport + period <= cur) {
                status.setStatus("Replaying edits..." +
                    " skipped=" + skippedEdits +
                    " edits=" + editsCount);
                // Timeout reached
                if (!reporter.progress()) {
                  msg = "Progressable reporter failed, stopping replay";
                  LOG.warn(msg);
                  status.abort(msg);
                  throw new IOException(msg);
                }
                reported_once = true;
                lastReport = cur;
              }
            }
          }

          // Run the coprocessor pre-WAL-restore hook. The hook is invoked
          // once per WALEdit rather than once per KeyValue.
          if (coprocessorHost != null) {
            status.setStatus("Running pre-WAL-restore hook in coprocessors");
            if (coprocessorHost.preWALRestore(this.getRegionInfo(), key, val)) {
              // the coprocessor chose to bypass this log entry; skip it
              continue;
            }
          }

          if (firstSeqIdInLog == -1) {
            firstSeqIdInLog = key.getLogSeqNum();
          }
          boolean flush = false;
          for (KeyValue kv: val.getKeyValues()) {
            // Check this edit is for me. Also, guard against writing the special
            // METACOLUMN info such as HBASE::CACHEFLUSH entries
            if (kv.matchingFamily(WALEdit.METAFAMILY) ||
                !Bytes.equals(key.getEncodedRegionName(),
                  this.getRegionInfo().getEncodedNameAsBytes())) {
              // this is a special meta edit; if it carries a compaction
              // marker, replay the compaction before skipping the edit
              CompactionDescriptor compaction = WALEdit.getCompaction(kv);
              if (compaction != null) {
                // replay the compaction
                completeCompactionMarker(compaction);
              }

              skippedEdits++;
              continue;
            }
            // Figure which store the edit is meant for.
            if (store == null || !kv.matchingFamily(store.getFamily().getName())) {
              store = this.stores.get(kv.getFamily());
            }
            if (store == null) {
              // This should never happen.  Perhaps schema was changed between
              // crash and redeploy?
              LOG.warn("No family for " + kv);
              skippedEdits++;
              continue;
            }
            // Now, figure if we should skip this edit.
            if (key.getLogSeqNum() <= maxSeqIdInStores.get(store.getFamily()
                .getName())) {
              skippedEdits++;
              continue;
            }
            currentEditSeqId = key.getLogSeqNum();
            // Once we are over the limit, restoreEdit will keep returning true to
            // flush -- but don't flush until we've played all the kvs that make up
            // the WALEdit.
            flush = restoreEdit(store, kv);
            editsCount++;
