Examples of FlushDescriptor


Examples of org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor

          committedFiles.put(s.getFamily().getName(), null); // for writing stores to WAL
        }

        // write the snapshot start to WAL
        if (wal != null) {
          FlushDescriptor desc = ProtobufUtil.toFlushDescriptor(FlushAction.START_FLUSH,
            getRegionInfo(), flushSeqId, committedFiles);
          trxId = HLogUtil.writeFlushMarker(wal, this.htableDescriptor, getRegionInfo(),
            desc, sequenceId, false); // no sync. Sync is below where we do not hold the updates lock
        }

        // Prepare flush (take a snapshot)
        for (StoreFlushContext flush : storeFlushCtxs) {
          flush.prepare();
        }
      } catch (IOException ex) {
        if (wal != null) {
          if (trxId > 0) { // check whether we have already written START_FLUSH to WAL
            try {
              FlushDescriptor desc = ProtobufUtil.toFlushDescriptor(FlushAction.ABORT_FLUSH,
                getRegionInfo(), flushSeqId, committedFiles);
              HLogUtil.writeFlushMarker(wal, this.htableDescriptor, getRegionInfo(),
                desc, sequenceId, false);
            } catch (Throwable t) {
              LOG.warn("Received unexpected exception trying to write ABORT_FLUSH marker to WAL:" +
                  StringUtils.stringifyException(t));
              // ignore this since we will be aborting the RS with DSE (DroppedSnapshotException).
            }
          }
          // we have called wal.startCacheFlush(), now we have to abort it
          wal.abortCacheFlush(this.getRegionInfo().getEncodedNameAsBytes());
          throw ex; // let upper layers deal with it.
        }
      } finally {
        this.updatesLock.writeLock().unlock();
      }
      String s = "Finished memstore snapshotting " + this +
        ", syncing WAL and waiting on mvcc, flushsize=" + totalFlushableSize;
      status.setStatus(s);
      if (LOG.isTraceEnabled()) LOG.trace(s);
      // sync unflushed WAL changes
      // see HBASE-8208 for details
      if (wal != null) {
        try {
          wal.sync(); // ensure that flush marker is sync'ed
        } catch (IOException ioe) {
          LOG.warn("Unexpected exception while log.sync(), ignoring. Exception: "
              + StringUtils.stringifyException(ioe));
        }
      }

      // wait for all in-progress transactions to commit to HLog before
      // we can start the flush. This prevents
      // uncommitted transactions from being written into HFiles.
      // We have to block before we start the flush, otherwise keys that
      // were removed via a rollbackMemstore could be written to HFiles.
      w.setWriteNumber(flushSeqId);
      mvcc.waitForPreviousTransactionsComplete(w);
      // set w to null to prevent mvcc.advanceMemstore from being called again inside finally block
      w = null;
      s = "Flushing stores of " + this;
      status.setStatus(s);
      if (LOG.isTraceEnabled()) LOG.trace(s);
    } finally {
      if (w != null) {
        // in case of failure just mark current w as complete
        mvcc.advanceMemstore(w);
      }
    }

    // Any failure from here on out will be catastrophic requiring server
    // restart so hlog content can be replayed and put back into the memstore.
    // Otherwise, although the snapshot content is backed up in the hlog, it
    // will not be part of the current running server's state.
    boolean compactionRequested = false;
    try {
      // A.  Flush memstore to all the HStores.
      // Keep running vector of all store files that includes both old and the
      // just-made new flush store file. The new flushed file is still in the
      // tmp directory.

      for (StoreFlushContext flush : storeFlushCtxs) {
        flush.flushCache(status);
      }

      // Switch snapshot (in memstore) -> new hfile (thus causing
      // all the store scanners to reset/reseek).
      // stores.values() and storeFlushCtxs have the same order
      Iterator<Store> it = stores.values().iterator();
      for (StoreFlushContext flush : storeFlushCtxs) {
        boolean needsCompaction = flush.commit(status);
        if (needsCompaction) {
          compactionRequested = true;
        }
        committedFiles.put(it.next().getFamily().getName(), flush.getCommittedFiles());
      }
      storeFlushCtxs.clear();

      // Set down the memstore size by amount of flush.
      this.addAndGetGlobalMemstoreSize(-totalFlushableSize);

      if (wal != null) {
        // Write flush marker to WAL. If this fails, we should throw a DroppedSnapshotException.
        FlushDescriptor desc = ProtobufUtil.toFlushDescriptor(FlushAction.COMMIT_FLUSH,
          getRegionInfo(), flushSeqId, committedFiles);
        HLogUtil.writeFlushMarker(wal, this.htableDescriptor, getRegionInfo(),
          desc, sequenceId, true);
      }
    } catch (Throwable t) {
      // An exception here means that the snapshot was not persisted.
      // The hlog needs to be replayed so its content is restored to memstore.
      // Currently, only a server restart will do this.
      // We used to only catch IOEs but its possible that we'd get other
      // exceptions -- e.g. HBASE-659 was about an NPE -- so now we catch
      // all and sundry.
      if (wal != null) {
        try {
          FlushDescriptor desc = ProtobufUtil.toFlushDescriptor(FlushAction.ABORT_FLUSH,
            getRegionInfo(), flushSeqId, committedFiles);
          HLogUtil.writeFlushMarker(wal, this.htableDescriptor, getRegionInfo(),
            desc, sequenceId, false);
        } catch (Throwable ex) {
          LOG.warn("Received unexpected exception trying to write ABORT_FLUSH marker to WAL:" +
View Full Code Here
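
The fragment above drives the flush-marker protocol: a START_FLUSH descriptor is written before the memstore snapshot, a COMMIT_FLUSH descriptor once the store files are committed, and an ABORT_FLUSH descriptor on failure, each built through ProtobufUtil.toFlushDescriptor. As a rough, self-contained sketch of what such a descriptor carries, the following assembles one directly with the generated protobuf builders. The builder method names and the nesting of FlushAction and StoreFlushDescriptor inside FlushDescriptor are inferred from the getters used in these examples and from standard protobuf code generation, so treat them as assumptions rather than a verified API; the class and method names are invented for illustration.

import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor;
import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.FlushAction;
import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor;
import org.apache.hadoop.hbase.util.Bytes;
import com.google.protobuf.ByteString;

public class FlushDescriptorSketch {
  /**
   * Builds a COMMIT_FLUSH descriptor for a single store, mirroring the fields the
   * examples above read back: table name, encoded region name, flush sequence
   * number, and one StoreFlushDescriptor per column family.
   */
  static FlushDescriptor buildCommitFlush(byte[] tableName, byte[] encodedRegionName,
      long flushSeqId, byte[] family, String flushOutputFile) {
    StoreFlushDescriptor store = StoreFlushDescriptor.newBuilder()
        .setFamilyName(ByteString.copyFrom(family))
        .setStoreHomeDir(Bytes.toString(family)) // store directory is named after the family
        .addFlushOutput(flushOutputFile)         // file produced by this flush
        .build();
    return FlushDescriptor.newBuilder()
        .setAction(FlushAction.COMMIT_FLUSH)
        .setTableName(ByteString.copyFrom(tableName))
        .setEncodedRegionName(ByteString.copyFrom(encodedRegionName))
        .setFlushSequenceNumber(flushSeqId)
        .addStoreFlushes(store)
        .build();
  }
}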

Examples of org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor

        if (entry == null) {
          break;
        }
        Cell cell = entry.getEdit().getCells().get(0);
        if (WALEdit.isMetaEditFamily(cell)) {
          FlushDescriptor flushDesc = WALEdit.getFlushDescriptor(cell);
          assertNotNull(flushDesc);
          assertArrayEquals(tableName.getName(), flushDesc.getTableName().toByteArray());
          if (flushDesc.getAction() == FlushAction.START_FLUSH) {
            assertTrue(flushDesc.getFlushSequenceNumber() > lastFlushSeqId);
          } else if (flushDesc.getAction() == FlushAction.COMMIT_FLUSH) {
            assertTrue(flushDesc.getFlushSequenceNumber() == lastFlushSeqId);
          }
          lastFlushSeqId = flushDesc.getFlushSequenceNumber();
          assertArrayEquals(regionName, flushDesc.getEncodedRegionName().toByteArray());
          assertEquals(1, flushDesc.getStoreFlushesCount()); //only one store
          StoreFlushDescriptor storeFlushDesc = flushDesc.getStoreFlushes(0);
          assertArrayEquals(family, storeFlushDesc.getFamilyName().toByteArray());
          assertEquals("family", storeFlushDesc.getStoreHomeDir());
          if (flushDesc.getAction() == FlushAction.START_FLUSH) {
            assertEquals(0, storeFlushDesc.getFlushOutputCount());
          } else {
            assertEquals(1, storeFlushDesc.getFlushOutputCount()); //only one file from flush
            assertTrue(storeFiles.contains(storeFlushDesc.getFlushOutput(0)));
          }
View Full Code Here
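
The test above walks WAL entries and asserts on the marker fields for a single-family region. As a smaller illustration of the same read path, here is a hypothetical helper that turns a flush-marker cell into a one-line summary. It uses only the accessors already exercised in these examples, plus getStoreFlushesList(), which is assumed to be the standard protobuf accessor for the repeated store_flushes field; the class name and import paths are illustrative.

import java.io.IOException;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor;
import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.StoreFlushDescriptor;

public class FlushMarkerDump {
  /** Returns a short summary of a flush marker, or null if the cell is not one. */
  static String describe(Cell cell) throws IOException {
    if (!WALEdit.isMetaEditFamily(cell)) {
      return null; // ordinary data cell, not a meta edit
    }
    FlushDescriptor desc = WALEdit.getFlushDescriptor(cell);
    if (desc == null) {
      return null; // meta edit, but not a flush marker
    }
    StringBuilder sb = new StringBuilder()
        .append(desc.getAction())
        .append(" region=").append(desc.getEncodedRegionName().toStringUtf8())
        .append(" seq=").append(desc.getFlushSequenceNumber());
    for (StoreFlushDescriptor store : desc.getStoreFlushesList()) {
      sb.append(" [").append(store.getFamilyName().toStringUtf8())
        .append(": ").append(store.getFlushOutputCount()).append(" flush output(s)]");
    }
    return sb.toString();
  }
}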

Examples of org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor

      List<Cell> cells = ((WALEdit)edit).getCells();
      if (cells.isEmpty()) {
        return false;
      }
      if (WALEdit.isMetaEditFamily(cells.get(0))) {
        FlushDescriptor desc = null;
        try {
          desc = WALEdit.getFlushDescriptor(cells.get(0));
        } catch (IOException e) {
          LOG.warn(e);
          return false;
        }
        if (desc != null) {
          for (FlushAction action : actions) {
            if (desc.getAction() == action) {
              return true;
            }
          }
        }
      }
View Full Code Here
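
The last fragment is the body of a predicate that reports whether a WAL edit carries a flush marker with one of a given set of actions. Folded into a self-contained method it might look like the sketch below; the class name, method name, and signature are invented for illustration, and the behavior matches the fragment above (missing or unparseable descriptors simply do not match).

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor;
import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.FlushAction;

public class FlushMarkerMatcher {
  /** True if the edit's first cell is a flush marker whose action is one of the given actions. */
  static boolean matchesFlushAction(WALEdit edit, FlushAction... actions) {
    List<Cell> cells = edit.getCells();
    if (cells.isEmpty() || !WALEdit.isMetaEditFamily(cells.get(0))) {
      return false;
    }
    FlushDescriptor desc;
    try {
      desc = WALEdit.getFlushDescriptor(cells.get(0));
    } catch (IOException e) {
      return false; // unparseable marker: treat as no match, like the fragment above
    }
    if (desc == null) {
      return false;
    }
    for (FlushAction action : actions) {
      if (desc.getAction() == action) {
        return true;
      }
    }
    return false;
  }
}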
