Package com.sleepycat.je.log

Examples of com.sleepycat.je.log.LogManager


         */
        if (undo.ln.isDeleted()) {
            return;
        }

        LogManager logManager = envImpl.getLogManager();

        if (obsoleteDupsAllowed) {
            logManager.countObsoleteNodeDupsAllowed
                (undoLsn,
                 null, // type
                 undo.ln.getLastLoggedSize(),
                 undo.db);
        } else {
            logManager.countObsoleteNode(undoLsn,
                                         null,  // type
                                         undo.ln.getLastLoggedSize(),
                                         undo.db,
                                         true); // countExact
        }
View Full Code Here
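
The snippet above counts the log entry for an undone LN as obsolete so the cleaner knows its space can be reclaimed. A minimal sketch of the same call, assuming an open EnvironmentImpl (envImpl), the entry's LSN (lsn), its last logged size (size), and its DatabaseImpl (db) are in scope; the argument order follows the call shown above:

        LogManager logManager = envImpl.getLogManager();
        // Pass null for the entry type so the LogManager infers it, and
        // true for countExact to record an exact (not approximate) count.
        logManager.countObsoleteNode(lsn,
                                     null,  // type
                                     size,
                                     db,
                                     true); // countExact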


                                               byte[] beginData,
                                               long diffSize)
        throws Exception {

        HashSet<Record> records = new HashSet<Record>();
        LogManager logManager = DbInternal.getEnvironmentImpl
            (cursor.getDatabase().getEnvironment()).getLogManager();

        DatabaseEntry key = new DatabaseEntry(beginKey);
        DatabaseEntry data = new DatabaseEntry(beginData);
        boolean scanToEnd = (diffSize == DATABASE_END);
View Full Code Here
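
The snippet above reaches the internal LogManager starting from a public-API cursor. A hedged sketch of that lookup, assuming an open public Database handle named db:

        // DbInternal bridges the public API to the internal EnvironmentImpl.
        EnvironmentImpl envImpl =
            DbInternal.getEnvironmentImpl(db.getEnvironment());
        LogManager logManager = envImpl.getLogManager();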

             * be present in the log for recovery. It also ensures that all log
             * entries will be flushed to disk and the TxnChain will not have
             * to worry about entries that are in log buffers when constructing
             * the rollback information.
             */
            LogManager logManager = repImpl.getLogManager();
            LogEntry rollbackStart =
                new SingleItemEntry(LogEntryType.LOG_ROLLBACK_START,
                                    new RollbackStart(matchpointVLSN,
                                                      matchpointLsn,
                                                      activeTxns.keySet()));
            long rollbackStartLsn =
                logManager.logForceFlush(rollbackStart,
                                         true, // fsyncRequired,
                                         ReplicationContext.NO_REPLICATE);
            rollbackStatus = RBSTATUS_LOG_RBSTART;

            /*
             * 2. Do rollback in memory. Undo any operations that were logged
             * after the matchpointLsn, and save the LSNs for those log
             * entries. There should be something to undo, because we checked
             * earlier that there were log entries after the matchpoint.
             */
            List<Long> rollbackLsns = new ArrayList<Long>();
            for (ReplayTxn replayTxn : activeTxns.values()) {
                Collection<Long> txnRollbackLsns =
                    replayTxn.rollback(matchpointLsn);

                /*
                 * Txns that were entirely rolled back should have been removed
                 * from the activeTxns map.
                 */
                assert checkRemoved(replayTxn) :
                    "Should have removed " + replayTxn;

                rollbackLsns.addAll(txnRollbackLsns);
            }
            rollbackStatus = RBSTATUS_MEM_ROLLBACK;
            assert rollbackLsns.size() != 0;

            /*
             * 3 & 4 - Mark the rolled back log entries as invisible. After all
             * are done, fsync the set of files. By waiting, some may have made
             * it out on their own.
             */
            RollbackTracker.makeInvisible(repImpl, rollbackLsns);
            rollbackStatus = RBSTATUS_INVISIBLE;

            /*
             * 5. Log RollbackEnd. Flush it so that we can use it to optimize
             * recoveries later on. If the RollbackEnd exists, we can skip the
             * step of re-making LNs invisible.
             */
            logManager.logForceFlush
                (new SingleItemEntry(LogEntryType.LOG_ROLLBACK_END,
                                     new RollbackEnd(matchpointLsn,
                                                     rollbackStartLsn)),
                 true, // fsyncRequired
                 ReplicationContext.NO_REPLICATE);
View Full Code Here
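
The key LogManager call in the rollback above is logForceFlush, which writes an entry and optionally fsyncs before returning, so each rollback marker is durable before the next step begins. A condensed sketch of its shape, assuming repImpl and a prepared SingleItemEntry (entry) are in scope:

        LogManager logManager = repImpl.getLogManager();
        // Write the entry, fsync it to disk, and exclude it from replication.
        long lsn = logManager.logForceFlush(entry,
                                            true, // fsyncRequired
                                            ReplicationContext.NO_REPLICATE);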

     */
    void removePerDbMetadata(final Collection<Long> fileNums,
                             final Set<DatabaseId> databases)
        throws DatabaseException {

        final LogManager logManager = env.getLogManager();
        final DbTree dbTree = env.getDbTree();
        /* Only call logMapTreeRoot once for ID and NAME DBs. */
        DatabaseImpl idDatabase = dbTree.getDb(DbTree.ID_DB_ID);
        DatabaseImpl nameDatabase = dbTree.getDb(DbTree.NAME_DB_ID);
        boolean logRoot = false;
        if (logManager.removeDbFileSummaries(idDatabase, fileNums)) {
            logRoot = true;
        }
        if (logManager.removeDbFileSummaries(nameDatabase, fileNums)) {
            logRoot = true;
        }
        if (logRoot) {
            env.logMapTreeRoot();
        }
        /* Use DB ID set if available to avoid full scan of ID DB. */
        if (databases != null) {
            for (DatabaseId dbId : databases) {
                if (!dbId.equals(DbTree.ID_DB_ID) &&
                    !dbId.equals(DbTree.NAME_DB_ID)) {
                    DatabaseImpl db = dbTree.getDb(dbId);
                    try {
                        if (db != null &&
                            logManager.removeDbFileSummaries(db, fileNums)) {
                            dbTree.modifyDbRoot(db);
                        }
                    } finally {
                        dbTree.releaseDb(db);
                    }
                }
            }
        } else {

            /*
             * Use LockType.NONE for traversing the ID DB so that a lock is not
             * held when calling modifyDbRoot, which must release locks to
             * handle deadlocks.
             */
            CursorImpl.traverseDbWithCursor(idDatabase,
                                            LockType.NONE,
                                            true /*allowEviction*/,
                                            new CursorImpl.WithCursor() {
                public boolean withCursor(CursorImpl cursor,
                                          DatabaseEntry key,
                                          DatabaseEntry data)
                    throws DatabaseException {

                    MapLN mapLN = (MapLN) cursor.getCurrentLN(LockType.NONE);
                    if (mapLN != null) {
                        DatabaseImpl db = mapLN.getDatabase();
                        if (logManager.removeDbFileSummaries(db, fileNums)) {

                            /*
                             * Because we're using dirty-read, silently do
                             * nothing if the DB does not exist
                             * (mustExist=false).
 
View Full Code Here
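
The core LogManager call in removePerDbMetadata is removeDbFileSummaries, which strips per-file utilization metadata for the given files from one database. A condensed sketch of the pattern, assuming env, a DatabaseImpl (db), and the cleaned file numbers (fileNums) are in scope:

        LogManager logManager = env.getLogManager();
        // If any summaries were removed, rewrite the DB root so the change
        // is persistent across recovery.
        if (logManager.removeDbFileSummaries(db, fileNums)) {
            env.getDbTree().modifyDbRoot(db);
        }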

             * get logged last, as all referred-to children must precede
             * it. Provisional entries guarantee that all three are processed
             * as a unit. Recovery skips provisional entries, so the changed
             * children are only used if the parent makes it out to the log.
             */
            LogManager logManager = env.getLogManager();

            long newSiblingLsn =
                newSibling.optionalLogProvisional(logManager, parent);

            long myNewLsn = optionalLogProvisional(logManager, parent);
View Full Code Here
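
Both halves of the split are logged provisionally, so recovery will only use them if the parent, logged afterwards, also reached the log. A brief sketch restating that ordering with the same calls as above; the later, non-provisional logging of the parent is omitted here since it happens further along in the split:

        LogManager logManager = env.getLogManager();
        // Children first, as provisional entries: recovery ignores them
        // unless the parent that refers to them is logged later.
        long newSiblingLsn = newSibling.optionalLogProvisional(logManager, parent);
        long myNewLsn = optionalLogProvisional(logManager, parent);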

                                         lnKey,
                                         logAbortLsn,
                                         logAbortKnownDeleted,
                                         logTxn);

            LogManager logManager = env.getLogManager();
            newLsn = logManager.log(logEntry, false, oldLsn);

        } else {

            /*
             * Non duplicate LN, just log the normal way.
View Full Code Here
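
The write itself goes through LogManager.log, which returns the LSN of the new entry; passing the LSN of the version being replaced lets the log manager count that old version obsolete. A minimal sketch, assuming a prepared LN log entry (logEntry) and the prior version's LSN (oldLsn):

        LogManager logManager = env.getLogManager();
        // The second argument marks the entry provisional when true (see the
        // next example); the third is the LSN being superseded.
        long newLsn = logManager.log(logEntry, false /*isProvisional*/, oldLsn);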

                                             key,
                                             logAbortLsn,
                                             logAbortKnownDeleted,
                                             logTxn);

        LogManager logManager = env.getLogManager();
        return logManager.log(logEntry, isProvisional, oldLsn);
    }
View Full Code Here

       * it. Provisional entries guarantee that all three are processed
       * as a unit. Recovery skips provisional entries, so the changed
       * children are only used if the parent makes it out to the log.
       */
      EnvironmentImpl env = databaseImpl.getDbEnvironment();
      LogManager logManager = env.getLogManager();
      INList inMemoryINs = env.getInMemoryINs();

      long newSiblingLsn = newSibling.logProvisional(logManager, parent);
      long myNewLsn = logProvisional(logManager, parent);

View Full Code Here

         * root, IN.deleteEntry will traverse downwards.
         */
        subtreeRoot = search
            (idKey, SearchType.DELETE, -1, null, true /*updateGeneration*/);

        LogManager logManager =
            database.getDbEnvironment().getLogManager();
        if (subtreeRoot == null) {

            /*
             * The root is the top of this subtree. If there are no more
             * entries left in the root, delete the whole tree.  There's a
             * window on the rootLatch between the time that search releases
             * the rootLatch and the acquire below.  Something could insert
             * into the tree.  Use validateSubtreeForDelete to ensure that it's
             * still empty.
             */
            rootLatch.acquire();
            try {
                IN rootIN = (IN) root.fetchTarget(database, null);
               
                DbConfigManager configManager =
                    database.getDbEnvironment().getConfigManager();
                boolean purgeRoot = configManager.getBoolean
                    (EnvironmentParams.COMPRESSOR_PURGE_ROOT);

                /*
                 * We've encountered the last empty subtree of the tree.  In
                 * general, there's no reason to delete this last
                 * IN->...IN->BIN subtree since we're likely to add more
                 * nodes to this tree again.  Deleting the subtree also adds
                 * to the space used by the log, since a MapLN needs to be
                 * written when the root is nulled, and a MapLN, a root IN,
                 * and a BIN need to be written when the root is recreated.
                 *
                 * Consider a queue application which frequently inserts and
                 * deletes entries and often leaves the tree empty, but will
                 * insert new records again.
                 *
                 * An optimization might be to prune the multiple-IN path to
                 * the last BIN (if it even exists) down to just a root IN
                 * pointing to the single BIN, but this doesn't feel worth
                 * the trouble since the extra depth doesn't matter all that
                 * much.
                 *
                 * If je.compressor.purgeRoot is true, then we null the root.
                 */
                if (purgeRoot &&
                    (rootIN.getNEntries() <= 1) &&
                    (rootIN.validateSubtreeBeforeDelete(0))) {

                    /*
                     * The tree is empty, clear out the IN list.  Can't just
                     * call clear() because there are IN's from more than one
                     * Database on the list.
                     */
                    root = null;
                    treeEmpty = true;

                    /*
                     * Record the root deletion for recovery. Do this within
                     * the root latch. We need to put this log entry into the
                     * log before another thread comes in and creates a new
                     * rootIN for this database.
                     *
                     * For example,
                     * LSN 1000 IN delete info entry
                     * LSN 1010 new IN, for next set of inserts
                     * LSN 1020 new BIN, for next set of inserts.
                     *
                     * The entry at 1000 is needed so that LSN 1010 will
                     * properly supersede all previous IN entries in the tree.
                     */
                    logManager.log(new INDeleteInfo
                                   (rootIN.getNodeId(),
                                    rootIN.getIdentifierKey(),
                                    database.getId()));

                    /* Count obsolete nodes after logging the delete info. */
                    accountForSubtreeRemoval(inMemoryINs, rootIN, tracker);
                }
            } finally {
                rootLatch.release();
            }

        } else {
            try {
                int index = subtreeRoot.findEntry(idKey, false, false);
                IN subtreeRootIN = (IN) subtreeRoot.fetchTarget(index);
                boolean deleteOk = subtreeRoot.deleteEntry(index, true);
                assert deleteOk;

                /*
                 * Record in the log the nodeid of the highest node in the
                 * subtree that we're deleting. We'll use this later to
                 * navigate to the right place if we need to replay this
                 * delete.
                 */
                logManager.log(new INDeleteInfo
                               (subtreeRootIN.getNodeId(),
                                subtreeRootIN.getIdentifierKey(),
                                database.getId()));

                /* Count obsolete nodes after logging the delete info. */
 
View Full Code Here
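
Before the removed subtree's nodes are counted obsolete, the delete is made recoverable by logging an INDeleteInfo entry that identifies the deleted subtree root. A condensed sketch of that call, assuming logManager, the deleted root (subtreeRootIN), and the owning database are in scope as above:

        // Recovery replays the delete using the node ID and identifier key.
        logManager.log(new INDeleteInfo(subtreeRootIN.getNodeId(),
                                        subtreeRootIN.getIdentifierKey(),
                                        database.getId()));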

        DIN duplicateRoot = null;
        boolean dupCountLNLocked = false;
        DupCountLN dcl = null;
        BasicLocker locker = new BasicLocker(env);
        LogManager logManager =
            database.getDbEnvironment().getLogManager();

        try {
            int index = in.findEntry(dupKey, false, true);
            if (index >= 0) {
                duplicateRoot = (DIN) in.fetchTarget(index);
                duplicateRoot.latch();

                ChildReference dclRef = duplicateRoot.getDupCountLNRef();
                dcl = (DupCountLN)
                    dclRef.fetchTarget(database, duplicateRoot);

                /* Read lock the dup count LN. */
                if (locker.nonBlockingReadLock(dcl.getNodeId(), database) ==
                    LockGrantType.DENIED) {
                    return false;
                } else {
                    dupCountLNLocked = true;
                }

                /*
                 * We don't release the latch on 'in' before we search the
                 * duplicate tree below because we might be deleting the whole
                 * subtree from the IN and we want to keep it latched until we
                 * know.
                 */
                IN subtreeRoot;
                try {
                    subtreeRoot = searchSubTree(duplicateRoot,
                                                idKey,
                                                SearchType.DELETE,
                                                -1,
                                                null,
                                                true /*updateGeneration*/);
                } catch (NodeNotEmptyException NNEE) {

                    /*
                     * We can't delete the subtree because there are still
                     * cursors pointing to the lowest node on it.
                     */
                    in.releaseLatch();
                    throw NNEE;
                }

                if (subtreeRoot == null) {
                    /* We're deleting the duplicate root. */
                    BIN bin = (BIN) in;
                    if (bin.nCursors() == 0) {
                        try {

                            /*
                             * duplicateRoot is not currently latched.  Relatch
                             * it and recheck if it still is deletable.
                             */
                            duplicateRoot.latch();
                            if (duplicateRoot.isValidForDelete()) {
                                boolean deleteOk =
                                    bin.deleteEntry(index, true);
                                assert deleteOk;

                                logManager.log(new INDupDeleteInfo
                                               (duplicateRoot.getNodeId(),
                                                duplicateRoot.getMainTreeKey(),
                                                duplicateRoot.getDupTreeKey(),
                                                database.getId()));

                                /*
                                 * Count obsolete nodes after logging the
                                 * delete info.
                                 */
                                accountForSubtreeRemoval
                                    (inMemoryINs, duplicateRoot, tracker);

                                if (bin.getNEntries() == 0) {
                                    database.getDbEnvironment().
                                        addToCompressorQueue(bin, null, false);
                                }
                            }
                        } finally {
                            duplicateRoot.releaseLatch();
                        }
                    } else {

                        /*
                         * Don't delete anything off this IN if there are
                         * cursors referring to it.
                         */
                        ret = false;
                    }
                    in.releaseLatch();
                } else {
                    try {
                        /* We're deleting a portion of the duplicate tree. */
                        in.releaseLatch();
                        int dupIndex =
                            subtreeRoot.findEntry(idKey, false, false);
                        IN rootIN = (IN) subtreeRoot.fetchTarget(dupIndex);
                        boolean deleteOk =
                            subtreeRoot.deleteEntry(dupIndex, true);
                        assert deleteOk;

                        /*
                         * Record in the log the nodeid of the highest node in
                         * the subtree that we're deleting. We'll use this
                         * later to navigate to the right place if we need to
                         * replay this delete.
                         */
                        logManager.log(new INDupDeleteInfo
                                       (rootIN.getNodeId(),
                                        rootIN.getMainTreeKey(),
                                        rootIN.getDupTreeKey(),
                                        database.getId()));

View Full Code Here
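
Deletes inside a duplicate tree are recorded the same way, but with an INDupDeleteInfo entry that carries both the main-tree and duplicate-tree keys. A minimal sketch mirroring the calls above, assuming the deleted DIN is held in a variable named deletedRoot:

        // Record the delete so recovery can find and replay it in the
        // duplicate tree as well as the main tree.
        logManager.log(new INDupDeleteInfo(deletedRoot.getNodeId(),
                                           deletedRoot.getMainTreeKey(),
                                           deletedRoot.getDupTreeKey(),
                                           database.getId()));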

