Package com.sleepycat.je.cleaner

Examples of com.sleepycat.je.cleaner.UtilizationTracker
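The snippets on this page come from the JE log manager, environment setup, and recovery code. The recurring pattern is to obtain the environment-wide tracker with envImpl.getUtilizationTracker() and to do all counting while the log write latch is held, so the utilization counts stay in step with what is actually written to the log. Below is a minimal sketch of that pattern; the method name is hypothetical, and an EnvironmentImpl field named envImpl plus a Latch named logWriteLatch are assumed, as in the examples that follow.

    /*
     * Sketch only (not JE code): count a superseded entry obsolete under the
     * log write latch, mirroring the pattern used throughout this page.
     */
    void countPriorVersionObsolete(long oldLsn,
                                   LogEntryType entryType,
                                   int oldSize,
                                   DatabaseImpl nodeDb)
        throws DatabaseException {

        UtilizationTracker tracker = envImpl.getUtilizationTracker();
        logWriteLatch.acquire();
        try {
            /* The old LSN is superseded; its bytes become reclaimable. */
            tracker.countObsoleteNode(oldLsn, entryType, oldSize, nodeDb);
        } finally {
            logWriteLatch.release();
        }
    }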


    /**
     * Called within the log write critical section.
     */
    void serialLogInternal(LogItem[] itemArray, LogContext context)
        throws IOException, DatabaseException {

        UtilizationTracker tracker = envImpl.getUtilizationTracker();
        LogItem firstItem = itemArray[0];
        LogItem lastItem = itemArray[itemArray.length - 1];

        for (LogItem item : itemArray) {
            boolean marshallOutsideLatch = (item.buffer != null);
            boolean isFirstItem = (item == firstItem);
            boolean isLastItem = (item == lastItem);

            /*
             * Do obsolete tracking before marshalling a FileSummaryLN into the
             * log buffer so that a FileSummaryLN counts itself.
             * countObsoleteNode must be called before computing the entry
             * size, since it can change the size of a FileSummaryLN entry that
             * we're logging
             */
            LogEntryType entryType = item.entry.getLogType();
            if (!DbLsn.isTransientOrNull(item.oldLsn)) {
                if (context.obsoleteDupsAllowed) {
                    tracker.countObsoleteNodeDupsAllowed
                        (item.oldLsn, entryType, item.oldSize, context.nodeDb);
                } else {
                    tracker.countObsoleteNode
                        (item.oldLsn, entryType, item.oldSize, context.nodeDb);
                }
            }

            /* Count auxOldLsn for same database; no specified size. */
            if (!DbLsn.isTransientOrNull(item.auxOldLsn)) {
                if (context.obsoleteDupsAllowed) {
                    tracker.countObsoleteNodeDupsAllowed
                        (item.auxOldLsn, entryType, 0, context.nodeDb);
                } else {
                    tracker.countObsoleteNode
                        (item.auxOldLsn, entryType, 0, context.nodeDb);
                }
            }

            /*
             * If an entry must be protected within the log write latch for
             * marshalling, take care to also calculate its size in the
             * protected section. Note that we have to get the size *before*
             * marshalling so that the currentLsn and size are correct for
             * utilization tracking.
             */
            int entrySize;
            if (marshallOutsideLatch) {
                entrySize = item.buffer.limit();
                assert item.header != null;
            } else {
                assert item.header == null;
                item.header = new LogEntryHeader
                    (item.entry, item.provisional, item.repContext);
                entrySize = item.header.getSize() + item.header.getItemSize();
            }

            /*
             * Get the next free slot in the log, under the log write latch.
             * Bump the LSN values, which gives us a valid previous pointer,
             * which is part of the log entry header. That's why doing the
             * checksum must be in the log write latch -- we need to bump the
             * LSN first, and bumping the LSN must be done within the log write
             * latch.
             */
            if (isFirstItem && context.forceNewLogFile) {
                fileManager.forceNewLogFile();
            }

            boolean flippedFile = fileManager.bumpLsn(entrySize);
            long currentLsn = DbLsn.NULL_LSN;
            boolean usedTemporaryBuffer = false;
            boolean success = false;
            try {
                currentLsn = fileManager.getLastUsedLsn();

                /*
                 * countNewLogEntry and countObsoleteNodeInexact cannot change
                 * a FileSummaryLN size, so they are safe to call after
                 * getSizeForWrite.
                 */
                if (tracker.countNewLogEntry
                    (currentLsn, entryType, entrySize, context.nodeDb)) {
                    context.wakeupCleaner = true;
                }

                /*
                 * LN deletions are obsolete immediately.  Inexact counting is
                 * used to save resources because the cleaner knows that all
                 * deleted LNs are obsolete.
                 */
                if (item.entry.isDeleted()) {
                    tracker.countObsoleteNodeInexact
                        (currentLsn, entryType, entrySize, context.nodeDb);
                }

                /*
                 * This entry must be marshalled within the log write latch.
                 */
                if (!marshallOutsideLatch) {
                    assert item.buffer == null;
                    item.buffer = marshallIntoBuffer(item.header, item.entry);
                }

                /* Sanity check */
                if (entrySize != item.buffer.limit()) {
                    throw EnvironmentFailureException.unexpectedState
                        ("Logged entry entrySize= " + entrySize +
                         " but marshalledSize=" + item.buffer.limit() +
                         " type=" + entryType + " currentLsn=" +
                         DbLsn.getNoFormatString(currentLsn));
                }

                /*
                 * Ask for a log buffer suitable for holding this new entry.
                 * If the current log buffer is full, or if we flipped into a
                 * new file, write it to disk and get a new, empty log buffer
                 * to use. The returned buffer will be latched for write.
                 */
                LogBuffer useLogBuffer =
                    logBufferPool.getWriteBuffer(entrySize, flippedFile);

                /* Add checksum, prev offset, and VLSN to the entry. */
                item.buffer = item.header.addPostMarshallingInfo
                    (envImpl, item.buffer, fileManager.getPrevEntryOffset(),
                     item.repContext);

                /*
                 * If the LogBufferPool buffer (useBuffer) doesn't have
                 * sufficient space (since they're fixed size), just use the
                 * temporary buffer and throw it away when we're done.  That
                 * way we don't grow the LogBuffers in the pool permanently.
                 * We risk an OOME on this temporary usage, but we'll risk it.
                 * [#12674]
                 */
                useLogBuffer.latchForWrite();
                try {
                    ByteBuffer useBuffer = useLogBuffer.getDataBuffer();
                    if (useBuffer.capacity() - useBuffer.position() <
                        entrySize) {
                        final boolean flushRequired = isLastItem &&
                             context.flushRequired && !context.fsyncRequired;
                        fileManager.writeLogBuffer
                            (new LogBuffer(item.buffer, currentLsn),
                             flushRequired);
                        usedTemporaryBuffer = true;
                        assert useBuffer.position() == 0;
                        nTempBufferWrites.increment();
                    } else {
                        /* Copy marshalled object into write buffer. */
                        useBuffer.put(item.buffer);
                    }
                } finally {
                    useLogBuffer.release();
                }

                success = true;
            } finally {
                if (!success) {

                    /*
                     * The LSN pointer, log buffer position, and corresponding
                     * file position march in lockstep.
                     *
                     * 1. We bump the LSN.
                     * 2. We copy loggable entry into the log buffer.
                     * 3. We may try to write the log buffer.
                     *
                     * If we've failed to put the entry into the log buffer
                     * (2), we need to restore old LSN state so that the log
                     * buffer doesn't have a hole. [SR #12638] If we fail after
                     * (2), we don't need to restore state, because log buffers
                     * will still match file positions.
                     *
                     * This assumes that the last possible activity was the
                     * write of the log buffers.
                     */
                    fileManager.restoreLastPosition();

                    /*
                     * If the entry was not written to the log, it will not be
                     * part of the replication stream, and we should reuse the
                     * vlsn.
                     */
                    if (item.header.getVLSN() != null) {
                        envImpl.decrementVLSN();
                    }
                }
            }

            /*
             * Tell the log buffer pool that we finished the write.  Record the
             * LSN against this logbuffer, and write the buffer to disk if
             * needed.
             */
            if (!usedTemporaryBuffer) {
                logBufferPool.writeCompleted
                    (currentLsn, isLastItem && context.flushRequired,
                     context.fsyncRequired);
            }

            /*
             * If the txn is not null, the first entry is an LN. Update the txn
             * with info about the latest LSN. Note that this has to happen
             * within the log write latch.
             */
            item.entry.postLogWork(item.header, currentLsn);

            item.newLsn = currentLsn;
            context.totalNewSize += entrySize;
        }

        /* Count other obsolete info under the log write latch. */
        if (context.packedObsoleteInfo != null) {
            context.packedObsoleteInfo.countObsoleteInfo
                (tracker, context.nodeDb);
        }
        if (context.obsoleteWriteLockInfo != null) {
            for (WriteLockInfo info : context.obsoleteWriteLockInfo) {
                tracker.countObsoleteNode(info.getAbortLsn(),
                                          null /*type*/,
                                          info.getAbortLogSize(),
                                          info.getAbortDb());
            }
        }
    }
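For reference, the utilization-tracking work that serialLogInternal does per item reduces to three tracker calls. The following is a sketch only (this helper does not exist in JE); the parameters mirror the per-item state computed above, and isDeletedLN stands in for item.entry.isDeleted().

    private boolean trackItemUtilization(UtilizationTracker tracker,
                                         long oldLsn,
                                         long currentLsn,
                                         LogEntryType entryType,
                                         int oldSize,
                                         int entrySize,
                                         boolean isDeletedLN,
                                         DatabaseImpl nodeDb) {

        /* 1. The entry being superseded, if any, becomes obsolete. */
        if (!DbLsn.isTransientOrNull(oldLsn)) {
            tracker.countObsoleteNode(oldLsn, entryType, oldSize, nodeDb);
        }

        /*
         * 2. The new entry is counted as live.  A true return is the signal
         * used above to set context.wakeupCleaner.
         */
        boolean wakeupCleaner =
            tracker.countNewLogEntry(currentLsn, entryType, entrySize, nodeDb);

        /*
         * 3. Deleted LNs are obsolete as soon as they are written; inexact
         * counting is sufficient because the cleaner already knows that all
         * deleted LNs are obsolete.
         */
        if (isDeletedLN) {
            tracker.countObsoleteNodeInexact(currentLsn, entryType, entrySize,
                                             nodeDb);
        }
        return wakeupCleaner;
    }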


    void countObsoleteNodeInternal(long lsn,
                                   LogEntryType type,
                                   int size,
                                   DatabaseImpl nodeDb,
                                   boolean countExact) {
        UtilizationTracker tracker = envImpl.getUtilizationTracker();
        if (countExact) {
            tracker.countObsoleteNode(lsn, type, size, nodeDb);
        } else {
            tracker.countObsoleteNodeInexact(lsn, type, size, nodeDb);
        }
    }
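A hypothetical caller chooses exact counting when the obsolete entry's LSN (and usually its size) is known precisely, and inexact counting when the LSN is only an approximation, as the recovery code at the bottom of this page does for abortLsn. Sketch only; oldLsn, oldSize, abortLsn, entryType, and nodeDb are assumed to be in scope.

        /* Exact: the LSN identifies the superseded entry precisely. */
        countObsoleteNodeInternal(oldLsn, entryType, oldSize, nodeDb, true);

        /* Inexact: abortLsn is only an approximation of the prior LSN. */
        countObsoleteNodeInternal(abortLsn, entryType, 0, nodeDb, false);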

    void countObsoleteNodeDupsAllowedInternal(long lsn,
                                              LogEntryType type,
                                              int size,
                                              DatabaseImpl nodeDb) {
        UtilizationTracker tracker = envImpl.getUtilizationTracker();
        tracker.countObsoleteNodeDupsAllowed(lsn, type, size, nodeDb);
    }

    void transferToUtilizationTrackerInternal(LocalUtilizationTracker
                                              localTracker)
        throws DatabaseException {

        UtilizationTracker tracker = envImpl.getUtilizationTracker();
        localTracker.transferToUtilizationTracker(tracker);
    }
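The transfer variant lets counts that were accumulated off the latch in a LocalUtilizationTracker be folded into the environment-wide tracker in a single step. A minimal sketch of the calling side, assuming the same logWriteLatch field as the other examples; the wrapper name is hypothetical.

    void transferLocalCounts(LocalUtilizationTracker localTracker)
        throws DatabaseException {

        logWriteLatch.acquire();
        try {
            /* Merge the locally accumulated counts under the latch. */
            transferToUtilizationTrackerInternal(localTracker);
        } finally {
            logWriteLatch.release();
        }
    }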

    /**
     * @see LogManager#countObsoleteLNs
     */
    public void countObsoleteNode(long lsn, LogEntryType type)
        throws DatabaseException {

        UtilizationTracker tracker = envImpl.getUtilizationTracker();
        logWriteLatch.acquire();
        try {
            countObsoleteNodeInternal(tracker, lsn, type);
        } finally {
            logWriteLatch.release();
        }
    }

    /**
     * @see LogManager#countObsoleteNodes
     */
    public void countObsoleteNodes(TrackedFileSummary[] summaries)
        throws DatabaseException {

        UtilizationTracker tracker = envImpl.getUtilizationTracker();
        logWriteLatch.acquire();
        try {
            countObsoleteNodesInternal(tracker, summaries);
        } finally {
            logWriteLatch.release();
        }
    }

    /**
     * @see LogManager#countObsoleteLNs
     */
    public void countObsoleteNode(long lsn, LogEntryType type)
        throws DatabaseException {

        UtilizationTracker tracker = envImpl.getUtilizationTracker();
        synchronized (logWriteLatch) {
            countObsoleteNodeInternal(tracker, lsn, type);
        }
    }

    /**
     * @see LogManager#countObsoleteNodes
     */
    public void countObsoleteNodes(TrackedFileSummary[] summaries)
        throws DatabaseException {

        UtilizationTracker tracker = envImpl.getUtilizationTracker();
        synchronized (logWriteLatch) {
            countObsoleteNodesInternal(tracker, summaries);
        }
    }

            /*
             * Create the file utilization objects before creating the Cleaner
             * or running recovery.
             */
            utilizationTracker = new UtilizationTracker(this);
            utilizationProfile =
                new UtilizationProfile(this, utilizationTracker);

            /*
             * Daemons are always made here, but only started after recovery.
             */

                                     boolean abortKnownDeleted,
                                     LN ln,
                                     TxnNodeId txnNodeId,
                                     Set countedAbortLsnNodes) {

        UtilizationTracker tracker = env.getUtilizationTracker();

        /*
         * If the LN is marked deleted and its LSN follows the FileSummaryLN
         * for its file, count it as obsolete.
         */
        if (ln.isDeleted()) {
            Long logFileNum = new Long(DbLsn.getFileNumber(logLsn));
            long fileSummaryLsn =
                DbLsn.longToLsn((Long) fileSummaryLsns.get(logFileNum));
            int cmpFsLsnToLogLsn =
                (fileSummaryLsn != DbLsn.NULL_LSN) ?
                DbLsn.compareTo(fileSummaryLsn, logLsn) : -1;
            if (cmpFsLsnToLogLsn < 0) {
                tracker.countObsoleteNode(logLsn, null);
            }
        }

        /* Was the LN found in the tree? */
        if (treeLsn != DbLsn.NULL_LSN) {
            int cmpLogLsnToTreeLsn = DbLsn.compareTo(logLsn, treeLsn);

            /*
             * If the oldLsn and newLsn differ and the newLsn follows the
             * FileSummaryLN for the file of the oldLsn, count the oldLsn as
             * obsolete.
             */
            if (cmpLogLsnToTreeLsn != 0) {
                long newLsn = (cmpLogLsnToTreeLsn < 0) ? treeLsn : logLsn;
                long oldLsn = (cmpLogLsnToTreeLsn > 0) ? treeLsn : logLsn;
                Long oldLsnFile = new Long(DbLsn.getFileNumber(oldLsn));
                long oldFsLsn =
                    DbLsn.longToLsn((Long) fileSummaryLsns.get(oldLsnFile));
                int cmpOldFsLsnToNewLsn =
                    (oldFsLsn != DbLsn.NULL_LSN) ?
                    DbLsn.compareTo(oldFsLsn, newLsn) : -1;
                if (cmpOldFsLsnToNewLsn < 0) {
                    tracker.countObsoleteNode(oldLsn, null);
                }
            }

            /*
             * If the logLsn is equal to or precedes the treeLsn and the entry
             * has an abortLsn that was not previously deleted, consider the
             * set of entries for the given node.  If the logLsn is the first
             * in the set that follows the FileSummaryLN of the abortLsn, count
             * the abortLsn as obsolete.
             */
            if (cmpLogLsnToTreeLsn <= 0 &&
                abortLsn != DbLsn.NULL_LSN &&
                !abortKnownDeleted &&
                !countedAbortLsnNodes.contains(txnNodeId)) {
                /* We have not counted this abortLsn yet. */
                Long abortFileNum = new Long(DbLsn.getFileNumber(abortLsn));
                long abortFsLsn =
                    DbLsn.longToLsn((Long) fileSummaryLsns.get(abortFileNum));
                int cmpAbortFsLsnToLogLsn =
                    (abortFsLsn != DbLsn.NULL_LSN) ?
                    DbLsn.compareTo(abortFsLsn, logLsn) : -1;
                if (cmpAbortFsLsnToLogLsn < 0) {

                    /*
                     * logLsn follows the FileSummaryLN of the abortLsn.  The
                     * abortLsn is only an approximation of the prior LSN, so
                     * use inexact counting.
                     */
                    tracker.countObsoleteNodeInexact(abortLsn, null);

                    /* Don't count this abortLsn (this node) again. */
                    countedAbortLsnNodes.add(txnNodeId);
                }
            }
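The three checks above share one comparison: find the FileSummaryLN LSN recorded for a file and test whether a given LSN follows it, treating a missing FileSummaryLN as preceding every LSN. A small helper capturing that check, as a sketch; followsFileSummary is a hypothetical name, and fileSummaryLsns is the same java.util.Map of file number to FileSummaryLN LSN used above.

    /*
     * Sketch only (hypothetical helper, not JE code): true if lsn follows the
     * last FileSummaryLN recorded for the file of fileLsn.
     */
    private boolean followsFileSummary(Map fileSummaryLsns,
                                       long fileLsn,
                                       long lsn) {
        Long fileNum = new Long(DbLsn.getFileNumber(fileLsn));
        long fsLsn = DbLsn.longToLsn((Long) fileSummaryLsns.get(fileNum));
        /* A missing FileSummaryLN is treated as preceding every LSN. */
        int cmp = (fsLsn != DbLsn.NULL_LSN) ? DbLsn.compareTo(fsLsn, lsn) : -1;
        return cmp < 0;
    }

With it, the first check above corresponds to followsFileSummary(fileSummaryLsns, logLsn, logLsn), the second to followsFileSummary(fileSummaryLsns, oldLsn, newLsn), and the third to followsFileSummary(fileSummaryLsns, abortLsn, logLsn).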
