Package org.apache.jackrabbit.oak.plugins.segment

Examples of org.apache.jackrabbit.oak.plugins.segment.Compactor$CompactDiff


    public void compact() {
        long start = System.nanoTime();
        log.info("TarMK compaction running");

        SegmentWriter writer = new SegmentWriter(this, tracker);
        Compactor compactor = new Compactor(writer);

        SegmentNodeState before = getHead();
        long existing = before.getChildNode(SegmentNodeStore.CHECKPOINTS)
                .getChildNodeCount(Long.MAX_VALUE);
        if (existing > 1) {
            log.warn(
                    "TarMK compaction found {} checkpoints, you might need to run checkpoint cleanup",
                    existing);
        }

        SegmentNodeState after = compactor.compact(EMPTY_NODE, before);
        writer.flush();
        while (!setHead(before, after)) {
            // Some other concurrent changes have been made.
            // Rebase (and compact) those changes on top of the
            // compacted state before retrying to set the head.
            SegmentNodeState head = getHead();
            after = compactor.compact(before, head);
            before = head;
            writer.flush();
        }
        tracker.setCompactionMap(compactor.getCompactionMap());

        // Drop the SegmentWriter caches and flush any existing state
        // in an attempt to prevent new references to old pre-compacted
        // content. TODO: There should be a cleaner way to do this.
        tracker.getWriter().dropCache();

        // report how long the compaction took, using the 'start' timestamp
        // captured when the method began
        log.info("TarMK compaction completed in {}ms",
                MILLISECONDS.convert(System.nanoTime() - start, NANOSECONDS));
    }
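
The `while (!setHead(before, after))` loop is a compare-and-set retry pattern: `setHead` only succeeds if the head is still the `before` state it was given, so any concurrent commit forces the loop to rebase the newly committed changes onto the compacted state and try again. A minimal sketch of the same pattern, assuming an `AtomicReference` head and a `rebase` operator standing in for `compactor.compact(before, head)` (the class and names below are illustrative, not Oak API):

    import java.util.concurrent.atomic.AtomicReference;
    import java.util.function.BinaryOperator;

    final class RebaseRetryLoop<S> {

        private final AtomicReference<S> head;

        RebaseRetryLoop(S initial) {
            this.head = new AtomicReference<>(initial);
        }

        /**
         * Installs 'compacted' (a transformed copy of 'before') as the new
         * head. If a concurrent commit moved the head, the changes committed
         * since 'before' are rebased onto the compacted state and the CAS is
         * retried. Like compactor.compact(before, head), the rebase operator
         * is assumed to track the previously compacted state internally.
         */
        S install(S before, S compacted, BinaryOperator<S> rebase) {
            S after = compacted;
            while (!head.compareAndSet(before, after)) {
                S current = head.get();                // observe the concurrent commit
                after = rebase.apply(before, current); // re-apply its diff
                before = current;                      // next CAS expects the new head
            }
            return after;
        }
    }

Each retry narrows the window: only changes committed during the previous attempt remain to be rebased, so the loop terminates once commits stop racing the compactor.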


        // 'store', 'head', 'builder', 'writer', 'directory' and 'largeBinarySize'
        // are fields/locals declared earlier in the enclosing test.
        // First simulate the case where during compaction a reference to the
        // older segments is added to a segment that the compactor is writing
        store = new FileStore(directory, 1, false);
        head = store.getHead();
        assertTrue(store.size() > largeBinarySize);
        Compactor compactor = new Compactor(writer);
        SegmentNodeState compacted =
                compactor.compact(EmptyNodeState.EMPTY_NODE, head);
        builder = head.builder();
        builder.setChildNode("old", head); // reference to pre-compacted state
        builder.getNodeState(); // persists the modified state, materializing the back-reference
        assertTrue(store.setHead(head, compacted));
        store.close();

        // In this case the revision cleanup is unable to reclaim the old data
        store = new FileStore(directory, 1, false);
        assertTrue(store.size() > largeBinarySize);
        store.cleanup();
        assertTrue(store.size() > largeBinarySize);
        store.close();

        // Now we do the same thing, but let the compactor use a different
        // SegmentWriter
        store = new FileStore(directory, 1, false);
        head = store.getHead();
        assertTrue(store.size() > largeBinarySize);
        writer = new SegmentWriter(store, store.getTracker());
        compactor = new Compactor(writer);
        compacted = compactor.compact(EmptyNodeState.EMPTY_NODE, head);
        builder = head.builder();
        builder.setChildNode("old", head); // reference to pre-compacted state
        builder.getNodeState(); // the back-reference is persisted outside the compactor's writer
        writer.flush();
        assertTrue(store.setHead(head, compacted));
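
The snippet is truncated before the assertions for the second scenario. A hedged sketch of how it might conclude, reusing the same test fields (`store`, `directory`, `largeBinarySize`): because the compactor wrote into its own `SegmentWriter`, the back-reference created through `head.builder()` lands in a segment outside the compacted generation, and cleanup should now be able to reclaim the old data.

        // Hedged continuation of the truncated test (assumed, not from the
        // original source): with a dedicated SegmentWriter the compacted
        // segments carry no reference to the old head, so revision cleanup
        // should now reclaim the pre-compacted data.
        store.close();

        store = new FileStore(directory, 1, false);
        assertTrue(store.size() > largeBinarySize);
        store.cleanup();
        assertTrue(store.size() < largeBinarySize); // old segments reclaimed
        store.close();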

    public void compact() {
        long start = System.nanoTime();
        log.info("TarMK compaction started");

        SegmentWriter writer = new SegmentWriter(this, tracker);
        Compactor compactor = new Compactor(writer);

        SegmentNodeState before = getHead();
        SegmentNodeState after = compactor.compact(EMPTY_NODE, before);
        writer.flush();
        while (!setHead(before, after)) {
            // Some other concurrent changes have been made.
            // Rebase (and compact) those changes on top of the
            // compacted state before retrying to set the head.
            SegmentNodeState head = getHead();
            after = compactor.compact(before, head);
            before = head;
            writer.flush();
        }
        tracker.setCompactionMap(compactor.getCompactionMap());

        // Drop the SegmentWriter caches and flush any existing state
        // in an attempt to prevent new references to old pre-compacted
        // content. TODO: There should be a cleaner way to do this.
        tracker.getWriter().dropCache();

        // report how long the compaction took, as in the variant above
        log.info("TarMK compaction completed in {}ms",
                MILLISECONDS.convert(System.nanoTime() - start, NANOSECONDS));
    }
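
Publishing the map with `tracker.setCompactionMap(...)` lets later equality checks recognize a record and its compacted copy without a deep content comparison. A hedged sketch of that lookup, assuming `CompactionMap` exposes a `wasCompactedTo(RecordId, RecordId)` test; `beforeState` and `afterState` are hypothetical `SegmentNodeState`s on either side of the compaction:

    // Hypothetical records on either side of the compaction boundary
    RecordId beforeId = beforeState.getRecordId();
    RecordId afterId = afterState.getRecordId();

    CompactionMap map = compactor.getCompactionMap();
    if (map.wasCompactedTo(beforeId, afterId)) {
        // fast path: known equal across compaction, no content diff needed
    }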

                    // incremental backup diff: the previous backup's root
                    // becomes the base state, so only changes since the last
                    // backup need to be copied
                    before = state.getChildNode("root");
                }
            }

            Compactor compactor = new Compactor(backup.getTracker().getWriter());
            SegmentNodeState after = compactor.compact(before, current);

            // 4. commit the backup
            SegmentNodeBuilder builder = state.builder();
            builder.setProperty("checkpoint", checkpoint);
            builder.setChildNode("root", after);
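
The fragment ends just before the commit itself. A hedged sketch of how the backup commit might conclude, assuming `backup` is the target `FileStore` from the surrounding method and `state` is the head it was opened with:

            // Atomically advance the backup's head from the previously
            // observed state to the node built above; a false return would
            // indicate a concurrent writer.
            if (!backup.setHead(state, builder.getNodeState())) {
                throw new IOException("concurrent modification of the backup store");
            }
            backup.flush(); // persist the new head before closing
            backup.close();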

    public void cleanDir() throws IOException {
        FileUtils.deleteDirectory(directory);
    }

    private static void cleanup(FileStore fileStore) throws IOException {
        // a fresh Compactor(null) yields an empty compaction map, effectively
        // resetting whatever map a previous compaction installed
        fileStore.getTracker().setCompactionMap(new Compactor(null).getCompactionMap());
        fileStore.getTracker().getWriter().dropCache();

        fileStore.cleanup();
    }
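
A hedged usage sketch for the helper above, assuming `directory` is the same test directory that `cleanDir` deletes: resetting the compaction map and dropping the writer cache first means `cleanup` holds no stale references, so the size comparison reflects what is genuinely reclaimable.

    FileStore fileStore = new FileStore(directory, 1, false);
    try {
        long sizeBefore = fileStore.size();
        cleanup(fileStore); // reset compaction map, drop caches, then reclaim
        assertTrue("cleanup must not grow the store",
                fileStore.size() <= sizeBefore);
    } finally {
        fileStore.close();
    }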
