Package org.apache.jackrabbit.oak.plugins.segment

Examples of org.apache.jackrabbit.oak.plugins.segment.SegmentWriter


    public synchronized void merge() {
        if (parent != null) {
            NodeState before = new SegmentNodeState(store, base);
            NodeState after = new SegmentNodeState(store, head);

            SegmentWriter writer = new SegmentWriter(store);
            while (!parent.setHead(base, head)) {
                RecordId newBase = parent.getHead();
                NodeBuilder builder =
                        new SegmentNodeState(store, newBase).builder();
                after.compareAgainstBaseState(before, new MergeDiff(builder));
                NodeState state = builder.getNodeState();
                RecordId newHead = writer.writeNode(state).getRecordId();
                writer.flush();

                base = newBase;
                head = newHead;
            }
        }
    }
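This merge() example shows the optimistic concurrency pattern used throughout the package: write a candidate state with the SegmentWriter, then compare-and-swap the head with setHead(expected, candidate), rebasing concurrent changes via compareAgainstBaseState on every failed attempt. The write/flush cycle underneath it, as a minimal sketch (assuming MemoryStore has a no-argument convenience constructor; the one-argument form is shown further below, and the property name and value are illustrative):

    MemoryStore store = new MemoryStore();
    SegmentWriter writer = store.getTracker().getWriter();

    NodeBuilder builder = EmptyNodeState.EMPTY_NODE.builder();
    builder.setProperty("hello", "world");

    // writeNode buffers the node's records in the current segment;
    // flush persists that segment to the store
    SegmentNodeState written = writer.writeNode(builder.getNodeState());
    writer.flush();
    RecordId id = written.getRecordId();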


    public void compact() {
        long start = System.nanoTime();
        log.info("TarMK compaction running");

        SegmentWriter writer = new SegmentWriter(this, tracker);
        Compactor compactor = new Compactor(writer);

        SegmentNodeState before = getHead();
        long existing = before.getChildNode(SegmentNodeStore.CHECKPOINTS)
                .getChildNodeCount(Long.MAX_VALUE);
        if (existing > 1) {
            log.warn(
                    "TarMK compaction found {} checkpoints, you might need to run checkpoint cleanup",
                    existing);
        }

        SegmentNodeState after = compactor.compact(EMPTY_NODE, before);
        writer.flush();
        while (!setHead(before, after)) {
            // Some other concurrent changes have been made.
            // Rebase (and compact) those changes on top of the
            // compacted state before retrying to set the head.
            SegmentNodeState head = getHead();
            after = compactor.compact(before, head);
            before = head;
            writer.flush();
        }
        tracker.setCompactionMap(compactor.getCompactionMap());

        // Drop the SegmentWriter caches and flush any existing state
        // in an attempt to prevent new references to old pre-compacted
        // content.
        tracker.getWriter().dropCache();
        tracker.getWriter().flush();

        log.info("TarMK compaction completed in {}ms",
                MILLISECONDS.convert(System.nanoTime() - start, NANOSECONDS));
    }
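Note that writer.flush() precedes every setHead(...) attempt: the records that make up the candidate head must be persisted before the head pointer can become visible to readers. A minimal trigger sketch, assuming an existing TarMK store directory (the path and the 256 MB maximum tar file size are placeholder values):

    FileStore store = new FileStore(new File("segmentstore"), 256, true);
    try {
        store.compact();   // rewrite the current head into fresh segments
        store.cleanup();   // then reclaim tar space no longer referenced
    } finally {
        store.close();
    }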


    @Test
    public void testCompaction() throws IOException {
        int largeBinarySize = 10 * 1024 * 1024;

        FileStore store = new FileStore(directory, 1, false);
        SegmentWriter writer = store.getTracker().getWriter();

        SegmentNodeState base = store.getHead();
        SegmentNodeBuilder builder = base.builder();
        byte[] data = new byte[largeBinarySize];
        new Random().nextBytes(data);
        SegmentBlob blob = writer.writeStream(new ByteArrayInputStream(data));
        builder.setProperty("foo", blob);
        builder.getNodeState(); // write the blob reference to the segment
        builder.setProperty("foo", "bar");
        SegmentNodeState head = builder.getNodeState();
        assertTrue(store.setHead(base, head));
        assertEquals("bar", store.getHead().getString("foo"));
        store.close();

        // First simulate the case where during compaction a reference to the
        // older segments is added to a segment that the compactor is writing
        store = new FileStore(directory, 1, false);
        head = store.getHead();
        assertTrue(store.size() > largeBinarySize);
        Compactor compactor = new Compactor(writer);
        SegmentNodeState compacted =
                compactor.compact(EmptyNodeState.EMPTY_NODE, head);
        builder = head.builder();
        builder.setChildNode("old", head); // reference to pre-compacted state
        builder.getNodeState();
        assertTrue(store.setHead(head, compacted));
        store.close();

        // In this case the revision cleanup is unable to reclaim the old data
        store = new FileStore(directory, 1, false);
        assertTrue(store.size() > largeBinarySize);
        store.cleanup();
        assertTrue(store.size() > largeBinarySize);
        store.close();

        // Now we do the same thing, but let the compactor use a different
        // SegmentWriter
        store = new FileStore(directory, 1, false);
        head = store.getHead();
        assertTrue(store.size() > largeBinarySize);
        writer = new SegmentWriter(store, store.getTracker());
        compactor = new Compactor(writer);
        compacted = compactor.compact(EmptyNodeState.EMPTY_NODE, head);
        builder = head.builder();
        builder.setChildNode("old", head); // reference to pre-compacted state
        builder.getNodeState();
        writer.flush();
        assertTrue(store.setHead(head, compacted));
        store.close();

        // Revision cleanup is now able to reclaim the extra space (OAK-1932)
        store = new FileStore(directory, 1, false);
        assertTrue(store.size() > largeBinarySize);
        store.cleanup();
        assertTrue(store.size() < largeBinarySize);
        store.close();
    }
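The second half of the test demonstrates the OAK-1932 fix: when the compactor shares its SegmentWriter with ordinary commits, records referencing pre-compacted segments can end up inside the very segments the compactor is writing, pinning the old data and defeating cleanup. Giving the compactor a dedicated writer isolates the compacted content. Distilled to its core, reusing the names from the test:

    // a writer used only by the compactor, never by concurrent commits
    SegmentWriter compactionWriter = new SegmentWriter(store, store.getTracker());
    Compactor compactor = new Compactor(compactionWriter);

    SegmentNodeState head = store.getHead();
    SegmentNodeState compacted = compactor.compact(EmptyNodeState.EMPTY_NODE, head);
    compactionWriter.flush();                 // persist before moving the head
    assertTrue(store.setHead(head, compacted));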


    @Test  // See OAK-2049
    public void segmentOverflow() throws IOException {
        for (int n = 1; n < 255; n++) {  // 255 = ListRecord.LEVEL_SIZE
            FileStore store = new FileStore(directory, 1, false);
            SegmentWriter writer = store.getTracker().getWriter();
            // writer.length == 32  (from the root node)

            // adding 15 strings with 16516 bytes each
            for (int k = 0; k < 15; k++) {
                // 16516 = (Segment.MEDIUM_LIMIT - 1 + 2 + 3)
                // 1 byte per char, 2 bytes to store the length and 3 bytes for the
                // alignment to the integer boundary
                writer.writeString(Strings.repeat("abcdefghijklmno".substring(k, k + 1),
                        Segment.MEDIUM_LIMIT - 1));
            }

            // adding 14280 bytes. 1 byte per char, and 2 bytes to store the length
            RecordId x = writer.writeString(Strings.repeat("x", 14278));
            // writer.length == 262052

            // Adding 3 * n bytes of record ids (up to 765 bytes for 255 ids)
            // For large enough n this causes the current segment to flush
            List<RecordId> list = Collections.nCopies(n, x);
            writer.writeList(list);

            writer.flush();

            // Don't close the store in a finally clause: if a failure happens,
            // closing would also fail and cover up the earlier exception
            store.close();
        }
    }
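The byte counts in the comments track exactly how full the segment is, which is the point of OAK-2049. Assuming the 256 KiB (262144 byte) maximum segment size of this Oak version, the numbers work out as follows:

    32 + 15 * 16516 + 14280  =  262052 bytes buffered (the writer.length above)
    262144 - 262052          =      92 bytes of headroom left in the segment
    3 * n                    >      92 once n >= 31, forcing a segment flush

Iterating n from 1 to 254 therefore exercises list records on both sides of the overflow boundary.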

                    + " is not a valid FileStore directory");
        }

        // 2. init filestore
        FileStore restore = new FileStore(source, MAX_FILE_SIZE, false);
        SegmentWriter writer = restore.getTracker().getWriter();
        try {
            SegmentNodeState state = restore.getHead();
            restore(state.getChildNode("root"), store, writer);
        } finally {
            restore.close();
View Full Code Here
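The restore(...) helper and the MAX_FILE_SIZE constant are defined elsewhere in the surrounding backup utility and are not shown here. The pattern worth noting is that content read from one store can be rewritten into another store through that store's SegmentWriter; a rough sketch, where targetWriter is a placeholder name for the destination store's writer:

    SegmentNodeState sourceRoot = restore.getHead();
    NodeBuilder builder = EmptyNodeState.EMPTY_NODE.builder();
    builder.setChildNode("root", sourceRoot.getChildNode("root"));
    SegmentNodeState copied = targetWriter.writeNode(builder.getNodeState());
    targetWriter.flush();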


    public MemoryStore(NodeState root) {
        NodeBuilder builder = EMPTY_NODE.builder();
        builder.setChildNode("root", root);

        SegmentWriter writer = tracker.getWriter();
        this.head = writer.writeNode(builder.getNodeState());
        writer.flush();
    }
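A usage sketch for the constructor above: the caller supplies the initial root state, which the store wraps under a "root" child before writing it as the first head (the property is illustrative):

    NodeBuilder content = EmptyNodeState.EMPTY_NODE.builder();
    content.setProperty("created", "2018-01-01");
    MemoryStore store = new MemoryStore(content.getNodeState());
    SegmentNodeState head = store.getHead();   // contains the "root" child written above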

