Package org.apache.jackrabbit.oak.plugins.segment.file

Examples of org.apache.jackrabbit.oak.plugins.segment.file.FileStore


        // Resolve the maximum tar file size: component config first, then the
        // JVM system property, then the hard-coded default of 256.
        String size = lookup(context, SIZE);
        if (size == null) {
            size = System.getProperty(SIZE, "256");
        }

        // Third argument is presumably the memory-mapping flag (cf. the
        // FileStore(..., memoryMapping) constructor used elsewhere), enabled
        // when mode is "64" (64-bit JVM) -- TODO confirm.
        store = new FileStore(
                new File(directory),
                Integer.parseInt(size), "64".equals(mode));

        // Wrap the TarMK store in a NodeStore and track observers against it.
        delegate = new SegmentNodeStore(store);
        observerTracker = new ObserverTracker(delegate);
View Full Code Here


            // unable to retrieve the checkpoint; use root state instead
            current = store.getRoot();
        }

        // 2. init filestore
        FileStore backup = new FileStore(destination, MAX_FILE_SIZE, false);
        try {
            Journal journal = backup.getJournal("root");

            SegmentNodeState state = new SegmentNodeState(
                    backup.getWriter().getDummySegment(), journal.getHead());
            SegmentNodeBuilder builder = state.builder();

            String beforeCheckpoint = state.getString("checkpoint");
            if (beforeCheckpoint == null) {
                // 3.1 no stored checkpoint, so do the initial full backup
                builder.setChildNode("root", current);
            } else {
                // 3.2 try to retrieve the previously backed up checkpoint
                NodeState before = store.retrieve(beforeCheckpoint);
                if (before != null) {
                    // the previous checkpoint is no longer available,
                    // so use the backed up state as the basis of the
                    // incremental backup diff
                    before = state.getChildNode("root");
                }
                current.compareAgainstBaseState(
                        before, new ApplyDiff(builder.child("root")));
            }
            builder.setProperty("checkpoint", checkpoint);

            // 4. commit the backup
            journal.setHead(
                    state.getRecordId(), builder.getNodeState().getRecordId());
        } finally {
            backup.close();
        }

        log.debug("Backup done in {} ms.", System.currentTimeMillis() - s);
    }
View Full Code Here

            @Override
            protected Repository[] internalSetUpCluster(int n) throws Exception {
                // Builds an n-node cluster of JCR repositories, each wrapping
                // a TarMK FileStore rooted at the same (base, unique) directory.
                // NOTE(review): all nodes open the same directory -- presumably
                // intentional for this fixture; confirm with the caller.
                Repository[] cluster = new Repository[n];
                stores = new FileStore[cluster.length];
                for (int i = 0; i < cluster.length; i++) {
                    // maxFileSizeMB / cacheSizeMB / memoryMapping are fixture
                    // configuration fields defined outside this fragment.
                    stores[i] = new FileStore(
                            new File(base, unique),
                            maxFileSizeMB, cacheSizeMB, memoryMapping);
                    Oak oak = new Oak(new SegmentNodeStore(stores[i]));
                    cluster[i] = new Jcr(oak).createRepository();
                }
View Full Code Here

            if (size == null) {
                // Fall back to the JVM system property when not configured.
                // NOTE(review): default here is 268435456 ("256MB" in bytes),
                // while a sibling snippet defaults to "256" -- one of the two
                // presumably uses the wrong unit for FileStore's size
                // argument; verify against the FileStore constructor contract.
                size = System.getProperty(SIZE, "268435456"); // 256MB
            }

            // TarMK branch: no MongoDB connection is needed.
            mongo = null;
            store = new FileStore(
                    new File(directory),
                    Integer.parseInt(size), "64".equals(mode));
        } else {
            // DocumentMK branch: read the MongoDB connection settings.
            int port = Integer.parseInt(String.valueOf(properties.get(PORT)));
            String db = String.valueOf(properties.get(DB));
View Full Code Here

        Session session = null;
        try {
            // Fresh TarMK store under target/, timestamped so repeated test
            // runs do not collide.
            File directory =
                    new File("target", "tarmk-" + System.currentTimeMillis());
            // NOTE(review): other callers pass 256 as the size argument;
            // 1024 * 1024 here may be a unit mix-up (MB vs bytes) -- confirm
            // against the FileStore constructor contract.
            this.store = new FileStore(directory, 1024 * 1024, false);
            Jcr jcr = new Jcr(new Oak(new SegmentNodeStore(store)));
            this.repository = jcr.createRepository();

            session = getRepository().login(superuser);
            TestContentLoader loader = new TestContentLoader();
View Full Code Here

            @Override
            public Repository[] setUpCluster(int n) throws Exception {
                // Builds an n-node cluster of JCR repositories, each wrapping
                // a TarMK FileStore rooted at the same (base, unique) directory.
                Repository[] cluster = new Repository[n];
                stores = new FileStore[cluster.length];
                for (int i = 0; i < cluster.length; i++) {
                    // maxFileSize / memoryMapping are fixture configuration
                    // fields defined outside this fragment.
                    stores[i] = new FileStore(
                            new File(base, unique), maxFileSize, memoryMapping);
                    Oak oak = new Oak(new SegmentNodeStore(stores[i]));
                    cluster[i] = new Jcr(oak).createRepository();
                }
                return cluster;
View Full Code Here

                System.err.println("usage: upgrade <olddir> <newdir>");
                System.exit(1);
            }
        } else if ("backup".equals(command)) {
            if (args.length == 2) {
                // Back up the repository at args[0] into the directory args[1].
                // NOTE(review): store is not closed if backup() throws;
                // consider try/finally as done in the "tarmk" branch below.
                FileStore store = new FileStore(new File(args[0]), 256, false);
                FileStoreBackup.backup(
                        new SegmentNodeStore(store), new File(args[1]));
                store.close();
            } else {
                System.err.println("usage: backup <repository> <backup>");
                System.exit(1);
            }
        } else if ("tarmk".equals(command)) {
            if (args.length == 0) {
                System.err.println("usage: tarmk <path> [id...]");
                System.exit(1);
            } else {
                System.out.println("TarMK " + args[0]);
                File file = new File(args[0]);
                FileStore store = new FileStore(file, 256, false);
                try {
                    if (args.length == 1) {
                        // No segment ids given: print store-wide statistics.
                        // Maps each segment id to the ids it references, for
                        // the reachability analysis below.
                        Map<UUID, List<UUID>> idmap = Maps.newHashMap();

                        // Pass 1: tally data vs bulk segments and record the
                        // reference graph.
                        int dataCount = 0;
                        long dataSize = 0;
                        int bulkCount = 0;
                        long bulkSize = 0;
                        for (UUID uuid : store.getSegmentIds()) {
                            if (SegmentIdFactory.isDataSegmentId(uuid)) {
                                Segment segment = store.readSegment(uuid);
                                dataCount++;
                                dataSize += segment.size();
                                idmap.put(uuid, segment.getReferencedIds());
                            } else if (SegmentIdFactory.isBulkSegmentId(uuid)) {
                                // Bulk segments reference no other segments.
                                bulkCount++;
                                bulkSize += store.readSegment(uuid).size();
                                idmap.put(uuid, Collections.<UUID>emptyList());
                            }
                        }
                        System.out.println("Total size:");
                        System.out.format(
                                "%6dMB in %6d data segments%n",
                                dataSize / (1024 * 1024), dataCount);
                        System.out.format(
                                "%6dMB in %6d bulk segments%n",
                                bulkSize / (1024 * 1024), bulkCount);

                        // Pass 2: breadth-first traversal from the "root"
                        // journal head; ids never removed from 'garbage' are
                        // unreachable.
                        Set<UUID> garbage = newHashSet(idmap.keySet());
                        Queue<UUID> queue = Queues.newArrayDeque();
                        queue.add(store.getJournal("root").getHead().getSegmentId());
                        while (!queue.isEmpty()) {
                            UUID id = queue.remove();
                            if (garbage.remove(id)) {
                                // First visit: enqueue referenced segments.
                                queue.addAll(idmap.get(id));
                            }
                        }
                        // Pass 3: tally the unreachable (collectable) segments.
                        dataCount = 0;
                        dataSize = 0;
                        bulkCount = 0;
                        bulkSize = 0;
                        for (UUID uuid : garbage) {
                            if (SegmentIdFactory.isDataSegmentId(uuid)) {
                                dataCount++;
                                dataSize += store.readSegment(uuid).size();
                            } else if (SegmentIdFactory.isBulkSegmentId(uuid)) {
                                bulkCount++;
                                bulkSize += store.readSegment(uuid).size();
                            }
                        }
                        System.out.println("Available for garbage collection:");
                        System.out.format(
                                "%6dMB in %6d data segments%n",
                                dataSize / (1024 * 1024), dataCount);
                        System.out.format(
                                "%6dMB in %6d bulk segments%n",
                                bulkSize / (1024 * 1024), bulkCount);
                    } else {
                        // Explicit segment ids given: dump each segment.
                        for (int i = 1; i < args.length; i++) {
                            UUID uuid = UUID.fromString(args[i]);
                            System.out.println(store.readSegment(uuid));
                        }
                    }
                } finally {
                    store.close();
                }
            }
        } else {
            System.err.println("Unknown command: " + command);
            System.exit(1);
View Full Code Here

    private static void upgrade(String olddir, String newdir) throws Exception {
        RepositoryContext source = RepositoryContext.create(
                RepositoryConfig.create(new File(olddir)));
        try {
            FileStore store = new FileStore(new File(newdir), 256, true);
            try {
                NodeStore target = new SegmentNodeStore(store);
                new RepositoryUpgrade(source, target).copy();
            } finally {
                store.close();
            }
        } finally {
            source.getRepository().shutdown();
        }
    }
View Full Code Here

        deleteQuietly(destination);
    }

    @Test
    public void testBackup() throws Exception {
        FileStore source = new FileStore(src, 256, false);

        NodeStore store = new SegmentNodeStore(source);
        init(store);

        // initial content
        FileStoreBackup.backup(store, destination);

        compare(store, destination);

        addTestContent(store);
        FileStoreBackup.backup(store, destination);
        compare(store, destination);

        source.close();
    }
View Full Code Here

        store.merge(builder, EmptyHook.INSTANCE, null);
    }

    private static void compare(NodeStore store, File destination)
            throws IOException {
        FileStore backup = new FileStore(destination, 256, false);
        assertEquals(store.getRoot(), new SegmentNodeStore(backup).getRoot());
        backup.close();
    }
View Full Code Here

TOP

Related Classes of org.apache.jackrabbit.oak.plugins.segment.file.FileStore

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc., which is owned by Oracle, Inc. Contact coftware#gmail.com.