Package org.apache.cassandra.db

Examples of org.apache.cassandra.db.ColumnFamilyStore$PostFlush
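All of these examples revolve around obtaining a ColumnFamilyStore and then flushing, compacting, or scanning it. As a quick orientation, here is a minimal, self-contained sketch of the pattern most of them share (the sketch is not taken from any of the examples; the class name and the keyspace/table arguments are made up): look up the ColumnFamilyStore through Keyspace.open(...) and call forceFlush(), whose returned future completes once the post-flush work has finished.

import java.util.concurrent.Future;

import org.apache.cassandra.db.ColumnFamilyStore;
import org.apache.cassandra.db.Keyspace;

public class ForceFlushSketch
{
    // hypothetical helper: flush one table and wait for the flush (and its PostFlush task) to complete
    public static void flushAndWait(String keyspaceName, String tableName) throws Exception
    {
        ColumnFamilyStore cfs = Keyspace.open(keyspaceName).getColumnFamilyStore(tableName);
        Future<?> flush = cfs.forceFlush(); // same call the commit log replay example below uses
        flush.get();                        // blocks until the memtable has been written out
    }
}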


    private void scheduleTidy(final boolean closeBf, final boolean closeSummary, final boolean closeFiles, final boolean deleteFiles, final boolean deleteAll)
    {
        // tidying must not start until every reference to this SSTableReader has been released
        if (references.get() != 0)
            throw new IllegalStateException("SSTable is not fully released (" + references.get() + " references)");

        final ColumnFamilyStore cfs = Schema.instance.getColumnFamilyStoreInstance(metadata.cfId);
        final OpOrder.Barrier barrier;
        if (cfs != null)
        {
            // issue a barrier on the table's read ordering so in-flight reads finish before the files are tidied
            barrier = cfs.readOrdering.newBarrier();
            barrier.issue();
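The scheduleTidy example above protects file tidying with a barrier on the table's read ordering. Below is a hedged, stand-alone sketch of that barrier pattern, assuming the OpOrder API from org.apache.cassandra.utils.concurrent that the snippet relies on (newBarrier() and issue() appear in the snippet itself; start(), Group, and await() are assumptions about the same class):

import org.apache.cassandra.utils.concurrent.OpOrder;

public class ReadBarrierSketch
{
    public static void main(String[] args)
    {
        OpOrder readOrdering = new OpOrder();

        // a reader registers itself with the ordering for the duration of its work
        try (OpOrder.Group reads = readOrdering.start())
        {
            // ... read from the SSTable while it is guaranteed not to be tidied away ...
        }

        // the tidier issues a barrier and waits for every reader group started before it
        OpOrder.Barrier barrier = readOrdering.newBarrier();
        barrier.issue();
        barrier.await();
        // once await() returns it is safe to close or delete the underlying files
    }
}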


        List<Future<?>> futures = new ArrayList<>();
        for (Map.Entry<UUID, ColumnFamilyStore> columnFamilyStoreEntry : prs.columnFamilyStores.entrySet())
        {

            Collection<SSTableReader> sstables = new HashSet<>(prs.getAndReferenceSSTables(columnFamilyStoreEntry.getKey()));
            ColumnFamilyStore cfs = columnFamilyStoreEntry.getValue();
            // retry until the remaining sstables can all be marked as compacting; any sstable that is
            // already being compacted elsewhere is dropped from the set and its reference released
            boolean success = false;
            while (!success)
            {
                for (SSTableReader compactingSSTable : cfs.getDataTracker().getCompacting())
                {
                    if (sstables.remove(compactingSSTable))
                        SSTableReader.releaseReferences(Arrays.asList(compactingSSTable));
                }
                success = sstables.isEmpty() || cfs.getDataTracker().markCompacting(sstables);
            }

            futures.add(CompactionManager.instance.submitAntiCompaction(cfs, prs.ranges, sstables, prs.repairedAt));
        }

    }

    /** Load keyspace definitions, but do not initialize the keyspace instances. */
    public static void loadSchemas()
    {
        ColumnFamilyStore schemaCFS = SystemKeyspace.schemaCFS(SystemKeyspace.SCHEMA_KEYSPACES_CF);

        // if the keyspace holding the schema definitions is empty, try loading the old way
        if (schemaCFS.estimateKeys() == 0)
        {
            logger.info("Couldn't detect any schema definitions in local storage.");
            // peek around the data directories to see if anything is there.
            if (hasExistingNoSystemTables())
                logger.info("Found keyspace data in data directories. Consider using cqlsh to define your schema.");

    {
        Set<ColumnFamilyStore> cfsList = new HashSet<>();

        for (SecondaryIndex index : allIndexes)
        {
            ColumnFamilyStore cfs = index.getIndexCfs();
            if (cfs != null)
                cfsList.add(cfs);
        }

        return cfsList;

        // make sure the writes have materialized inside of the memtables by waiting for all outstanding writes
        // on the relevant keyspaces to complete
        Set<Keyspace> keyspaces = new HashSet<>();
        for (UUID cfId : last.getDirtyCFIDs())
        {
            ColumnFamilyStore cfs = Schema.instance.getColumnFamilyStoreInstance(cfId);
            if (cfs != null)
                keyspaces.add(cfs.keyspace);
        }
        for (Keyspace keyspace : keyspaces)
            keyspace.writeOrder.awaitNewBarrier();

                    segment.markClean(dirtyCFId, segment.getContext());
                }
                else if (!flushes.containsKey(dirtyCFId))
                {
                    String keyspace = pair.left;
                    final ColumnFamilyStore cfs = Keyspace.open(keyspace).getColumnFamilyStore(dirtyCFId);
                    // it is safe to call forceFlush here: we will only ever block (briefly) on other attempts to flush,
                    // and there is no deadlock possibility since the switchLock was removed
                    flushes.put(dirtyCFId, force ? cfs.forceFlush() : cfs.forceFlush(maxReplayPosition));
                }
            }
        }

        return Futures.allAsList(flushes.values());
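The replay example above collects one flush future per dirty table and combines them with Guava's Futures.allAsList. Here is a small, self-contained illustration of that combinator, using a plain executor and dummy tasks in place of the real flushes (every name in it is made up for illustration):

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.Executors;

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

public class AllAsListSketch
{
    public static void main(String[] args) throws Exception
    {
        ListeningExecutorService exec =
                MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(2));

        // stand-ins for the per-table flush futures gathered in the snippet above
        ListenableFuture<String> a = exec.submit(() -> "flush-a");
        ListenableFuture<String> b = exec.submit(() -> "flush-b");

        // one future that completes when every input future has completed
        List<String> results = Futures.allAsList(Arrays.asList(a, b)).get();
        System.out.println(results); // [flush-a, flush-b]

        exec.shutdown();
    }
}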

            {
                logger.warn("Schema does not exist for file {}. Skipping.", filename);
                continue;
            }
            // group by keyspace/columnfamily
            ColumnFamilyStore cfs = Keyspace.open(desc.ksname).getColumnFamilyStore(desc.cfname);
            descriptors.put(cfs, cfs.directories.find(filename.trim()));
        }

        List<Future<?>> futures = new ArrayList<>();
        for (ColumnFamilyStore cfs : descriptors.keySet())

                throw new IllegalArgumentException(String.format("Unknown keyspace/columnFamily %s.%s",
                                                                 options.keyspace,
                                                                 options.cf));

            Keyspace keyspace = Keyspace.openWithoutSSTables(options.keyspace);
            ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(options.cf);

            OutputHandler handler = new OutputHandler.SystemOutput(false, options.debug);
            Directories.SSTableLister lister = cfs.directories.sstableLister();
            if (options.snapshot != null)
                lister.onlyBackups(true).snapshots(options.snapshot);

                System.err.println("ColumnFamily not found: " + keyspaceName + "/" + columnfamily);
                System.exit(1);
            }

            Keyspace keyspace = Keyspace.openWithoutSSTables(keyspaceName);
            ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(columnfamily);
            boolean foundSSTable = false;
            for (Map.Entry<Descriptor, Set<Component>> sstable : cfs.directories.sstableLister().list().entrySet())
            {
                if (sstable.getValue().contains(Component.STATS))
                {

    {
        logger.debug("reading file from {}, repairedAt = {}", session.peer, repairedAt);
        long totalSize = totalSize();

        Pair<String, String> kscf = Schema.instance.getCF(cfId);
        ColumnFamilyStore cfs = Keyspace.open(kscf.left).getColumnFamilyStore(kscf.right);

        SSTableWriter writer = createWriter(cfs, totalSize, repairedAt);
        DataInputStream dis = new DataInputStream(new LZFInputStream(Channels.newInputStream(channel)));
        BytesReadTracker in = new BytesReadTracker(dis);
        try
