Package org.apache.cassandra.db.commitlog

Examples of org.apache.cassandra.db.commitlog.ReplayPosition
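A ReplayPosition identifies a point in the commit log as a (segment id, offset within the segment) pair. The excerpts below show the three places it matters: capturing the current position before a flush or truncate, stamping it into sstable metadata, and handing it back to the commit log so covered segments can be discarded. As a rough orientation only (this is not code from the Cassandra sources; the ordering semantics are inferred from how the excerpts use the class), constructing and comparing positions looks like this:

import org.apache.cassandra.db.commitlog.ReplayPosition;

public class ReplayPositionSketch
{
    public static void main(String[] args)
    {
        // A position is (commit log segment id, offset within that segment); the values are arbitrary.
        ReplayPosition earlier = new ReplayPosition(11L, 12);
        ReplayPosition later = new ReplayPosition(11L, 512);

        // Ordering by (segment, position) is assumed from the class's use in
        // getReplayPosition() and discardCompletedSegments() in the excerpts below.
        System.out.println(earlier.compareTo(later) < 0);   // expected: true

        // ReplayPosition.NONE is the sentinel used when no commit log context applies,
        // e.g. when a flush is performed without writing to the commit log.
        System.out.println(ReplayPosition.NONE);
    }
}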


                logger.debug("memtable is already frozen; another thread must be flushing it");
                return null;
            }

            assert getMemtableThreadSafe() == oldMemtable;
            final ReplayPosition ctx = writeCommitLog ? CommitLog.instance.getContext() : ReplayPosition.NONE;
            logger.debug("flush position is {}", ctx);

            // submit the memtable for any indexed sub-cfses, and our own.
            final List<ColumnFamilyStore> icc = new ArrayList<ColumnFamilyStore>();
            // don't assume that this.memtable is dirty; forceFlush can bring us here during index build even if it is not
View Full Code Here
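This excerpt captures the commit log context just before the memtable is flushed, falling back to ReplayPosition.NONE when the commit log is skipped. The position becomes useful once the flush has finished, at which point it is handed back to the commit log so the segments it covers can be dropped (the truncate excerpt further down shows discardCompletedSegments doing exactly that). A condensed, hypothetical sketch of that pairing, where flushMemtableToSSTable() and cfId are placeholders rather than real members from the excerpt:

        // Hypothetical sketch of the capture-then-discard pairing; flushMemtableToSSTable()
        // and cfId are placeholders, not methods/fields from the excerpt above.
        ReplayPosition ctx = writeCommitLog ? CommitLog.instance.getContext() : ReplayPosition.NONE;

        flushMemtableToSSTable();   // placeholder: write the frozen memtable out as an sstable

        // A flush that skipped the commit log has nothing to discard.
        if (writeCommitLog)
            CommitLog.instance.discardCompletedSegments(cfId, ctx);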


        // "waitForActiveFlushes" after the new segment has been created.
        logger.debug("truncating {}", columnFamily);
        // flush the CF being truncated before forcing the new segment
        forceBlockingFlush();
        CommitLog.instance.forceNewSegment();
        ReplayPosition position = CommitLog.instance.getContext();
        // now flush everyone else.  re-flushing ourselves is not necessary, but harmless
        for (ColumnFamilyStore cfs : ColumnFamilyStore.all())
            cfs.forceFlush();
        waitForActiveFlushes();
        // if everything was clean, flush won't have called discard
View Full Code Here

                                 sstableMetadataCollector);
    }

    public SSTableWriter createCompactionWriter(long estimatedRows, String location, Collection<SSTableReader> sstables) throws IOException
    {
        ReplayPosition rp = ReplayPosition.getReplayPosition(sstables);
        SSTableMetadata.Collector sstableMetadataCollector = SSTableMetadata.createCollector().replayPosition(rp);

        // get the max timestamp of the precompacted sstables
        for (SSTableReader sstable : sstables)
            sstableMetadataCollector.updateMaxTimestamp(sstable.getMaxTimestamp());
View Full Code Here
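Here the compaction writer derives a single replay position from all of its input sstables, so the compacted output still records how much of the commit log its data covers. The implementation of ReplayPosition.getReplayPosition is not shown in these excerpts; conceptually it reduces to keeping the greatest position among the inputs, or NONE when there are none, which a standalone sketch could express as:

import java.util.Collection;

import org.apache.cassandra.db.commitlog.ReplayPosition;

public class MaxReplayPositionSketch
{
    // Sketch of the reduction getReplayPosition() is presumed to perform; the per-sstable
    // positions are passed in directly here to avoid assuming metadata accessor names.
    public static ReplayPosition maxReplayPosition(Collection<ReplayPosition> inputPositions)
    {
        ReplayPosition max = ReplayPosition.NONE;
        for (ReplayPosition rp : inputPositions)
            if (rp.compareTo(max) > 0)
                max = rp;
        return max;
    }
}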

            new long[] { 1L, 2L },
            new long[] { 3L, 4L, 5L });
        EstimatedHistogram columnCounts = new EstimatedHistogram(
            new long[] { 6L, 7L },
            new long[] { 8L, 9L, 10L });
        ReplayPosition rp = new ReplayPosition(11L, 12);
        long maxTimestamp = 4162517136L;

        SSTableMetadata.Collector collector = SSTableMetadata.createCollector()
                                                             .estimatedRowSize(rowSizes)
                                                             .estimatedColumnCount(columnCounts)
View Full Code Here

                return null;
            }

            assert getMemtableThreadSafe() == oldMemtable;
            oldMemtable.freeze();
            final ReplayPosition ctx = writeCommitLog ? CommitLog.instance.getContext() : ReplayPosition.NONE;
            logger.debug("flush position is {}", ctx);

            // submit the memtable for any indexed sub-cfses, and our own.
            List<ColumnFamilyStore> icc = new ArrayList<ColumnFamilyStore>();
            // don't assume that this.memtable is dirty; forceFlush can bring us here during index build even if it is not
View Full Code Here

        // for a given column family the memtable is clean, forceFlush will return
        // immediately, even though there could be a memtable being flushed at the same
        // time.  So to guarantee that all segments can be cleaned out, we need
        // "waitForActiveFlushes" after the new segment has been created.
        CommitLog.instance.forceNewSegment();
        ReplayPosition position = CommitLog.instance.getContext();
        for (ColumnFamilyStore cfs : ColumnFamilyStore.all())
            cfs.forceFlush();
        waitForActiveFlushes();
        // if everything was clean, flush won't have called discard
        CommitLog.instance.discardCompletedSegments(metadata.cfId, position);
View Full Code Here
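Reading the two truncate excerpts together, the ordering is the point: flush the column family being truncated, start a fresh commit log segment, record the context, push everyone else's data out of the old segments, wait for in-flight flushes (forceFlush is a no-op for clean memtables), and only then discard the completed segments. Condensed into one outline, using only the calls already shown above:

        // Condensed outline of the truncate sequence above (not a drop-in method).
        forceBlockingFlush();                                       // 1. flush the CF being truncated
        CommitLog.instance.forceNewSegment();                       // 2. force a new, empty segment
        ReplayPosition position = CommitLog.instance.getContext();  // 3. everything before this is "old"
        for (ColumnFamilyStore cfs : ColumnFamilyStore.all())
            cfs.forceFlush();                                       // 4. flush everyone else out of the old segments
        waitForActiveFlushes();                                     // 5. also catch flushes already in progress
        CommitLog.instance.discardCompletedSegments(metadata.cfId, position);  // 6. old segments can now go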

                final long truncatedAt = System.currentTimeMillis();
                if (DatabaseDescriptor.isAutoSnapshot())
                    snapshot(Keyspace.getTimestampedSnapshotName(name));

                ReplayPosition replayAfter = discardSSTables(truncatedAt);

                for (SecondaryIndex index : indexManager.getIndexes())
                    index.truncateBlocking(truncatedAt);

                SystemKeyspace.saveTruncationRecord(ColumnFamilyStore.this, truncatedAt, replayAfter);
View Full Code Here
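Here truncation persists both the truncation timestamp and the replay position that was current once the old data had been discarded. On restart, commit log replay can then drop any mutation for this column family that falls at or before the recorded position instead of resurrecting truncated data. A hypothetical check in that spirit (the parameter names are placeholders, not the replayer's actual variables):

    // Hypothetical replay-side check; both positions stand in for values the replayer
    // would look up (the saved truncation record) and track (the mutation's position).
    static boolean skipBecauseTruncated(ReplayPosition recordedTruncation, ReplayPosition mutationPosition)
    {
        // Anything written at or before the truncation point was removed by the truncate
        // and must not be replayed.
        return mutationPosition.compareTo(recordedTruncation) <= 0;
    }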

             */
            writeBarrier = keyspace.writeOrder.newBarrier();
            memtables = new ArrayList<>();

            // submit flushes for the memtable for any indexed sub-cfses, and our own
            final ReplayPosition minReplayPosition = CommitLog.instance.getContext();
            for (ColumnFamilyStore cfs : concatWithIndexes())
            {
                // switch all memtables, regardless of their dirty status, setting the barrier
                // so that we can reach a coordinated decision about cleanliness once they
                // are no longer possible to be modified
View Full Code Here

        public StatsMetadata deserialize(Descriptor.Version version, DataInput in) throws IOException
        {
            EstimatedHistogram rowSizes = EstimatedHistogram.serializer.deserialize(in);
            EstimatedHistogram columnCounts = EstimatedHistogram.serializer.deserialize(in);
            ReplayPosition replayPosition = ReplayPosition.serializer.deserialize(in);
            long minTimestamp = in.readLong();
            long maxTimestamp = in.readLong();
            int maxLocalDeletionTime = in.readInt();
            double compressionRatio = in.readDouble();
            StreamingHistogram tombstoneHistogram = StreamingHistogram.serializer.deserialize(in);
View Full Code Here
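StatsMetadata reads the replay position back with ReplayPosition.serializer.deserialize(in), alongside the histograms and timestamps that surround it on disk. Assuming the serializer also exposes the matching serialize(ReplayPosition, DataOutput) method (not shown in these excerpts), a small round trip over plain java.io streams would look like this:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.cassandra.db.commitlog.ReplayPosition;

public class ReplayPositionRoundTrip
{
    public static void main(String[] args) throws IOException
    {
        ReplayPosition original = new ReplayPosition(11L, 12);

        // serialize(ReplayPosition, DataOutput) is assumed to mirror the
        // deserialize(DataInput) call used by StatsMetadata above.
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        ReplayPosition.serializer.serialize(original, new DataOutputStream(bytes));

        ReplayPosition copy = ReplayPosition.serializer.deserialize(
                new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));

        System.out.println(original.equals(copy));   // expected: true
    }
}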
