Package org.apache.cassandra.io.sstable

Examples of org.apache.cassandra.io.sstable.SSTableReader$Operator$Equals


        /*
         * Thus, the correct approach is to pick sstables overlapping anything between the first key in all
         * the candidate sstables, and the last.
         */
        Iterator<SSTableReader> iter = candidates.iterator();
        SSTableReader sstable = iter.next();
        Token first = sstable.first.token;
        Token last = sstable.last.token;
        while (iter.hasNext())
        {
            sstable = iter.next();
View Full Code Here
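The excerpt cuts off inside the while loop. A minimal sketch of how it plausibly continues, widening [first, last] until the pair spans every candidate; this is a hedged reconstruction, not the verbatim source:

        while (iter.hasNext())
        {
            sstable = iter.next();
            if (sstable.first.token.compareTo(first) < 0)
                first = sstable.first.token;   // new leftmost key
            if (sstable.last.token.compareTo(last) > 0)
                last = sstable.last.token;     // new rightmost key
        }
        // any sstable overlapping [first, last] is now a fair compaction candidate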


        // for non-L0 compactions, pick up where we left off last time
        Collections.sort(generations[level], SSTable.sstableComparator);
        int start = 0; // handles case where the prior compaction touched the very last range
        for (int i = 0; i < generations[level].size(); i++)
        {
            SSTableReader sstable = generations[level].get(i);
            if (sstable.first.compareTo(lastCompactedKeys[level]) > 0)
            {
                start = i;
                break;
            }
        }

        // look for a non-suspect table to compact with, starting with where we left off last time,
        // and wrapping back to the beginning of the generation if necessary
        for (int i = 0; i < generations[level].size(); i++)
        {
            SSTableReader sstable = generations[level].get((start + i) % generations[level].size());
            Set<SSTableReader> candidates = Sets.union(Collections.singleton(sstable), overlapping(sstable, generations[level + 1]));
            if (Iterables.any(candidates, suspectP))
                continue;
            if (Sets.intersection(candidates, compacting).isEmpty())
                return candidates;
View Full Code Here
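The overlapping(...) helper called above is outside the excerpt. A hedged sketch of what it has to do, written as a plain linear scan over the next level (the real code may well use an interval tree; the method name here is illustrative):

        static Set<SSTableReader> overlappingSketch(SSTableReader sstable, List<SSTableReader> nextLevel)
        {
            Set<SSTableReader> results = new HashSet<SSTableReader>();
            for (SSTableReader other : nextLevel)
            {
                // keep `other` unless its [first, last] key range lies entirely
                // before or entirely after the given sstable's range
                boolean disjoint = other.last.compareTo(sstable.first) < 0
                                || other.first.compareTo(sstable.last) > 0;
                if (!disjoint)
                    results.add(other);
            }
            return results;
        }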

        protected void runWith(File sstableDirectory) throws Exception
        {
            assert sstableDirectory != null : "Flush task is not bound to any disk";

            SSTableReader sstable = writeSortedContents(context, sstableDirectory);
            cfs.replaceFlushed(Memtable.this, sstable);
            latch.countDown();
        }
View Full Code Here
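The countDown above implies a coordinating thread blocked on the latch. A hedged sketch of that coordination, assuming one flush task per data directory (flushTasks and flushWriter are hypothetical names):

        CountDownLatch latch = new CountDownLatch(flushTasks.size()); // one count per per-disk task
        for (Runnable task : flushTasks)
            flushWriter.execute(task);   // each task ends with latch.countDown()
        latch.await();                   // past this point, every directory has flushed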

        private SSTableReader writeSortedContents(Future<ReplayPosition> context, File sstableDirectory)
        throws ExecutionException, InterruptedException
        {
            logger.info("Writing " + Memtable.this.toString());

            SSTableReader ssTable;
            // errors when creating the writer that may leave empty temp files.
            SSTableWriter writer = createFlushWriter(cfs.getTempSSTablePath(sstableDirectory));
            try
            {
                // (we can't clear out the map as-we-go to free up memory,
                //  since the memtable is being used for queries in the "pending flush" category)
                for (Map.Entry<RowPosition, ColumnFamily> entry : columnFamilies.entrySet())
                {
                    ColumnFamily cf = entry.getValue();
                    if (cf.isMarkedForDelete())
                    {
                        // When every node is up, there's no reason to write batchlog data out to sstables
                        // (which in turn incurs cost like compaction) since the BL write + delete cancel each other out,
                        // and BL data is strictly local, so we don't need to preserve tombstones for repair.
                        // If we have a data row + row level tombstone, then writing it is effectively an expensive no-op so we skip it.
                        // See CASSANDRA-4667.
                        if (cfs.columnFamily.equals(SystemTable.BATCHLOG_CF) && cfs.table.name.equals(Table.SYSTEM_KS) && !cf.isEmpty())
                            continue;

                        // Pedantically, you could purge column level tombstones that are past GcGrace when writing to the SSTable.
                        // But it can result in unexpected behaviour where deletes never make it to disk,
                        // as they are lost and so cannot override existing column values. So we only remove deleted columns if there
                        // is a CF level tombstone to ensure the delete makes it into an SSTable.
                        ColumnFamilyStore.removeDeletedColumnsOnly(cf, Integer.MIN_VALUE);
                    }
                    writer.append((DecoratedKey)entry.getKey(), cf);
                }

                if (writer.getFilePointer() > 0)
                {
                    ssTable = writer.closeAndOpenReader();
                    logger.info(String.format("Completed flushing %s (%d bytes) for commitlog position %s",
                                              ssTable.getFilename(), new File(ssTable.getFilename()).length(), context.get()));
                }
                else
                {
                    writer.abort();
                    ssTable = null;
View Full Code Here
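The excerpt is truncated inside the try block. The visible pieces (closeAndOpenReader on success, abort for an empty flush) suggest the usual abort-on-failure shape; a hedged reconstruction of the whole pattern, with appendAll standing in for the append loop shown above:

        SSTableWriter writer = createFlushWriter(path);
        try
        {
            appendAll(writer);                      // may throw mid-write
            if (writer.getFilePointer() > 0)
                return writer.closeAndOpenReader(); // durable sstable, safe to read
            writer.abort();                         // nothing written: delete the temp files
            return null;
        }
        catch (Exception e)
        {
            writer.abort();                         // failed flush: delete the temp files
            throw e;
        }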

        Collections.sort(candidates, SSTable.maxTimestampComparator);

        Iterator<SSTableReader> iterator = candidates.iterator();
        while (iterator.hasNext())
        {
            SSTableReader candidate = iterator.next();
            if (candidate.getMaxTimestamp() >= minTimestamp)
            {
                minTimestamp = Math.min(candidate.getMinTimestamp(), minTimestamp);
                iterator.remove();
            }
            else
            {
                logger.debug("Dropping expired SSTable {} (maxLocalDeletionTime={}, gcBefore={})",
                             candidate, candidate.getSSTableMetadata().maxLocalDeletionTime, gcBefore);
            }
        }
        return new HashSet<SSTableReader>(candidates);
    }
View Full Code Here
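maxTimestampComparator orders sstables newest-first, so each pass either proves a candidate still overlaps live data (removing it and lowering the bar to that candidate's oldest cell) or keeps it as fully expired. A self-contained illustration with timestamps as plain longs (all names hypothetical, java.util imports elided as in the excerpts):

        record Sst(String name, long minTs, long maxTs) {}

        static Set<Sst> fullyExpired(List<Sst> candidates, long minTimestamp)
        {
            candidates.sort((a, b) -> Long.compare(b.maxTs(), a.maxTs())); // newest first
            Iterator<Sst> it = candidates.iterator();
            while (it.hasNext())
            {
                Sst c = it.next();
                if (c.maxTs() >= minTimestamp)
                {
                    minTimestamp = Math.min(c.minTs(), minTimestamp); // lower the bar
                    it.remove();                                      // still overlaps live data
                }
            }
            return new HashSet<>(candidates); // whatever is left can be dropped outright
        }

        // fullyExpired([B(60,120), C(55,70), A(10,50)], 100) keeps only A:
        // B lowers the bar to 60, C lowers it to 55, and A's max of 50 is below that.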

            OperationType op;
            int level;
            if (pair == null)
            {
                // if there is no sstable to compact in standard way, try compacting based on droppable tombstone ratio
                SSTableReader sstable = findDroppableSSTable(gcBefore);
                if (sstable == null)
                {
                    logger.debug("No compaction necessary for {}", this);
                    return null;
                }
                sstables = Collections.singleton(sstable);
                op = OperationType.TOMBSTONE_COMPACTION;
                level = sstable.getSSTableLevel();
            }
            else
            {
                op = OperationType.COMPACTION;
                sstables = pair.left;
View Full Code Here
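findDroppableSSTable is likewise outside the excerpt. A hedged sketch of such a scan: pick the non-compacting sstable with the highest estimated droppable-tombstone ratio above some threshold (getEstimatedDroppableTombstoneRatio exists on SSTableReader in Cassandra of this era; tombstoneThreshold is an illustrative name):

            SSTableReader best = null;
            double bestRatio = tombstoneThreshold;
            for (SSTableReader sstable : cfs.getSSTables())
            {
                if (compacting.contains(sstable))
                    continue;                          // already being rewritten
                double ratio = sstable.getEstimatedDroppableTombstoneRatio(gcBefore);
                if (ratio > bestRatio)
                {
                    best = sstable;
                    bestRatio = ratio;
                }
            }
            return best;                               // null: no tombstone compaction worth doing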

        public Future<Pair<KeyCacheKey, RowIndexEntry>> deserialize(DataInputStream input, ColumnFamilyStore cfs) throws IOException
        {
            ByteBuffer key = ByteBufferUtil.readWithLength(input);
            int generation = input.readInt();
            SSTableReader reader = findDesc(generation, cfs.getSSTables());
            input.readBoolean(); // backwards compatibility for "promoted indexes" boolean
            if (reader == null)
            {
                RowIndexEntry.serializer.skipPromotedIndex(input);
                return null;
View Full Code Here
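findDesc is not shown either; a plausible implementation is a linear scan of the live sstables for the saved generation number (hedged reconstruction):

        private static SSTableReader findDesc(int generation, Collection<SSTableReader> sstables)
        {
            for (SSTableReader sstable : sstables)
                if (sstable.descriptor.generation == generation)
                    return sstable;
            return null; // the sstable was compacted away since the key cache was saved
        }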

        protected void runWith(File sstableDirectory) throws Exception
        {
            assert sstableDirectory != null : "Flush task is not bound to any disk";

            SSTableReader sstable = writeSortedContents(context, sstableDirectory);
            cfs.replaceFlushed(Memtable.this, sstable);
            latch.countDown();
        }
View Full Code Here
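This is the same runWith from a later Cassandra version; the interesting call is cfs.replaceFlushed, which must atomically retire the memtable and publish the new sstable. A hedged sketch of that handoff (the body is a reconstruction):

        public void replaceFlushed(Memtable memtable, SSTableReader sstable)
        {
            data.replaceFlushed(memtable, sstable);                // tracker swap: memtable out, sstable in
            if (sstable != null)
                CompactionManager.instance.submitBackground(this); // the new sstable may make compaction worthwhile
        }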

        private SSTableReader writeSortedContents(Future<ReplayPosition> context, File sstableDirectory)
        throws ExecutionException, InterruptedException
        {
            logger.info("Writing " + Memtable.this.toString());

            SSTableReader ssTable;
            // errors when creating the writer that may leave empty temp files.
            SSTableWriter writer = createFlushWriter(cfs.getTempSSTablePath(sstableDirectory));
            try
            {
                // (we can't clear out the map as-we-go to free up memory,
                //  since the memtable is being used for queries in the "pending flush" category)
                for (Map.Entry<RowPosition, AtomicSortedColumns> entry : rows.entrySet())
                {
                    ColumnFamily cf = entry.getValue();
                    if (cf.isMarkedForDelete())
                    {
                        // When every node is up, there's no reason to write batchlog data out to sstables
                        // (which in turn incurs cost like compaction) since the BL write + delete cancel each other out,
                        // and BL data is strictly local, so we don't need to preserve tombstones for repair.
                        // If we have a data row + row level tombstone, then writing it is effectively an expensive no-op so we skip it.
                        // See CASSANDRA-4667.
                        if (cfs.name.equals(SystemKeyspace.BATCHLOG_CF) && cfs.keyspace.getName().equals(Keyspace.SYSTEM_KS) && !(cf.getColumnCount() == 0))
                            continue;

                        // Pedantically, you could purge column level tombstones that are past GcGrace when writing to the SSTable.
                        // But it can result in unexpected behaviour where deletes never make it to disk,
                        // as they are lost and so cannot override existing column values. So we only remove deleted columns if there
                        // is a CF level tombstone to ensure the delete makes it into an SSTable.
                        ColumnFamilyStore.removeDeletedColumnsOnly(cf, Integer.MIN_VALUE);
                    }
                    writer.append((DecoratedKey)entry.getKey(), cf);
                }

                if (writer.getFilePointer() > 0)
                {
                    ssTable = writer.closeAndOpenReader();
                    logger.info(String.format("Completed flushing %s (%d bytes) for commitlog position %s",
                                              ssTable.getFilename(), new File(ssTable.getFilename()).length(), context.get()));
                }
                else
                {
                    writer.abort();
                    ssTable = null;
View Full Code Here
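Note the Integer.MIN_VALUE argument above: it asks removeDeletedColumnsOnly to drop only columns shadowed by the row-level tombstone, while guaranteeing no tombstone is itself considered old enough to purge, so the delete always reaches the sstable. A standalone illustration of that shadowing rule (Cell and removeShadowed are hypothetical):

        record Cell(String name, long timestamp) {}

        static List<Cell> removeShadowed(List<Cell> cells, long rowTombstoneTimestamp)
        {
            List<Cell> kept = new ArrayList<>();
            for (Cell c : cells)
                if (c.timestamp() > rowTombstoneTimestamp) // written after the delete: survives
                    kept.add(c);
            return kept; // the row tombstone itself is still written to the sstable
        }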

        public Future<Pair<KeyCacheKey, RowIndexEntry>> deserialize(DataInputStream input, ColumnFamilyStore cfs) throws IOException
        {
            ByteBuffer key = ByteBufferUtil.readWithLength(input);
            int generation = input.readInt();
            SSTableReader reader = findDesc(generation, cfs.getSSTables());
            boolean promotedIndexes = input.readBoolean();
            if (reader == null)
            {
                if (promotedIndexes)
                    RowIndexEntry.serializer.skip(input, Descriptor.Version.CURRENT);
                return null;
            }
            RowIndexEntry entry = promotedIndexes
                                ? RowIndexEntry.serializer.deserialize(input, reader.descriptor.version)
                                : reader.getPosition(reader.partitioner.decorateKey(key), Operator.EQ);
            return Futures.immediateFuture(Pair.create(new KeyCacheKey(reader.descriptor, key), entry));
        }
View Full Code Here
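The getPosition call with Operator.EQ is where this page's indexed class appears: SSTableReader.Operator.Equals turns a raw key comparison into a binary-search decision in which 0 means an exact hit. A hedged sketch of the pattern (the real class may differ in detail):

        abstract static class Operator
        {
            static final Operator EQ = new Equals();

            // translate a compare() result into a search decision; 0 = found
            abstract int apply(int comparison);

            static final class Equals extends Operator
            {
                // only an exact match (comparison == 0) yields 0; any other
                // result steers the search back toward the key and never settles
                int apply(int comparison) { return -comparison; }
            }
        }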
