Examples of SSTableReader


Examples of org.apache.cassandra.io.sstable.SSTableReader

        private SSTableReader writeSortedContents(Future<ReplayPosition> context, File dataDirectory) throws ExecutionException, InterruptedException
        {
            logger.info("Writing " + Memtable.this);

            SSTableReader ssTable;
            // Note: errors while creating the writer may leave empty temp files behind.
            SSTableWriter writer = createFlushWriter(cfs.getTempSSTablePath(cfs.directories.getLocationForDisk(dataDirectory)));
            try
            {
                // (we can't clear out the map as-we-go to free up memory,
                //  since the memtable is being used for queries in the "pending flush" category)
                for (Map.Entry<RowPosition, ColumnFamily> entry : columnFamilies.entrySet())
                {
                    ColumnFamily cf = entry.getValue();
                    if (cf.isMarkedForDelete())
                    {
                        // When every node is up, there's no reason to write batchlog data out to sstables
                        // (which in turn incurs cost like compaction) since the BL write + delete cancel each other out,
                        // and BL data is strictly local, so we don't need to preserve tombstones for repair.
                        // If we have a data row + row level tombstone, then writing it is effectively an expensive no-op so we skip it.
                        // See CASSANDRA-4667.
                        if (cfs.columnFamily.equals(SystemTable.BATCHLOG_CF) && cfs.table.name.equals(Table.SYSTEM_KS) && !cf.isEmpty())
                            continue;

                        // Pedantically, you could purge column level tombstones that are past GcGrace when writing to the SSTable.
                        // But it can result in unexpected behaviour where deletes never make it to disk,
                        // as they are lost and so cannot override existing column values. So we only remove deleted columns if there
                        // is a CF level tombstone to ensure the delete makes it into an SSTable.
                        ColumnFamilyStore.removeDeletedColumnsOnly(cf, Integer.MIN_VALUE);
                    }
                    writer.append((DecoratedKey)entry.getKey(), cf);
                }

                if (writer.getFilePointer() > 0)
                {
                    ssTable = writer.closeAndOpenReader();
                    logger.info(String.format("Completed flushing %s (%d bytes) for commitlog position %s",
                                              ssTable.getFilename(), new File(ssTable.getFilename()).length(), context.get()));
                }
                else
                {
                    writer.abort();
                    ssTable = null;
View Full Code Here
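A reader does not have to come from a flush: an existing data file can be opened directly, which is the other common way an SSTableReader comes into existence. A minimal sketch, assuming the Descriptor-based open API of the same Cassandra era as these snippets (the path is hypothetical):

        import org.apache.cassandra.io.sstable.Descriptor;
        import org.apache.cassandra.io.sstable.SSTableReader;

        // Descriptor.fromFilename parses the standard sstable naming scheme from a -Data.db path
        Descriptor desc = Descriptor.fromFilename("/var/lib/cassandra/data/Keyspace1/Standard1-hc-1-Data.db");
        SSTableReader reader = SSTableReader.open(desc);
        logger.info("Opened " + reader.getFilename() + " with ~" + reader.estimatedKeys() + " keys");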

Examples of org.apache.cassandra.io.sstable.SSTableReader

                        }
                    }
                }
                if (!nni.hasNext() || newSSTableSegmentThresholdReached(writer, position))
                {
                    SSTableReader toIndex = writer.closeAndOpenReader(getMaxDataAge(toCompact));
                    cachedKeyMap.put(toIndex, cachedKeys);
                    sstables.add(toIndex);
                    writer = cfs.createCompactionWriter(keysPerSSTable, compactionFileLocation, toCompact);
                    writers.add(writer);
                    cachedKeys = new HashMap<DecoratedKey, Long>();
                }
            }
        }
        finally
        {
            iter.close();
            if (collector != null)
                collector.finishCompaction(ci);
            for (SSTableWriter writer : writers)
                writer.cleanupIfNecessary();
        }

        cfs.replaceCompactedSSTables(toCompact, sstables);
        // TODO: this doesn't belong here, it should be part of the reader to load when the tracker is wired up
        for (Entry<SSTableReader, Map<DecoratedKey, Long>> ssTableReaderMapEntry : cachedKeyMap.entrySet())
        {
            SSTableReader key = ssTableReaderMapEntry.getKey();
            for (Entry<DecoratedKey, Long> entry : ssTableReaderMapEntry.getValue().entrySet())
                key.cacheKey(entry.getKey(), entry.getValue());
        }

        long dTime = System.currentTimeMillis() - startTime;
        long startsize = SSTable.getTotalBytes(toCompact);
        long endsize = SSTable.getTotalBytes(sstables);
View Full Code Here
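The truncated tail gathers the inputs for the usual end-of-compaction summary: elapsed time, bytes in, bytes out. A sketch of the kind of log line those variables feed (the wording is an assumption, not shown in the snippet):

        double ratio = startsize == 0 ? 0 : (double) endsize / startsize;
        logger.info(String.format("Compacted %,d bytes to %,d (~%d%% of original) in %,dms",
                                  startsize, endsize, (int) (ratio * 100), dTime));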

Examples of org.apache.cassandra.io.sstable.SSTableReader

            keySize += key.key.remaining();
        long estimatedSize = (long) ((keySize // index entries
                                      + keySize // keys in data file
                                      + currentThroughput.get()) // data
                                     * 1.2); // bloom filter and row index overhead
        SSTableReader ssTable;
        // Note: errors while creating the writer may leave empty temp files behind.
        SSTableWriter writer = cfs.createFlushWriter(columnFamilies.size(), estimatedSize, context);
        try
        {
            // (we can't clear out the map as-we-go to free up memory,
            //  since the memtable is being used for queries in the "pending flush" category)
            for (Map.Entry<DecoratedKey, ColumnFamily> entry : columnFamilies.entrySet())
            {
                ColumnFamily cf = entry.getValue();
                if (cf.isMarkedForDelete())
                {
                    // Pedantically, you could purge column level tombstones that are past GcGrace when writing to the SSTable.
                    // But it can result in unexpected behaviour where deletes never make it to disk,
                    // as they are lost and so cannot override existing column values. So we only remove deleted columns if there
                    // is a CF level tombstone to ensure the delete makes it into an SSTable.
                    ColumnFamilyStore.removeDeletedColumnsOnly(cf, Integer.MIN_VALUE);
                }
                writer.append(entry.getKey(), cf);
            }

            ssTable = writer.closeAndOpenReader();
        }
        finally
        {
            writer.cleanupIfNecessary();
        }
        logger.info(String.format("Completed flushing %s (%d bytes)",
                                  ssTable.getFilename(), new File(ssTable.getFilename()).length()));
        return ssTable;
    }
View Full Code Here
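To make the estimatedSize heuristic concrete, here is a worked example with hypothetical sizes. Keys are counted twice because they appear both in the index and in the data file, and the 1.2 multiplier reserves headroom for the bloom filter and row index:

        long keySize = 10L << 20;                  // 10 MiB of row keys (hypothetical)
        long dataSize = 100L << 20;                // 100 MiB of serialized row data (hypothetical)
        long estimatedSize = (long) ((keySize      // index entries
                                      + keySize    // keys in data file
                                      + dataSize)  // data
                                     * 1.2);       // bloom filter and row index overhead
        // (10 + 10 + 100) MiB * 1.2 = 144 MiB reserved for the new sstable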

Examples of org.apache.cassandra.io.sstable.SSTableReader

                cfs.flushLock.lock();
                try
                {
                    if (!cfs.isDropped())
                    {
                        SSTableReader sstable = writeSortedContents(context);
                        cfs.replaceFlushed(Memtable.this, sstable);
                    }
                }
                finally
                {
View Full Code Here
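The snippet is cut off before the finally body, but the lock discipline it implies is the standard one: the unlock must live in the finally block so a failed flush cannot leave flushLock held. The complete shape, assuming the finally does nothing beyond unlocking:

                cfs.flushLock.lock();
                try
                {
                    if (!cfs.isDropped())
                    {
                        SSTableReader sstable = writeSortedContents(context);
                        cfs.replaceFlushed(Memtable.this, sstable);
                    }
                }
                finally
                {
                    cfs.flushLock.unlock();
                }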

Examples of org.apache.cassandra.io.sstable.SSTableReader

         *
         * Thus, the correct approach is to pick sstables overlapping anything between the first key in all
         * the candidate sstables, and the last.
         */
        Iterator<SSTableReader> iter = candidates.iterator();
        SSTableReader sstable = iter.next();
        Token first = sstable.first.token;
        Token last = sstable.last.token;
        while (iter.hasNext())
        {
            sstable = iter.next();
View Full Code Here
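The loop is truncated, but the comment above it states the intent: widen [first, last] until it spans every candidate, then select anything overlapping that span. A sketch of the rest of the computation under that reading (Token is Comparable in this codebase):

        while (iter.hasNext())
        {
            sstable = iter.next();
            // widen the span to the smallest first token and the largest last token seen
            if (sstable.first.token.compareTo(first) < 0)
                first = sstable.first.token;
            if (sstable.last.token.compareTo(last) > 0)
                last = sstable.last.token;
        }
        // any sstable whose own [first, last] range intersects the widened span overlaps the candidates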

Examples of org.apache.cassandra.io.sstable.SSTableReader

        // for non-L0 compactions, pick up where we left off last time
        Collections.sort(generations[level], SSTable.sstableComparator);
        int start = 0; // handles case where the prior compaction touched the very last range
        for (int i = 0; i < generations[level].size(); i++)
        {
            SSTableReader sstable = generations[level].get(i);
            if (sstable.first.compareTo(lastCompactedKeys[level]) > 0)
            {
                start = i;
                break;
            }
        }

        // look for a non-suspect table to compact with, starting with where we left off last time,
        // and wrapping back to the beginning of the generation if necessary
        int i = start;
        outer:
        while (true)
        {
            SSTableReader sstable = generations[level].get(i);
            Set<SSTableReader> candidates = Sets.union(Collections.singleton(sstable), overlapping(sstable, generations[(level + 1)]));
            for (SSTableReader candidate : candidates)
            {
                if (candidate.isMarkedSuspect())
                {
View Full Code Here
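The scan breaks off just as it finds a suspect sstable; per the comment, the search then advances, wrapping back to the beginning of the generation. A sketch of how the loop plausibly completes (the return values are assumptions consistent with the surrounding code):

            for (SSTableReader candidate : candidates)
            {
                if (candidate.isMarkedSuspect())
                {
                    // reject this whole set and advance, wrapping to the start of the level
                    i = (i + 1) % generations[level].size();
                    if (i == start)
                        return Collections.emptyList();  // every set contained a suspect sstable
                    continue outer;
                }
            }
            return candidates;  // first fully non-suspect set wins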

Examples of org.apache.cassandra.io.sstable.SSTableReader

                        }
                    }
                }
                if (!nni.hasNext() || newSSTableSegmentThresholdReached(writer))
                {
                    SSTableReader toIndex = writer.closeAndOpenReader(getMaxDataAge(toCompact));
                    cachedKeyMap.put(toIndex, cachedKeys);
                    sstables.add(toIndex);
                    if (nni.hasNext())
                    {
                        writer = cfs.createCompactionWriter(keysPerSSTable, compactionFileLocation, toCompact);
                        writers.add(writer);
                        cachedKeys = new HashMap<DecoratedKey, Long>();
                    }
                }
            }
        }
        catch (Exception e)
        {
            for (SSTableWriter writer : writers)
                writer.abort();
            throw FBUtilities.unchecked(e);
        }
        finally
        {
            iter.close();
            if (collector != null)
                collector.finishCompaction(ci);
        }

        cfs.replaceCompactedSSTables(toCompact, sstables, compactionType);
        // TODO: this doesn't belong here, it should be part of the reader to load when the tracker is wired up
        for (Map.Entry<SSTableReader, Map<DecoratedKey, Long>> ssTableReaderMapEntry : cachedKeyMap.entrySet())
        {
            SSTableReader key = ssTableReaderMapEntry.getKey();
            for (Map.Entry<DecoratedKey, Long> entry : ssTableReaderMapEntry.getValue().entrySet())
                key.cacheKey(entry.getKey(), entry.getValue());
        }

        long dTime = System.currentTimeMillis() - startTime;
        long startsize = SSTable.getTotalBytes(toCompact);
        long endsize = SSTable.getTotalBytes(sstables);
View Full Code Here
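FBUtilities.unchecked in the catch block exists so a checked exception can propagate through interfaces like Runnable that do not declare one. Roughly, a sketch of the idea (not the verbatim utility):

        public static RuntimeException unchecked(Exception e)
        {
            // pass RuntimeExceptions through untouched; wrap everything else
            return e instanceof RuntimeException ? (RuntimeException) e : new RuntimeException(e);
        }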

Examples of org.apache.cassandra.io.sstable.SSTableReader

        }
        long estimatedSize = (long) ((keySize // index entries
                                      + keySize // keys in data file
                                      + currentThroughput.get()) // data
                                     * 1.2); // bloom filter and row index overhead
        SSTableReader ssTable;
        // Note: errors while creating the writer may leave empty temp files behind.
        SSTableWriter writer = cfs.createFlushWriter(columnFamilies.size(), estimatedSize, context.get());
        try
        {
            // (we can't clear out the map as-we-go to free up memory,
            //  since the memtable is being used for queries in the "pending flush" category)
            for (Map.Entry<RowPosition, ColumnFamily> entry : columnFamilies.entrySet())
            {
                ColumnFamily cf = entry.getValue();
                if (cf.isMarkedForDelete())
                {
                    // Pedantically, you could purge column level tombstones that are past GcGrace when writing to the SSTable.
                    // But it can result in unexpected behaviour where deletes never make it to disk,
                    // as they are lost and so cannot override existing column values. So we only remove deleted columns if there
                    // is a CF level tombstone to ensure the delete makes it into an SSTable.
                    ColumnFamilyStore.removeDeletedColumnsOnly(cf, Integer.MIN_VALUE);
                }
                writer.append((DecoratedKey)entry.getKey(), cf);
            }

            ssTable = writer.closeAndOpenReader();
        }
        catch (Exception e)
        {
            writer.abort();
            throw FBUtilities.unchecked(e);
        }
        logger.info(String.format("Completed flushing %s (%d bytes) for commitlog position %s",
                                  ssTable.getFilename(), new File(ssTable.getFilename()).length(), context.get()));
        return ssTable;
    }
View Full Code Here
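The ReplayPosition obtained from context.get() marks how far into the commitlog the flushed data reaches; once the sstable is live, log segments up to that position become eligible for recycling. A rough sketch of the idea (the discard API name and signature are assumptions, not shown in the snippet):

        ReplayPosition flushedTo = context.get();
        // assumed API: everything for this column family up to flushedTo is now durable on disk
        CommitLog.instance.discardCompletedSegments(cfs.metadata.cfId, flushedTo);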

Examples of org.apache.cassandra.io.sstable.SSTableReader

    {
        writer.execute(new WrappedRunnable()
        {
            public void runMayThrow() throws Exception
            {
                SSTableReader sstable = writeSortedContents(context);
                cfs.replaceFlushed(Memtable.this, sstable);
                latch.countDown();
            }
        });
    }
View Full Code Here
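The countDown() on the last line is the signal half of a rendezvous: a caller that needs the flush to be durable blocks on the latch. A hypothetical caller sketch (the enclosing method and executor names are assumptions based on the snippet's shape):

        CountDownLatch latch = new CountDownLatch(1);
        memtable.flushAndSignal(latch, flushWriter, context);  // assumed name for the method shown above
        latch.await();  // returns only after writeSortedContents() and replaceFlushed() finish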

Examples of org.apache.cassandra.io.sstable.SSTableReader

            HashMap<ColumnFamilyStore, List<SSTableReader>> cfstores = new HashMap<ColumnFamilyStore, List<SSTableReader>>();
            for (Future<SSTableReader> future : buildFutures)
            {
                try
                {
                    SSTableReader sstable = future.get();
                    if (sstable == null)
                        continue;
                    assert sstable.getTableName().equals(table);
                    ColumnFamilyStore cfs = Table.open(sstable.getTableName()).getColumnFamilyStore(sstable.getColumnFamilyName());
                    cfs.addSSTable(sstable);
                    if (!cfstores.containsKey(cfs))
                        cfstores.put(cfs, new ArrayList<SSTableReader>());
                    cfstores.get(cfs).add(sstable);
                }
View Full Code Here
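The containsKey/put sequence above is the pre-Java-8 idiom for grouping into a multimap. Shown standalone with a get-or-create variant that avoids the double lookup (pure JDK; variable names hypothetical):

            Map<ColumnFamilyStore, List<SSTableReader>> byStore = new HashMap<ColumnFamilyStore, List<SSTableReader>>();
            List<SSTableReader> group = byStore.get(cfs);
            if (group == null)
            {
                group = new ArrayList<SSTableReader>();
                byStore.put(cfs, group);
            }
            group.add(sstable);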