Package org.apache.cassandra.utils

Examples of org.apache.cassandra.utils.BytesReadTracker

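BytesReadTracker wraps a DataInput and counts the bytes read through it: reset(long) restarts the counter and getBytesRead() reports how much has been consumed since then, which the streaming code below uses to advance section offsets and progress counters. The following is a minimal, self-contained sketch of that pattern, assuming only the constructor, reset(long), getBytesRead(), and DataInput read methods that appear in the excerpts below; the class name, payload, and record layout are invented for illustration.

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;

import org.apache.cassandra.utils.BytesReadTracker;

public class BytesReadTrackerUsageSketch
{
    public static void main(String[] args) throws IOException
    {
        // Five fake 12-byte records (a long followed by an int); the layout is
        // made up purely to have something to read through the tracker.
        byte[] payload = new byte[60];
        DataInputStream input = new DataInputStream(new ByteArrayInputStream(payload));

        // Wrap the stream so every read performed through 'in' is counted.
        BytesReadTracker in = new BytesReadTracker(input);

        long sectionLength = payload.length;
        long bytesRead = 0;
        while (bytesRead < sectionLength)
        {
            in.reset(0);                    // start counting the next record from zero
            long timestamp = in.readLong(); // read the record through the tracker
            int size = in.readInt();
            bytesRead += in.getBytesRead(); // account for exactly the bytes consumed
        }
    }
}

In the excerpts that follow, the same loop shape bounds each streamed file section and feeds the transfer progress reporting.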

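        // Streams rows into a new SSTable; a BytesReadTracker around 'input' counts the bytes consumed per row for section and progress accounting.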
        SSTableWriter writer = new SSTableWriter(localFile.getFilename(), remoteFile.estimatedKeys);
        CompactionController controller = new CompactionController(cfs, Collections.<SSTableReader>emptyList(), Integer.MIN_VALUE, true);

        try
        {
            BytesReadTracker in = new BytesReadTracker(input);

            for (Pair<Long, Long> section : localFile.sections)
            {
                long length = section.right - section.left;
                long bytesRead = 0;
                while (bytesRead < length)
                {
                    in.reset(0);
                    key = SSTableReader.decodeKey(StorageService.getPartitioner(), localFile.desc, ByteBufferUtil.readWithShortLength(in));
                    long dataSize = SSTableReader.readRowSize(in, localFile.desc);

                    ColumnFamily cached = cfs.getRawCachedRow(key);
                    if (cached != null && remoteFile.type == OperationType.AES && dataSize <= DatabaseDescriptor.getInMemoryCompactionLimit())
                    {
                        // need to update row cache
                        // Note: Because we won't just echo the columns, there is no need to use the PRESERVE_SIZE flag, unlike what appendFromStream does below
                        SSTableIdentityIterator iter = new SSTableIdentityIterator(cfs.metadata, in, key, 0, dataSize, IColumnSerializer.Flag.FROM_REMOTE);
                        PrecompactedRow row = new PrecompactedRow(controller, Collections.singletonList(iter));
                        // We don't expire anything so the row shouldn't be empty
                        assert !row.isEmpty();
                        writer.append(row);
                        // row append does not update the max timestamp on its own
                        writer.updateMaxTimestamp(row.maxTimestamp());

                        // update cache
                        ColumnFamily cf = row.getFullColumnFamily();
                        cfs.updateRowCache(key, cf);
                    }
                    else
                    {
                        writer.appendFromStream(key, cfs.metadata, dataSize, in);
                        cfs.invalidateCachedRow(key);
                    }

                    bytesRead += in.getBytesRead();
                    remoteFile.progress += in.getBytesRead();
                }
            }
            return writer.closeAndOpenReader();
        }
        catch (Exception e)


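        // Same pattern, but the CompactionController is created lazily, only when a cached row actually has to be merged.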
        SSTableWriter writer = new SSTableWriter(localFile.getFilename(), remoteFile.estimatedKeys);
        CompactionController controller = null;

        try
        {
            BytesReadTracker in = new BytesReadTracker(input);

            for (Pair<Long, Long> section : localFile.sections)
            {
                long length = section.right - section.left;
                long bytesRead = 0;
                while (bytesRead < length)
                {
                    in.reset(0);
                    key = SSTableReader.decodeKey(StorageService.getPartitioner(), localFile.desc, ByteBufferUtil.readWithShortLength(in));
                    long dataSize = SSTableReader.readRowSize(in, localFile.desc);

                    ColumnFamily cached = cfs.getRawCachedRow(key);
                    if (cached != null && remoteFile.type == OperationType.AES && dataSize <= DatabaseDescriptor.getInMemoryCompactionLimit())
                    {
                        // need to update row cache
                        if (controller == null)
                            controller = new CompactionController(cfs, Collections.<SSTableReader>emptyList(), Integer.MIN_VALUE, true);
                        // Note: Because we won't just echo the columns, there is no need to use the PRESERVE_SIZE flag, unlike what appendFromStream does below
                        SSTableIdentityIterator iter = new SSTableIdentityIterator(cfs.metadata, in, key, 0, dataSize, IColumnSerializer.Flag.FROM_REMOTE);
                        PrecompactedRow row = new PrecompactedRow(controller, Collections.singletonList(iter));
                        // We don't expire anything so the row shouldn't be empty
                        assert !row.isEmpty();
                        writer.append(row);
                        // row append does not update the max timestamp on its own
                        writer.updateMaxTimestamp(row.maxTimestamp());

                        // update cache
                        ColumnFamily cf = row.getFullColumnFamily();
                        cfs.updateRowCache(key, cf);
                    }
                    else
                    {
                        writer.appendFromStream(key, cfs.metadata, dataSize, in);
                        cfs.invalidateCachedRow(key);
                    }

                    bytesRead += in.getBytesRead();
                    remoteFile.progress += in.getBytesRead();
                }
            }
            return writer.closeAndOpenReader();
        }
        catch (Exception e)

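        // Variant with compression support: the underlying CompressedInputStream is positioned at each section start, and progress is reported from decompressed bytes when a compressionInfo is present.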
        SSTableWriter writer = new SSTableWriter(localFile.getFilename(), remoteFile.estimatedKeys);
        CompactionController controller = new CompactionController(cfs, Collections.<SSTableReader>emptyList(), Integer.MIN_VALUE);

        try
        {
            BytesReadTracker in = new BytesReadTracker(input);
            long totalBytesRead = 0;

            for (Pair<Long, Long> section : localFile.sections)
            {
                long length = section.right - section.left;
                // skip to beginning of section inside chunk
                if (remoteFile.compressionInfo != null)
                    ((CompressedInputStream) underliningStream).position(section.left);
                long bytesRead = 0;
                while (bytesRead < length)
                {
                    in.reset(0);
                    key = SSTableReader.decodeKey(StorageService.getPartitioner(), localFile.desc, ByteBufferUtil.readWithShortLength(in));
                    long dataSize = SSTableReader.readRowSize(in, localFile.desc);

                    if (cfs.containsCachedRow(key) && remoteFile.type == OperationType.AES && dataSize <= DatabaseDescriptor.getInMemoryCompactionLimit())
                    {
                        // need to update row cache
                        // Note: Because we won't just echo the columns, there is no need to use the PRESERVE_SIZE flag, unlike what appendFromStream does below
                        SSTableIdentityIterator iter = new SSTableIdentityIterator(cfs.metadata, in, localFile.getFilename(), key, 0, dataSize, IColumnSerializer.Flag.FROM_REMOTE);
                        PrecompactedRow row = new PrecompactedRow(controller, Collections.singletonList(iter));
                        // We don't expire anything so the row shouldn't be empty
                        assert !row.isEmpty();
                        writer.append(row);

                        // update cache
                        ColumnFamily cf = row.getFullColumnFamily();
                        cfs.maybeUpdateRowCache(key, cf);
                    }
                    else
                    {
                        writer.appendFromStream(key, cfs.metadata, dataSize, in);
                        cfs.invalidateCachedRow(key);
                    }

                    bytesRead += in.getBytesRead();
                    // when compressed, report total bytes of decompressed chunks since remoteFile.size is the sum of chunks transferred
                    remoteFile.progress += remoteFile.compressionInfo != null
                                           ? ((CompressedInputStream) underliningStream).uncompressedBytes()
                                           : in.getBytesRead();
                    totalBytesRead += in.getBytesRead();
                }
            }
            StreamingMetrics.totalIncomingBytes.inc(totalBytesRead);
            metrics.incomingBytes.inc(totalBytesRead);
            return writer.closeAndOpenReader();

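    // Tail of a constructor that wraps its input in a BytesReadTracker (inputWithTracker) so the bytes it later reads can be counted.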
                                    IColumnSerializer.Flag flag)
    {
        assert !checkData || (sstable != null);
        this.input = input;
        this.filename = filename;
        this.inputWithTracker = new BytesReadTracker(input);
        this.key = key;
        this.dataStart = dataStart;
        this.dataSize = dataSize;
        this.expireBefore = (int)(System.currentTimeMillis() / 1000);
        this.flag = flag;
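
For reference, the behaviour all of these call sites rely on amounts to a counting DataInput wrapper. The outline below is inferred from the calls in the excerpts, namely a constructor taking a DataInput, reset(long), getBytesRead(), and delegated read methods; it is not the Cassandra implementation, which implements the full DataInput interface rather than the two representative methods shown here.

import java.io.DataInput;
import java.io.IOException;

// Rough sketch of a counting DataInput wrapper in the spirit of BytesReadTracker.
// Only two representative read methods are shown; the real class delegates all of DataInput.
public class CountingInputSketch
{
    private final DataInput source;
    private long bytesRead;

    public CountingInputSketch(DataInput source)
    {
        this.source = source;
    }

    // Bytes consumed through this wrapper since the last reset.
    public long getBytesRead()
    {
        return bytesRead;
    }

    // Restart the counter, e.g. once per streamed row or file section as in the excerpts.
    public void reset(long count)
    {
        bytesRead = count;
    }

    public int readInt() throws IOException
    {
        int value = source.readInt();
        bytesRead += 4;
        return value;
    }

    public long readLong() throws IOException
    {
        long value = source.readLong();
        bytesRead += 8;
        return value;
    }
}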

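        // As above: lazy CompactionController, with reset(0) before each row and getBytesRead() advancing both the section counter and remoteFile.progress.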
        SSTableWriter writer = new SSTableWriter(localFile.getFilename(), remoteFile.estimatedKeys);
        CompactionController controller = null;

        try
        {
            BytesReadTracker in = new BytesReadTracker(input);

            for (Pair<Long, Long> section : localFile.sections)
            {
                long length = section.right - section.left;
                long bytesRead = 0;
                while (bytesRead < length)
                {
                    in.reset(0);
                    key = SSTableReader.decodeKey(StorageService.getPartitioner(), localFile.desc, ByteBufferUtil.readWithShortLength(in));
                    long dataSize = SSTableReader.readRowSize(in, localFile.desc);

                    ColumnFamily cached = cfs.getRawCachedRow(key);
                    if (cached != null && remoteFile.type == OperationType.AES && dataSize <= DatabaseDescriptor.getInMemoryCompactionLimit())
                    {
                        // need to update row cache
                        if (controller == null)
                            controller = new CompactionController(cfs, Collections.<SSTableReader>emptyList(), Integer.MIN_VALUE, true);
                        // Note: Because we won't just echo the columns, there is no need to use the PRESERVE_SIZE flag, unlike what appendFromStream does below
                        SSTableIdentityIterator iter = new SSTableIdentityIterator(cfs.metadata, in, key, 0, dataSize, IColumnSerializer.Flag.FROM_REMOTE);
                        PrecompactedRow row = new PrecompactedRow(controller, Collections.singletonList(iter));
                        // We don't expire anything so the row shouldn't be empty
                        assert !row.isEmpty();
                        writer.append(row);
                        // row append does not update the max timestamp on its own
                        writer.updateMaxTimestamp(row.maxTimestamp());

                        // update cache
                        ColumnFamily cf = row.getFullColumnFamily();
                        cfs.updateRowCache(key, cf);
                    }
                    else
                    {
                        writer.appendFromStream(key, cfs.metadata, dataSize, in);
                        cfs.invalidateCachedRow(key);
                    }

                    bytesRead += in.getBytesRead();
                    remoteFile.progress += in.getBytesRead();
                }
            }
            return writer.closeAndOpenReader();
        }
        finally

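        // Compressed stream reader: the tracker wraps a DataInputStream over a CompressedInputStream, is reset once per section, and getBytesRead() bounds the per-section read loop while progress is reported from the compressed bytes read.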
        ColumnFamilyStore cfs = Keyspace.open(kscf.left).getColumnFamilyStore(kscf.right);

        SSTableWriter writer = createWriter(cfs, totalSize, repairedAt);

        CompressedInputStream cis = new CompressedInputStream(Channels.newInputStream(channel), compressionInfo, inputVersion.hasPostCompressionAdlerChecksums);
        BytesReadTracker in = new BytesReadTracker(new DataInputStream(cis));
        try
        {
            for (Pair<Long, Long> section : sections)
            {
                long length = section.right - section.left;
                // skip to beginning of section inside chunk
                cis.position(section.left);
                in.reset(0);
                while (in.getBytesRead() < length)
                {
                    writeRow(writer, in, cfs);
                    // when compressed, report compressed bytes read so far, since totalSize is the sum of the compressed chunks transferred
                    session.progress(desc, ProgressInfo.Direction.IN, cis.getTotalCompressedBytesRead(), totalSize);
                }
            }
            return writer;
        }
        catch (Throwable e)
        {
            writer.abort();
            drain(cis, in.getBytesRead());
            if (e instanceof IOException)
                throw (IOException) e;
            else
                throw Throwables.propagate(e);
        }

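        // Uncompressed (LZF-framed) stream reader: a single tracker counts every byte read, driving both the read loop and progress reporting against totalSize.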
        Pair<String, String> kscf = Schema.instance.getCF(cfId);
        ColumnFamilyStore cfs = Keyspace.open(kscf.left).getColumnFamilyStore(kscf.right);

        SSTableWriter writer = createWriter(cfs, totalSize, repairedAt);
        DataInputStream dis = new DataInputStream(new LZFInputStream(Channels.newInputStream(channel)));
        BytesReadTracker in = new BytesReadTracker(dis);
        try
        {
            while (in.getBytesRead() < totalSize)
            {
                writeRow(writer, in, cfs);
                // TODO move this to BytesReadTracker
                session.progress(desc, ProgressInfo.Direction.IN, in.getBytesRead(), totalSize);
            }
            return writer;
        }
        catch (Throwable e)
        {
            writer.abort();
            drain(dis, in.getBytesRead());
            if (e instanceof IOException)
                throw (IOException) e;
            else
                throw Throwables.propagate(e);
        }


