Package org.apache.cassandra.db.compaction

Examples of org.apache.cassandra.db.compaction.PrecompactedRow
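PrecompactedRow is the AbstractCompactedRow implementation Cassandra uses when a merged row fits entirely in memory (below the in-memory compaction limit); larger rows go through LazilyCompactedRow instead. The examples below show its two construction patterns: directly from a DecoratedKey and a ColumnFamily, as in the Merkle-tree validation tests, and from a CompactionController plus a list of SSTableIdentityIterators, as in the streaming code that also refreshes the row cache.

A minimal sketch of the first pattern, assuming the 1.0-era API used in these snippets; the class name, key bytes, and keyspace/column-family parameters are placeholders, not taken from the examples:

        import org.apache.cassandra.config.Schema;
        import org.apache.cassandra.db.ColumnFamily;
        import org.apache.cassandra.db.DecoratedKey;
        import org.apache.cassandra.db.compaction.PrecompactedRow;
        import org.apache.cassandra.dht.Token;
        import org.apache.cassandra.utils.ByteBufferUtil;

        public class PrecompactedRowSketch
        {
            // Build a PrecompactedRow holding an empty row for the given token,
            // mirroring the validator tests below. An empty ColumnFamily is enough
            // when only the row's presence (e.g. its Merkle-tree hash) matters.
            public static PrecompactedRow emptyRow(Token token, String keyspace, String columnFamily)
            {
                DecoratedKey key = new DecoratedKey(token, ByteBufferUtil.bytes("placeholder-key"));
                ColumnFamily cf = ColumnFamily.create(Schema.instance.getCFMetaData(keyspace, columnFamily));
                return new PrecompactedRow(key, cf);
            }
        }

The first example, from a Merkle-tree validation test, adds one such empty row at the midpoint of the local range and confirms that the completed tree yields a hash for that range: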


        IPartitioner part = validator.tree.partitioner();
        Token mid = part.midpoint(local_range.left, local_range.right);
        validator.prepare(store);

        // add a row
        validator.add(new PrecompactedRow(new DecoratedKey(mid, ByteBufferUtil.bytes("inconceivable!")),
                                          ColumnFamily.create(Schema.instance.getCFMetaData(tablename, cfname))));
        validator.completeTree();

        // confirm that the tree was validated
        assert null != validator.tree.hash(local_range);
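The next example is from the streaming path: when the incoming row was previously cached, arrived over an AES (anti-entropy) stream, and is small enough to compact in memory, it is materialized as a PrecompactedRow so it can be appended to the new sstable and echoed into the row cache in a single pass: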


                    if (cached != null && remoteFile.type == OperationType.AES && dataSize <= DatabaseDescriptor.getInMemoryCompactionLimit())
                    {
                        // need to update row cache
                        // Note: Because we won't just echo the columns, there is no need to use the PRESERVE_SIZE flag, contrary to what appendFromStream does below
                        SSTableIdentityIterator iter = new SSTableIdentityIterator(cfs.metadata, in, key, 0, dataSize, IColumnSerializer.Flag.FROM_REMOTE);
                        PrecompactedRow row = new PrecompactedRow(controller, Collections.singletonList(iter));
                        // We don't expire anything so the row shouldn't be empty
                        assert !row.isEmpty();
                        writer.append(row);
                        // row append does not update the max timestamp on its own
                        writer.updateMaxTimestamp(row.maxTimestamp());

                        // update cache
                        ColumnFamily cf = row.getFullColumnFamily();
                        cfs.updateRowCache(key, cf);
                    }
                    else
                    {
                        writer.appendFromStream(key, cfs.metadata, dataSize, in);
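A variant of the same branch from another revision; the enclosing if is truncated in this excerpt. Here the CompactionController is created lazily, with gcBefore = Integer.MIN_VALUE so that effectively no tombstones are purged: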

                        // need to update row cache
                        if (controller == null)
                            controller = new CompactionController(cfs, Collections.<SSTableReader>emptyList(), Integer.MIN_VALUE, true);
                        // Note: Because we won't just echo the columns, there is no need to use the PRESERVE_SIZE flag, contrary to what appendFromStream does below
                        SSTableIdentityIterator iter = new SSTableIdentityIterator(cfs.metadata, in, key, 0, dataSize, IColumnSerializer.Flag.FROM_REMOTE);
                        PrecompactedRow row = new PrecompactedRow(controller, Collections.singletonList(iter));
                        // We don't expire anything so the row shouldn't be empty
                        assert !row.isEmpty();
                        writer.append(row);
                        // row append does not update the max timestamp on its own
                        writer.updateMaxTimestamp(row.maxTimestamp());

                        // update cache
                        ColumnFamily cf = row.getFullColumnFamily();
                        cfs.updateRowCache(key, cf);
                    }
                    else
                    {
                        writer.appendFromStream(key, cfs.metadata, dataSize, in);
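Another revision of the branch: the row-cache gate is cfs.containsCachedRow(key), the SSTableIdentityIterator constructor also takes the local file name, and the cache is refreshed via maybeUpdateRowCache; this variant does not call writer.updateMaxTimestamp: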

                    if (cfs.containsCachedRow(key) && remoteFile.type == OperationType.AES && dataSize <= DatabaseDescriptor.getInMemoryCompactionLimit())
                    {
                        // need to update row cache
                        // Note: Because we won't just echo the columns, there is no need to use the PRESERVE_SIZE flag, contrary to what appendFromStream does below
                        SSTableIdentityIterator iter = new SSTableIdentityIterator(cfs.metadata, in, localFile.getFilename(), key, 0, dataSize, IColumnSerializer.Flag.FROM_REMOTE);
                        PrecompactedRow row = new PrecompactedRow(controller, Collections.singletonList(iter));
                        // We don't expire anything so the row shouldn't be empty
                        assert !row.isEmpty();
                        writer.append(row);

                        // update cache
                        ColumnFamily cf = row.getFullColumnFamily();
                        cfs.maybeUpdateRowCache(key, cf);
                    }
                    else
                    {
                        writer.appendFromStream(key, cfs.metadata, dataSize, in);

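The validation test again, this time constructing the PrecompactedRow with a null ColumnFamily rather than an empty one: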

        IPartitioner part = validator.tree.partitioner();
        Token mid = part.midpoint(local_range.left, local_range.right);
        validator.prepare(store);

        // add a row
        validator.add(new PrecompactedRow(new DecoratedKey(mid, ByteBufferUtil.bytes("inconceivable!")), null));
        validator.completeTree();

        // confirm that the tree was validated
        assert null != validator.tree.hash(local_range);
    }
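An older revision of the streaming branch, truncated at the opening brace: the iterator constructor still takes a plain boolean rather than IColumnSerializer.Flag.FROM_REMOTE, the controller is built with gcBefore = Integer.MAX_VALUE, and there is no emptiness assertion: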

                    {
                        // need to update row cache
                        if (controller == null)
                            controller = new CompactionController(cfs, Collections.<SSTableReader>emptyList(), Integer.MAX_VALUE, true);
                        SSTableIdentityIterator iter = new SSTableIdentityIterator(cfs.metadata, in, key, 0, dataSize, true);
                        PrecompactedRow row = new PrecompactedRow(controller, Collections.singletonList(iter));
                        writer.append(row);
                        // row append does not update the max timestamp on its own
                        writer.updateMaxTimestamp(row.maxTimestamp());

                        // update cache
                        ColumnFamily cf = row.getFullColumnFamily();
                        cfs.updateRowCache(key, cf);
                    }
                    else
                    {
                        writer.appendFromStream(key, cfs.metadata, dataSize, in);
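
The excerpts above come from different Cassandra revisions of the same streaming code. Between them, the row-cache gate changes from a cached-value null check to cfs.containsCachedRow(key), the iterator's boolean argument becomes IColumnSerializer.Flag.FROM_REMOTE, the SSTableIdentityIterator constructor gains the source file name, and some variants add an explicit writer.updateMaxTimestamp(row.maxTimestamp()) call because append does not update the max timestamp on its own.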
