Examples of ColumnFamily


Examples of org.apache.cassandra.db.ColumnFamily

    }

    public ColumnFamily getColumnFamilyWithColumns() throws IOException
    {
        file.seek(columnPosition - 4); // seek to before column count int
        ColumnFamily cf = columnFamily.cloneMeShallow();
        ColumnFamily.serializer().deserializeColumns(file, cf);
        if (validateColumns)
        {
            try
            {
                cf.validateColumnFields();
            }
            catch (MarshalException e)
            {
                throw new IOException("Error validating row " + key, e);
            }
View Full Code Here
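
A minimal caller-side sketch of how this method might be consumed (entirely illustrative; the helper name readRowOrNull is invented here, not part of Cassandra):

    // Hypothetical helper: materialize the row an iterator is positioned on,
    // returning null instead of propagating a per-row read/validation failure
    // (mirrors how the compaction example below logs and skips unreadable rows).
    static ColumnFamily readRowOrNull(SSTableIdentityIterator row)
    {
        try
        {
            return row.getColumnFamilyWithColumns();
        }
        catch (IOException e)
        {
            return null; // caller decides to skip this row
        }
    }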

Examples of org.apache.cassandra.db.ColumnFamily

        }
        boolean shouldPurge = major || !cfStore.isKeyInRemainingSSTables(key, sstables);

        if (rows.size() > 1 || shouldPurge || !rows.get(0).sstable.descriptor.isLatestVersion || forceDeserialize)
        {
            ColumnFamily cf = null;
            for (SSTableIdentityIterator row : rows)
            {
                ColumnFamily thisCF;
                try
                {
                    thisCF = row.getColumnFamilyWithColumns();
                }
                catch (IOException e)
                {
                    logger.error("Skipping row " + key + " in " + row.getPath(), e);
                    continue;
                }
                if (cf == null)
                {
                    cf = thisCF;
                }
                else
                {
                    cf.addAll(thisCF);
                }
            }
            ColumnFamily cfPurged = shouldPurge ? ColumnFamilyStore.removeDeleted(cf, gcBefore) : cf;
            if (cfPurged == null)
                return;
            columnCount = ColumnFamily.serializer().serializeWithIndexes(cfPurged, buffer);
        }
        else
View Full Code Here
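
The cf.addAll(thisCF) call above is what reconciles multiple on-disk versions of the same row: each column goes through the normal addColumn path, so a duplicate column name resolves by timestamp. An illustrative sketch of that assumed semantics, reusing the column(...) test helper shown further down this page:

    // Two versions of the same row: after addAll, the newer timestamp wins.
    ColumnFamily older = ColumnFamily.create(tableName, cfName);
    older.addColumn(column("name", "old", 1));
    ColumnFamily newer = ColumnFamily.create(tableName, cfName);
    newer.addColumn(column("name", "new", 2));
    older.addAll(newer); // older now resolves column "name" to "new"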

Examples of org.apache.cassandra.db.ColumnFamily

            }

            // If the row is cached, we call removeDeleted on it to keep query results coherent.
            // Otherwise it would look like some deleted columns lived longer than gc_grace + compaction.
            // This can also free up a large amount of memory on long-running instances
            ColumnFamily cachedRow = cfs.getRawCachedRow(compactedRow.key);
            if (cachedRow != null)
                ColumnFamilyStore.removeDeleted(cachedRow, gcBefore);

            return compactedRow;
        }
View Full Code Here

Examples of org.apache.cassandra.db.ColumnFamily

        if (cf.hasExpiredTombstones(controller.gcBefore))
            shouldPurge = controller.shouldPurge(key);
        // We should only gc tombstones if shouldPurge == true. But otherwise,
        // it is still ok to collect columns that are shadowed by their (deleted)
        // container, which removeDeleted(cf, Integer.MIN_VALUE) will do
        ColumnFamily compacted = ColumnFamilyStore.removeDeleted(cf, shouldPurge != null && shouldPurge ? controller.gcBefore : Integer.MIN_VALUE);

        if (compacted != null && compacted.metadata().getDefaultValidator().isCommutative())
        {
            if (shouldPurge == null)
                shouldPurge = controller.shouldPurge(key);
            if (shouldPurge)
                CounterColumn.mergeAndRemoveOldShards(key, compacted, controller.gcBefore, controller.mergeShardBefore);
View Full Code Here
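
The boxed Boolean shouldPurge above acts as a three-state flag: null means "not computed yet", so the relatively expensive controller.shouldPurge(key) check runs at most once and only on the paths that need it. A stripped-down sketch of the idiom, with hypothetical method names:

    Boolean shouldPurge = null;             // three states: null / TRUE / FALSE
    if (cheapGuard())                       // hypothetical, cf. hasExpiredTombstones above
        shouldPurge = expensiveCheck();     // hypothetical, cf. controller.shouldPurge
    // ... later, on a path that definitely needs the answer ...
    if (shouldPurge == null)
        shouldPurge = expensiveCheck();
    if (shouldPurge)
        purge();                            // hypothetical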

Examples of org.apache.cassandra.db.ColumnFamily

    }

    public static ColumnFamily removeDeletedAndOldShards(DecoratedKey key, boolean shouldPurge, CompactionController controller, ColumnFamily cf)
    {
        // See comment in preceding method
        ColumnFamily compacted = ColumnFamilyStore.removeDeleted(cf, shouldPurge ? controller.gcBefore : Integer.MIN_VALUE);
        if (shouldPurge && compacted != null && compacted.metadata().getDefaultValidator().isCommutative())
            CounterColumn.mergeAndRemoveOldShards(key, compacted, controller.gcBefore, controller.mergeShardBefore);
        return compacted;
    }
View Full Code Here

Examples of org.apache.cassandra.db.ColumnFamily

    }

    private static ColumnFamily merge(List<SSTableIdentityIterator> rows)
    {
        assert !rows.isEmpty();
        ColumnFamily cf = null;
        for (SSTableIdentityIterator row : rows)
        {
            ColumnFamily thisCF;
            try
            {
                thisCF = row.getColumnFamilyWithColumns();
            }
            catch (IOException e)
View Full Code Here

Examples of org.apache.cassandra.db.ColumnFamily

            thread.join();
    }

    private ColumnFamily createCF()
    {
        ColumnFamily cf = ColumnFamily.create(tableName, cfName);
        cf.addColumn(column("vijay", "great", 1));
        cf.addColumn(column("awesome", "vijay", 1));
        return cf;
    }
View Full Code Here
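
The column(name, value, timestamp) call is a test helper that this snippet does not show; in Cassandra's test utilities it is typically a thin wrapper over the Column constructor. A plausible sketch under that assumption:

    // Assumed helper: wrap the name/value strings as ByteBuffers and build a Column.
    static IColumn column(String name, String value, long timestamp)
    {
        return new Column(ByteBufferUtil.bytes(name), ByteBufferUtil.bytes(value), timestamp);
    }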

Examples of org.apache.cassandra.db.ColumnFamily

   
    @Test
    public void testHeapCache() throws InterruptedException
    {
        ICache<String, ColumnFamily> cache = ConcurrentLinkedHashCache.create(CAPACITY);
        ColumnFamily cf = createCF();
        simpleCase(cf, cache);
        concurrentCase(cf, cache);
    }
View Full Code Here

Examples of org.apache.cassandra.db.ColumnFamily

    @Test
    public void testSerializingCache() throws InterruptedException
    {
        ICache<String, ColumnFamily> cache = new SerializingCache<String, ColumnFamily>(CAPACITY, false, ColumnFamily.serializer());
        ColumnFamily cf = createCF();
        simpleCase(cf, cache);
        concurrentCase(cf, cache);
    }
View Full Code Here
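
Both tests exercise the same ICache contract; the difference is how values are held. ConcurrentLinkedHashCache keeps ColumnFamily references on-heap, while SerializingCache stores each value in serialized form (hence the ColumnFamily.serializer() argument), trading serialization cost for lower GC pressure. The simpleCase and concurrentCase bodies are not shown on this page, so the following is only a guess at the kind of round-trip they assert, assuming the usual put/get cache contract:

    // Illustrative only: a value stored under a key survives a round-trip.
    ICache<String, ColumnFamily> cache = ConcurrentLinkedHashCache.create(CAPACITY);
    ColumnFamily cf = createCF();
    cache.put("key1", cf);
    assert cache.get("key1") != null;

Note that a serializing cache hands back a deserialized copy on get, not the original reference, which is exactly what makes it cheap for the garbage collector.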

Examples of org.apache.cassandra.db.ColumnFamily

                {
                    in.reset(0);
                    key = SSTableReader.decodeKey(StorageService.getPartitioner(), localFile.desc, ByteBufferUtil.readWithShortLength(in));
                    long dataSize = SSTableReader.readRowSize(in, localFile.desc);

                    ColumnFamily cached = cfs.getRawCachedRow(key);
                    if (cached != null && remoteFile.type == OperationType.AES && dataSize <= DatabaseDescriptor.getInMemoryCompactionLimit())
                    {
                        // need to update row cache
                        // Note: Because we won't just echo the columns, there is no need to use the PRESERVE_SIZE flag, unlike what appendFromStream does below
                        SSTableIdentityIterator iter = new SSTableIdentityIterator(cfs.metadata, in, key, 0, dataSize, IColumnSerializer.Flag.FROM_REMOTE);
                        PrecompactedRow row = new PrecompactedRow(controller, Collections.singletonList(iter));
                        // We don't expire anything so the row shouldn't be empty
                        assert !row.isEmpty();
                        writer.append(row);
                        // row append does not update the max timestamp on its own
                        writer.updateMaxTimestamp(row.maxTimestamp());

                        // update cache
                        ColumnFamily cf = row.getFullColumnFamily();
                        cfs.updateRowCache(key, cf);
                    }
                    else
                    {
                        writer.appendFromStream(key, cfs.metadata, dataSize, in);
View Full Code Here