Package org.apache.cassandra.io.sstable.format

Examples of org.apache.cassandra.io.sstable.format.SSTableWriter
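
The snippets below all follow the same basic lifecycle: create a writer for a temporary sstable path, append partitions in decorated-key order, and finish with closeAndOpenReader() (or abort() on failure). The fragment below is a minimal sketch of that pattern assembled only from calls that appear in the examples; the keyspace and table names, the Util test helper, the column contents, and the loop bounds are placeholders, and the two 0 arguments to create() are the same rough keyCount/repairedAt estimates the test helpers below pass.

        // Minimal lifecycle sketch (placeholder names: "Keyspace1"/"Standard1", Util.column contents).
        ColumnFamilyStore cfs = Keyspace.open("Keyspace1").getColumnFamilyStore("Standard1");
        File dir = cfs.directories.getDirectoryForNewSSTables();
        String filename = cfs.getTempSSTablePath(dir);

        ArrayBackedSortedColumns cf = ArrayBackedSortedColumns.factory.create(cfs.metadata);
        cf.addColumn(Util.column("name", "value", 1));

        SSTableWriter writer = SSTableWriter.create(filename, 0, 0);
        try
        {
            // partitions must be appended in decorated-key order; sequential int keys, as used by the
            // test helpers below, assume the configured partitioner preserves that order
            for (int i = 0; i < 10; i++)
                writer.append(StorageService.getPartitioner().decorateKey(ByteBufferUtil.bytes(i)), cf);

            // seal the sstable, open a reader over it and hand it to the ColumnFamilyStore
            SSTableReader reader = writer.closeAndOpenReader();
            cfs.addSSTable(reader);
        }
        catch (Throwable t)
        {
            // abort() discards the partially written temp files
            writer.abort();
            throw Throwables.propagate(t);
        }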


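        // Compaction test setup: column "01" (timestamp 1) is shadowed by a range tombstone (timestamp 2);
        // the same partition is written under several keys into two overlapping sstables, both of which are
        // registered with the ColumnFamilyStore so they can be compacted together.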
        ArrayBackedSortedColumns cf = ArrayBackedSortedColumns.factory.create(cfmeta);
        cf.addColumn(Util.column("01", "a", 1)); // this must not resurrect
        cf.addColumn(Util.column("a", "a", 3));
        cf.deletionInfo().add(new RangeTombstone(Util.cellname("0"), Util.cellname("b"), 2, (int) (System.currentTimeMillis() / 1000)), cfmeta.comparator);

        SSTableWriter writer = SSTableWriter.create(Descriptor.fromFilename(cfs.getTempSSTablePath(dir.getDirectoryForNewSSTables())), 0, 0, 0);


        writer.append(Util.dk("0"), cf);
        writer.append(Util.dk("1"), cf);
        writer.append(Util.dk("3"), cf);

        cfs.addSSTable(writer.closeAndOpenReader());
        writer = SSTableWriter.create(Descriptor.fromFilename(cfs.getTempSSTablePath(dir.getDirectoryForNewSSTables())), 0, 0, 0);

        writer.append(Util.dk("0"), cf);
        writer.append(Util.dk("1"), cf);
        writer.append(Util.dk("2"), cf);
        writer.append(Util.dk("3"), cf);
        cfs.addSSTable(writer.closeAndOpenReader());

        Collection<SSTableReader> toCompact = cfs.getSSTables();
        assert toCompact.size() == 2;

        // Force compaction on first sstables. Since each row is in only one sstable, we will be using EchoedRow.


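        // Test helper: fills one partition with 'count' columns, writes it under count * 5 sequential keys,
        // and returns the finished sstable.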
        for (int i = 0; i < count; i++)
            cf.addColumn(Util.column(String.valueOf(i), "a", 1));
        File dir = cfs.directories.getDirectoryForNewSSTables();
        String filename = cfs.getTempSSTablePath(dir);

        SSTableWriter writer = SSTableWriter.create(filename, 0, 0);

        for (int i = 0; i < count * 5; i++)
            writer.append(StorageService.getPartitioner().decorateKey(ByteBufferUtil.bytes(i)), cf);
        return writer.closeAndOpenReader();
    }

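            // Scrub: rows that were found out of order are re-appended, now sorted, to a fresh writer and the
            // resulting sstable is registered with the data tracker (unless running offline).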
            if (!outOfOrderRows.isEmpty())
            {
                // if only out-of-order rows were found (no bad rows), we can keep the sstable's repairedAt
                // time; otherwise the rewritten data must be marked unrepaired
                long repairedAt = badRows > 0 ? ActiveRepairService.UNREPAIRED_SSTABLE : sstable.getSSTableMetadata().repairedAt;
                SSTableWriter inOrderWriter = CompactionManager.createWriter(cfs, destination, expectedBloomFilterSize, repairedAt, sstable);
                for (Row row : outOfOrderRows)
                    inOrderWriter.append(row.key, row.cf);
                newInOrderSstable = inOrderWriter.closeAndOpenReader(sstable.maxDataAge);
                if (!isOffline)
                    cfs.getDataTracker().addSSTables(Collections.singleton(newInOrderSstable));
                outputHandler.warn(String.format("%d out of order rows found while scrubbing %s; those have been written (in order) to a new sstable (%s)", outOfOrderRows.size(), sstable, newInOrderSstable));
            }

        {
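            // Memtable flush: non-empty partitions are appended in key order; the writer is aborted if an
            // error occurs or nothing needed to be written, otherwise the finished sstable is returned.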
            logger.info("Writing {}", Memtable.this.toString());

            SSTableReader ssTable;
            // errors while creating the writer may leave empty temp files behind
            SSTableWriter writer = createFlushWriter(cfs.getTempSSTablePath(sstableDirectory));
            try
            {
                boolean trackContention = logger.isDebugEnabled();
                int heavilyContendedRowCount = 0;
                // (we can't clear out the map as-we-go to free up memory,
                //  since the memtable is being used for queries in the "pending flush" category)
                for (Map.Entry<RowPosition, AtomicBTreeColumns> entry : rows.entrySet())
                {
                    AtomicBTreeColumns cf = entry.getValue();

                    if (cf.isMarkedForDelete() && cf.hasColumns())
                    {
                        // When every node is up, there's no reason to write batchlog data out to sstables
                        // (which in turn incurs cost like compaction) since the BL write + delete cancel each other out,
                        // and BL data is strictly local, so we don't need to preserve tombstones for repair.
                        // If we have a data row + row level tombstone, then writing it is effectively an expensive no-op so we skip it.
                        // See CASSANDRA-4667.
                        if (cfs.name.equals(SystemKeyspace.BATCHLOG_TABLE) && cfs.keyspace.getName().equals(SystemKeyspace.NAME))
                            continue;
                    }

                    if (trackContention && cf.usePessimisticLocking())
                        heavilyContendedRowCount++;

                    if (!cf.isEmpty())
                        writer.append((DecoratedKey)entry.getKey(), cf);
                }

                if (writer.getFilePointer() > 0)
                {
                    writer.isolateReferences();

                    // temp sstables should contain non-repaired data.
                    ssTable = writer.closeAndOpenReader();
                    logger.info(String.format("Completed flushing %s (%d bytes) for commitlog position %s",
                                              ssTable.getFilename(), new File(ssTable.getFilename()).length(), context));
                }
                else
                {
                    writer.abort();
                    ssTable = null;
                    logger.info("Completed flushing; nothing needed to be retained.  Commitlog position was {}",
                                context);
                }

                if (heavilyContendedRowCount > 0)
                    logger.debug(String.format("High update contention in %d/%d partitions of %s ", heavilyContendedRowCount, rows.size(), Memtable.this.toString()));

                return ssTable;
            }
            catch (Throwable e)
            {
                writer.abort();
                throw Throwables.propagate(e);
            }
        }

            // schema was dropped during streaming
            throw new IOException("CF " + cfId + " was dropped during streaming");
        }
        ColumnFamilyStore cfs = Keyspace.open(kscf.left).getColumnFamilyStore(kscf.right);

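        // Plain stream receive: rows are read from the LZF-wrapped channel and appended until totalSize
        // bytes have been consumed; on error the writer is aborted and the rest of the stream drained.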
        SSTableWriter writer = createWriter(cfs, totalSize, repairedAt, format);

        DataInputStream dis = new DataInputStream(new LZFInputStream(Channels.newInputStream(channel)));
        BytesReadTracker in = new BytesReadTracker(dis);
        try
        {
            while (in.getBytesRead() < totalSize)
            {
                writeRow(writer, in, cfs);

                // TODO move this to BytesReadTracker
                session.progress(desc, ProgressInfo.Direction.IN, in.getBytesRead(), totalSize);
            }
            return writer;
        } catch (Throwable e)
        {
            writer.abort();
            drain(dis, in.getBytesRead());
            if (e instanceof IOException)
                throw (IOException) e;
            else
                throw Throwables.propagate(e);

            // schema was dropped during streaming
            throw new IOException("CF " + cfId + " was dropped during streaming");
        }
        ColumnFamilyStore cfs = Keyspace.open(kscf.left).getColumnFamilyStore(kscf.right);

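        // Compressed stream receive: the same pattern, but the input is positioned at each compressed
        // section and progress is reported as compressed bytes read.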
        SSTableWriter writer = createWriter(cfs, totalSize, repairedAt, format);

        CompressedInputStream cis = new CompressedInputStream(Channels.newInputStream(channel), compressionInfo, inputVersion.hasPostCompressionAdlerChecksums());
        BytesReadTracker in = new BytesReadTracker(new DataInputStream(cis));
        try
        {
            for (Pair<Long, Long> section : sections)
            {
                assert in.getBytesRead() < totalSize;
                int sectionLength = (int) (section.right - section.left);

                // skip to beginning of section inside chunk
                cis.position(section.left);
                in.reset(0);

                while (in.getBytesRead() < sectionLength)
                {
                    writeRow(writer, in, cfs);

                    // when compressed, report total bytes of compressed chunks read since remoteFile.size is the sum of chunks transferred
                    session.progress(desc, ProgressInfo.Direction.IN, cis.getTotalCompressedBytesRead(), totalSize);
                }
            }
            return writer;
        }
        catch (Throwable e)
        {
            writer.abort();
            drain(cis, in.getBytesRead());
            if (e instanceof IOException)
                throw (IOException) e;
            else
                throw Throwables.propagate(e);

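        // json2sstable import of an unsorted dump: every key is decorated and sorted first (TreeMap),
        // since the writer requires partitions in decorated-key order.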
        JsonParser parser = getParser(jsonFile);

        Object[] data = parser.readValueAs(new TypeReference<Object[]>(){});

        keyCountToImport = (keyCountToImport == null) ? data.length : keyCountToImport;
        SSTableWriter writer = SSTableWriter.create(Descriptor.fromFilename(ssTablePath), keyCountToImport, ActiveRepairService.UNREPAIRED_SSTABLE, 0);

        System.out.printf("Importing %s keys...%n", keyCountToImport);

        // sort by dk representation, but hold onto the hex version
        SortedMap<DecoratedKey,Map<?, ?>> decoratedKeys = new TreeMap<DecoratedKey,Map<?, ?>>();

        for (Object row : data)
        {
            Map<?,?> rowAsMap = (Map<?, ?>)row;
            decoratedKeys.put(partitioner.decorateKey(getKeyValidator(columnFamily).fromString((String) rowAsMap.get("key"))), rowAsMap);
        }

        for (Map.Entry<DecoratedKey, Map<?, ?>> row : decoratedKeys.entrySet())
        {
            if (row.getValue().containsKey("metadata"))
            {
                parseMeta((Map<?, ?>) row.getValue().get("metadata"), columnFamily, null);
            }

            Object columns = row.getValue().get("cells");
            addColumnsToCF((List<?>) columns, columnFamily);


            writer.append(row.getKey(), columnFamily);
            columnFamily.clear();

            importedKeys++;

            long current = System.nanoTime();

            if (TimeUnit.NANOSECONDS.toSeconds(current - start) >= 5) // 5 secs.
            {
                System.out.printf("Currently imported %d keys.%n", importedKeys);
                start = current;
            }

            if (keyCountToImport == importedKeys)
                break;
        }

        writer.closeAndOpenReader();

        return importedKeys;
    }

        }

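        // json2sstable import of a pre-sorted dump: rows are streamed straight to the writer, and the
        // import aborts if a key is ever out of order.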
        System.out.printf("Importing %s keys...%n", keyCountToImport);

        parser = getParser(jsonFile); // renewing parser
        SSTableWriter writer = SSTableWriter.create(Descriptor.fromFilename(ssTablePath), keyCountToImport, ActiveRepairService.UNREPAIRED_SSTABLE);

        int lineNumber = 1;
        DecoratedKey prevStoredKey = null;

        parser.nextToken(); // START_ARRAY
        while (parser.nextToken() != null)
        {
            String key = parser.getCurrentName();
            Map<?, ?> row = parser.readValueAs(new TypeReference<Map<?, ?>>(){});
            DecoratedKey currentKey = partitioner.decorateKey(getKeyValidator(columnFamily).fromString((String) row.get("key")));

            if (row.containsKey("metadata"))
                parseMeta((Map<?, ?>) row.get("metadata"), columnFamily, null);

            addColumnsToCF((List<?>) row.get("cells"), columnFamily);

            if (prevStoredKey != null && prevStoredKey.compareTo(currentKey) >= 0)
            {
                System.err
                        .printf("Line %d: Key %s is not greater than the previous key, so the input is not sorted properly. Aborting import. You might need to delete SSTables manually.%n",
                                lineNumber, key);
                return -1;
            }

            // saving decorated key
            writer.append(currentKey, columnFamily);
            columnFamily.clear();

            prevStoredKey = currentKey;
            importedKeys++;
            lineNumber++;

            long current = System.nanoTime();

            if (TimeUnit.NANOSECONDS.toSeconds(current - start) >= 5) // 5 secs.
            {
                System.out.printf("Currently imported %d keys.%n", importedKeys);
                start = current;
            }

            if (keyCountToImport == importedKeys)
                break;

        }

        writer.closeAndOpenReader();

        return importedKeys;
    }

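        // openEarly(): after the first 500 partitions a partial reader is opened; the remaining partitions
        // are then written, the early reader is obsoleted and released, and the directory is checked for
        // leftover files.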
        cfs.truncateBlocking();
        ArrayBackedSortedColumns cf = ArrayBackedSortedColumns.factory.create(cfs.metadata);
        for (int i = 0; i < 1000; i++)
            cf.addColumn(Util.column(String.valueOf(i), "a", 1));
        File dir = cfs.directories.getDirectoryForNewSSTables();
        SSTableWriter writer = getWriter(cfs, dir);

        for (int i = 0; i < 500; i++)
            writer.append(StorageService.getPartitioner().decorateKey(ByteBufferUtil.bytes(i)), cf);
        SSTableReader s = writer.openEarly(1000);
        //assertFileCounts(dir.list(), 2, 3);
        for (int i = 500; i < 1000; i++)
            writer.append(StorageService.getPartitioner().decorateKey(ByteBufferUtil.bytes(i)), cf);
        writer.closeAndOpenReader();
        s.markObsolete();
        s.releaseReference();
        Thread.sleep(1000);
        assertFileCounts(dir.list(), 0, 0);
        validateCFS(cfs);

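        // Test helper: same write loop as the earlier helper, but each partition holds count / 100 cells of
        // 1000 bytes each, producing wider partitions.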
        for (int i = 0; i < count / 100; i++)
            cf.addColumn(Util.cellname(i), ByteBuffer.allocate(1000), 1);
        File dir = cfs.directories.getDirectoryForNewSSTables();
        String filename = cfs.getTempSSTablePath(dir);

        SSTableWriter writer = SSTableWriter.create(filename, 0, 0);

        for (int i = 0; i < count * 5; i++)
            writer.append(StorageService.getPartitioner().decorateKey(ByteBufferUtil.bytes(i)), cf);
        return writer.closeAndOpenReader();
    }


