Package org.apache.cassandra.io.sstable

Examples of org.apache.cassandra.io.sstable.SSTableWriter
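
The excerpts below, gathered from the Apache Cassandra codebase (2.0-era API), all follow the same SSTableWriter lifecycle: construct a writer against a target path, append decorated keys in partitioner order, then either seal the file with closeAndOpenReader() or clean up with abort().

As orientation, here is a minimal sketch of that lifecycle. It is not taken from the Cassandra source: the method name and file argument are placeholders, "Keyspace1"/"Standard1" are the sample names used by the tests further down, and Util is the test helper visible in those tests.

    public SSTableReader writeMinimalSSTable(File dataFile)
    {
        ColumnFamily cf = ArrayBackedSortedColumns.factory.create("Keyspace1", "Standard1");
        SSTableWriter writer = new SSTableWriter(dataFile.getPath(),
                                                 1,                                       // estimated number of keys
                                                 ActiveRepairService.UNREPAIRED_SSTABLE); // repairedAt marker
        try
        {
            cf.addColumn(Util.cellname("col"), ByteBufferUtil.bytes("val"), System.currentTimeMillis());
            writer.append(Util.dk("row"), cf);  // keys must arrive in partitioner order
            return writer.closeAndOpenReader(); // seals the sstable and opens it for reads
        }
        catch (Throwable t)
        {
            writer.abort();                     // removes the partially written components
            throw Throwables.propagate(t);      // same rethrow pattern as the stream readers below
        }
    }

The first excerpt, apparently from a memtable flush path, builds such a writer with a MetadataCollector carrying the commit-log replay position: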



        public SSTableWriter createFlushWriter(String filename) throws ExecutionException, InterruptedException
        {
            MetadataCollector sstableMetadataCollector = new MetadataCollector(cfs.metadata.comparator).replayPosition(context);
            return new SSTableWriter(filename,
                                     rows.size(),
                                     ActiveRepairService.UNREPAIRED_SSTABLE,
                                     cfs.metadata,
                                     cfs.partitioner,
                                     sstableMetadataCollector);
        }
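
A longer excerpt, evidently from a compaction task: merged rows are appended to the current writer, hot keys are remembered for cache preheating, and a fresh writer is started whenever the segment-size threshold is reached. The excerpt opens mid-method, just after an early return for the case where everything was compacted away, and the trailing catch block shows the cleanup contract: abort every in-flight writer and mark any already-completed readers obsolete.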


                // a crash could cause data loss.
                cfs.markObsolete(toCompact, compactionType);
                return;
            }

            SSTableWriter writer = createCompactionWriter(sstableDirectory, keysPerSSTable, minRepairedAt);
            writers.add(writer);
            while (iter.hasNext())
            {
                if (ci.isStopRequested())
                    throw new CompactionInterruptedException(ci.getCompactionInfo());

                AbstractCompactedRow row = iter.next();
                RowIndexEntry indexEntry = writer.append(row);
                if (indexEntry == null)
                {
                    controller.invalidateCachedRow(row.key);
                    row.close();
                    continue;
                }

                totalKeysWritten++;

                if (DatabaseDescriptor.getPreheatKeyCache())
                {
                    for (SSTableReader sstable : actuallyCompact)
                    {
                        if (sstable.getCachedPosition(row.key, false) != null)
                        {
                            cachedKeys.put(row.key, indexEntry);
                            break;
                        }
                    }
                }

                if (newSSTableSegmentThresholdReached(writer))
                {
                    // tmp = false: the map will later be queried with the final (non-temporary)
                    // descriptor obtained from SSTableReader
                    cachedKeyMap.put(writer.descriptor.asTemporary(false), cachedKeys);
                    writer = createCompactionWriter(sstableDirectory, keysPerSSTable, minRepairedAt);
                    writers.add(writer);
                    cachedKeys = new HashMap<>();
                }
            }

            if (writer.getFilePointer() > 0)
            {
                cachedKeyMap.put(writer.descriptor.asTemporary(false), cachedKeys);
            }
            else
            {
                writer.abort();
                writers.remove(writer);
            }

            long maxAge = getMaxDataAge(toCompact);
            for (SSTableWriter completedWriter : writers)
                sstables.add(completedWriter.closeAndOpenReader(maxAge));
        }
        catch (Throwable t)
        {
            for (SSTableWriter writer : writers)
                writer.abort();
            // also remove already completed SSTables
            for (SSTableReader sstable : sstables)
            {
                sstable.markObsolete();
                sstable.releaseReference();
            }
            throw Throwables.propagate(t); // excerpt truncated here; cleanup presumably ends by rethrowing
        }
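
The createCompactionWriter helper used above (the excerpt also carries the tail of the preceding method). It names a temporary sstable in the chosen directory and seeds the MetadataCollector with the input sstables, the comparator, and the target level: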

        return minRepairedAt;
    }

    private SSTableWriter createCompactionWriter(File sstableDirectory, long keysPerSSTable, long repairedAt)
    {
        return new SSTableWriter(cfs.getTempSSTablePath(sstableDirectory),
                                 keysPerSSTable,
                                 repairedAt,
                                 cfs.metadata,
                                 cfs.partitioner,
                                 new MetadataCollector(toCompact, cfs.metadata.comparator, getLevel()));
    }
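
From the JSON import tool: the unsorted path reads the whole document into memory, re-sorts the rows by decorated key in a TreeMap, and only then appends them, since SSTableWriter requires keys in partitioner order: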

        JsonParser parser = getParser(jsonFile);

        Object[] data = parser.readValueAs(new TypeReference<Object[]>(){});

        keyCountToImport = (keyCountToImport == null) ? data.length : keyCountToImport;
        SSTableWriter writer = new SSTableWriter(ssTablePath, keyCountToImport, ActiveRepairService.UNREPAIRED_SSTABLE);

        System.out.printf("Importing %s keys...%n", keyCountToImport);

        // sort by dk representation, but hold onto the hex version
        SortedMap<DecoratedKey,Map<?, ?>> decoratedKeys = new TreeMap<DecoratedKey,Map<?, ?>>();

        for (Object row : data)
        {
            Map<?,?> rowAsMap = (Map<?, ?>)row;
            decoratedKeys.put(partitioner.decorateKey(hexToBytes((String)rowAsMap.get("key"))), rowAsMap);
        }

        for (Map.Entry<DecoratedKey, Map<?, ?>> row : decoratedKeys.entrySet())
        {
            if (row.getValue().containsKey("metadata"))
            {
                parseMeta((Map<?, ?>) row.getValue().get("metadata"), columnFamily, null);
            }

            Object columns = row.getValue().get("columns");
            if (columnFamily.getType() == ColumnFamilyType.Super && oldSCFormat)
                addToSuperCF((Map<?, ?>) columns, columnFamily);
            else
                addToStandardCF((List<?>) columns, columnFamily);


            writer.append(row.getKey(), columnFamily);
            columnFamily.clear();

            importedKeys++;

            long current = System.nanoTime();

            if (TimeUnit.NANOSECONDS.toSeconds(current - start) >= 5) // 5 secs.
            {
                System.out.printf("Currently imported %d keys.%n", importedKeys);
                start = current;
            }

            if (keyCountToImport == importedKeys)
                break;
        }

        writer.closeAndOpenReader();

        return importedKeys;
    }
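
The sorted path streams the document one row at a time instead, verifying as it goes that each decorated key is strictly greater than the previous one before appending: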


        System.out.printf("Importing %s keys...%n", keyCountToImport);

        parser = getParser(jsonFile); // renewing parser
        SSTableWriter writer = new SSTableWriter(ssTablePath, keyCountToImport, ActiveRepairService.UNREPAIRED_SSTABLE);

        int lineNumber = 1;
        DecoratedKey prevStoredKey = null;

        parser.nextToken(); // START_ARRAY
        while (parser.nextToken() != null)
        {
            String key = parser.getCurrentName();
            Map<?, ?> row = parser.readValueAs(new TypeReference<Map<?, ?>>(){});
            DecoratedKey currentKey = partitioner.decorateKey(hexToBytes((String) row.get("key")));

            if (row.containsKey("metadata"))
                parseMeta((Map<?, ?>) row.get("metadata"), columnFamily, null);

            if (columnFamily.getType() == ColumnFamilyType.Super && oldSCFormat)
                addToSuperCF((Map<?, ?>)row.get("columns"), columnFamily);
            else
                addToStandardCF((List<?>)row.get("columns"), columnFamily);

            // compareTo may return any negative value for "less than", so test >= 0 rather than != -1
            if (prevStoredKey != null && prevStoredKey.compareTo(currentKey) >= 0)
            {
                System.err
                        .printf("Line %d: Key %s is not greater than the previous key; the rows are not sorted properly. Aborting import. You might need to delete SSTables manually.%n",
                                lineNumber, key);
                return -1;
            }

            // saving decorated key
            writer.append(currentKey, columnFamily);
            columnFamily.clear();

            prevStoredKey = currentKey;
            importedKeys++;
            lineNumber++;

            long current = System.nanoTime();

            if (TimeUnit.NANOSECONDS.toSeconds(current - start) >= 5) // 5 secs.
            {
                System.out.printf("Currently imported %d keys.%n", importedKeys);
                start = current;
            }

            if (keyCountToImport == importedKeys)
                break;

        }

        writer.closeAndOpenReader();

        return importedKeys;
    }
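
From the streaming-in path: rows arriving over the network are decompressed (LZF), counted by a BytesReadTracker, and appended until the expected byte total is consumed; on any failure the writer is aborted and the remaining bytes are drained: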

        long totalSize = totalSize();

        Pair<String, String> kscf = Schema.instance.getCF(cfId);
        ColumnFamilyStore cfs = Keyspace.open(kscf.left).getColumnFamilyStore(kscf.right);

        SSTableWriter writer = createWriter(cfs, totalSize, repairedAt);
        DataInputStream dis = new DataInputStream(new LZFInputStream(Channels.newInputStream(channel)));
        BytesReadTracker in = new BytesReadTracker(dis);
        try
        {
            while (in.getBytesRead() < totalSize)
            {
                writeRow(writer, in, cfs);
                // TODO move this to BytesReadTracker
                session.progress(desc, ProgressInfo.Direction.IN, in.getBytesRead(), totalSize);
            }
            return writer;
        }
        catch (Throwable e)
        {
            writer.abort();
            drain(dis, in.getBytesRead());
            if (e instanceof IOException)
                throw (IOException) e;
            else
                throw Throwables.propagate(e);
        }
    }
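
Its createWriter helper picks a data directory with enough free space and opens a writer on a temporary sstable path there: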

        Directories.DataDirectory localDir = cfs.directories.getCompactionLocation();
        if (localDir == null)
            throw new IOException("Insufficient disk space to store " + totalSize + " bytes");
        desc = Descriptor.fromFilename(cfs.getTempSSTablePath(cfs.directories.getLocationForDisk(localDir)));

        return new SSTableWriter(desc.filenameFor(Component.DATA), estimatedKeys, repairedAt);
    }
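
The compressed-stream variant of the same read loop: a CompressedInputStream is positioned at the start of each transferred section, and progress is reported in compressed bytes because the remote file size is the sum of the transferred chunks: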

        long totalSize = totalSize();

        Pair<String, String> kscf = Schema.instance.getCF(cfId);
        ColumnFamilyStore cfs = Keyspace.open(kscf.left).getColumnFamilyStore(kscf.right);

        SSTableWriter writer = createWriter(cfs, totalSize, repairedAt);

        CompressedInputStream cis = new CompressedInputStream(Channels.newInputStream(channel), compressionInfo, inputVersion.hasPostCompressionAdlerChecksums);
        BytesReadTracker in = new BytesReadTracker(new DataInputStream(cis));
        try
        {
            for (Pair<Long, Long> section : sections)
            {
                long length = section.right - section.left;
                // skip to beginning of section inside chunk
                cis.position(section.left);
                in.reset(0);
                while (in.getBytesRead() < length)
                {
                    writeRow(writer, in, cfs);
                    // when compressed, report total bytes of compressed chunks read since remoteFile.size is the sum of chunks transferred
                    session.progress(desc, ProgressInfo.Direction.IN, cis.getTotalCompressedBytesRead(), totalSize);
                }
            }
            return writer;
        }
        catch (Throwable e)
        {
            writer.abort();
            drain(cis, in.getBytesRead());
            if (e instanceof IOException)
                throw (IOException) e;
            else
                throw Throwables.propagate(e);
        }
    }
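
From the test utilities: a small helper that appends two rows and hands back the writer still open, so callers can exercise the finish or abort paths themselves: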

   
    public SSTableWriter getDummyWriter() throws IOException
    {
        File tempSS = tempSSTableFile("Keyspace1", "Standard1");
        ColumnFamily cfamily = ArrayBackedSortedColumns.factory.create("Keyspace1", "Standard1");
        SSTableWriter writer = new SSTableWriter(tempSS.getPath(), 2, ActiveRepairService.UNREPAIRED_SSTABLE);

        // Add rowA
        cfamily.addColumn(Util.cellname("colA"), ByteBufferUtil.bytes("valA"), System.currentTimeMillis());
        writer.append(Util.dk("rowA"), cfamily);
        cfamily.clear();
       
        cfamily.addColumn(Util.cellname("colB"), ByteBufferUtil.bytes("valB"), System.currentTimeMillis());
        writer.append(Util.dk("rowB"), cfamily);
        cfamily.clear();
       
       
        return writer;
View Full Code Here
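
And a round-trip test: write two rows, seal the sstable with closeAndOpenReader(), then run SSTableExport.enumeratekeys over it and read the dump back for verification: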

    @Test
    public void testEnumeratekeys() throws IOException
    {
        File tempSS = tempSSTableFile("Keyspace1", "Standard1");
        ColumnFamily cfamily = ArrayBackedSortedColumns.factory.create("Keyspace1", "Standard1");
        SSTableWriter writer = new SSTableWriter(tempSS.getPath(), 2, ActiveRepairService.UNREPAIRED_SSTABLE);

        // Add rowA
        cfamily.addColumn(Util.cellname("colA"), ByteBufferUtil.bytes("valA"), System.currentTimeMillis());
        writer.append(Util.dk("rowA"), cfamily);
        cfamily.clear();

        // Add rowB
        cfamily.addColumn(Util.cellname("colB"), ByteBufferUtil.bytes("valB"), System.currentTimeMillis());
        writer.append(Util.dk("rowB"), cfamily);
        cfamily.clear();

        writer.closeAndOpenReader();

        // Enumerate and verify
        File temp = File.createTempFile("Standard1", ".txt");
        SSTableExport.enumeratekeys(Descriptor.fromFilename(writer.getFilename()), new PrintStream(temp.getPath()));


        try (FileReader file = new FileReader(temp))
        {
            char[] buf = new char[(int) temp.length()];
            file.read(buf);
            // excerpt truncated here; the full test verifies the enumerated keys in buf
        }
    }
