Package org.apache.cassandra.io.sstable

Examples of org.apache.cassandra.io.sstable.SSTableWriter$Builder
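
All of these excerpts share one SSTableWriter lifecycle: obtain a writer (directly via new SSTableWriter(path, keyCount), or from the column family store's createFlushWriter/createCompactionWriter helpers), append DecoratedKey/ColumnFamily pairs in partitioner order, and finish with closeAndOpenReader(); on any failure, abort() discards the partially written files. A minimal sketch of that pattern, assuming the legacy API used in these examples (path and rows are illustrative placeholders, not part of the real API):

    // Sketch only: `rows` is assumed to be a SortedMap<DecoratedKey, ColumnFamily>,
    // so iteration yields keys in partitioner order, which append() requires.
    SSTableReader ssTable;
    SSTableWriter writer = new SSTableWriter(path, rows.size());
    try
    {
        for (Map.Entry<DecoratedKey, ColumnFamily> entry : rows.entrySet())
            writer.append(entry.getKey(), entry.getValue());

        // seals the data, index and filter components and opens them for reads
        ssTable = writer.closeAndOpenReader();
    }
    catch (Exception e)
    {
        // abort() deletes the temporary files so a failed write leaves no debris
        writer.abort();
        throw FBUtilities.unchecked(e);
    }

The compaction excerpts extend this pattern with rollover: when a size threshold is reached, the current writer is closed into a reader and a fresh writer is opened for the remaining rows.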


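From the json2sstable import tool (this appears to be SSTableImport's sorted-input path): the parser streams a pre-sorted JSON file in which each row is either a column list (standard column family) or a map of super columns, and an out-of-order key aborts the import. The excerpt begins mid-method.
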
        }

        System.out.printf("Importing %s keys...%n", keyCountToImport);

        parser = getParser(jsonFile); // renewing parser
        SSTableWriter writer = new SSTableWriter(ssTablePath, keyCountToImport);

        int lineNumber = 1;
        DecoratedKey prevStoredKey = null;

        while (parser.nextToken() != null)
        {
            String key = parser.getCurrentName();

            if (key != null)
            {
                String tokenName = parser.nextToken().name();

                if (tokenName.equals("START_ARRAY"))
                {
                    if (columnFamily.getType() == ColumnFamilyType.Super)
                    {
                        throw new RuntimeException("Can't write Standard columns to the Super Column Family.");
                    }

                    List<?> columns = parser.readValueAs(new TypeReference<List<?>>() {});
                    addToStandardCF(columns, columnFamily);
                }
                else if (tokenName.equals("START_OBJECT"))
                {
                    if (columnFamily.getType() == ColumnFamilyType.Standard)
                    {
                        throw new RuntimeException("Can't write Super columns to the Standard Column Family.");
                    }

                    Map<?, ?> columns = parser.readValueAs(new TypeReference<Map<?, ?>>() {});
                    addToSuperCF(columns, columnFamily);
                }
                else
                {
                    throw new UnsupportedOperationException("Only Array or Hash allowed as row content.");
                }

                DecoratedKey currentKey = partitioner.decorateKey(hexToBytes(key));

                // compareTo >= 0 means the current key does not sort after the previous one
                if (prevStoredKey != null && prevStoredKey.compareTo(currentKey) >= 0)
                {
                    System.err.printf("Line %d: Key %s is not greater than the previous key; the input is not sorted properly. Aborting import. You might need to delete SSTables manually.%n", lineNumber, key);
                    return -1;
                }

                // saving decorated key
                writer.append(currentKey, columnFamily);
                columnFamily.clear();

                prevStoredKey = currentKey;
                importedKeys++;
                lineNumber++;

                long current = System.currentTimeMillis();

                if (current - start >= 5000) // 5 secs.
                {
                    System.out.printf("Currently imported %d keys.%n", importedKeys);
                    start = current;
                }

                if (keyCountToImport == importedKeys)
                    break;
            }
        }

        writer.closeAndOpenReader();

        return importedKeys;
    }


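From the compaction path: rows from the merged-row iterator are appended to a compaction writer, positions of previously cached keys are recorded for cache preheating, and hitting the segment threshold closes the current writer into a reader and rolls over to a fresh one. If anything fails, every writer is aborted. The excerpt begins and ends mid-method.
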
                // a crash could cause data loss.
                cfs.markCompacted(toCompact, compactionType);
                return 0;
            }

            SSTableWriter writer = cfs.createCompactionWriter(keysPerSSTable, compactionFileLocation, toCompact);
            writers.add(writer);
            while (nni.hasNext())
            {
                if (ci.isStopRequested())
                    throw new CompactionInterruptedException(ci.getCompactionInfo());

                AbstractCompactedRow row = nni.next();
                if (row.isEmpty())
                    continue;

                long position = writer.append(row);
                totalkeysWritten++;

                if (DatabaseDescriptor.getPreheatKeyCache())
                {
                    for (SSTableReader sstable : toCompact)
                    {
                        if (sstable.getCachedPosition(row.key, false) != null)
                        {
                            cachedKeys.put(row.key, position);
                            break;
                        }
                    }
                }
                if (!nni.hasNext() || newSSTableSegmentThresholdReached(writer))
                {
                    SSTableReader toIndex = writer.closeAndOpenReader(getMaxDataAge(toCompact));
                    cachedKeyMap.put(toIndex, cachedKeys);
                    sstables.add(toIndex);
                    if (nni.hasNext())
                    {
                        writer = cfs.createCompactionWriter(keysPerSSTable, compactionFileLocation, toCompact);
                        writers.add(writer);
                        cachedKeys = new HashMap<DecoratedKey, Long>();
                    }
                }
            }
        }
        catch (Exception e)
        {
            for (SSTableWriter writer : writers)
                writer.abort();
            throw FBUtilities.unchecked(e);
        }
        finally
        {
            iter.close();

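A memtable flush that pre-sizes the writer: an estimated on-disk size (the excerpt opens mid-expression) is passed to createFlushWriter, column-level tombstones are only purged under a CF-level tombstone, and abort() cleans up if the flush fails.
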
                                      + keySize // keys in data file
                                      + currentThroughput.get()) // data
                                     * 1.2); // bloom filter and row index overhead
        SSTableReader ssTable;
        // errors when creating the writer that may leave empty temp files.
        SSTableWriter writer = cfs.createFlushWriter(columnFamilies.size(), estimatedSize, context.get());
        try
        {
            // (we can't clear out the map as-we-go to free up memory,
            //  since the memtable is being used for queries in the "pending flush" category)
            for (Map.Entry<RowPosition, ColumnFamily> entry : columnFamilies.entrySet())
            {
                ColumnFamily cf = entry.getValue();
                if (cf.isMarkedForDelete())
                {
                    // Pedantically, you could purge column-level tombstones that are past GCGraceSeconds when writing to the SSTable.
                    // But it can result in unexpected behaviour where deletes never make it to disk,
                    // as they are lost and so cannot override existing column values. So we only remove deleted columns if there
                    // is a CF level tombstone to ensure the delete makes it into an SSTable.
                    ColumnFamilyStore.removeDeletedColumnsOnly(cf, Integer.MIN_VALUE);
                }
                writer.append((DecoratedKey)entry.getKey(), cf);
            }

            ssTable = writer.closeAndOpenReader();
        }
        catch (Exception e)
        {
            writer.abort();
            throw FBUtilities.unchecked(e);
        }
        logger.info(String.format("Completed flushing %s (%d bytes) for commitlog position %s",
                                  ssTable.getFilename(), new File(ssTable.getFilename()).length(), context.get()));
        return ssTable;


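The simplest flush variant: append the memtable's already-sorted contents and log the finished SSTable.
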
    private SSTableReader writeSortedContents() throws IOException
    {
        logger.info("Writing " + this);
        SSTableWriter writer = cfs.createFlushWriter(columnFamilies.size());

        for (Map.Entry<DecoratedKey, ColumnFamily> entry : columnFamilies.entrySet())
            writer.append(entry.getKey(), entry.getValue());

        SSTableReader ssTable = writer.closeAndOpenReader();
        logger.info(String.format("Completed flushing %s (%d bytes)",
                                  ssTable.getFilename(), new File(ssTable.getFilename()).length()));
        return ssTable;
    }

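The unsorted-input import path: all rows are materialized, re-sorted into a TreeMap keyed by DecoratedKey, and then appended in order.
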
        JsonParser parser = getParser(jsonFile);

        Object[] data = parser.readValueAs(new TypeReference<Object[]>(){});

        keyCountToImport = (keyCountToImport == null) ? data.length : keyCountToImport;
        SSTableWriter writer = new SSTableWriter(ssTablePath, keyCountToImport);

        System.out.printf("Importing %s keys...%n", keyCountToImport);

        // sort by dk representation, but hold onto the hex version
        SortedMap<DecoratedKey,Map<?, ?>> decoratedKeys = new TreeMap<DecoratedKey,Map<?, ?>>();

        for (Object row : data)
        {
            Map<?,?> rowAsMap = (Map<?, ?>)row;
            decoratedKeys.put(partitioner.decorateKey(hexToBytes((String)rowAsMap.get("key"))), rowAsMap);
        }

        for (Map.Entry<DecoratedKey, Map<?, ?>> row : decoratedKeys.entrySet())
        {
            if (row.getValue().containsKey("metadata"))
            {
                parseMeta((Map<?, ?>) row.getValue().get("metadata"), columnFamily, null);
            }

            Object columns = row.getValue().get("columns");
            if (columnFamily.getType() == ColumnFamilyType.Super && oldSCFormat)
                addToSuperCF((Map<?, ?>) columns, columnFamily);
            else
                addToStandardCF((List<?>) columns, columnFamily);

            writer.append(row.getKey(), columnFamily);
            columnFamily.clear();

            importedKeys++;

            long current = System.nanoTime();

            if (TimeUnit.NANOSECONDS.toSeconds(current - start) >= 5) // 5 secs.
            {
                System.out.printf("Currently imported %d keys.%n", importedKeys);
                start = current;
            }

            if (keyCountToImport == importedKeys)
                break;
        }

        writer.closeAndOpenReader();

        return importedKeys;
    }

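What looks like a later revision of the sorted-input import: each row is read as a map, per-row metadata is parsed, and progress reporting uses System.nanoTime(). The excerpt begins mid-method.
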
        }

        System.out.printf("Importing %s keys...%n", keyCountToImport);

        parser = getParser(jsonFile); // renewing parser
        SSTableWriter writer = new SSTableWriter(ssTablePath, keyCountToImport);

        int lineNumber = 1;
        DecoratedKey prevStoredKey = null;

        parser.nextToken(); // START_ARRAY
        while (parser.nextToken() != null)
        {
            String key = parser.getCurrentName();
            Map<?, ?> row = parser.readValueAs(new TypeReference<Map<?, ?>>(){});
            DecoratedKey currentKey = partitioner.decorateKey(hexToBytes((String) row.get("key")));

            if (row.containsKey("metadata"))
                parseMeta((Map<?, ?>) row.get("metadata"), columnFamily, null);

            if (columnFamily.getType() == ColumnFamilyType.Super && oldSCFormat)
                addToSuperCF((Map<?, ?>)row.get("columns"), columnFamily);
            else
                addToStandardCF((List<?>)row.get("columns"), columnFamily);

            if (prevStoredKey != null && prevStoredKey.compareTo(currentKey) >= 0)
            {
                System.err.printf("Line %d: Key %s is not greater than the previous key; the input is not sorted properly. Aborting import. You might need to delete SSTables manually.%n",
                                  lineNumber, key);
                return -1;
            }

            // saving decorated key
            writer.append(currentKey, columnFamily);
            columnFamily.clear();

            prevStoredKey = currentKey;
            importedKeys++;
            lineNumber++;

            long current = System.nanoTime();

            if (TimeUnit.NANOSECONDS.toSeconds(current - start) >= 5) // 5 secs.
            {
                System.out.printf("Currently imported %d keys.%n", importedKeys);
                start = current;
            }

            if (keyCountToImport == importedKeys)
                break;
        }

        writer.closeAndOpenReader();

        return importedKeys;
    }

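Another flush variant with size estimation; the excerpt opens mid-loop, while summing the serialized size of the memtable's keys.
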
            keySize += key.key.remaining();
        long estimatedSize = (long) ((keySize // index entries
                                      + keySize // keys in data file
                                      + currentThroughput.get()) // data
                                     * 1.2); // bloom filter and row index overhead
        SSTableWriter writer = cfs.createFlushWriter(columnFamilies.size(), estimatedSize);

        for (Map.Entry<DecoratedKey, ColumnFamily> entry : columnFamilies.entrySet())
            writer.append(entry.getKey(), entry.getValue());

        SSTableReader ssTable = writer.closeAndOpenReader();
        logger.info(String.format("Completed flushing %s (%d bytes)",
                                  ssTable.getFilename(), new File(ssTable.getFilename()).length()));
        return ssTable;
    }

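What looks like a later revision of the compaction loop shown earlier; here newSSTableSegmentThresholdReached also receives the current write position. The excerpt begins and ends mid-method.
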
                // a crash could cause data loss.
                cfs.markCompacted(toCompact, compactionType);
                return 0;
            }

            SSTableWriter writer = cfs.createCompactionWriter(keysPerSSTable, compactionFileLocation, toCompact);
            writers.add(writer);
            while (nni.hasNext())
            {
                if (ci.isStopRequested())
                    throw new CompactionInterruptedException(ci.getCompactionInfo());

                AbstractCompactedRow row = nni.next();
                if (row.isEmpty())
                    continue;

                long position = writer.append(row);
                totalkeysWritten++;

                if (DatabaseDescriptor.getPreheatKeyCache())
                {
                    for (SSTableReader sstable : toCompact)
                    {
                        if (sstable.getCachedPosition(row.key, false) != null)
                        {
                            cachedKeys.put(row.key, position);
                            break;
                        }
                    }
                }
                if (!nni.hasNext() || newSSTableSegmentThresholdReached(writer, position))
                {
                    SSTableReader toIndex = writer.closeAndOpenReader(getMaxDataAge(toCompact));
                    cachedKeyMap.put(toIndex, cachedKeys);
                    sstables.add(toIndex);
                    if (nni.hasNext())
                    {
                        writer = cfs.createCompactionWriter(keysPerSSTable, compactionFileLocation, toCompact);
                        writers.add(writer);
                        cachedKeys = new HashMap<DecoratedKey, Long>();
                    }
                }
            }
        }
        catch (Exception e)
        {
            for (SSTableWriter writer : writers)
                writer.abort();
            throw FBUtilities.unchecked(e);
        }
        finally
        {
            iter.close();

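Another revision of the unsorted-input import; it differs from the earlier one in the parseMeta signature, the super-column check, and millisecond-based progress timing.
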
        JsonParser parser = getParser(jsonFile);

        Object[] data = parser.readValueAs(new TypeReference<Object[]>(){});

        keyCountToImport = (keyCountToImport == null) ? data.length : keyCountToImport;
        SSTableWriter writer = new SSTableWriter(ssTablePath, keyCountToImport);

        System.out.printf("Importing %s keys...%n", keyCountToImport);

        // sort by dk representation, but hold onto the hex version
        SortedMap<DecoratedKey,Map<?, ?>> decoratedKeys = new TreeMap<DecoratedKey,Map<?, ?>>();

        for (Object row : data)
        {
            Map<?,?> rowAsMap = (Map<?, ?>)row;
            decoratedKeys.put(partitioner.decorateKey(hexToBytes((String)rowAsMap.get("key"))), rowAsMap);
        }

        for (Map.Entry<DecoratedKey, Map<?, ?>> row : decoratedKeys.entrySet())
        {
            if (row.getValue().containsKey("metadata"))
            {
                parseMeta((Map<?, ?>) row.getValue().get("metadata"), columnFamily);
            }

            Object columns = row.getValue().get("columns");
            if (columnFamily.getType() == ColumnFamilyType.Super)
                addToSuperCF((Map<?, ?>) columns, columnFamily);
            else
                addToStandardCF((List<?>) columns, columnFamily);

            writer.append(row.getKey(), columnFamily);
            columnFamily.clear();

            importedKeys++;

            long current = System.currentTimeMillis();

            if (current - start >= 5000) // 5 secs.
            {
                System.out.printf("Currently imported %d keys.%n", importedKeys);
                start = current;
            }

            if (keyCountToImport == importedKeys)
                break;
        }

        writer.closeAndOpenReader();

        return importedKeys;
    }
