Package com.google.common.util.concurrent

Examples of com.google.common.util.concurrent.RateLimiter$WarmingUp
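The snippets below are excerpts from open-source projects (chiefly Apache Cassandra, its stress tool, and Apache BookKeeper) that throttle work with Guava's RateLimiter. WarmingUp is the limiter variant behind the three-argument factory, which lets the granted rate ramp up smoothly after the limiter has sat idle. A minimal, self-contained sketch of that API (the 100 permits/second rate and 3-second warmup are illustrative values, not taken from the projects below):

    import java.util.concurrent.TimeUnit;
    import com.google.common.util.concurrent.RateLimiter;

    public class WarmupDemo
    {
        public static void main(String[] args)
        {
            // 100 permits/second, ramping up over a 3-second warmup after idle periods.
            RateLimiter limiter = RateLimiter.create(100.0, 3, TimeUnit.SECONDS);
            for (int i = 0; i < 10; i++)
            {
                double waited = limiter.acquire(); // blocks until a permit is available
                System.out.printf("permit %d granted after %.3fs wait%n", i, waited);
            }
        }
    }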


            logger.debug("average hinted-row column size is {}; using pageSize of {}", averageColumnSize, pageSize);
        }

        // rate limit is in bytes per second. Uses Double.MAX_VALUE if disabled (set to 0 in cassandra.yaml).
        int throttleInKB = DatabaseDescriptor.getHintedHandoffThrottleInKB();
        RateLimiter rateLimiter = RateLimiter.create(throttleInKB == 0 ? Double.MAX_VALUE : throttleInKB * 1024);

        while (true)
        {
            QueryFilter filter = QueryFilter.getSliceFilter(epkey, new QueryPath(SystemTable.HINTS_CF), startColumn, ByteBufferUtil.EMPTY_BYTE_BUFFER, false, pageSize);
            ColumnFamily hintsPage = ColumnFamilyStore.removeDeleted(hintStore.getColumnFamily(filter), (int)(System.currentTimeMillis() / 1000));
            if (pagingFinished(hintsPage, startColumn))
            {
                if (ByteBufferUtil.EMPTY_BYTE_BUFFER.equals(startColumn))
                {
                    // we've started from the beginning and could not find anything (only maybe tombstones)
                    break;
                }
                else
                {
                    // restart query from the first column until we read an empty row;
                    // that will tell us everything was delivered successfully with no timeouts
                    startColumn = ByteBufferUtil.EMPTY_BYTE_BUFFER;
                    continue;
                }
            }

            for (final IColumn hint : hintsPage.getSortedColumns())
            {
                // Skip tombstones:
                // if we iterate quickly enough, it's possible that we could request a new page in the same millisecond
                // in which the local deletion timestamp was generated on the last column in the old page, in which
                // case the hint will have no columns (since it's deleted) but will still be included in the resultset
                // since (even with gcgs=0) it's still a "relevant" tombstone.
                if (!hint.isLive())
                    continue;

                startColumn = hint.name();

                ByteBuffer[] components = comparator.split(hint.name());
                int version = Int32Type.instance.compose(components[1]);
                DataInputStream in = new DataInputStream(ByteBufferUtil.inputStream(hint.value()));
                RowMutation rm;
                try
                {
                    rm = RowMutation.serializer.deserialize(in, version);
                }
                catch (UnknownColumnFamilyException e)
                {
                    logger.debug("Skipping delivery of hint for deleted columnfamily", e);
                    deleteHint(hostIdBytes, hint.name(), hint.maxTimestamp());
                    continue;
                }

                MessageOut<RowMutation> message = rm.createMessage();
                rateLimiter.acquire(message.serializedSize(MessagingService.current_version));
                WrappedRunnable callback = new WrappedRunnable()
                {
                    public void runMayThrow() throws IOException
                    {
                        rowsReplayed.incrementAndGet();
[excerpt truncated]
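The loop above treats one permit as one byte: the limiter is created with a bytes-per-second rate (the KB setting from cassandra.yaml times 1024) and each hint acquires as many permits as its serialized message size, so larger mutations wait proportionally longer. A minimal sketch of the same byte-weighted pattern, with hypothetical names (ByteThrottledSender and the 1 MB/s rate are illustrative, not from Cassandra):

    import com.google.common.util.concurrent.RateLimiter;

    public class ByteThrottledSender
    {
        // One permit == one byte, so this caps outgoing throughput at roughly 1 MB/s.
        private final RateLimiter limiter = RateLimiter.create(1024 * 1024);

        public void send(byte[] payload)
        {
            if (payload.length > 0)
                limiter.acquire(payload.length); // block until this payload's bytes are paid for
            // ... hand the payload to the transport here ...
        }
    }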


     * allow for a more memory efficient solution if we know the sstables don't overlap (see
     * LeveledCompactionStrategy for instance).
     */
    public List<ICompactionScanner> getScanners(Collection<SSTableReader> sstables, Range<Token> range)
    {
        RateLimiter limiter = CompactionManager.instance.getRateLimiter();
        ArrayList<ICompactionScanner> scanners = new ArrayList<ICompactionScanner>();
        for (SSTableReader sstable : sstables)
            scanners.add(sstable.getScanner(range, limiter));
        return scanners;
    }
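Here a single limiter obtained from CompactionManager.instance.getRateLimiter() is handed to every scanner, so the configured compaction throughput is an aggregate cap across all concurrent scanners rather than a per-sstable one; Guava's RateLimiter is safe for use from multiple threads, which is what makes this sharing sound.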

        int pageSize = calculatePageSize();
        logger.debug("Using pageSize of {}", pageSize);

        // rate limit is in bytes per second. Uses Double.MAX_VALUE if disabled (set to 0 in cassandra.yaml).
        int throttleInKB = DatabaseDescriptor.getHintedHandoffThrottleInKB();
        RateLimiter rateLimiter = RateLimiter.create(throttleInKB == 0 ? Double.MAX_VALUE : throttleInKB * 1024);

        delivery:
        while (true)
        {
            QueryFilter filter = QueryFilter.getSliceFilter(epkey,
                                                            new QueryPath(SystemTable.HINTS_CF),
                                                            startColumn,
                                                            ByteBufferUtil.EMPTY_BYTE_BUFFER,
                                                            false,
                                                            pageSize);

            ColumnFamily hintsPage = ColumnFamilyStore.removeDeleted(hintStore.getColumnFamily(filter),
                                                                     (int) (System.currentTimeMillis() / 1000));

            if (pagingFinished(hintsPage, startColumn))
                break;

            // check if node is still alive and we should continue delivery process
            if (!FailureDetector.instance.isAlive(endpoint))
            {
                logger.info("Endpoint {} died during hint delivery; aborting ({} delivered)", endpoint, rowsReplayed);
                return;
            }

            List<WriteResponseHandler> responseHandlers = Lists.newArrayList();

            for (final IColumn hint : hintsPage.getSortedColumns())
            {
                // check if hints delivery has been paused during the process
                if (hintedHandOffPaused)
                {
                    logger.debug("Hints delivery process is paused, aborting");
                    break delivery;
                }

                // Skip tombstones:
                // if we iterate quickly enough, it's possible that we could request a new page in the same millisecond
                // in which the local deletion timestamp was generated on the last column in the old page, in which
                // case the hint will have no columns (since it's deleted) but will still be included in the resultset
                // since (even with gcgs=0) it's still a "relevant" tombstone.
                if (!hint.isLive())
                    continue;

                startColumn = hint.name();

                ByteBuffer[] components = comparator.split(hint.name());
                int version = Int32Type.instance.compose(components[1]);
                DataInputStream in = new DataInputStream(ByteBufferUtil.inputStream(hint.value()));
                RowMutation rm;
                try
                {
                    rm = RowMutation.serializer.deserialize(in, version);
                }
                catch (UnknownColumnFamilyException e)
                {
                    logger.debug("Skipping delivery of hint for deleted columnfamily", e);
                    deleteHint(hostIdBytes, hint.name(), hint.maxTimestamp());
                    continue;
                }
                catch (IOException e)
                {
                    throw new AssertionError(e);
                }

                MessageOut<RowMutation> message = rm.createMessage();
                rateLimiter.acquire(message.serializedSize(MessagingService.current_version));
                Runnable callback = new Runnable()
                {
                    public void run()
                    {
                        rowsReplayed.incrementAndGet();
[excerpt truncated]
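Relative to the first excerpt, this version of the hinted-handoff loop adds two exit paths: a FailureDetector liveness check that aborts delivery when the target node dies, and a hintedHandOffPaused flag checked before each hint, where the labeled break delivery; statement unwinds both the inner for loop and the outer paging loop in one step.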

        // rate limit is in bytes per second. Uses Double.MAX_VALUE if disabled (set to 0 in cassandra.yaml).
        // max rate is scaled by the number of nodes in the cluster (CASSANDRA-5272).
        int throttleInKB = DatabaseDescriptor.getHintedHandoffThrottleInKB()
                           / (StorageService.instance.getTokenMetadata().getAllEndpoints().size() - 1);
        RateLimiter rateLimiter = RateLimiter.create(throttleInKB == 0 ? Double.MAX_VALUE : throttleInKB * 1024);

        delivery:
        while (true)
        {
            QueryFilter filter = QueryFilter.getSliceFilter(epkey,
                                                            new QueryPath(SystemTable.HINTS_CF),
                                                            startColumn,
                                                            ByteBufferUtil.EMPTY_BYTE_BUFFER,
                                                            false,
                                                            pageSize);

            ColumnFamily hintsPage = ColumnFamilyStore.removeDeleted(hintStore.getColumnFamily(filter),
                                                                     (int) (System.currentTimeMillis() / 1000));

            if (pagingFinished(hintsPage, startColumn))
                break;

            // check if node is still alive and we should continue delivery process
            if (!FailureDetector.instance.isAlive(endpoint))
            {
                logger.info("Endpoint {} died during hint delivery; aborting ({} delivered)", endpoint, rowsReplayed);
                return;
            }

            List<WriteResponseHandler> responseHandlers = Lists.newArrayList();

            for (final IColumn hint : hintsPage.getSortedColumns())
            {
                // check if hints delivery has been paused during the process
                if (hintedHandOffPaused)
                {
                    logger.debug("Hints delivery process is paused, aborting");
                    break delivery;
                }

                // Skip tombstones:
                // if we iterate quickly enough, it's possible that we could request a new page in the same millisecond
                // in which the local deletion timestamp was generated on the last column in the old page, in which
                // case the hint will have no columns (since it's deleted) but will still be included in the resultset
                // since (even with gcgs=0) it's still a "relevant" tombstone.
                if (!hint.isLive())
                    continue;

                startColumn = hint.name();

                ByteBuffer[] components = comparator.split(hint.name());
                int version = Int32Type.instance.compose(components[1]);
                DataInputStream in = new DataInputStream(ByteBufferUtil.inputStream(hint.value()));
                RowMutation rm;
                try
                {
                    rm = RowMutation.serializer.deserialize(in, version);
                }
                catch (UnknownColumnFamilyException e)
                {
                    logger.debug("Skipping delivery of hint for deleted columnfamily", e);
                    deleteHint(hostIdBytes, hint.name(), hint.maxTimestamp());
                    continue;
                }
                catch (IOException e)
                {
                    throw new AssertionError(e);
                }

                Map<UUID, Long> truncationTimesCache = new HashMap<UUID, Long>();
                for (UUID cfId : ImmutableSet.copyOf(rm.getColumnFamilyIds()))
                {
                    Long truncatedAt = truncationTimesCache.get(cfId);
                    if (truncatedAt == null)
                    {
                        ColumnFamilyStore cfs = Table.open(rm.getTable()).getColumnFamilyStore(cfId);
                        truncatedAt = cfs.getTruncationTime();
                        truncationTimesCache.put(cfId, truncatedAt);
                    }

                    if (hint.maxTimestamp() < truncatedAt)
                    {
                        logger.debug("Skipping delivery of hint for truncated columnfamily {}" + cfId);
                        rm = rm.without(cfId);
                    }
                }

                if (rm.isEmpty())
                {
                    deleteHint(hostIdBytes, hint.name(), hint.maxTimestamp());
                    continue;
                }

                MessageOut<RowMutation> message = rm.createMessage();
                rateLimiter.acquire(message.serializedSize(MessagingService.current_version));
                Runnable callback = new Runnable()
                {
                    public void run()
                    {
                        rowsReplayed.incrementAndGet();
[excerpt truncated]
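This variant scales the throttle down by the number of other nodes in the cluster (CASSANDRA-5272), so the aggregate hint traffic converging on one recovering node stays near the configured rate regardless of cluster size. Worth noting about the arithmetic as written: the division is integer division, so a small configured throttle on a large cluster can round down to 0, which the throttleInKB == 0 test then treats as "throttling disabled" and the limiter is created with Double.MAX_VALUE; clamping the quotient to a minimum of 1 would avoid that edge. It also drops hints older than a column family's truncation time from the mutation instead of replaying them.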

        output.println("total,interval_op_rate,interval_key_rate,latency/95th/99th,elapsed_time");

        int itemsPerThread = client.getKeysPerThread();
        int modulo = client.getNumKeys() % threadCount;
        RateLimiter rateLimiter = RateLimiter.create(client.getMaxOpsPerSecond());

        // creating required type of the threads for the test
        for (int i = 0; i < threadCount; i++) {
            if (i == threadCount - 1)
                itemsPerThread += modulo; // last one is going to handle N + modulo items
[excerpt truncated]
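In the stress client, the key range is split evenly among worker threads, with the last thread absorbing the remainder (modulo); a single RateLimiter is created up front from client.getMaxOpsPerSecond(), evidently to be shared across the worker threads so the cap applies to the run as a whole rather than per thread.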

        waitForRawInserts.await("Failed to add raw data");
    }

    //@Test(dependsOnMethods = "insertRawData")
    public void queryRawDataAsync() throws Exception {
        RateLimiter readPermits = RateLimiter.create(50);

        log.info("Running queryRawDataAsync");
        long start = System.currentTimeMillis();

        DateTime startTime = hour(3).minusHours(1).minusSeconds(1);
[excerpt truncated]
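In this test, RateLimiter.create(50) paces asynchronous read queries at 50 per second, a simple way to keep background query load predictable while the surrounding code times the run.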

        if (opCount < 0)
            workQueue = new ContinuousWorkQueue(50);
        else
            workQueue = FixedWorkQueue.build(opCount);

        RateLimiter rateLimiter = null;
        // TODO : move this to a new queue wrapper that gates progress based on a poisson (or configurable) distribution
        if (settings.rate.opRateTargetPerSecond > 0)
            rateLimiter = RateLimiter.create(settings.rate.opRateTargetPerSecond);

        final StressMetrics metrics = new StressMetrics(output, settings.log.intervalMillis);
[excerpt truncated]
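Unlike the Cassandra server code above, which models "no throttle" as a limiter created with Double.MAX_VALUE, the stress tool leaves rateLimiter null when no target op rate is configured, so every call site has to guard the acquire. A sketch of what such a guarded call site presumably looks like (hypothetical, not shown in the excerpt):

    if (rateLimiter != null)
        rateLimiter.acquire(); // only pace the operation when a target rate was set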

        // rate limit is in bytes per second. Uses Double.MAX_VALUE if disabled (set to 0 in cassandra.yaml).
        // max rate is scaled by the number of nodes in the cluster (CASSANDRA-5272).
        int throttleInKB = DatabaseDescriptor.getHintedHandoffThrottleInKB()
                           / (StorageService.instance.getTokenMetadata().getAllEndpoints().size() - 1);
        RateLimiter rateLimiter = RateLimiter.create(throttleInKB == 0 ? Double.MAX_VALUE : throttleInKB * 1024);

        boolean finished = false;
        delivery:
        while (true)
        {
            long now = System.currentTimeMillis();
            QueryFilter filter = QueryFilter.getSliceFilter(epkey,
                                                            SystemKeyspace.HINTS_CF,
                                                            startColumn,
                                                            Composites.EMPTY,
                                                            false,
                                                            pageSize,
                                                            now);

            ColumnFamily hintsPage = ColumnFamilyStore.removeDeleted(hintStore.getColumnFamily(filter), (int) (now / 1000));

            if (pagingFinished(hintsPage, startColumn))
            {
                logger.info("Finished hinted handoff of {} rows to endpoint {}", rowsReplayed, endpoint);
                finished = true;
                break;
            }

            // check if node is still alive and we should continue delivery process
            if (!FailureDetector.instance.isAlive(endpoint))
            {
                logger.info("Endpoint {} died during hint delivery; aborting ({} delivered)", endpoint, rowsReplayed);
                break;
            }

            List<WriteResponseHandler> responseHandlers = Lists.newArrayList();
            for (final Cell hint : hintsPage)
            {
                // check if hints delivery has been paused during the process
                if (hintedHandOffPaused)
                {
                    logger.debug("Hints delivery process is paused, aborting");
                    break delivery;
                }

                // Skip tombstones:
                // if we iterate quickly enough, it's possible that we could request a new page in the same millisecond
                // in which the local deletion timestamp was generated on the last column in the old page, in which
                // case the hint will have no columns (since it's deleted) but will still be included in the resultset
                // since (even with gcgs=0) it's still a "relevant" tombstone.
                if (!hint.isLive())
                    continue;

                startColumn = hint.name();

                int version = Int32Type.instance.compose(hint.name().get(1));
                DataInputStream in = new DataInputStream(ByteBufferUtil.inputStream(hint.value()));
                Mutation mutation;
                try
                {
                    mutation = Mutation.serializer.deserialize(in, version);
                }
                catch (UnknownColumnFamilyException e)
                {
                    logger.debug("Skipping delivery of hint for deleted columnfamily", e);
                    deleteHint(hostIdBytes, hint.name(), hint.timestamp());
                    continue;
                }
                catch (IOException e)
                {
                    throw new AssertionError(e);
                }

                for (UUID cfId : mutation.getColumnFamilyIds())
                {
                    if (hint.timestamp() <= SystemKeyspace.getTruncatedAt(cfId))
                    {
                        logger.debug("Skipping delivery of hint for truncated columnfamily {}", cfId);
                        mutation = mutation.without(cfId);
                    }
                }

                if (mutation.isEmpty())
                {
                    deleteHint(hostIdBytes, hint.name(), hint.timestamp());
                    continue;
                }

                MessageOut<Mutation> message = mutation.createMessage();
                rateLimiter.acquire(message.serializedSize(MessagingService.current_version));
                Runnable callback = new Runnable()
                {
                    public void run()
                    {
                        rowsReplayed.incrementAndGet();
[excerpt truncated]
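This is the same hinted-handoff loop after Cassandra's storage-engine rework: IColumn has become Cell, RowMutation has become Mutation, cell names are composites read via hint.name().get(1), and the truncation check asks SystemKeyspace.getTruncatedAt(cfId) directly instead of caching ColumnFamilyStore lookups. The rate limiting itself is unchanged: one permit per serialized byte of the outgoing message.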

     */
    class CompactionScannerFactory implements EntryLogger.EntryLogListener {
        List<Offset> offsets = new ArrayList<Offset>();

        EntryLogScanner newScanner(final EntryLogMetadata meta) {
            final RateLimiter rateLimiter = RateLimiter.create(compactionRate);
            return new EntryLogScanner() {
                @Override
                public boolean accept(long ledgerId) {
                    return meta.containsLedger(ledgerId);
                }

                @Override
                public void process(final long ledgerId, long offset, ByteBuffer entry)
                        throws IOException {
                    rateLimiter.acquire();
                    synchronized (CompactionScannerFactory.this) {
                        if (offsets.size() > maxOutstandingRequests) {
                            waitEntrylogFlushed();
                        }
                        entry.getLong(); // discard ledger id, we already have it
[excerpt truncated]
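The BookKeeper scanner takes the opposite approach to weighting: process() issues a plain acquire() for each entry regardless of its size, so compactionRate is measured in entries per second rather than bytes per second, and a fresh limiter is created per scanner inside newScanner().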

