Package org.apache.kafka.clients.producer.internals

Examples of org.apache.kafka.clients.producer.internals.RecordAccumulator.ready()
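
The fragments below come from the producer-internal RecordAccumulator tests and reference fixtures such as tp1, tp2, tp3, node1, node2, cluster, key, value, metrics, and time without defining them. The sketch below shows what such fixtures might look like; the names and topology (tp1/tp2 led by node1, tp3 by node2) mirror the snippets, but the exact constructors are assumptions and vary across Kafka versions.

import static java.util.Arrays.asList;

import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;

// Hypothetical fixtures matching the names used in the fragments on this page.
final class ExampleFixtures {
    static final byte[] key = "key".getBytes();
    static final byte[] value = "value".getBytes();
    // msgSize in the tests is the serialized size of one key/value record, including record overhead.
    static final Node node1 = new Node(0, "localhost", 1111);
    static final Node node2 = new Node(1, "localhost", 1112);
    static final TopicPartition tp1 = new TopicPartition("test", 0);
    static final TopicPartition tp2 = new TopicPartition("test", 1);
    static final TopicPartition tp3 = new TopicPartition("test2", 0);
    static final PartitionInfo part1 = new PartitionInfo("test", 0, node1, null, null);
    static final PartitionInfo part2 = new PartitionInfo("test", 1, node1, null, null);
    static final PartitionInfo part3 = new PartitionInfo("test2", 0, node2, null, null);
    // Older Cluster constructors take just nodes and partitions; newer ones require more
    // arguments, so treat this call as an assumption. The tests' "time" is a mock clock whose
    // sleep() only advances a counter, and "metrics" is a Metrics registry; both are omitted
    // here because their construction differs by version.
    static final Cluster cluster = new Cluster(asList(node1, node2), asList(part1, part2, part3));
}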


    @Test
    public void testLinger() throws Exception {
        long lingerMs = 10L;
        RecordAccumulator accum = new RecordAccumulator(1024, 10 * 1024, lingerMs, 100L, false, metrics, time);
        accum.append(tp1, key, value, CompressionType.NONE, null);
        assertEquals("No partitions should be ready", 0, accum.ready(cluster, time.milliseconds()).readyNodes.size());
        time.sleep(10);
        assertEquals("Our partition's leader should be ready", Collections.singleton(node1), accum.ready(cluster, time.milliseconds()).readyNodes);
        List<RecordBatch> batches = accum.drain(cluster, Collections.singleton(node1), Integer.MAX_VALUE, 0).get(node1.id());
        assertEquals(1, batches.size());
        RecordBatch batch = batches.get(0);
        Iterator<LogEntry> iter = batch.records.iterator();
        LogEntry entry = iter.next();
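
For reference, the ready()-then-drain() handshake the example above walks through, condensed into a single hypothetical helper. The 5 KB per-node byte cap and the method name are illustrative choices, not part of the API.

import java.util.List;
import java.util.Map;

import org.apache.kafka.clients.producer.internals.RecordAccumulator;
import org.apache.kafka.clients.producer.internals.RecordBatch;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.utils.Time;

final class ReadyDrainSketch {
    // ready() reports which leader nodes have sendable data; drain() then pulls
    // batches for those nodes, grouped by node id and capped in bytes per node.
    static Map<Integer, List<RecordBatch>> drainReadyBatches(RecordAccumulator accum, Cluster cluster, Time time) {
        long now = time.milliseconds();
        RecordAccumulator.ReadyCheckResult result = accum.ready(cluster, now);
        return accum.drain(cluster, result.readyNodes, 5 * 1024, now);
    }
}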

        List<TopicPartition> partitions = asList(tp1, tp2);
        for (TopicPartition tp : partitions) {
            for (int i = 0; i < appends; i++)
                accum.append(tp, key, value, CompressionType.NONE, null);
        }
        assertEquals("Partition's leader should be ready", Collections.singleton(node1), accum.ready(cluster, time.milliseconds()).readyNodes);

        List<RecordBatch> batches = accum.drain(cluster, Collections.singleton(node1), 1024, 0).get(node1.id());
        assertEquals("But due to size bound only one partition should have been retrieved", 1, batches.size());
    }

        for (Thread t : threads)
            t.start();
        int read = 0;
        long now = time.milliseconds();
        while (read < numThreads * msgs) {
            Set<Node> nodes = accum.ready(cluster, now).readyNodes;
            List<RecordBatch> batches = accum.drain(cluster, nodes, 5 * 1024, 0).get(node1.id());
            if (batches != null) {
                for (RecordBatch batch : batches) {
                    for (LogEntry entry : batch.records)
                        read++;

        int appends = 1024 / msgSize;

        // Partition on node1 only
        for (int i = 0; i < appends; i++)
            accum.append(tp1, key, value, CompressionType.NONE, null);
        RecordAccumulator.ReadyCheckResult result = accum.ready(cluster, time.milliseconds());
        assertEquals("No nodes should be ready.", 0, result.readyNodes.size());
        assertEquals("Next check time should be the linger time", lingerMs, result.nextReadyCheckDelayMs);

        time.sleep(lingerMs / 2);

        // Add partition on node2 only
        for (int i = 0; i < appends; i++)
            accum.append(tp3, key, value, CompressionType.NONE, null);
        result = accum.ready(cluster, time.milliseconds());
        assertEquals("No nodes should be ready.", 0, result.readyNodes.size());
        assertEquals("Next check time should be defined by node1, half remaining linger time", lingerMs / 2, result.nextReadyCheckDelayMs);

        // Add data for another partition on node1, enough to make data sendable immediately
        for (int i = 0; i < appends + 1; i++)
            accum.append(tp2, key, value, CompressionType.NONE, null);
        result = accum.ready(cluster, time.milliseconds());
        assertEquals("Node1 should be ready", Collections.singleton(node1), result.readyNodes);
        // Note this can actually be < linger time because it may use delays from partitions that aren't sendable
        // but have leaders with other sendable data.
        assertTrue("Next check time should be defined by node2, at most linger time", result.nextReadyCheckDelayMs <= lingerMs);
    }
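
The assertions above use nextReadyCheckDelayMs as the accumulator's own estimate of how long the caller should wait before asking again (the remaining linger time in these cases). Below is a sketch of a caller-side loop that uses it that way, assuming data has already been appended; it is illustrative only, not the producer Sender's actual loop.

import java.util.Set;

import org.apache.kafka.clients.producer.internals.RecordAccumulator;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.utils.Time;

final class ReadyBackoffSketch {
    // Polls ready() until at least one leader node has sendable data, sleeping
    // for the accumulator's suggested delay between checks.
    static Set<Node> waitUntilReady(RecordAccumulator accum, Cluster cluster, Time time) {
        RecordAccumulator.ReadyCheckResult result = accum.ready(cluster, time.milliseconds());
        while (result.readyNodes.isEmpty()) {
            time.sleep(result.nextReadyCheckDelayMs);
            result = accum.ready(cluster, time.milliseconds());
        }
        return result.readyNodes;
    }
}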

        long now = time.milliseconds();
        RecordAccumulator accum = new RecordAccumulator(1024, 10 * 1024, 10L, 100L, false, metrics, time);
        int appends = 1024 / msgSize;
        for (int i = 0; i < appends; i++) {
            accum.append(tp1, key, value, CompressionType.NONE, null);
            assertEquals("No partitions should be ready.", 0, accum.ready(cluster, now).readyNodes.size());
        }
        accum.append(tp1, key, value, CompressionType.NONE, null);
        assertEquals("Our partition's leader should be ready", Collections.singleton(node1), accum.ready(cluster, time.milliseconds()).readyNodes);
        List<RecordBatch> batches = accum.drain(cluster, Collections.singleton(node1), Integer.MAX_VALUE, 0).get(node1.id());
        assertEquals(1, batches.size());
        RecordBatch batch = batches.get(0);
        Iterator<LogEntry> iter = batch.records.iterator();
        for (int i = 0; i < appends; i++) {

    @Test
    public void testAppendLarge() throws Exception {
        int batchSize = 512;
        RecordAccumulator accum = new RecordAccumulator(batchSize, 10 * 1024, 0L, 100L, false, metrics, time);
        accum.append(tp1, key, new byte[2 * batchSize], CompressionType.NONE, null);
        assertEquals("Our partition's leader should be ready", Collections.singleton(node1), accum.ready(cluster, time.milliseconds()).readyNodes);
    }

