Package kafka.common

Examples of kafka.common.TopicAndPartition
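
All of the excerpts below follow the same basic pattern: build a TopicAndPartition for a topic/partition pair, map it to a PartitionOffsetRequestInfo, wrap the map in a kafka.javaapi.OffsetRequest, and send it to a broker with SimpleConsumer.getOffsetsBefore(). For orientation, here is a minimal self-contained sketch of that pattern against the old kafka.javaapi.consumer.SimpleConsumer API; the broker address, topic name, and client id are placeholders, not values taken from any of the excerpts.

    import java.util.HashMap;
    import java.util.Map;

    import kafka.api.PartitionOffsetRequestInfo;
    import kafka.common.TopicAndPartition;
    import kafka.javaapi.OffsetRequest;
    import kafka.javaapi.OffsetResponse;
    import kafka.javaapi.consumer.SimpleConsumer;

    public class TopicAndPartitionOffsetExample {

        public static void main(String[] args) {
            // Placeholder broker address and client id; adjust for a real cluster.
            SimpleConsumer consumer = new SimpleConsumer("localhost", 9092, 60000, 64 * 1024, "offset-lookup");
            try {
                // TopicAndPartition is the key type used by the offset request API.
                TopicAndPartition topicAndPartition = new TopicAndPartition("my-topic", 0);

                // Ask for at most one offset, taken at the "latest" logical timestamp.
                Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo =
                        new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
                requestInfo.put(topicAndPartition,
                        new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), 1));

                OffsetRequest request = new OffsetRequest(
                        requestInfo, kafka.api.OffsetRequest.CurrentVersion(), consumer.clientId());
                OffsetResponse response = consumer.getOffsetsBefore(request);

                long[] offsets = response.offsets("my-topic", 0);
                System.out.println("Latest offset: " + (offsets.length > 0 ? offsets[0] : -1));
            } finally {
                consumer.close();
            }
        }
    }

The project excerpts that follow build the same kind of request in different contexts.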


        }
        return getOffset(consumer, topic, partition, startOffsetTime);
    }

    // Asks the broker for a single offset for this topic/partition at or before
    // startOffsetTime (a timestamp, or kafka.api.OffsetRequest.EarliestTime()/LatestTime()).
    public static long getOffset(SimpleConsumer consumer, String topic, int partition, long startOffsetTime) {
        TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
        Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
        requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(startOffsetTime, 1));
        OffsetRequest request = new OffsetRequest(
                requestInfo, kafka.api.OffsetRequest.CurrentVersion(), consumer.clientId());


        KafkaClient mKafkaClient = new KafkaClient(config);

        Map<TopicPartition, Long> lastOffsets = Maps.newHashMap();
        for (String topic : topics) {
            for (int i = 0; i < num_partitions; i++) {
                TopicAndPartition topicAndPartition = new TopicAndPartition(
                        topic, i);
                SimpleConsumer consumer = mKafkaClient
                        .createConsumer(new TopicPartition(topic, i));
                Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
                requestInfo.put(topicAndPartition,

    private static String getClientName(TopicPartition topicPartition) {
        return "secorClient_" + topicPartition.getTopic() + "_" + topicPartition.getPartition();
    }

    // Requests the latest offset the broker currently has for this topic/partition.
    private long findLastOffset(TopicPartition topicPartition, SimpleConsumer consumer) {
        TopicAndPartition topicAndPartition = new TopicAndPartition(topicPartition.getTopic(),
                topicPartition.getPartition());
        Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo =
                new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
        requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(
                kafka.api.OffsetRequest.LatestTime(), 1));

        return new FixedSplitSource(connectorId, builder.build());
    }

    private static long[] findAllOffsets(SimpleConsumer consumer, KafkaPartition partition)
    {
        TopicAndPartition topicAndPartition = new TopicAndPartition(partition.getTopicName(), partition.getPartitionIdAsInt());

        // The API implies that this will always return all of the offsets. So it seems a partition cannot have
        // more than Integer.MAX_VALUE-1 segments.
        //
        // This also assumes that the lowest value returned will be the first segment available. So if segments have been dropped off, this value
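
Judging from the comment above, the request asks for every available offset by passing Integer.MAX_VALUE as the maximum number of offsets. A rough sketch of what such a request could look like, reusing the kafka.javaapi classes from the other excerpts (the method name and the plain HashMap are illustrative, not taken from the original source):

    // Illustrative sketch: request every segment offset for the partition in one call.
    // The broker returns offsets in descending order, so offsets[0] is the latest
    // offset and the last element is the earliest offset still available.
    private static long[] findAllOffsetsSketch(SimpleConsumer consumer, String topicName, int partitionId) {
        TopicAndPartition topicAndPartition = new TopicAndPartition(topicName, partitionId);
        Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo =
                new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
        requestInfo.put(topicAndPartition,
                new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), Integer.MAX_VALUE));
        OffsetRequest request = new OffsetRequest(
                requestInfo, kafka.api.OffsetRequest.CurrentVersion(), consumer.clientId());
        return consumer.getOffsetsBefore(request).offsets(topicName, partitionId);
    }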

      return 0L;
    }

    // Fire offset request
    OffsetRequest request = new OffsetRequest(ImmutableMap.of(
      new TopicAndPartition(topicPart.getTopic(), topicPart.getPartition()),
      new PartitionOffsetRequestInfo(timestamp, 1)
    ), kafka.api.OffsetRequest.CurrentVersion(), consumer.clientId());

    OffsetResponse response = consumer.getOffsetsBefore(request);
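
The excerpt stops right after getOffsetsBefore(). What typically follows is a per-partition error check and a read of the returned offsets array; a small illustrative continuation (the error handling shown here is a sketch, not the project's actual code):

    // Illustrative continuation of the excerpt above.
    if (response.hasError()) {
      // errorCode() reports the per-partition error returned by the broker.
      short errorCode = response.errorCode(topicPart.getTopic(), topicPart.getPartition());
      throw new RuntimeException("Offset request failed with error code " + errorCode);
    }
    long[] offsets = response.offsets(topicPart.getTopic(), topicPart.getPartition());
    return offsets.length > 0 ? offsets[0] : 0L;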

    protected long[] getOffsetRange() throws IOException {

        /* get smallest and largest offsets */
        long[] range = new long[2];

        TopicAndPartition topicAndPartition = new TopicAndPartition(_request.getTopic(), _request.getPartition());
        Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo =
                new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
        requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.EarliestTime(), 1));
        OffsetRequest request = new OffsetRequest(
            requestInfo, kafka.api.OffsetRequest.CurrentVersion(), kafka.api.OffsetRequest.DefaultClientId());
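
The request above uses EarliestTime(); a second, symmetric request with kafka.api.OffsetRequest.LatestTime() supplies the other end of the range. A condensed sketch of that pattern (the fetchOffset helper is illustrative; only the _request accessor calls come from the excerpt):

    // Illustrative helper: fetch one offset for the requested topic/partition at the given time.
    private long fetchOffset(SimpleConsumer consumer, long time) {
        TopicAndPartition topicAndPartition =
                new TopicAndPartition(_request.getTopic(), _request.getPartition());
        Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo =
                new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
        requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(time, 1));
        OffsetRequest request = new OffsetRequest(
                requestInfo, kafka.api.OffsetRequest.CurrentVersion(), kafka.api.OffsetRequest.DefaultClientId());
        long[] offsets = consumer.getOffsetsBefore(request)
                .offsets(_request.getTopic(), _request.getPartition());
        // Assumes the broker returned at least one offset.
        return offsets[0];
    }

getOffsetRange() could then fill range[0] with fetchOffset(consumer, kafka.api.OffsetRequest.EarliestTime()) and range[1] with fetchOffset(consumer, kafka.api.OffsetRequest.LatestTime()).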

        if (this.earliestOffset == -2 && uri != null) {
            // TODO: Make the hard-coded parameters configurable
            SimpleConsumer consumer = new SimpleConsumer(uri.getHost(), uri.getPort(), 60000,
                    1024 * 1024, "hadoop-etl");
            Map<TopicAndPartition, PartitionOffsetRequestInfo> offsetInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
            offsetInfo.put(new TopicAndPartition(topic, partition), new PartitionOffsetRequestInfo(
                    kafka.api.OffsetRequest.EarliestTime(), 1));
            OffsetResponse response = consumer
                    .getOffsetsBefore(new OffsetRequest(offsetInfo, kafka.api.OffsetRequest
                            .CurrentVersion(), "hadoop-etl"));
            long[] endOffset = response.offsets(topic, partition);

    @Override
    public long getLastOffset(long time) {
        SimpleConsumer consumer = new SimpleConsumer(uri.getHost(), uri.getPort(), 60000,
                1024 * 1024, "hadoop-etl");
        Map<TopicAndPartition, PartitionOffsetRequestInfo> offsetInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
        offsetInfo.put(new TopicAndPartition(topic, partition), new PartitionOffsetRequestInfo(
                time, 1));
        OffsetResponse response = consumer.getOffsetsBefore(new OffsetRequest(offsetInfo,
                kafka.api.OffsetRequest.CurrentVersion(),"hadoop-etl"));
        long[] endOffset = response.offsets(topic, partition);
        consumer.close();
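
One caveat with this pattern: the offsets array can come back empty, for example when the broker reports an error for the partition, so callers usually guard the array access before reading endOffset[0]. A small illustrative guard, not taken from the original source:

        // Illustrative guard for the excerpt above: fail loudly instead of hitting
        // ArrayIndexOutOfBoundsException on an empty result.
        if (endOffset.length == 0) {
            throw new RuntimeException("No offset returned for " + topic + ":" + partition
                    + " at time " + time);
        }
        return endOffset[0];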


