Package kafka.javaapi.consumer

Examples of kafka.javaapi.consumer.SimpleConsumer
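
Before the project-specific snippets below, here is a minimal, self-contained sketch of the 0.8-era SimpleConsumer fetch cycle. The host, port, topic, and sizing values are placeholders, not taken from any of the examples:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

import kafka.api.FetchRequest;
import kafka.api.FetchRequestBuilder;
import kafka.javaapi.FetchResponse;
import kafka.javaapi.consumer.SimpleConsumer;
import kafka.message.MessageAndOffset;

public class SimpleConsumerSketch {
  public static void main(String[] args) {
    // Arguments: host, port, socket timeout (ms), receive buffer (bytes), clientId.
    SimpleConsumer consumer = new SimpleConsumer("localhost", 9092, 100000, 64 * 1024, "demo-client");
    try {
      FetchRequest request = new FetchRequestBuilder()
          .clientId("demo-client")
          .addFetch("test-topic", 0, 0L, 100000) // topic, partition, start offset, max bytes
          .build();
      FetchResponse response = consumer.fetch(request);
      if (response.hasError()) {
        throw new IllegalStateException("Fetch failed, error code: " + response.errorCode("test-topic", 0));
      }
      // The message set is iterable; each entry carries the message and its offset.
      for (MessageAndOffset messageAndOffset : response.messageSet("test-topic", 0)) {
        ByteBuffer payload = messageAndOffset.message().payload();
        byte[] bytes = new byte[payload.limit()];
        payload.get(bytes);
        System.out.println(messageAndOffset.offset() + ": " + new String(bytes, StandardCharsets.UTF_8));
      }
    } finally {
      consumer.close();
    }
  }
}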


  /**
   * Creates a removal listener that closes the evicted SimpleConsumer,
   * releasing its socket when a broker entry is dropped from the cache.
   */
  private RemovalListener<BrokerInfo, SimpleConsumer> createRemovalListener() {
    return new RemovalListener<BrokerInfo, SimpleConsumer>() {
      @Override
      public void onRemoval(RemovalNotification<BrokerInfo, SimpleConsumer> notification) {
        SimpleConsumer consumer = notification.getValue();
        if (consumer != null) {
          consumer.close();
        }
      }
    };
  }
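A minimal sketch of how such a listener is typically wired into a Guava LoadingCache. The BrokerInfo accessors (getHost(), getPort()) and the timeout, buffer, and expiry values here are assumptions for illustration:

import java.util.concurrent.TimeUnit;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;

  LoadingCache<BrokerInfo, SimpleConsumer> consumers = CacheBuilder.newBuilder()
      .expireAfterAccess(60, TimeUnit.SECONDS)   // evict consumers idle for a minute...
      .removalListener(createRemovalListener())  // ...and close them via the listener above
      .build(new CacheLoader<BrokerInfo, SimpleConsumer>() {
        @Override
        public SimpleConsumer load(BrokerInfo broker) {
          // getHost()/getPort() are assumed accessors on BrokerInfo.
          return new SimpleConsumer(broker.getHost(), broker.getPort(),
                                    100000, 64 * 1024, "consumer-cache");
        }
      });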


  /**
   * Fetches the last offset before the given timestamp for a topic partition.
   *
   * @return The last offset before the given timestamp, or {@code 0} if the lookup fails.
   */
  private long getLastOffset(TopicPartition topicPart, long timestamp) {
    BrokerInfo brokerInfo = brokerService.getLeader(topicPart.getTopic(), topicPart.getPartition());
    SimpleConsumer consumer = brokerInfo == null ? null : consumers.getUnchecked(brokerInfo);

    // If no broker is available, treat the lookup as a failed attempt.
    if (consumer == null) {
      LOG.warn("Failed to talk to any broker. Default offset to 0 for {}", topicPart);
      return 0L;
    }

    // Fire offset request
    OffsetRequest request = new OffsetRequest(ImmutableMap.of(
      new TopicAndPartition(topicPart.getTopic(), topicPart.getPartition()),
      new PartitionOffsetRequestInfo(timestamp, 1)
    ), kafka.api.OffsetRequest.CurrentVersion(), consumer.clientId());

    OffsetResponse response = consumer.getOffsetsBefore(request);

    // Retrieve offsets from response
    long[] offsets = response.hasError() ? null : response.offsets(topicPart.getTopic(), topicPart.getPartition());
    if (offsets == null || offsets.length <= 0) {
      short errorCode = response.errorCode(topicPart.getTopic(), topicPart.getPartition());
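The timestamp argument also accepts the two sentinels defined by kafka.api.OffsetRequest, which is exactly what the negative-offset handling in the next fragment relies on. Hypothetical call sites:

  long earliest = getLastOffset(topicPart, kafka.api.OffsetRequest.EarliestTime()); // -2L
  long latest   = getLastOffset(topicPart, kafka.api.OffsetRequest.LatestTime());   // -1L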

        long off = offset.get();
        if (off < 0) {
          offset.set(getLastOffset(topicPart, off));
        }

        SimpleConsumer consumer = consumerEntry.getValue();

        // Fire a fetch message request
        try {
          FetchResponse response = fetchMessages(consumer, offset.get());
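The fetchMessages(...) helper is not part of this fragment. A plausible reconstruction, assuming a FETCH_SIZE constant and the topicPart variable from the surrounding context:

  private FetchResponse fetchMessages(SimpleConsumer consumer, long offset) {
    // Build a single-partition fetch starting at the resolved offset.
    FetchRequest request = new FetchRequestBuilder()
      .clientId(consumer.clientId())
      .addFetch(topicPart.getTopic(), topicPart.getPartition(), offset, FETCH_SIZE)
      .build();
    return consumer.fetch(request);
  }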

                        .addFetch(split.getTopicName(), split.getPartitionId(), cursorOffset, KAFKA_READ_BUFFER_SIZE)
                        .build();

                // TODO: look at the actual node this is running on and prefer
                // the local replica if there is one - see NodeInfo.
                SimpleConsumer consumer = consumerManager.getConsumer(split.getNodes().get(0));

                FetchResponse fetchResponse = consumer.fetch(req);
                if (fetchResponse.hasError()) {
                    short errorCode = fetchResponse.errorCode(split.getTopicName(), split.getPartitionId());
                    log.warn("Fetch response has error: %d", errorCode);
                    throw new PrestoException(KafkaErrorCode.KAFKA_SPLIT_ERROR.toErrorCode(), "could not fetch data from Kafka, error code is '" + errorCode + "'");
                }
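Beyond logging the raw code, kafka.common.ErrorMapping can translate it into a typed exception. A sketch of a more forgiving handler (the offset-reset idea is illustrative, not what Presto does here):

  short errorCode = fetchResponse.errorCode(split.getTopicName(), split.getPartitionId());
  if (errorCode == ErrorMapping.OffsetOutOfRangeCode()) {
    // The requested offset is no longer on the log; the caller could reset
    // to the earliest available offset and retry instead of failing.
  } else if (errorCode != ErrorMapping.NoError()) {
    throw new RuntimeException(ErrorMapping.exceptionFor(errorCode));
  }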

  }
 
  public static void main(String[] args) throws Exception {
    generateData();
     
    SimpleConsumer simpleConsumer = new SimpleConsumer(KafkaProperties.kafkaServerURL,
                                                       KafkaProperties.kafkaServerPort,
                                                       KafkaProperties.connectionTimeOut,
                                                       KafkaProperties.kafkaProducerBufferSize,
                                                       KafkaProperties.clientId);

    System.out.println("Testing single fetch");
    FetchRequest req = new FetchRequestBuilder()
            .clientId(KafkaProperties.clientId)
            .addFetch(KafkaProperties.topic2, 0, 0L, 100)
            .build();
    FetchResponse fetchResponse = simpleConsumer.fetch(req);
    printMessages((ByteBufferMessageSet) fetchResponse.messageSet(KafkaProperties.topic2, 0));

    System.out.println("Testing single multi-fetch");
    Map<String, List<Integer>> topicMap = new HashMap<String, List<Integer>>() {{
        put(KafkaProperties.topic2, new ArrayList<Integer>(){{ add(0); }});
        put(KafkaProperties.topic3, new ArrayList<Integer>(){{ add(0); }});
    }};
    req = new FetchRequestBuilder()
            .clientId(KafkaProperties.clientId)
            .addFetch(KafkaProperties.topic2, 0, 0L, 100)
            .addFetch(KafkaProperties.topic3, 0, 0L, 100)
            .build();
    fetchResponse = simpleConsumer.fetch(req);
    int fetchReq = 0;
    for (Map.Entry<String, List<Integer>> entry : topicMap.entrySet()) {
      String topic = entry.getKey();
      for (Integer partition : entry.getValue()) {
        System.out.println("Response from fetch request no: " + ++fetchReq);
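printMessages(...) is also outside the snippet. One plausible implementation, decoding each payload as UTF-8:

  private static void printMessages(ByteBufferMessageSet messageSet) {
    for (MessageAndOffset messageAndOffset : messageSet) {
      // payload() returns a ByteBuffer positioned at the message body.
      ByteBuffer payload = messageAndOffset.message().payload();
      byte[] bytes = new byte[payload.limit()];
      payload.get(bytes);
      System.out.println(new String(bytes, StandardCharsets.UTF_8));
    }
  }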

        _input = input;
        _request = new KafkaETLRequest(input.trim());
       
        // create the consumer for the broker named in the request URI
        URI uri = _request.getURI();
        _consumer = new SimpleConsumer(uri.getHost(), uri.getPort(), _timeout, _bufferSize, "KafkaETLContext");
       
        // get available offset range
        _offsetRange = getOffsetRange();
        System.out.println("Connected to node " + uri
                + " beginning reading at offset " + _offsetRange[0]

 
  public static void main(String[] args) {
    generateData();
    SimpleConsumer simpleConsumer = new SimpleConsumer(KafkaProperties.kafkaServerURL,
                                                       KafkaProperties.kafkaServerPort,
                                                       KafkaProperties.connectionTimeOut,
                                                       KafkaProperties.kafkaProducerBufferSize);

    System.out.println("Testing single fetch");
    FetchRequest req = new FetchRequest(KafkaProperties.topic2, 0, 0L, 100);
    ByteBufferMessageSet messageSet = simpleConsumer.fetch(req);
    printMessages(messageSet);

    System.out.println("Testing single multi-fetch");
    req = new FetchRequest(KafkaProperties.topic2, 0, 0L, 100);
    List<FetchRequest> list = new ArrayList<FetchRequest>();
    list.add(req);
    req = new FetchRequest(KafkaProperties.topic3, 0, 0L, 100);
    list.add(req);
    MultiFetchResponse response = simpleConsumer.multifetch(list);
    int fetchReq = 0;
    for (ByteBufferMessageSet resMessageSet : response) {
      System.out.println("Response from fetch request no: " + ++fetchReq);
      printMessages(resMessageSet);

        if (leader == kafkaServer.getServer(0).config().brokerId()) {
            config = kafkaServer.getServer(0).config();
        } else {
            config = kafkaServer.getServer(1).config();
        }
        SimpleConsumer consumer = new SimpleConsumer(config.hostName(), config.port(), 100000, 100000, "clientId");
        FetchResponse response = consumer.fetch(new FetchRequestBuilder().addFetch(TOPIC_NAME, 0, 0, 100000).build());

        List<MessageAndOffset> messageSet = Lists.newArrayList(response.messageSet(TOPIC_NAME, 0).iterator());
        assertEquals("Should have fetched 2 messages", 2, messageSet.size());

        assertEquals(new String(extractMessage(messageSet, 0)), "testMessage" + 0);
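A plausible shape for the extractMessage(...) helper used in the assertion (hypothetical; the real implementation is not shown):

  private static byte[] extractMessage(List<MessageAndOffset> messageSet, int index) {
    ByteBuffer payload = messageSet.get(index).message().payload();
    byte[] bytes = new byte[payload.limit()];
    payload.get(bytes);
    return bytes;
  }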

            config = kafkaServer.getServer(0).config();
        } else {
            config = kafkaServer.getServer(1).config();
        }
        // get data back from Kafka
        SimpleConsumer consumer = new SimpleConsumer(config.hostName(), config.port(), 100000, 100000, "clientId");
        FetchResponse response = consumer.fetch(new FetchRequestBuilder().addFetch(TOPIC_NAME, 0, 0, 100000).build());

        List<MessageAndOffset> messageSet = Lists.newArrayList(response.messageSet(TOPIC_NAME, 0).iterator());
        assertEquals("Should have fetched 2 messages", 2, messageSet.size());

        for (int i = 0; i < messageSet.size(); i++) {

    /**
     * Returns the earliest offset available for the topic partition, querying
     * the broker on first use and caching the result.
     */
    @Override
    public long getEarliestOffset() {
        if (this.earliestOffset == -2 && uri != null) {
            // -2 is the sentinel for "not yet fetched from the broker".
            // TODO : Make the hardcoded parameters configurable
            SimpleConsumer consumer = new SimpleConsumer(uri.getHost(), uri.getPort(), 60000,
                    1024 * 1024, "hadoop-etl");
            Map<TopicAndPartition, PartitionOffsetRequestInfo> offsetInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
            offsetInfo.put(new TopicAndPartition(topic, partition), new PartitionOffsetRequestInfo(
                    kafka.api.OffsetRequest.EarliestTime(), 1));
            OffsetResponse response = consumer
                    .getOffsetsBefore(new OffsetRequest(offsetInfo, kafka.api.OffsetRequest
                            .CurrentVersion(), "hadoop-etl"));
            long[] offsets = response.offsets(topic, partition);
            consumer.close();
            this.earliestOffset = offsets[0];
            return offsets[0];
        } else {
            return this.earliestOffset;
        }
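A symmetric helper for the newest offset only needs the other sentinel. A minimal sketch reusing this snippet's connection parameters (illustrative, not part of the original class):

    public long getLatestOffset() {
        SimpleConsumer consumer = new SimpleConsumer(uri.getHost(), uri.getPort(), 60000,
                1024 * 1024, "hadoop-etl");
        Map<TopicAndPartition, PartitionOffsetRequestInfo> offsetInfo =
                new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
        offsetInfo.put(new TopicAndPartition(topic, partition),
                new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), 1));
        OffsetResponse response = consumer.getOffsetsBefore(new OffsetRequest(
                offsetInfo, kafka.api.OffsetRequest.CurrentVersion(), "hadoop-etl"));
        long[] offsets = response.offsets(topic, partition);
        consumer.close();
        return offsets[0];
    }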
