Package kafka.javaapi.consumer

Examples of kafka.javaapi.consumer.SimpleConsumer


    /* (non-Javadoc)
     * @see com.linkedin.camus.etl.kafka.common.CamusRequest#getLastOffset(long)
     */
    @Override
    public long getLastOffset(long time) {
        SimpleConsumer consumer = new SimpleConsumer(uri.getHost(), uri.getPort(), 60000,
                1024 * 1024, "hadoop-etl");
        Map<TopicAndPartition, PartitionOffsetRequestInfo> offsetInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
        offsetInfo.put(new TopicAndPartition(topic, partition), new PartitionOffsetRequestInfo(
                time, 1));
        OffsetResponse response = consumer.getOffsetsBefore(new OffsetRequest(offsetInfo,
                kafka.api.OffsetRequest.CurrentVersion(), "hadoop-etl"));
        long[] endOffset = response.offsets(topic, partition);
        consumer.close();
        if (endOffset.length == 0) {
            // Indexing endOffset[0] below would throw ArrayIndexOutOfBoundsException;
            // fail fast with a descriptive message instead.
            throw new RuntimeException("The latest offset request returned no results for topic: "
                    + topic + " and partition " + partition);
        }
        this.latestOffset = endOffset[0];
        return this.latestOffset;
    }
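The example above assumes the offset request succeeds. In the 0.8.x javaapi an OffsetResponse reports broker problems through per-partition error codes rather than exceptions, so a more defensive variant checks hasError() first. A minimal self-contained sketch (the OffsetFetcher class name and the fail-fast policy are choices made here, not part of the original example; the Kafka calls are standard javaapi):

    import java.util.HashMap;
    import java.util.Map;

    import kafka.api.PartitionOffsetRequestInfo;
    import kafka.common.ErrorMapping;
    import kafka.common.TopicAndPartition;
    import kafka.javaapi.OffsetRequest;
    import kafka.javaapi.OffsetResponse;
    import kafka.javaapi.consumer.SimpleConsumer;

    public final class OffsetFetcher {

        // Returns the single offset closest to the given time (e.g.
        // kafka.api.OffsetRequest.LatestTime()), failing fast on broker errors.
        public static long fetchOffsetBefore(SimpleConsumer consumer, String topic,
                                             int partition, long time, String clientId) {
            TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
            Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo =
                    new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
            requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(time, 1));

            OffsetResponse response = consumer.getOffsetsBefore(new OffsetRequest(
                    requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientId));

            if (response.hasError()) {
                // Translate the broker's error code into the matching exception type.
                throw new RuntimeException("Offset request failed for " + topicAndPartition,
                        ErrorMapping.exceptionFor(response.errorCode(topic, partition)));
            }
            long[] offsets = response.offsets(topic, partition);
            if (offsets.length == 0) {
                throw new RuntimeException("No offsets returned for " + topicAndPartition);
            }
            return offsets[0];
        }
    }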


    boolean fetchMetaDataSucceeded = false;
    int i = 0;
    List<TopicMetadata> topicMetadataList = null;
    Exception savedException = null;
    while (i < brokers.size() && !fetchMetaDataSucceeded) {
      SimpleConsumer consumer = createConsumer(context, brokers.get(i));
      log.info(String.format("Fetching metadata from broker %s with client id %s for %d topic(s) %s",
      brokers.get(i), consumer.clientId(), metaRequestTopics.size(), metaRequestTopics));
      try {
        topicMetadataList = consumer.send(new TopicMetadataRequest(metaRequestTopics)).topicsMetadata();
        fetchMetaDataSucceeded = true;
      } catch (Exception e) {
        savedException = e;
        log.warn(String.format("Fetching topic metadata with client id %s for topics [%s] from broker [%s] failed",
          consumer.clientId(), metaRequestTopics, brokers.get(i)), e);
      } finally {
        consumer.close();
        i++;
      }
    }
    if (!fetchMetaDataSucceeded) {
      throw new RuntimeException("Failed to obtain metadata!", savedException);
    }
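Note that a successful send() does not guarantee usable metadata: each TopicMetadata and PartitionMetadata in the response carries its own error code. A short follow-up sketch using only standard 0.8.x javaapi accessors (the helper itself is illustrative):

    import java.util.List;

    import kafka.common.ErrorMapping;
    import kafka.javaapi.PartitionMetadata;
    import kafka.javaapi.TopicMetadata;

    // Surface per-topic and per-partition error codes hidden inside an
    // otherwise successful TopicMetadataResponse.
    static void logMetadataErrors(List<TopicMetadata> topicMetadataList) {
        for (TopicMetadata topicMetadata : topicMetadataList) {
            if (topicMetadata.errorCode() != ErrorMapping.NoError()) {
                System.err.println("Topic " + topicMetadata.topic() + ": "
                        + ErrorMapping.exceptionFor(topicMetadata.errorCode()));
                continue;
            }
            for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
                if (partitionMetadata.errorCode() != ErrorMapping.NoError()) {
                    System.err.println("Partition " + partitionMetadata.partitionId() + ": "
                            + ErrorMapping.exceptionFor(partitionMetadata.errorCode()));
                }
            }
        }
    }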

  private SimpleConsumer createConsumer(JobContext context, String broker) {
    if (!broker.matches(".+:\\d+"))
      throw new InvalidParameterException("The kafka broker " + broker + " must follow the address:port pattern");
    String[] hostPort = broker.split(":");
    SimpleConsumer consumer = new SimpleConsumer(
      hostPort[0],
      Integer.valueOf(hostPort[1]),
      CamusJob.getKafkaTimeoutValue(context),
      CamusJob.getKafkaBufferSize(context),
      CamusJob.getKafkaClientName(context));
    return consumer;
  }

  public ArrayList<CamusRequest> fetchLatestOffsetAndCreateEtlRequests(
      JobContext context,
      HashMap<LeaderInfo, ArrayList<TopicAndPartition>> offsetRequestInfo) {
    ArrayList<CamusRequest> finalRequests = new ArrayList<CamusRequest>();
    for (LeaderInfo leader : offsetRequestInfo.keySet()) {
      SimpleConsumer consumer = new SimpleConsumer(leader.getUri()
          .getHost(), leader.getUri().getPort(),
          CamusJob.getKafkaTimeoutValue(context),
          CamusJob.getKafkaBufferSize(context),
          CamusJob.getKafkaClientName(context));
      // Latest Offset
      PartitionOffsetRequestInfo partitionLatestOffsetRequestInfo = new PartitionOffsetRequestInfo(
          kafka.api.OffsetRequest.LatestTime(), 1);
      // Earliest Offset
      PartitionOffsetRequestInfo partitionEarliestOffsetRequestInfo = new PartitionOffsetRequestInfo(
          kafka.api.OffsetRequest.EarliestTime(), 1);
      Map<TopicAndPartition, PartitionOffsetRequestInfo> latestOffsetInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
      Map<TopicAndPartition, PartitionOffsetRequestInfo> earliestOffsetInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
      ArrayList<TopicAndPartition> topicAndPartitions = offsetRequestInfo
          .get(leader);
      for (TopicAndPartition topicAndPartition : topicAndPartitions) {
        latestOffsetInfo.put(topicAndPartition,
            partitionLatestOffsetRequestInfo);
        earliestOffsetInfo.put(topicAndPartition,
            partitionEarliestOffsetRequestInfo);
      }

      OffsetResponse latestOffsetResponse = consumer
          .getOffsetsBefore(new OffsetRequest(latestOffsetInfo,
              kafka.api.OffsetRequest.CurrentVersion(), CamusJob
                  .getKafkaClientName(context)));
      OffsetResponse earliestOffsetResponse = consumer
          .getOffsetsBefore(new OffsetRequest(earliestOffsetInfo,
              kafka.api.OffsetRequest.CurrentVersion(), CamusJob
                  .getKafkaClientName(context)));
      consumer.close();
      for (TopicAndPartition topicAndPartition : topicAndPartitions) {
        long latestOffset = latestOffsetResponse.offsets(
            topicAndPartition.topic(),
            topicAndPartition.partition())[0];
        long earliestOffset = earliestOffsetResponse.offsets(
            topicAndPartition.topic(),
            topicAndPartition.partition())[0];
        // ... build a CamusRequest from earliestOffset/latestOffset and add it to finalRequests ...
      }
    }
    return finalRequests;
  }
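For reference, the same "earliest and latest in one round trip per leader" pattern collapsed into a standalone helper. The OffsetRange holder type is invented for this sketch; the Kafka calls mirror the example above:

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    import kafka.api.PartitionOffsetRequestInfo;
    import kafka.common.TopicAndPartition;
    import kafka.javaapi.OffsetRequest;
    import kafka.javaapi.OffsetResponse;
    import kafka.javaapi.consumer.SimpleConsumer;

    public final class OffsetRanges {

        // Simple holder for the two offsets; invented for this sketch.
        public static final class OffsetRange {
            public final long earliest;
            public final long latest;
            OffsetRange(long earliest, long latest) {
                this.earliest = earliest;
                this.latest = latest;
            }
        }

        public static Map<TopicAndPartition, OffsetRange> fetch(SimpleConsumer consumer,
                List<TopicAndPartition> partitions, String clientId) {
            Map<TopicAndPartition, PartitionOffsetRequestInfo> earliestInfo =
                    new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
            Map<TopicAndPartition, PartitionOffsetRequestInfo> latestInfo =
                    new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
            for (TopicAndPartition tp : partitions) {
                earliestInfo.put(tp, new PartitionOffsetRequestInfo(
                        kafka.api.OffsetRequest.EarliestTime(), 1));
                latestInfo.put(tp, new PartitionOffsetRequestInfo(
                        kafka.api.OffsetRequest.LatestTime(), 1));
            }
            OffsetResponse earliest = consumer.getOffsetsBefore(new OffsetRequest(
                    earliestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientId));
            OffsetResponse latest = consumer.getOffsetsBefore(new OffsetRequest(
                    latestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientId));

            Map<TopicAndPartition, OffsetRange> result =
                    new HashMap<TopicAndPartition, OffsetRange>();
            for (TopicAndPartition tp : partitions) {
                result.put(tp, new OffsetRange(
                        earliest.offsets(tp.topic(), tp.partition())[0],
                        latest.offsets(tp.topic(), tp.partition())[0]));
            }
            return result;
        }
    }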

    totalFetchTime = 0;

    // read data from queue

    URI uri = kafkaRequest.getURI();
    simpleConsumer = new SimpleConsumer(uri.getHost(), uri.getPort(),
        CamusJob.getKafkaTimeoutValue(context),
        CamusJob.getKafkaBufferSize(context),
        CamusJob.getKafkaClientName(context));
    log.info("Connected to leader " + uri
        + " beginning reading at offset " + beginOffset
View Full Code Here
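The snippet stops right after connecting; the actual read goes through a fetch request. A minimal sketch of that loop against the 0.8.x javaapi (FetchRequestBuilder, FetchResponse and MessageAndOffset are standard API; the method shape and parameter list are choices made for this sketch):

    import kafka.api.FetchRequest;
    import kafka.api.FetchRequestBuilder;
    import kafka.common.ErrorMapping;
    import kafka.javaapi.FetchResponse;
    import kafka.javaapi.consumer.SimpleConsumer;
    import kafka.message.MessageAndOffset;

    // Issue one fetch starting at beginOffset and return the next offset to read.
    static long fetchOnce(SimpleConsumer consumer, String topic, int partition,
                          long beginOffset, int fetchSize, String clientId) {
        FetchRequest request = new FetchRequestBuilder()
                .clientId(clientId)
                .addFetch(topic, partition, beginOffset, fetchSize)
                .build();
        FetchResponse response = consumer.fetch(request);
        if (response.hasError()) {
            throw new RuntimeException("Fetch failed",
                    ErrorMapping.exceptionFor(response.errorCode(topic, partition)));
        }
        long offset = beginOffset;
        for (MessageAndOffset messageAndOffset : response.messageSet(topic, partition)) {
            // Decode messageAndOffset.message() here, then advance.
            offset = messageAndOffset.nextOffset();
        }
        return offset;
    }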

        partition = this.split.getPartition();
        watermark = this.split.getWatermark();

        int timeout = conf.getInt("kafka.socket.timeout.ms", 30000);
        int bufferSize = conf.getInt("kafka.socket.buffersize", 64*1024);
        consumer = new SimpleConsumer(this.split.getBrokerHost(), this.split.getBrokerPort(), timeout, bufferSize);

        fetchSize = conf.getInt("kafka.fetch.size", 1024 * 1024);
        reset = conf.get("kafka.watermark.reset", "watermark");
        earliestOffset = getEarliestOffset();
        latestOffset = getLatestOffset();
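getEarliestOffset() and getLatestOffset() are not shown, but with the pre-0.8 four-argument constructor used above, both reduce to the same getOffsetsBefore(topic, partition, time, maxNumOffsets) call. A minimal sketch assuming the snippet's consumer and partition fields plus a getTopic() accessor on the split (that accessor is an assumption):

    // Both helpers assume the pre-0.8 javaapi, where getOffsetsBefore takes the
    // topic and partition directly and returns a long[].
    private long getEarliestOffset() {
        long[] offsets = consumer.getOffsetsBefore(this.split.getTopic(), partition,
                kafka.api.OffsetRequest.EarliestTime(), 1);  // earliest available offset
        return offsets.length > 0 ? offsets[0] : 0L;
    }

    private long getLatestOffset() {
        long[] offsets = consumer.getOffsetsBefore(this.split.getTopic(), partition,
                kafka.api.OffsetRequest.LatestTime(), 1);    // latest (log end) offset
        return offsets.length > 0 ? offsets[0] : 0L;
    }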


  private PartitionMetadata fetchPartitionMetadata() {
    for (KafkaHost broker : replicaBrokers) {
      SimpleConsumer consumer = new SimpleConsumer(broker.getHostname(), broker.getPort(), TIMEOUT_MS,
                                                   BUFFER_SIZE_BYTES, clientName);
      try {
        List<String> topics = ImmutableList.of(topic);
        TopicMetadataRequest req = new TopicMetadataRequest(topics);
        TopicMetadataResponse resp = consumer.send(req);

        List<TopicMetadata> topicMetadataList = resp.topicsMetadata();
        for (TopicMetadata item : topicMetadataList) {
          for (PartitionMetadata part : item.partitionsMetadata()) {
            if (part.partitionId() == partition) {
              return part;
            }
          }
        }
      } finally {
        consumer.close();
      }
    }
    return null;
  }

    if (metadata.leader() == null) {
      LOG.warn("Can't find leader for topic {} and partition {} with brokers {}.",
               topic, partition, replicaBrokers, ErrorMapping.exceptionFor(metadata.errorCode()));
      throw new RuntimeException(ErrorMapping.exceptionFor(metadata.errorCode()));
    }
    consumer = new SimpleConsumer(metadata.leader().host(), metadata.leader().port(), TIMEOUT_MS, BUFFER_SIZE_BYTES,
                                  clientName);
    saveReplicaBrokers(metadata);
  }
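saveReplicaBrokers(metadata) is not shown. A plausible reconstruction (a sketch, not the project's actual code; the KafkaHost constructor taking host and port is assumed) records every replica so the next fetchPartitionMetadata() pass can try each of them:

    import kafka.cluster.Broker;
    import kafka.javaapi.PartitionMetadata;

    // Remember all replicas of the partition for future leader (re)discovery.
    private void saveReplicaBrokers(PartitionMetadata metadata) {
        replicaBrokers.clear();
        for (Broker replica : metadata.replicas()) {
            replicaBrokers.add(new KafkaHost(replica.host(), replica.port()));
        }
    }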

            partitionsPerHost = hosts.partitionsPerHost;
        }

        public Map emitPartitionBatchNew(TransactionAttempt attempt, TridentCollector collector, int partition, Map lastMeta) {
            SimpleConsumer consumer = _connections.getConsumer(partition);

            return KafkaUtils.emitPartitionBatchNew(_config, partition, consumer, attempt, collector, lastMeta, _topologyInstanceId);
        }

      
        public void emitPartitionBatch(TransactionAttempt attempt, TridentCollector collector, int partition, Map meta) {
            String instanceId = (String) meta.get("instanceId");
            if(!_config.forceFromStart || instanceId.equals(_topologyInstanceId)) {
                SimpleConsumer consumer = _connections.getConsumer(partition);
                long offset = (Long) meta.get("offset");
                long nextOffset = (Long) meta.get("nextOffset");
                ByteBufferMessageSet msgs = consumer.fetch(new FetchRequest(_config.topic, partition % partitionsPerHost, offset, _config.fetchSizeBytes));
                for(MessageAndOffset msg: msgs) {
                    if(offset == nextOffset) break;
                    if(offset > nextOffset) {
                        throw new RuntimeException("Error when re-emitting batch: overshot the end offset");
                    }
                    // ... emit the message and advance offset toward nextOffset ...
                }
            }
        }
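Inside that loop, each MessageAndOffset wraps a Message whose payload is a ByteBuffer; extracting the raw bytes before emitting looks like this (a minimal sketch; what the spout then emits depends on its scheme):

    import java.nio.ByteBuffer;

    import kafka.message.Message;
    import kafka.message.MessageAndOffset;

    // Copy a message payload out of its ByteBuffer into a plain byte[].
    static byte[] payloadBytes(MessageAndOffset messageAndOffset) {
        Message message = messageAndOffset.message();
        ByteBuffer payload = message.payload();
        byte[] bytes = new byte[payload.remaining()];
        payload.get(bytes);
        return bytes;
    }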
