@Override protected void closeConnections() throws Exception { if (consumer != null) { SimpleConsumer consumer = this.consumer; consumer.close(); // de-reference the consumer to avoid closing multiple times this.consumer = null; } }
/** Closes the underlying consumer, if one was ever created. */
public void close() {
    if (consumer == null) {
        return;
    }
    consumer.close();
}
/**
 * Best-effort shutdown hook: closes every cached consumer, logging (rather
 * than propagating) failures so the remaining consumers still get closed.
 */
@PreDestroy
public void tearDown() {
    for (Map.Entry<HostAddress, SimpleConsumer> cached : consumerCache.asMap().entrySet()) {
        try {
            cached.getValue().close();
        } catch (Exception e) {
            log.warn(e, "While closing consumer %s:", cached.getKey());
        }
    }
}
/** * Cancels this fetch thread. The thread will release all resources and terminate. */ public void cancel() { this.running = false; // interrupt whatever the consumer is doing if (consumer != null) { consumer.close(); } this.interrupt(); }
/**
 * Stops and discards the current consumer. The field is nulled in a finally
 * block so the reference is dropped even when close() fails.
 */
private void stopConsumer() {
    if (consumer == null) {
        return;
    }
    try {
        consumer.close();
        log.info("stop consumer[%s][%s], leaderBroker[%s]", topic, partitionId, leaderBroker);
    } catch (Exception e) {
        log.warn(e, "stop consumer[%s][%s] failed", topic, partitionId);
    } finally {
        consumer = null;
    }
}
/**
 * Closes all active consumers. Close failures are logged and counted rather
 * than aborting the loop, so every consumer gets a close attempt; if any
 * failed, an IOException reporting the count is thrown at the end.
 *
 * @throws IOException if one or more consumers failed to close
 */
@Override
public void close() throws IOException {
    int numOfConsumersNotClosed = 0;
    for (SimpleConsumer consumer : this.activeConsumers.values()) {
        if (consumer != null) {
            try {
                consumer.close();
            } catch (Exception e) {
                // Fix: pass the exception to the logger so the failure cause
                // (previously dropped) is preserved in the log.
                log.warn(String.format("Failed to close Kafka Consumer %s:%d", consumer.host(), consumer.port()), e);
                numOfConsumersNotClosed++;
            }
        }
    }
    this.activeConsumers.clear();
    if (numOfConsumersNotClosed > 0) {
        throw new IOException(numOfConsumersNotClosed + " consumer(s) failed to close.");
    }
}
/**
 * Closes all active consumers. Close failures are logged and counted rather
 * than aborting the loop, so every consumer gets a close attempt; if any
 * failed, an IOException reporting the count is thrown at the end.
 *
 * @throws IOException if one or more consumers failed to close
 */
@Override
public void close() throws IOException {
    int numOfConsumersNotClosed = 0;
    for (SimpleConsumer consumer : this.activeConsumers.values()) {
        if (consumer != null) {
            try {
                consumer.close();
            } catch (Exception e) {
                // Fix: pass the exception to the logger so the failure cause
                // (previously dropped) is preserved in the log.
                LOG.warn(String.format("Failed to close Kafka Consumer %s:%d", consumer.host(), consumer.port()), e);
                numOfConsumersNotClosed++;
            }
        }
    }
    this.activeConsumers.clear();
    if (numOfConsumersNotClosed > 0) {
        throw new IOException(numOfConsumersNotClosed + " consumer(s) failed to close.");
    }
}
}
/**
 * Fetches metadata for the given topics from one broker, retrying up to
 * fetchTopicRetries times with a randomized linear backoff between attempts.
 *
 * @param broker         broker to query (passed to getSimpleConsumer)
 * @param selectedTopics topics to request metadata for
 * @return the broker's topic metadata, or null if every attempt failed
 */
private List<TopicMetadata> fetchTopicMetadataFromBroker(String broker, String... selectedTopics) {
    log.info(String.format("Fetching topic metadata from broker %s", broker));
    SimpleConsumer consumer = null;
    try {
        consumer = getSimpleConsumer(broker);
        for (int i = 0; i < this.fetchTopicRetries; i++) {
            try {
                return consumer.send(new TopicMetadataRequest(Arrays.asList(selectedTopics))).topicsMetadata();
            } catch (Exception e) {
                log.warn(String.format("Fetching topic metadata from broker %s has failed %d times.", broker, i + 1), e);
                try {
                    // Randomized linear backoff before the next retry.
                    Thread.sleep((long) ((i + Math.random()) * 1000));
                } catch (InterruptedException e2) {
                    log.warn("Caught InterruptedException: " + e2);
                    // Fix: restore the interrupt flag (previously swallowed)
                    // so callers can observe the interruption.
                    Thread.currentThread().interrupt();
                }
            }
        }
    } finally {
        // Always release the consumer, whether we succeeded or ran out of retries.
        if (consumer != null) {
            consumer.close();
        }
    }
    return null;
}
@Override public void process() { // Connect to a random bootstrap node if (_simpleConsumer != null) { try { _simpleConsumer.close(); } catch (Exception e) { LOGGER.warn("Caught exception while closing consumer for topic {}, ignoring", _topic, e); } } int randomHostIndex = _random.nextInt(_bootstrapHosts.length); _currentHost = _bootstrapHosts[randomHostIndex]; _currentPort = _bootstrapPorts[randomHostIndex]; try { LOGGER.info("Connecting to bootstrap host {}:{} for topic {}", _currentHost, _currentPort, _topic); _simpleConsumer = _simpleConsumerFactory .buildSimpleConsumer(_currentHost, _currentPort, _socketTimeout, _bufferSize, _clientId); setCurrentState(new ConnectedToBootstrapNode()); } catch (Exception e) { handleConsumerException(e); } }
/**
 * Fetches metadata for the given topics from one broker, retrying up to
 * fetchTopicRetries times with a randomized linear backoff between attempts.
 *
 * @param broker         broker to query (passed to getSimpleConsumer)
 * @param selectedTopics topics to request metadata for
 * @return the broker's topic metadata, or null if every attempt failed
 */
private List<TopicMetadata> fetchTopicMetadataFromBroker(String broker, String... selectedTopics) {
    LOG.info(String.format("Fetching topic metadata from broker %s", broker));
    SimpleConsumer consumer = null;
    try {
        consumer = getSimpleConsumer(broker);
        for (int i = 0; i < this.fetchTopicRetries; i++) {
            try {
                return consumer.send(new TopicMetadataRequest(Arrays.asList(selectedTopics))).topicsMetadata();
            } catch (Exception e) {
                LOG.warn(String.format("Fetching topic metadata from broker %s has failed %d times.", broker, i + 1), e);
                try {
                    // Randomized linear backoff before the next retry.
                    Thread.sleep((long) ((i + Math.random()) * 1000));
                } catch (InterruptedException e2) {
                    LOG.warn("Caught InterruptedException: " + e2);
                    // Fix: restore the interrupt flag (previously swallowed)
                    // so callers can observe the interruption.
                    Thread.currentThread().interrupt();
                }
            }
        }
    } finally {
        // Always release the consumer, whether we succeeded or ran out of retries.
        if (consumer != null) {
            consumer.close();
        }
    }
    return null;
}
/**
 * Resets the state machine back to "connecting to bootstrap node" and closes
 * the current consumer if we were actually connected to a broker.
 *
 * The need-to-close decision is captured BEFORE setCurrentState() runs,
 * because resetting the state would change isConnectedToKafkaBroker().
 *
 * NOTE(review): a consumer created but not yet marked connected is not closed
 * here — presumably that cannot happen in this state machine; confirm.
 */
void close() throws IOException { boolean needToCloseConsumer = _currentState.isConnectedToKafkaBroker() && _simpleConsumer != null; // Reset the state machine
setCurrentState(new ConnectingToBootstrapNode()); // Close the consumer if needed
if (needToCloseConsumer) { _simpleConsumer.close(); _simpleConsumer = null; } } }
/**
 * Fetches the message stored at the partition's most recent offset.
 *
 * @return the last message, or null when no consumer could be created or the
 *         partition holds no messages (last offset below 1)
 * @throws TException on transport failures from the underlying calls
 */
@Override
public Message getLastMessage(TopicPartition topicPartition) throws TException {
    SimpleConsumer consumer = createConsumer(topicPartition);
    if (consumer == null) {
        return null;
    }
    try {
        long lastOffset = findLastOffset(topicPartition, consumer);
        if (lastOffset < 1) {
            return null;
        }
        return getMessage(topicPartition, lastOffset, consumer);
    } finally {
        consumer.close();
    }
}
/**
 * Returns the number of partitions for a topic by querying the seed broker's
 * topic metadata.
 *
 * @throws RuntimeException when the broker returns metadata for a number of
 *         topics other than exactly one
 */
@Override
public int getNumPartitions(String topic) {
    SimpleConsumer consumer = null;
    try {
        consumer = createConsumer(
            mConfig.getKafkaSeedBrokerHost(),
            mConfig.getKafkaSeedBrokerPort(),
            "partitionLookup");
        List<String> topics = new ArrayList<String>();
        topics.add(topic);
        TopicMetadataResponse response = consumer.send(new TopicMetadataRequest(topics));
        int metadataCount = response.topicsMetadata().size();
        if (metadataCount != 1) {
            throw new RuntimeException("Expected one metadata for topic " + topic + " found " + metadataCount);
        }
        return response.topicsMetadata().get(0).partitionsMetadata().size();
    } finally {
        if (consumer != null) {
            consumer.close();
        }
    }
}
/**
 * Fetches the message at the last committed offset (committed count - 1) for
 * the given partition, as recorded in ZooKeeper.
 *
 * @return the committed message, or null when nothing has been committed yet,
 *         no consumer could be created, or the message has been compacted away
 */
@Override public Message getCommittedMessage(TopicPartition topicPartition) throws Exception { SimpleConsumer consumer = null; try { long committedOffset = mZookeeperConnector.getCommittedOffsetCount(topicPartition) - 1; if (committedOffset < 0) { return null; } consumer = createConsumer(topicPartition); if (consumer == null) { return null; } return getMessage(topicPartition, committedOffset, consumer); } catch (MessageDoesNotExistException e) { // If a MessageDoesNotExistException exception is raised,
// the message at the last committed offset does not exist in Kafka.
// This is usually due to the message being compacted away by the
// Kafka log compaction process.
//
// That is not an exceptional situation - in fact it can be normal if
// the topic being consumed by Secor has a low volume. So in that
// case, simply return null
LOG.warn("no committed message for topic {} partition {}", topicPartition.getTopic(), topicPartition.getPartition()); return null; } finally { if (consumer != null) { consumer.close(); } } }
@Override void process() { // If we're already connected to the leader broker, don't disconnect and reconnect LOGGER.info("Trying to fetch leader host and port: {}:{} for topic {}", _leader.host(), _leader.port(), _topic); if (_leader.host().equals(_currentHost) && _leader.port() == _currentPort) { setCurrentState(new ConnectedToPartitionLeader()); return; } // Disconnect from current broker if (_simpleConsumer != null) { try { _simpleConsumer.close(); _simpleConsumer = null; } catch (Exception e) { handleConsumerException(e); return; } } // Connect to the partition leader try { _simpleConsumer = _simpleConsumerFactory .buildSimpleConsumer(_leader.host(), _leader.port(), _socketTimeout, _bufferSize, _clientId); setCurrentState(new ConnectedToPartitionLeader()); } catch (Exception e) { handleConsumerException(e); } }
consumer.close();
/**
 * Looks up the leader broker for a topic partition via the seed broker's
 * topic metadata.
 *
 * @return the leader's host and port, or null when no matching partition was
 *         found in the returned metadata
 */
private HostAndPort findLeader(TopicPartition topicPartition) {
    SimpleConsumer consumer = null;
    try {
        LOG.debug("looking up leader for topic {} partition {}",
            topicPartition.getTopic(), topicPartition.getPartition());
        consumer = createConsumer(
            mConfig.getKafkaSeedBrokerHost(),
            mConfig.getKafkaSeedBrokerPort(),
            "leaderLookup");
        List<String> topics = new ArrayList<String>();
        topics.add(topicPartition.getTopic());
        TopicMetadataResponse response = consumer.send(new TopicMetadataRequest(topics));
        for (TopicMetadata metadata : response.topicsMetadata()) {
            for (PartitionMetadata partition : metadata.partitionsMetadata()) {
                if (partition.partitionId() == topicPartition.getPartition()) {
                    return HostAndPort.fromParts(partition.leader().host(), partition.leader().port());
                }
            }
        }
        return null;
    } finally {
        if (consumer != null) {
            consumer.close();
        }
    }
}
FetchResponse response = consumer.fetch(request); if (response.hasError()) { consumer.close(); int errorCode = response.errorCode(topicPartition.getTopic(), topicPartition.getPartition());