@Override public void start() { logger.info("Starting Kafka Channel: {}", getName()); // As a migration step check if there are any offsets from the group stored in kafka // If not read them from Zookeeper and commit them to Kafka if (migrateZookeeperOffsets && zookeeperConnect != null && !zookeeperConnect.isEmpty()) { migrateOffsets(); } producer = new KafkaProducer<String, byte[]>(producerProps); // We always have just one topic being read by one thread logger.info("Topic = {}", topic.get()); counter.start(); super.start(); }
/**
 * Stops the channel: decommissions every consumer (best-effort, logging
 * failures), closes the producer, and stops the counter.
 */
@Override
public void stop() {
  // Shut down each consumer independently so one failure does not prevent
  // the rest from being decommissioned.
  for (ConsumerAndRecords consumerAndRecords : consumers) {
    try {
      decommissionConsumerAndRecords(consumerAndRecords);
    } catch (Exception ex) {
      logger.warn("Error while shutting down consumer.", ex);
    }
  }
  producer.close();
  counter.stop();
  super.stop();
  logger.info("Kafka channel {} stopped.", getName());
}
/**
 * Returns the {@code Transaction} for the calling thread, lazily
 * initializing the channel on first use.
 * <p>
 * Initialization uses double-checked locking on {@code initialized}.
 * NOTE(review): this pattern is only safe if {@code initialized} is declared
 * {@code volatile} — confirm at the field declaration, which is outside this
 * view. A new transaction is created via {@code createTransaction()} when the
 * thread has none or its previous transaction is CLOSED.
 *
 * @return the current {@code Transaction} object for the calling thread
 */
@Override
public Transaction getTransaction() {
  if (!initialized) {
    synchronized (this) {
      if (!initialized) {
        initialize();
        initialized = true;
      }
    }
  }
  BasicTransactionSemantics transaction = currentTransaction.get();
  // Reuse the thread's open transaction; create a fresh one if none exists
  // or the previous one has already been closed.
  if (transaction == null || transaction.getState().equals(
      BasicTransactionSemantics.State.CLOSED)) {
    transaction = createTransaction();
    currentTransaction.set(transaction);
  }
  return transaction;
}
}
/**
 * Sets the channel name and pre-computes the bracketed descriptor used in
 * this channel's log and error messages.
 */
@Override
public synchronized void setName(String name) {
  final String descriptor = "[channel=" + name + "]";
  channelNameDescriptor = descriptor;
  super.setName(name);
}
/**
 * Sets the channel name and caches the "[channel=...]" descriptor that the
 * channel embeds in its diagnostic messages.
 */
@Override
public synchronized void setName(String name) {
  final String descriptor = "[channel=" + name + "]";
  channelNameDescriptor = descriptor;
  super.setName(name);
}
/**
 * Starts the channel: starts the counter and publishes the queue's current
 * size and total capacity (current size + remaining capacity).
 */
@Override
public synchronized void start() {
  channelCounter.start();
  channelCounter.setChannelSize(queue.size());
  // Compute capacity in long arithmetic: avoids the needless boxing of
  // Long.valueOf(int + int) and prevents int overflow for very large queues.
  channelCounter.setChannelCapacity(
      (long) queue.size() + (long) queue.remainingCapacity());
  super.start();
}
/**
 * Stops the channel, publishing the final queue depth to the counter
 * before stopping it.
 */
@Override
public synchronized void stop() {
  final int finalSize = queue.size();
  channelCounter.setChannelSize(finalSize);
  channelCounter.stop();
  super.stop();
}
/**
 * Returns the {@code Transaction} for the calling thread, lazily
 * initializing the channel on first use.
 * <p>
 * Initialization uses double-checked locking on {@code initialized}.
 * NOTE(review): this pattern is only safe if {@code initialized} is declared
 * {@code volatile} — confirm at the field declaration, which is outside this
 * view. A new transaction is created via {@code createTransaction()} when the
 * thread has none or its previous transaction is CLOSED.
 *
 * @return the current {@code Transaction} object for the calling thread
 */
@Override
public Transaction getTransaction() {
  if (!initialized) {
    synchronized (this) {
      if (!initialized) {
        initialize();
        initialized = true;
      }
    }
  }
  BasicTransactionSemantics transaction = currentTransaction.get();
  // Reuse the thread's open transaction; create a fresh one if none exists
  // or the previous one has already been closed.
  if (transaction == null || transaction.getState().equals(
      BasicTransactionSemantics.State.CLOSED)) {
    transaction = createTransaction();
    currentTransaction.set(transaction);
  }
  return transaction;
}
}
/**
 * Starts the file channel: builds and replays the write-ahead log to
 * rebuild the in-memory queue, then marks the channel open and publishes
 * size/capacity metrics. On failure the channel is left closed and marked
 * unhealthy, with the cause retained in {@code startupError}.
 */
@Override
public synchronized void start() {
  LOG.info("Starting {}...", this);
  channelCounter.start();
  try {
    Builder builder = createLogBuilder();
    log = builder.build();
    // Replay the log so the queue reflects all previously committed events.
    log.replay();
    setOpen(true);
    int depth = getDepth();
    // Reserve permits for the events already in the queue; this should
    // always succeed since nothing else has drawn on queueRemaining yet.
    Preconditions.checkState(queueRemaining.tryAcquire(depth),
        "Unable to acquire " + depth + " permits " + channelNameDescriptor);
    LOG.info("Queue Size after replay: " + depth + " " + channelNameDescriptor);
  } catch (Throwable t) {
    // Leave the channel closed and record the failure; ordinary exceptions
    // are not propagated here (startupError surfaces later), Errors are.
    setOpen(false);
    channelCounter.setUnhealthy(1);
    startupError = t;
    LOG.error("Failed to start the file channel " + channelNameDescriptor, t);
    if (t instanceof Error) {
      throw (Error) t;
    }
  }
  if (open) {
    channelCounter.setChannelSize(getDepth());
    channelCounter.setChannelCapacity(capacity);
  }
  super.start();
}
/**
 * Stops the file channel: closes the log and, if the channel ended up
 * closed, publishes the final depth and stops the counter.
 */
@Override
public synchronized void stop() {
  LOG.info("Stopping {}...", this);
  startupError = null;
  // Capture the depth before close() tears the log down.
  final int depthAtStop = getDepth();
  close();
  if (!open) {
    channelCounter.setChannelSize(depthAtStop);
    channelCounter.stop();
  }
  super.stop();
}
@Override public void start() { logger.info("Starting Kafka Channel: {}", getName()); // As a migration step check if there are any offsets from the group stored in kafka // If not read them from Zookeeper and commit them to Kafka if (migrateZookeeperOffsets && zookeeperConnect != null && !zookeeperConnect.isEmpty()) { migrateOffsets(); } producer = new KafkaProducer<String, byte[]>(producerProps); // We always have just one topic being read by one thread logger.info("Topic = {}", topic.get()); counter.start(); super.start(); }
/**
 * Stops the channel: decommissions every consumer (best-effort, logging
 * failures), closes the producer, and stops the counter.
 */
@Override
public void stop() {
  // Decommission each consumer in isolation so a single failure does not
  // abort the shutdown of the remaining ones.
  for (ConsumerAndRecords consumerAndRecords : consumers) {
    try {
      decommissionConsumerAndRecords(consumerAndRecords);
    } catch (Exception ex) {
      logger.warn("Error while shutting down consumer.", ex);
    }
  }
  producer.close();
  counter.stop();
  super.stop();
  logger.info("Kafka channel {} stopped.", getName());
}
@Override public void start() { try { LOGGER.info("Starting Kafka Channel: " + getName()); producer = new Producer<String, byte[]>(new ProducerConfig(kafkaConf)); // We always have just one topic being read by one thread LOGGER.info("Topic = " + topic.get()); topicCountMap.put(topic.get(), 1); counter.start(); super.start(); } catch (Exception e) { LOGGER.error("Could not start producer"); throw new FlumeException("Unable to create Kafka Connections. " + "Check whether Kafka Brokers are up and that the " + "Flume agent can connect to it.", e); } }
/**
 * Stops the file channel: closes the log and, when the channel is no
 * longer open, publishes the final depth and stops the counter.
 */
@Override
public synchronized void stop() {
  LOG.info("Stopping {}...", this);
  startupError = null;
  // Read the depth before close() makes it unavailable.
  final int depthAtStop = getDepth();
  close();
  if (!open) {
    channelCounter.setChannelSize(depthAtStop);
    channelCounter.stop();
  }
  super.stop();
}
/**
 * Starts the channel: starts the counter and publishes the queue's current
 * size and total capacity (current size + remaining capacity).
 */
@Override
public synchronized void start() {
  channelCounter.start();
  channelCounter.setChannelSize(queue.size());
  // Compute capacity in long arithmetic: avoids the needless boxing of
  // Long.valueOf(int + int) and prevents int overflow for very large queues.
  channelCounter.setChannelCapacity(
      (long) queue.size() + (long) queue.remainingCapacity());
  super.start();
}
/**
 * Stops the channel: decommissions every consumer/iterator pair
 * (best-effort, logging failures), closes the producer, and stops the
 * counter, logging final metrics.
 */
@Override
public void stop() {
  // Shut down each consumer independently so one failure does not prevent
  // the rest from being decommissioned.
  for (ConsumerAndIterator consumerAndIterator : consumers) {
    try {
      decommissionConsumerAndIterator(consumerAndIterator);
    } catch (Exception ex) {
      LOGGER.warn("Error while shutting down consumer.", ex);
    }
  }
  producer.close();
  counter.stop();
  super.stop();
  LOGGER.info("Kafka channel {} stopped. Metrics: {}", getName(), counter);
}
/**
 * Starts the file channel: builds and replays the write-ahead log to
 * rebuild the in-memory queue, then marks the channel open and publishes
 * size/capacity metrics. On failure the channel is left closed and marked
 * unhealthy, with the cause retained in {@code startupError}.
 */
@Override
public synchronized void start() {
  LOG.info("Starting {}...", this);
  channelCounter.start();
  try {
    Builder builder = createLogBuilder();
    log = builder.build();
    // Replay the log so the queue reflects all previously committed events.
    log.replay();
    setOpen(true);
    int depth = getDepth();
    // Reserve permits for the events already in the queue; this should
    // always succeed since nothing else has drawn on queueRemaining yet.
    Preconditions.checkState(queueRemaining.tryAcquire(depth),
        "Unable to acquire " + depth + " permits " + channelNameDescriptor);
    LOG.info("Queue Size after replay: " + depth + " " + channelNameDescriptor);
  } catch (Throwable t) {
    // Leave the channel closed and record the failure; ordinary exceptions
    // are not propagated here (startupError surfaces later), Errors are.
    setOpen(false);
    channelCounter.setUnhealthy(1);
    startupError = t;
    LOG.error("Failed to start the file channel " + channelNameDescriptor, t);
    if (t instanceof Error) {
      throw (Error) t;
    }
  }
  if (open) {
    channelCounter.setChannelSize(getDepth());
    channelCounter.setChannelCapacity(capacity);
  }
  super.start();
}
/**
 * Stops the channel after publishing the final queue depth to the counter.
 */
@Override
public synchronized void stop() {
  final int finalSize = queue.size();
  channelCounter.setChannelSize(finalSize);
  channelCounter.stop();
  super.stop();
}