/**
 * Starts the sink: begins counter collection and transitions lifecycle state.
 */
@Override
public final void start() {
  LOG.info("Starting HttpSink");
  sinkCounter.start();
  // Fixed: the original never called super.start(), so the framework's
  // lifecycle state was never set to STARTED (every other sink in this
  // file makes this call).
  super.start();
}
@Override public synchronized void start() { this.lastRolledMillis = System.currentTimeMillis(); counter.start(); // signal that this sink is ready to process LOG.info("Started DatasetSink " + getName()); super.start(); }
/**
 * Creates the call-timeout and roll-timer thread pools, resets the writer
 * cache, and starts the sink.
 */
@Override
public void start() {
  // Pool that runs HDFS writer calls so they can be bounded by a timeout.
  String callRunnerFormat = "hdfs-" + getName() + "-call-runner-%d";
  callTimeoutPool = Executors.newFixedThreadPool(
      threadsPoolSize,
      new ThreadFactoryBuilder().setNameFormat(callRunnerFormat).build());

  // Pool that schedules time-based file rolls.
  String rollTimerFormat = "hdfs-" + getName() + "-roll-timer-%d";
  timedRollerPool = Executors.newScheduledThreadPool(
      rollTimerPoolSize,
      new ThreadFactoryBuilder().setNameFormat(rollTimerFormat).build());

  sfWriters = new WriterLinkedHashMap(maxOpenFiles);
  sinkCounter.start();
  super.start();
}
@Override public void start() { String timeoutName = "hive-" + getName() + "-call-runner-%d"; // call timeout pool needs only 1 thd as sink is effectively single threaded callTimeoutPool = Executors.newFixedThreadPool(1, new ThreadFactoryBuilder().setNameFormat(timeoutName).build()); this.allWriters = Maps.newHashMap(); sinkCounter.start(); super.start(); setupHeartBeatTimer(); LOG.info(getName() + ": Hive Sink {} started", getName() ); }
// NOTE(review): this method appears truncated in this chunk — no closing
// brace is visible after super.start(); confirm the remainder of the
// definition in the full file before editing.
@Override public void start() { logger.info("Starting {}...", this); sinkCounter.start(); super.start();
/**
 * Opens the HBase client connection and starts the sink. A previous client
 * instance must have been released via stop() before calling this again.
 */
@Override
public void start() {
  Preconditions.checkArgument(client == null,
      "Please call stop before calling start on an old instance.");
  sinkCounter.start();
  sinkCounter.incrementConnectionCreatedCount();
  client = initHBaseClient();
  super.start();
}
/**
 * Starts the sink: lazily instantiates and configures the MorphlineHandler
 * when none was injected, then transitions lifecycle state.
 */
@Override
public synchronized void start() {
  LOGGER.info("Starting Morphline Sink {} ...", this);
  sinkCounter.start();
  if (handler == null) {
    MorphlineHandler tmpHandler;
    try {
      // Fixed: Class.newInstance() is deprecated (it propagates undeclared
      // checked exceptions); getDeclaredConstructor().newInstance() is the
      // supported equivalent and its exceptions are caught below anyway.
      tmpHandler = (MorphlineHandler)
          Class.forName(handlerClass).getDeclaredConstructor().newInstance();
    } catch (Exception e) {
      throw new ConfigurationException(e);
    }
    tmpHandler.configure(context);
    handler = tmpHandler;
  }
  super.start();
  LOGGER.info("Morphline Sink {} started.", getName());
}
// Fragment: start-sequence statements from an enclosing start() whose
// signature is outside this chunk — starts the counter, flips the started
// flag, then delegates to the superclass lifecycle.
sinkCounter.start(); started = true; super.start();
/**
 * The start() of RpcSink is more of an optimization that allows connection
 * to be created before the process() loop is started. In case it so happens
 * that the start failed, the process() loop will itself attempt to reconnect
 * as necessary. This is the expected behavior since it is possible that the
 * downstream source becomes unavailable in the middle of the process loop
 * and the sink will have to retry the connection again.
 */
@Override
public void start() {
  logger.info("Starting {}...", this);
  sinkCounter.start();
  try {
    createConnection();
  } catch (FlumeException ex) {
    logger.warn("Unable to create Rpc client using hostname: " + hostname
        + ", port: " + port, ex);
    // Try to prevent leaking resources.
    destroyConnection();
  }
  super.start();
  logger.info("Rpc sink {} started.", getName());
}
/**
 * Starts a grid and initializes an event transformer.
 */
@SuppressWarnings("unchecked")
@Override
public synchronized void start() {
  A.notNull(springCfgPath, "Ignite config file");
  A.notNull(cacheName, "Cache name");
  A.notNull(eventTransformerCls, "Event transformer class");

  sinkCounter.start();

  try {
    if (ignite == null)
      ignite = Ignition.start(springCfgPath);

    if (eventTransformerCls != null && !eventTransformerCls.isEmpty()) {
      // Resolve the configured transformer class by name and instantiate it.
      Class<? extends EventTransformer> transformerCls =
          (Class<? extends EventTransformer<Event, Object, Object>>)Class.forName(eventTransformerCls);

      eventTransformer = transformerCls.newInstance();
    }
  }
  catch (Exception e) {
    log.error("Failed to start grid", e);

    sinkCounter.incrementConnectionFailedCount();

    throw new FlumeException("Failed to start grid", e);
  }

  sinkCounter.incrementConnectionCreatedCount();

  super.start();
}
/**
 * Creates and starts the RocketMQ producer, then transitions lifecycle state.
 *
 * @throws FlumeException if the producer fails to start
 */
@Override
public synchronized void start() {
  // Fixed: start the counter before attempting the connection so a failed
  // attempt increments a running counter; this also matches the other sinks
  // in this file, which all call sinkCounter.start() up front.
  sinkCounter.start();
  producer = new DefaultMQProducer(producerGroup);
  producer.setNamesrvAddr(nameServer);
  try {
    producer.start();
  } catch (MQClientException e) {
    sinkCounter.incrementConnectionFailedCount();
    log.error("RocketMQ producer start failed", e);
    throw new FlumeException("Failed to start RocketMQ producer", e);
  }
  sinkCounter.incrementConnectionCreatedCount();
  super.start();
}
/**
 * Builds the ElasticSearch client (local or remote) and starts the sink.
 * Connection failures are counted and logged but not rethrown; the process()
 * loop is left to deal with an unusable client.
 */
@Override
public void start() {
  ElasticSearchClientFactory clientFactory = new ElasticSearchClientFactory();
  sinkCounter.start();
  try {
    if (isLocal) {
      client = clientFactory.getLocalClient(
          clientType, eventSerializer, indexRequestFactory);
    } else {
      client = clientFactory.getClient(clientType, serverAddresses,
          clusterName, eventSerializer, indexRequestFactory);
      client.configure(elasticSearchClientContext);
    }
    sinkCounter.incrementConnectionCreatedCount();
  } catch (Exception ex) {
    // Fixed: was ex.printStackTrace() — route the failure through the sink's
    // logger so it appears in the normal log stream with a stack trace.
    logger.error("Failed to create ElasticSearch client", ex);
    sinkCounter.incrementConnectionFailedCount();
    if (client != null) {
      client.close();
      sinkCounter.incrementConnectionClosedCount();
    }
  }
  super.start();
  // Fixed: the original logged "{}" with no argument, and logged "started"
  // before start had actually run.
  logger.info("ElasticSearch sink {} started", getName());
}
// Fragment: counter start from an enclosing start() not visible in this chunk.
sinkCounter.start();
// Fragment: counter start from an enclosing start() not visible in this chunk.
sinkCounter.start();
/**
 * Initializes the serializer and starts the sink; initialization failures
 * are counted, logged, and rethrown.
 */
@Override
public void start() {
  logger.info("Starting sink {} ", this.getName());
  sinkCounter.start();
  try {
    serializer.initialize();
    sinkCounter.incrementConnectionCreatedCount();
  } catch (Exception ex) {
    sinkCounter.incrementConnectionFailedCount();
    // Fixed: pass the exception itself so the stack trace is logged, instead
    // of interpolating only ex.getMessage().
    logger.error("Error in initializing the serializer.", ex);
    Throwables.propagate(ex);
  }
  super.start();
}
/**
 * Initializes the serializer and starts the sink; initialization failures
 * are counted, logged, and rethrown.
 */
@Override
public void start() {
  logger.info("Starting sink {} ", this.getName());
  sinkCounter.start();
  try {
    serializer.initialize();
    sinkCounter.incrementConnectionCreatedCount();
  } catch (Exception ex) {
    sinkCounter.incrementConnectionFailedCount();
    // Fixed: pass the exception itself so the stack trace is logged, instead
    // of interpolating only ex.getMessage().
    logger.error("Error in initializing the serializer.", ex);
    Throwables.propagate(ex);
  }
  super.start();
}
/**
 * Starts the sink: begins counter collection and transitions lifecycle state.
 */
@Override
public final void start() {
  LOG.info("Starting HttpSink");
  sinkCounter.start();
  // Fixed: the original never called super.start(), so the framework's
  // lifecycle state was never set to STARTED (every other sink in this
  // file makes this call).
  super.start();
}
@Override public synchronized void start() { this.lastRolledMs = System.currentTimeMillis(); counter.start(); // signal that this sink is ready to process LOG.info("Started DatasetSink " + getName()); super.start(); }
/**
 * Creates the call-timeout and roll-timer thread pools, resets the writer
 * cache, and starts the sink.
 */
@Override
public void start() {
  // Pool that runs HDFS writer calls so they can be bounded by a timeout.
  String callRunnerFormat = "hdfs-" + getName() + "-call-runner-%d";
  callTimeoutPool = Executors.newFixedThreadPool(
      threadsPoolSize,
      new ThreadFactoryBuilder().setNameFormat(callRunnerFormat).build());

  // Pool that schedules time-based file rolls.
  String rollTimerFormat = "hdfs-" + getName() + "-roll-timer-%d";
  timedRollerPool = Executors.newScheduledThreadPool(
      rollTimerPoolSize,
      new ThreadFactoryBuilder().setNameFormat(rollTimerFormat).build());

  sfWriters = new WriterLinkedHashMap(maxOpenFiles);
  sinkCounter.start();
  super.start();
}
@Override public void start() { String timeoutName = "hive-" + getName() + "-call-runner-%d"; // call timeout pool needs only 1 thd as sink is effectively single threaded callTimeoutPool = Executors.newFixedThreadPool(1, new ThreadFactoryBuilder().setNameFormat(timeoutName).build()); this.allWriters = Maps.newHashMap(); sinkCounter.start(); super.start(); setupHeartBeatTimer(); LOG.info(getName() + ": Hive Sink {} started", getName() ); }