public void closeConnection() throws InterruptedException { LOG.info("Closing connection to EndPoint : {}", endPoint); try { timedCall(new CallRunner1<Void>() { @Override public Void call() { connection.close(); // could block return null; } }); sinkCounter.incrementConnectionClosedCount(); } catch (Exception e) { LOG.warn("Error closing connection to EndPoint : " + endPoint, e); // Suppressing exceptions as we don't care for errors on connection close } }
/** * Locate all writers past idle timeout and retire them * @return number of writers retired */ private int closeIdleWriters() throws InterruptedException { int count = 0; long now = System.currentTimeMillis(); ArrayList<HiveEndPoint> retirees = Lists.newArrayList(); //1) Find retirement candidates for (Entry<HiveEndPoint,HiveWriter> entry : allWriters.entrySet()) { if (now - entry.getValue().getLastUsed() > idleTimeout) { ++count; retirees.add(entry.getKey()); } } //2) Retire them for (HiveEndPoint ep : retirees) { sinkCounter.incrementConnectionClosedCount(); LOG.info(getName() + ": Closing idle Writer to Hive end point : {}", ep); allWriters.remove(ep).close(); } return count; }
/**
 * Stops the sink: closes the ElasticSearch client (if any), updates the
 * connection counters, and delegates to the superclass.
 */
@Override
public void stop() {
  // The original format string had a '{}' placeholder with no argument, so
  // the sink name was never logged; supply it explicitly.
  logger.info("ElasticSearch sink {} stopping", getName());
  if (client != null) {
    client.close();
  }
  sinkCounter.incrementConnectionClosedCount();
  sinkCounter.stop();
  super.stop();
}
try { callWithTimeout(createCloseCallRunner()); sinkCounter.incrementConnectionClosedCount(); } catch (InterruptedException | IOException e) { LOG.warn("Closing file: " + path + " failed. Will " +
/**
 * Tears down the current Rpc client, if one exists, recording the outcome
 * in the sink counters. Close failures are logged but never propagated.
 * The {@code client} field is always left null afterwards.
 */
private void destroyConnection() {
  if (client == null) {
    // Nothing to close; field is already null.
    return;
  }
  logger.debug("Rpc sink {} closing Rpc client: {}", getName(), client);
  try {
    client.close();
    sinkCounter.incrementConnectionClosedCount();
  } catch (FlumeException e) {
    sinkCounter.incrementConnectionFailedCount();
    logger.error("Rpc sink " + getName() + ": Attempt to close Rpc " +
        "client failed. Exception follows.", e);
  }
  client = null;
}
/**
 * Stops the grid (if it was started) and shuts down the sink counters.
 */
@Override
public synchronized void stop() {
  if (ignite != null) {
    ignite.close();
    // Count a closed connection only when a grid instance actually existed;
    // previously the counter was incremented even when ignite was null.
    sinkCounter.incrementConnectionClosedCount();
  }
  sinkCounter.stop();
  super.stop();
}
/**
 * Shuts down the producer (if it was created) and the sink counters.
 */
@Override
public synchronized void stop() {
  // Guard against stop() being invoked before start() initialized the
  // producer, which would otherwise throw a NullPointerException; only
  // count a closed connection when one actually existed.
  if (producer != null) {
    producer.shutdown();
    sinkCounter.incrementConnectionClosedCount();
  }
  sinkCounter.stop();
  super.stop();
}
/**
 * Stops the sink: cleans up the serializer, shuts down the HBase client and
 * the callback thread pool, and clears all per-connection state.
 */
@Override
public void stop() {
  serializer.cleanUp();
  if (client != null) {
    shutdownHBaseClient();
  }
  sinkCounter.incrementConnectionClosedCount();
  sinkCounter.stop();
  try {
    if (sinkCallbackPool != null) {
      sinkCallbackPool.shutdown();
      // Give in-flight callbacks a bounded grace period before forcing.
      if (!sinkCallbackPool.awaitTermination(5, TimeUnit.SECONDS)) {
        sinkCallbackPool.shutdownNow();
      }
    }
  } catch (InterruptedException e) {
    logger.error("Interrupted while waiting for asynchbase sink pool to " +
        "die", e);
    if (sinkCallbackPool != null) {
      sinkCallbackPool.shutdownNow();
    }
    // Restore the interrupt status so callers further up the stack can
    // still observe that this thread was interrupted.
    Thread.currentThread().interrupt();
  }
  sinkCallbackPool = null;
  client = null;
  conf = null;
  open = false;
  super.stop();
}
/**
 * Stops the sink: closes the table (if open), clears the reference, and
 * shuts down the sink counters. A failed close is wrapped in a
 * FlumeException and propagated.
 */
@Override
public void stop() {
  if (table != null) {
    try {
      table.close();
    } catch (IOException e) {
      throw new FlumeException("Error closing table.", e);
    }
  }
  table = null;
  sinkCounter.incrementConnectionClosedCount();
  sinkCounter.stop();
}
try { sinkCounter.incrementConnectionClosedCount(); } catch (IOException ex) { LOG.warn("failed to close() HDFSWriter for file ("
serializer.beforeClose(); outputStream.close(); sinkCounter.incrementConnectionClosedCount(); } catch (IOException e) { sinkCounter.incrementConnectionFailedCount();
/**
 * Stops the sink: closes the table and the underlying connection, then
 * shuts down the sink counters. Close failures are wrapped in
 * FlumeException and propagated.
 */
@Override
public void stop() {
  try {
    try {
      if (table != null) {
        table.close();
      }
      table = null;
    } catch (IOException e) {
      throw new FlumeException("Error closing table.", e);
    }
  } finally {
    // Close the connection even when closing the table threw, so a table
    // error can no longer leak the underlying connection. (If both closes
    // fail, the connection's exception takes precedence.)
    try {
      if (conn != null) {
        conn.close();
      }
      conn = null;
    } catch (IOException e) {
      throw new FlumeException("Error closing connection.", e);
    }
  }
  sinkCounter.incrementConnectionClosedCount();
  sinkCounter.stop();
}
/**
 * Starts the sink: builds an ElasticSearch client (local or remote
 * depending on configuration) and records connection metrics. On failure
 * the error is logged, counted, and any partially-created client is closed.
 */
@Override
public void start() {
  ElasticSearchClientFactory clientFactory = new ElasticSearchClientFactory();
  // Original format string had a '{}' placeholder with no argument; supply
  // the sink name.
  logger.info("ElasticSearch sink {} started", getName());
  sinkCounter.start();
  try {
    if (isLocal) {
      client = clientFactory.getLocalClient(
          clientType, eventSerializer, indexRequestFactory);
    } else {
      client = clientFactory.getClient(clientType, serverAddresses,
          clusterName, eventSerializer, indexRequestFactory);
      client.configure(elasticSearchClientContext);
    }
    sinkCounter.incrementConnectionCreatedCount();
  } catch (Exception ex) {
    // Log through the configured logger (with stack trace) instead of
    // ex.printStackTrace(), which bypasses the logging framework.
    logger.error("ElasticSearch sink " + getName() +
        " could not create client. Exception follows.", ex);
    sinkCounter.incrementConnectionFailedCount();
    if (client != null) {
      client.close();
      sinkCounter.incrementConnectionClosedCount();
    }
  }
  super.start();
}
serializer.beforeClose(); outputStream.close(); sinkCounter.incrementConnectionClosedCount(); shouldRotate = false; } catch (IOException e) {
/**
 * Stops the sink: delegates to the superclass, closes the serializer's
 * database connection, and shuts down the sink counters.
 */
@Override
public void stop() {
  super.stop();
  try {
    serializer.close();
  } catch (SQLException e) {
    // Pass the exception as the final argument so SLF4J also logs the full
    // stack trace instead of only the message text.
    logger.error(" Error while closing connection {} for sink {} ",
        e.getMessage(), this.getName(), e);
  }
  sinkCounter.incrementConnectionClosedCount();
  sinkCounter.stop();
}
/**
 * Stops the sink: delegates to the superclass, closes the serializer's
 * database connection, and shuts down the sink counters.
 */
@Override
public void stop() {
  super.stop();
  try {
    serializer.close();
  } catch (SQLException e) {
    // Append the exception itself so SLF4J logs the stack trace, not just
    // the message returned by getMessage().
    logger.error(" Error while closing connection {} for sink {} ",
        e.getMessage(), this.getName(), e);
  }
  sinkCounter.incrementConnectionClosedCount();
  sinkCounter.stop();
}
/**
 * Stops the sink: closes the ElasticSearch client (if any), updates the
 * connection counters, and delegates to the superclass.
 */
@Override
public void stop() {
  // Supply the sink name for the '{}' placeholder, which previously had no
  // matching argument and so was never substituted.
  logger.info("ElasticSearch sink {} stopping", getName());
  if (client != null) {
    client.close();
  }
  sinkCounter.incrementConnectionClosedCount();
  sinkCounter.stop();
  super.stop();
}
/**
 * Stops the sink: closes the ElasticSearch client (if any), updates the
 * connection counters, and delegates to the superclass.
 */
@Override
public void stop() {
  // Fix the orphaned '{}' placeholder by passing the sink name.
  logger.info("ElasticSearch sink {} stopping", getName());
  if (client != null) {
    client.close();
  }
  sinkCounter.incrementConnectionClosedCount();
  sinkCounter.stop();
  super.stop();
}
/**
 * Closes the Rpc client, if present, updating the connection counters.
 * Close failures are logged and swallowed; the {@code client} field is
 * unconditionally cleared when this method returns.
 */
private void destroyConnection() {
  try {
    if (client != null) {
      logger.debug("Rpc sink {} closing Rpc client: {}", getName(), client);
      try {
        client.close();
        sinkCounter.incrementConnectionClosedCount();
      } catch (FlumeException e) {
        sinkCounter.incrementConnectionFailedCount();
        logger.error("Rpc sink " + getName() +
            ": Attempt to close Rpc client failed. Exception follows.", e);
      }
    }
  } finally {
    client = null;
  }
}
/**
 * Stops the sink: closes the open table (wrapping any close failure in a
 * FlumeException), releases the reference, and stops the sink counters.
 */
@Override
public void stop() {
  try {
    if (table != null) {
      table.close();
      table = null;
    }
  } catch (IOException e) {
    throw new FlumeException("Error closing table.", e);
  }
  sinkCounter.incrementConnectionClosedCount();
  sinkCounter.stop();
}