/** Hook fired when a task is ultimately rejected; releases its slot in the blocked gauge. */
@Override
protected void onFinalRejection(Runnable task) {
    metrics.currentBlocked.dec();
}
}
/** Hook fired once a previously blocked task is accepted; releases its slot in the blocked gauge. */
@Override
protected void onFinalAccept(Runnable task) {
    metrics.currentBlocked.dec();
}
/**
 * Decrements the counter by one, delegating to {@code dec(long)}.
 */
public void dec() {
    dec(1);
}
/** Forwards a decrement of {@code delta} to the wrapped counter. */
@Override
public void decr(final long delta) {
    delegate.dec(delta);
}
/** Forwards a single-step decrement to the wrapped counter. */
@Override
public void decr() {
    delegate.dec();
}
/** Hook fired once a previously blocked task is accepted; drops it from the blocked gauge. */
@Override
protected void onFinalAccept(Runnable task) {
    metrics.currentBlocked.dec();
}
/** Hook fired when a task is ultimately rejected; drops it from the blocked gauge. */
@Override
protected void onFinalRejection(Runnable task) {
    metrics.currentBlocked.dec();
}
}
/**
 * Closes the underlying resources, then shrinks the open-engine gauge.
 * Note the gauge only counts down if the parent close completed without throwing.
 */
@Override
public void close() throws IOException {
    super.close();
    currentlyOpenEngines.dec();
}
/**
 * Blocks until a query is available, records how long it waited in the queue,
 * and shrinks the queued-request gauge before returning it to the caller.
 */
public QueryRequest takeQuery() throws InterruptedException {
    final QueryRequest next = blockingQueue.take();
    final long waitedMillis = System.currentTimeMillis() - next.queueStartTime;
    queueTimer.update(waitedMillis, TimeUnit.MILLISECONDS);
    queueCount.dec();
    return next;
}
}
@Override public void close() throws IOException { // This method does not throw a KafkaException if (consumer.paused().contains(topicPartition)) PAUSED_PARTITIONS.dec(); } }
/** Netty callback for a channel going inactive; shrinks the live-connection gauge. */
@Override
public void channelInactive(ChannelHandlerContext ctx) throws Exception {
    activeConnections.dec();
    // Propagate the event so handlers further down the pipeline still see it.
    super.channelInactive(ctx);
}
}
/**
 * Stops tracking the given obsolete sstables, subtracting each one's on-disk
 * size from both the global load metric and this column family's live disk usage.
 */
private void removeOldSSTablesSize(Iterable<SSTableReader> oldSSTables)
{
    for (SSTableReader sstable : oldSSTables)
    {
        // Parameterized logging defers message construction until DEBUG is known to be
        // enabled, so the explicit isDebugEnabled()/String.format pair is unnecessary.
        logger.debug("removing {} from list of files tracked for {}.{}",
                     sstable.descriptor, cfstore.keyspace.getName(), cfstore.name);
        long size = sstable.bytesOnDisk();
        StorageMetrics.load.dec(size);
        cfstore.metric.liveDiskSpaceUsed.dec(size);
    }
}
/** Invoked by Netty when the channel becomes inactive; decrements the active-connection gauge. */
@Override
public void channelInactive(ChannelHandlerContext ctx) throws Exception {
    activeConnections.dec();
    // Forward the event down the pipeline.
    super.channelInactive(ctx);
}
}
/**
 * Runs the hint delivery work, rethrowing any checked failure as a
 * RuntimeException; always releases the in-progress counters afterwards.
 */
public void run()
{
    try
    {
        runMayThrow();
    }
    catch (Exception e)
    {
        throw new RuntimeException(e);
    }
    finally
    {
        // Decrement both the global gauge and the per-target counter, even on failure.
        StorageMetrics.totalHintsInProgress.dec();
        getHintsInProgressFor(target).decrementAndGet();
    }
}
/**
 * Finalizes this reader exactly once: seals the mark, closes the input,
 * persists the mark to the database, and drops the reading gauge.
 * Subsequent calls are no-ops.
 */
void close(boolean wasEnd) throws IOException
{
    if (closed)
        return; // idempotent: only the first call does the work
    mark.setEnd(wasEnd);
    input.close();
    mark.update(stream);
    markDB.put(dbKey, mark);
    log.debug("mark.save {}:{} / {}", dbKey, mark, stream);
    closed = true;
    reading.dec();
}
public void run() { // If we can't successfully delete the DATA component, set the task to be retried later: see above File datafile = new File(desc.filenameFor(Component.DATA)); if (!datafile.delete()) { logger.error("Unable to delete {} (it will be removed on server restart; we'll also retry after GC)", datafile); failedTasks.add(this); return; } // let the remainder be cleaned up by delete SSTable.delete(desc, Sets.difference(components, Collections.singleton(Component.DATA))); if (totalDiskSpaceUsed != null) totalDiskSpaceUsed.dec(bytesOnDisk); }
@Override public void destroyObject(ChannelFuture future) throws Exception { LOG.debug("destroying connection {}", Integer.toHexString(future.getChannel().getId())); // if an error has closed the channel already Netty incorrectly throws an // exception without this check see https://github.com/netty/netty/issues/724 if (future.getChannel().isOpen()) { future.getChannel().close(); } if (future.isSuccess()) { aliveConnections.dec(); } }
/**
 * Starts a Jetty listener on {@code strPort} serving the collectd write_http
 * JSON endpoint. Skipped entirely when HTTP authentication is required, since
 * this endpoint cannot authenticate. The server runs on a managed thread and
 * the active-listener gauge tracks the thread's lifetime.
 */
protected void startWriteHttpJsonListener(String strPort) {
  if (tokenAuthenticator.authRequired()) {
    logger.warning("Port " + strPort + " (writeHttpJson) is not compatible with HTTP authentication, ignoring");
    return;
  }
  preprocessors.forPort(strPort).forReportPoint()
      .addFilter(new ReportPointTimestampInRangeFilter(dataBackfillCutoffHours, dataPrefillCutoffHours));
  startAsManagedThread(() -> {
    activeListeners.inc();
    try {
      org.eclipse.jetty.server.Server server = new org.eclipse.jetty.server.Server(Integer.parseInt(strPort));
      server.setHandler(new WriteHttpJsonMetricsEndpoint(strPort, hostname, prefix, pushValidationLevel,
          pushBlockedSamples, getFlushTasks(strPort), preprocessors.forPort(strPort)));
      server.start();
      server.join();
    } catch (BindException e) {
      // Dedicated catch instead of instanceof-testing inside a broad Exception handler.
      bindErrors.inc();
      // strPort is already a String; String.valueOf() was redundant.
      logger.severe("Unable to start listener - port " + strPort + " is already in use!");
    } catch (InterruptedException e) {
      logger.warning("WriteHttpJson server interrupted.");
      Thread.currentThread().interrupt(); // preserve the interrupt status for the managed thread
    } catch (Exception e) {
      logger.log(Level.SEVERE, "WriteHttpJson exception", e);
    } finally {
      activeListeners.dec();
    }
  }, "listener-plaintext-writehttpjson-" + strPort);
}
/**
 * Starts a Jetty listener on {@code strPort} serving the JSON metrics
 * endpoint. Skipped entirely when HTTP authentication is required, since this
 * endpoint cannot authenticate. The server runs on a managed thread and the
 * active-listener gauge tracks the thread's lifetime.
 */
protected void startJsonListener(String strPort) {
  if (tokenAuthenticator.authRequired()) {
    logger.warning("Port " + strPort + " (jsonListener) is not compatible with HTTP authentication, ignoring");
    return;
  }
  preprocessors.forPort(strPort).forReportPoint()
      .addFilter(new ReportPointTimestampInRangeFilter(dataBackfillCutoffHours, dataPrefillCutoffHours));
  startAsManagedThread(() -> {
    activeListeners.inc();
    try {
      org.eclipse.jetty.server.Server server = new org.eclipse.jetty.server.Server(Integer.parseInt(strPort));
      server.setHandler(new JsonMetricsEndpoint(strPort, hostname, prefix, pushValidationLevel,
          pushBlockedSamples, getFlushTasks(strPort), preprocessors.forPort(strPort)));
      server.start();
      server.join();
    } catch (BindException e) {
      // Dedicated catch instead of instanceof-testing inside a broad Exception handler.
      bindErrors.inc();
      // strPort is already a String; String.valueOf() was redundant.
      logger.severe("Unable to start listener - port " + strPort + " is already in use!");
    } catch (InterruptedException e) {
      logger.warning("Http Json server interrupted.");
      Thread.currentThread().interrupt(); // preserve the interrupt status for the managed thread
    } catch (Exception e) {
      logger.log(Level.SEVERE, "HttpJson exception", e);
    } finally {
      activeListeners.dec();
    }
  }, "listener-plaintext-json-" + strPort);
}
/**
 * Times each request, tracks the in-flight request gauge, and marks a meter
 * for the response's HTTP status code once the filter chain completes.
 */
@Override
public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
    throws IOException, ServletException {
  final StatusExposingServletResponse wrappedResponse =
      new StatusExposingServletResponse((HttpServletResponse) response);
  activeRequests.inc();
  final TimerContext context = requestTimer.time();
  try {
    chain.doFilter(request, wrappedResponse);
  } finally {
    // Record timing and status even when the chain throws.
    context.stop();
    activeRequests.dec();
    markMeterForStatusCode(wrappedResponse.getStatus());
  }
}