/** * Flushes and closes the bulk processor to further writes. */ @Override public void close() { try { if (!hadWriteErrors) { flush(); } else { if (LOG.isDebugEnabled()) { LOG.debug("Dirty close; ignoring last existing write batch..."); } } if (requiresRefreshAfterBulk && executedBulkWrite) { // refresh batch restClient.refresh(resource); if (LOG.isDebugEnabled()) { LOG.debug(String.format("Refreshing index [%s]", resource)); } } } finally { for (IBulkWriteErrorHandler handler : documentBulkErrorHandlers) { handler.close(); } } }
@Override public void close() { if (isOpen()) { // TODO: look at collecting these stats some other way later. if (clientSettings.getBatchRefreshAfterWrite()) { writeClient.getRestClient().refresh(endpoint); } writeClient.close(); } } }
// NOTE(review): incomplete fragment — the enclosing method is not visible here.
// Flushes the pending write batch, then asks the client to refresh the write
// resource (presumably so the just-written documents become searchable — confirm).
flush(); client.refresh(resources.getResourceWrite());
/** * Flushes and closes the bulk processor to further writes. */ @Override public void close() { try { if (!hadWriteErrors) { flush(); } else { if (LOG.isDebugEnabled()) { LOG.debug("Dirty close; ignoring last existing write batch..."); } } if (requiresRefreshAfterBulk && executedBulkWrite) { // refresh batch restClient.refresh(resource); if (LOG.isDebugEnabled()) { LOG.debug(String.format("Refreshing index [%s]", resource)); } } } finally { for (IBulkWriteErrorHandler handler : documentBulkErrorHandlers) { handler.close(); } } }
/** * Flushes and closes the bulk processor to further writes. */ @Override public void close() { try { if (!hadWriteErrors) { flush(); } else { if (LOG.isDebugEnabled()) { LOG.debug("Dirty close; ignoring last existing write batch..."); } } if (requiresRefreshAfterBulk && executedBulkWrite) { // refresh batch restClient.refresh(resource); if (LOG.isDebugEnabled()) { LOG.debug(String.format("Refreshing index [%s]", resource)); } } } finally { for (IBulkWriteErrorHandler handler : documentBulkErrorHandlers) { handler.close(); } } }
/** * Flushes and closes the bulk processor to further writes. */ @Override public void close() { try { if (!hadWriteErrors) { flush(); } else { if (LOG.isDebugEnabled()) { LOG.debug("Dirty close; ignoring last existing write batch..."); } } if (requiresRefreshAfterBulk && executedBulkWrite) { // refresh batch restClient.refresh(resource); if (LOG.isDebugEnabled()) { LOG.debug(String.format("Refreshing index [%s]", resource)); } } } finally { for (IBulkWriteErrorHandler handler : documentBulkErrorHandlers) { handler.close(); } } }
@Override public void close() { if (log.isDebugEnabled()) { log.debug("Closing repository and connection to Elasticsearch ..."); } // bail out if closed before if (client == null) { return; } try { if (!hadWriteErrors) { flush(); } else { if (log.isDebugEnabled()) { log.debug("Dirty close; ignoring last existing write batch..."); } } if (requiresRefreshAfterBulk && executedBulkWrite) { // refresh batch client.refresh(resourceW); if (log.isDebugEnabled()) { log.debug(String.format("Refreshing index [%s]", resourceW)); } } } finally { client.close(); stats.aggregate(client.stats()); client = null; } }
@Override public void close() { if (isOpen()) { // TODO: look at collecting these stats some other way later. if (clientSettings.getBatchRefreshAfterWrite()) { writeClient.getRestClient().refresh(endpoint); } writeClient.close(); } } }
@Override public void close() { if (isOpen()) { // TODO: look at collecting these stats some other way later. if (clientSettings.getBatchRefreshAfterWrite()) { writeClient.getRestClient().refresh(endpoint); } writeClient.close(); } } }
@Override public void close() { if (isOpen()) { // TODO: look at collecting these stats some other way later. if (clientSettings.getBatchRefreshAfterWrite()) { writeClient.getRestClient().refresh(endpoint); } writeClient.close(); } } }
// NOTE(review): incomplete fragment — the enclosing method is not visible here.
// Flushes the pending write batch, then asks the client to refresh the write
// resource (presumably so the just-written documents become searchable — confirm).
flush(); client.refresh(resources.getResourceWrite());
// NOTE(review): incomplete fragment — the enclosing method is not visible here.
// Flushes the pending write batch, then asks the client to refresh the write
// resource (presumably so the just-written documents become searchable — confirm).
flush(); client.refresh(resources.getResourceWrite());
// NOTE(review): incomplete fragment — the enclosing method is not visible here.
// Flushes the pending write batch, then asks the client to refresh the write
// resource (presumably so the just-written documents become searchable — confirm).
flush(); client.refresh(resources.getResourceWrite());
// NOTE(review): incomplete fragment with unbalanced braces — the try block this
// "} finally {" belongs to starts outside the visible source. Refreshes the write
// resource, then in the finally block folds query stats into the aggregate;
// cannot verify semantics of `sq` from here.
client.refresh(resourceW); } finally { stats.aggregate(sq.stats());