/**
 * Builds the list of server addresses from a replica-set member document.
 * The "host" field may be of the form "setName/host1:port,host2:port"; the
 * optional "setName/" prefix is stripped before splitting on commas.
 */
private List<ServerAddress> getServerAddressForReplica(DBObject item) {
    String definition = item.get("host").toString();
    // Drop the replica-set name prefix, if present.
    int slash = definition.indexOf("/");
    if (slash >= 0) {
        definition = definition.substring(slash + 1);
    }
    if (logger.isDebugEnabled()) {
        logger.debug("getServerAddressForReplica - definition: {}", definition);
    }
    List<ServerAddress> servers = new ArrayList<ServerAddress>();
    for (String server : definition.split(",")) {
        servers.add(new ServerAddress(server));
    }
    return servers;
}
/**
 * Blocks until the bulk processor can accept more work, polling every 2 seconds.
 * Returns early (with the interrupt flag restored) if the thread is interrupted,
 * so the owning thread can shut down promptly.
 */
private void checkBulkProcessorAvailability() {
    while (!isBulkProcessorAvailable()) {
        try {
            if (logger.isDebugEnabled()) {
                logger.debug("Waiting for bulk queue to empty...");
            }
            Thread.sleep(2000);
        } catch (InterruptedException e) {
            // Fix: message previously said "checkIndexStatistics" (copy/paste);
            // also restore the interrupt status and stop waiting instead of
            // swallowing the interrupt and looping forever.
            logger.warn("checkBulkProcessorAvailability interrupted", e);
            Thread.currentThread().interrupt();
            return;
        }
    }
}
/**
 * Dispatches a river-management REST call based on the path suffix
 * (list / start / stop / delete); any other path yields an error response.
 */
@Override
protected void handleRequest(RestRequest request, RestChannel channel, Client esClient) throws Exception {
    logger.debug("uri: {}", request.uri());
    logger.debug("action: {}", request.param("action"));
    final String path = request.path();
    if (path.endsWith("list")) {
        list(request, channel, esClient);
    } else if (path.endsWith("start")) {
        start(request, channel, esClient);
    } else if (path.endsWith("stop")) {
        stop(request, channel, esClient);
    } else if (path.endsWith("delete")) {
        delete(request, channel, esClient);
    } else {
        // NOTE: original code responds with RestStatus.OK even for an unknown action.
        respondError(request, channel, "action not found: " + request.uri(), RestStatus.OK);
    }
}
logger.debug("Encountered oplog entry with a:false, ts:" + item.get("ts")); break; logger.debug("Before waiting for 500 ms"); Thread.sleep(500); } finally { } catch (MongoSocketException | MongoTimeoutException | MongoCursorNotFoundException e) { logger.info("Oplog tailing - {} - {}. Will retry.", e.getClass().getSimpleName(), e.getMessage()); logger.debug("Total documents inserted so far by river {}: {}", definition.getRiverName(), totalDocuments.get()); try { Thread.sleep(MongoDBRiver.MONGODB_RETRY_ERROR_DELAY_MS);
} catch (Exception e) { logger.error("Exception in initial import", e); logger.debug("Total documents inserted so far: {}", totalDocuments.get()); Thread.currentThread().interrupt();
/**
 * Status-watcher loop: polls the river's persisted status once per second and
 * starts or stops the river when the persisted status diverges from the
 * in-memory context status. Exits only on interrupt.
 */
@Override
public void run() {
    while (true) {
        try {
            Status status = MongoDBRiverHelper.getRiverStatus(this.mongoDBRiver.esClient, this.definition.getRiverName());
            if (status != this.context.getStatus()) {
                // RUNNING while not already STARTING -> bring the river up.
                if (status == Status.RUNNING && this.context.getStatus() != Status.STARTING) {
                    logger.trace("About to start river: {}", this.definition.getRiverName());
                    mongoDBRiver.internalStartRiver();
                } else if (status == Status.STOPPED) {
                    logger.info("About to stop river: {}", this.definition.getRiverName());
                    mongoDBRiver.internalStopRiver();
                }
            }
            Thread.sleep(1000L);
        } catch (InterruptedException e) {
            // (Object) null disambiguates the logger overload taking a Throwable.
            logger.debug("Status thread interrupted", e, (Object) null);
            // Restore the interrupt flag and terminate the watcher loop.
            Thread.currentThread().interrupt();
            break;
        }
    }
}
/**
 * Creates an indexer bound to the given river, caching its collaborators
 * (definition, context, ES client, script service) and eagerly creating the
 * bulk processor for the river's target index/type.
 */
public Indexer(MongoDBRiver river) {
    super(river);
    this.river = river;
    this.definition = river.definition;
    this.context = river.context;
    this.esClient = river.esClient;
    this.scriptService = river.scriptService;
    logger.debug(
            "Create bulk processor with parameters - bulk actions: {} - concurrent request: {} - flush interval: {} - bulk size: {}",
            definition.getBulk().getBulkActions(), definition.getBulk().getConcurrentRequests(), definition.getBulk()
                    .getFlushInterval(), definition.getBulk().getBulkSize());
    // Side effect: builds (and registers) the bulk processor up front.
    getBulkProcessor(definition.getIndexName(), definition.getTypeName());
}
lastId = addInsertToStream(null, applyFieldFilter(object), collection.getName()); } else { logger.debug("Last entry for initial import of {} - add timestamp: {}", collection.getFullName(), timestamp); lastId = addInsertToStream(timestamp, applyFieldFilter(object), collection.getName()); lastId = addInsertToStream(null, file); } else { logger.debug("Last entry for initial import of {} - add timestamp: {}", collection.getFullName(), timestamp); lastId = addInsertToStream(timestamp, file); logger.debug("Total documents inserted so far: {}", totalDocuments.get()); Thread.sleep(MongoDBRiver.MONGODB_RETRY_ERROR_DELAY_MS); } finally {
/**
 * Callback invoked after a cluster-state update has been applied; ensures the
 * one-time initial-state event has been delivered to listeners.
 */
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
    logger.debug("Cluster updated");
    sendInitialStateEventIfNeeded();
}
/**
 * Invoked when the tracked CuratorFramework OSGi service goes away: closes the
 * group and releases the service reference.
 */
@Override
public void removedService(ServiceReference<CuratorFramework> reference, CuratorFramework service) {
    logger.debug("CuratorFramework lost, closing group");
    try {
        singleton.close();
    } catch (IOException e) {
        // Fix: was LOG.error(...) while the surrounding code logs via `logger`;
        // use the same logger consistently so messages land in one category.
        logger.error("Error stopping group", e);
    }
    context.ungetService(reference);
}
/**
 * Notifies all discovery listeners of the initial state exactly once; the
 * CAS on {@code initialStateSent} makes repeat calls no-ops.
 */
private void sendInitialStateEventIfNeeded() {
    // Only the first caller wins the CAS and fires the event.
    if (!initialStateSent.compareAndSet(false, true)) {
        return;
    }
    logger.debug("Sending initial state event");
    for (InitialStateDiscoveryListener listener : initialStateListeners) {
        listener.initialStateProcessed();
    }
}
/**
 * Failure callback for the security-configuration refresh: a missing index is
 * logged at debug (expected before setup completes), anything else at error.
 */
@Override
public void onFailure(final Throwable e) {
    if (!(e instanceof IndexMissingException)) {
        logger.error("Try to refresh security configuration but it failed due to {}", e, e.toString());
        return;
    }
    logger.debug(
            "Try to refresh security configuration but it failed due to {} - This might be ok if security setup not complete yet.",
            e.toString());
}
});
/**
 * Publishes a new cluster state to the cluster. Only the elected master may
 * publish; a non-master caller gets an illegal-state exception.
 */
@Override
public void publish(ClusterState clusterState, AckListener ackListener) {
    logger.debug("Publishing cluster state");
    if (!singleton.isMaster()) {
        throw new ElasticsearchIllegalStateException("Shouldn't publish state when not master");
    }
    // Remember the node set being published, then hand off to the publisher.
    latestDiscoNodes = clusterState.nodes();
    publishClusterState.publish(clusterState, ackListener);
    logger.debug("Cluster state published");
}
/**
 * First search response of a scroll: logs the total hit count and kicks off
 * scroll processing immediately (zero delay).
 */
@Override
public void onResponse(SearchResponse response) {
    logger.debug("[{}] documents match query", response.getHits().getTotalHits());
    onScrollResponse(timeValueSeconds(0), response);
}
/**
 * Get-response callback for reloading the security configuration: caches the
 * document source and releases the latch when the document exists.
 */
@Override
public void onResponse(final GetResponse response) {
    if (response.isExists() && !response.isSourceEmpty()) {
        securityConfiguration = response.getSourceAsBytesRef();
        latch.countDown();
        logger.debug("Security configuration reloaded");
    }
    // NOTE(review): when the document is missing or has an empty source the
    // latch is never counted down here — confirm a waiter cannot block forever
    // (e.g. the await uses a timeout or onFailure also releases it).
}
/**
 * Authenticates every request as the shared "unauthenticated" pseudo-user.
 * Roles are still resolved through the authorizator so downstream
 * authorization checks keep working.
 */
@Override
public User authenticate(final RestRequest request, final RestChannel channel,
        final AuthenticationBackend backend, final Authorizator authorizator) throws AuthException {
    final AuthCredentials anonymousCredentials =
            new AuthCredentials(HTTPUnauthenticatedAuthenticator.UNAUTHENTICATED_USER, null);
    final User authenticatedUser = backend.authenticate(anonymousCredentials);
    authorizator.fillRoles(authenticatedUser, new AuthCredentials(authenticatedUser.getName(), null));
    log.debug("User '{}' is authenticated", authenticatedUser);
    return authenticatedUser;
}
/**
 * Index-response callback: logs the outcome (guarded so the argument list is
 * only evaluated when debug is on) and releases one unit of the countdown.
 */
@Override
public void onResponse(final IndexResponse response) {
    if (logger.isDebugEnabled()) {
        logger.debug("Response: {}/{}/{}, Created: {}, Version: {}",
                response.getIndex(), response.getType(), response.getId(),
                response.isCreated(), response.getVersion());
    }
    countDown();
}