@Override
public void close() {
    logger.info("Closing river");

    // Stop the status thread completely, it will be re-started by #start()
    if (statusThread != null) {
        statusThread.interrupt();
        statusThread = null;
    }

    // Cleanup the other parts (the status thread is gone, and can't do that for us anymore)
    internalStopRiver();
}
/**
 * Execute actions to stop this river.
 *
 * The status thread will not be touched, and the river can be restarted by setting its status again
 * to {@link Status#RUNNING}.
 */
void internalStopRiver() {
    logger.info("Stopping");
    try {
        if (startupThread != null) {
            startupThread.interrupt();
            startupThread = null;
        }
        for (Thread thread : tailerThreads) {
            thread.interrupt();
        }
        tailerThreads.clear();
        if (indexerThread != null) {
            indexerThread.interrupt();
            indexerThread = null;
        }
        logger.info("Stopped");
    } catch (Throwable t) {
        logger.error("Failed to stop", t);
    } finally {
        this.context.setStatus(Status.STOPPED);
    }
}
private XContentBuilder getGridFSMapping() throws IOException {
    XContentBuilder mapping = jsonBuilder()
            .startObject()
                .startObject(definition.getTypeName())
                    .startObject("properties")
                        .startObject("content").field("type", "attachment").endObject()
                        .startObject("filename").field("type", "string").endObject()
                        .startObject("contentType").field("type", "string").endObject()
                        .startObject("md5").field("type", "string").endObject()
                        .startObject("length").field("type", "long").endObject()
                        .startObject("chunkSize").field("type", "long").endObject()
                    .endObject()
                .endObject()
            .endObject();
    logger.info("GridFS Mapping: {}", mapping.string());
    return mapping;
}
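// A minimal sketch, not from the original source, of how this mapping might be
// applied with the ES 1.x admin client seen in the other snippets here; the
// esClient and definition names are assumed from the surrounding code.
esClient.admin().indices()
        .preparePutMapping(definition.getIndexName())
        .setType(definition.getTypeName())
        .setSource(getGridFSMapping())
        .get();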
public static void setRiverStatus(Client client, String riverName, Status status) {
    logger.info("setRiverStatus called with {} - {}", riverName, status);
    XContentBuilder xb;
    try {
        xb = jsonBuilder().startObject()
                .startObject(MongoDBRiver.TYPE)
                    .field(MongoDBRiver.STATUS_FIELD, status)
                .endObject()
            .endObject();
        client.prepareIndex("_river", riverName, MongoDBRiver.STATUS_ID).setSource(xb).get();
    } catch (IOException ioEx) {
        logger.error("setRiverStatus failed for river {}", ioEx, riverName);
    }
}
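// Hypothetical call site, for illustration only: flipping the river to RUNNING
// writes the status document that the StatusChecker thread (further below) polls.
MongoDBRiverHelper.setRiverStatus(client, "my_mongodb_river", Status.RUNNING);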
private void delete(RestRequest request, RestChannel channel, Client esClient) {
    String river = request.param("river");
    if (river == null || river.isEmpty()) {
        respondError(request, channel, "Parameter 'river' is required", RestStatus.BAD_REQUEST);
        return;
    }
    logger.info("Delete river: {}", river);
    if (esClient.admin().indices().prepareTypesExists(riverIndexName).setTypes(river).get().isExists()) {
        esClient.admin().indices().prepareDeleteMapping(riverIndexName).setType(river).get();
    }
    respondSuccess(request, channel, RestStatus.OK);
}
private void addToStream(final Operation operation, final Timestamp<?> currentTimestamp, final DBObject data, final String collection)
        throws InterruptedException {
    if (logger.isTraceEnabled()) {
        String dataString = data.toString();
        if (dataString.length() > 400) {
            logger.trace("addToStream - operation [{}], currentTimestamp [{}], data (_id:[{}], serialized length:{}), collection [{}]",
                    operation, currentTimestamp, data.get("_id"), dataString.length(), collection);
        } else {
            logger.trace("addToStream - operation [{}], currentTimestamp [{}], data [{}], collection [{}]",
                    operation, currentTimestamp, dataString, collection);
        }
    }
    if (operation == Operation.DROP_DATABASE) {
        logger.info("addToStream - Operation.DROP_DATABASE, currentTimestamp [{}], data [{}], collection [{}]",
                currentTimestamp, data, collection);
        if (definition.isImportAllCollections()) {
            for (String name : slurpedDb.getCollectionNames()) {
                logger.info("addToStream - isImportAllCollections - Operation.DROP_DATABASE, currentTimestamp [{}], data [{}], collection [{}]",
                        currentTimestamp, data, name);
                context.getStream().put(new MongoDBRiver.QueueEntry(currentTimestamp, Operation.DROP_COLLECTION, data, name));
            }
        } else {
            context.getStream().put(new MongoDBRiver.QueueEntry(currentTimestamp, Operation.DROP_COLLECTION, data, collection));
        }
    } else {
        context.getStream().put(new MongoDBRiver.QueueEntry(currentTimestamp, operation, data, collection));
    }
}
            // ...
            break;
        } catch (MongoInterruptedException | InterruptedException e) {
            logger.info("river-mongodb slurper interrupted");
            Thread.currentThread().interrupt();
            break;
        } catch (MongoSocketException | MongoTimeoutException | MongoCursorNotFoundException e) {
            logger.info("Oplog tailing - {} - {}. Will retry.", e.getClass().getSimpleName(), e.getMessage());
            logger.debug("Total documents inserted so far by river {}: {}", definition.getRiverName(), totalDocuments.get());
            try {
                Thread.sleep(MongoDBRiver.MONGODB_RETRY_ERROR_DELAY_MS);
            } catch (InterruptedException iEx) {
                logger.info("river-mongodb slurper interrupted");
                Thread.currentThread().interrupt();
                break;
            }
        }
    }
    logger.info("Slurper is stopping. River has status {}", context.getStatus());
@Override
public void start() {
    // http://stackoverflow.com/questions/5270611/read-maven-properties-file-inside-jar-war-file
    logger.info("{} - {}", DESCRIPTION, MongoDBHelper.getRiverVersion());
    Status status = MongoDBRiverHelper.getRiverStatus(esClient, riverName.getName());
    if (status == Status.IMPORT_FAILED || status == Status.INITIAL_IMPORT_FAILED || status == Status.SCRIPT_IMPORT_FAILED
            || status == Status.START_FAILED) {
        logger.error("Cannot start. Current status is {}", status);
        return;
    }

    if (status == Status.STOPPED) {
        // Leave the current status of the river alone, but set the context status to 'stopped'.
        // Enabling the river via REST will trigger the actual start.
        context.setStatus(Status.STOPPED);
        logger.info("River is currently disabled and will not be started");
    } else {
        // Mark the current status as "waiting for full start"
        context.setStatus(Status.START_PENDING);
        // Request start of the river in the next iteration of the status thread
        MongoDBRiverHelper.setRiverStatus(esClient, riverName.getName(), Status.RUNNING);
        logger.info("Startup pending");
    }

    statusThread = EsExecutors.daemonThreadFactory(settings.globalSettings(), "mongodb_river_status:" + definition.getIndexName())
            .newThread(new StatusChecker(this, definition, context));
    statusThread.start();
}
logger.info("Creating MongoClient for [{}]", servers); mongoClient = new MongoClient(servers, mongoCredentials, mongoClientOptions); mongoClients.put(cacheKey, mongoClient);
private XContentBuilder build(final DBObject data, final String objectId) throws IOException {
    if (data instanceof GridFSDBFile) {
        logger.info("Add Attachment: {} to index {} / type {}", objectId, definition.getIndexName(), definition.getTypeName());
        return MongoDBHelper.serialize((GridFSDBFile) data);
    } else {
        Map<String, Object> mapData = this.createObjectMap(data);
        return XContentFactory.jsonBuilder().map(mapData);
    }
}
@Override
public void run() {
    while (true) {
        try {
            Status status = MongoDBRiverHelper.getRiverStatus(this.mongoDBRiver.esClient, this.definition.getRiverName());
            if (status != this.context.getStatus()) {
                if (status == Status.RUNNING && this.context.getStatus() != Status.STARTING) {
                    logger.trace("About to start river: {}", this.definition.getRiverName());
                    mongoDBRiver.internalStartRiver();
                } else if (status == Status.STOPPED) {
                    logger.info("About to stop river: {}", this.definition.getRiverName());
                    mongoDBRiver.internalStopRiver();
                }
            }
            Thread.sleep(1000L);
        } catch (InterruptedException e) {
            // The trailing (Object) null selects the (String, Throwable, Object...) overload,
            // so the exception is logged as a cause rather than as a format argument.
            logger.debug("Status thread interrupted", e, (Object) null);
            Thread.currentThread().interrupt();
            break;
        }
    }
}
@Override
public void run() {
    while (context.getStatus() == Status.RUNNING) {
        try {
            Timestamp<?> lastTimestamp = null;

            // 1. Attempt to fill as much of the bulk request as possible
            QueueEntry entry = context.getStream().take();
            lastTimestamp = processBlockingQueue(entry);
            while ((entry = context.getStream().poll(definition.getBulk().getFlushInterval().millis(), MILLISECONDS)) != null) {
                lastTimestamp = processBlockingQueue(entry);
            }

            // 2. Update the timestamp
            if (lastTimestamp != null) {
                river.setLastTimestamp(lastTimestamp,
                        getBulkProcessor(definition.getIndexName(), definition.getTypeName()).getBulkProcessor());
            }
        } catch (InterruptedException e) {
            logger.info("river-mongodb indexer interrupted");
            releaseProcessors();
            Thread.currentThread().interrupt();
            break;
        }
    }
}
logger.info("MongoDBRiver is beginning initial import of " + collection.getFullName()); boolean inProgress = true; String lastId = null; logger.info("Number of documents indexed in initial import of {}: {}", collection.getFullName(), count); } else { logger.info("Initial import - {} - {}. Will retry.", e.getClass().getSimpleName(), e.getMessage()); logger.debug("Total documents inserted so far: {}", totalDocuments.get()); Thread.sleep(MongoDBRiver.MONGODB_RETRY_ERROR_DELAY_MS);
@Override public void run() { logger.info("Starting"); logger.info( "MongoDB options: secondaryreadpreference [{}], drop_collection [{}], include_collection [{}], throttlesize [{}], gridfs [{}], filter [{}], db [{}], collection [{}], script [{}], indexing to [{}]/[{}]", definition.isMongoSecondaryReadPreference(), definition.isDropCollection(), definition.getIncludeCollection(), logger.info("Skip initial import from collection {}", definition.getMongoCollection()); thread.start(); logger.info("Started"); } catch (Throwable t) { logger.warn("Failed to start", t);
logger.trace("Command executed return : {}", cr); logger.info("MongoDB version - {}", cr.get("version")); if (logger.isTraceEnabled()) { logger.trace("serverStatus: {}", cr);
// ...
    processor.dropIndex();
} else {
    logger.info("Ignore drop collection request [{}], [{}]. The option has been disabled.", index, type);
}
public JiebaAnalyzer(String segMode, Path dataPath, boolean isStop) {
    super();
    this.type = segMode;
    WordDictionary.getInstance().init(dataPath);
    this.stopWords = isStop ? this.loadStopWords(dataPath) : CharArraySet.EMPTY_SET;
    this.log.info("Jieba segMode = {}", type);
    this.log.info("JiebaAnalyzer isStop = {}", isStop);
    this.log.info("JiebaAnalyzer stopWords = {}", this.stopWords);
}
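// A minimal usage sketch, not from the original source: run the analyzer over a
// string with the standard Lucene TokenStream API and print each term. The
// dictionary path and the "index" segMode value are illustrative assumptions.
import java.io.StringReader;
import java.nio.file.Paths;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

Analyzer analyzer = new JiebaAnalyzer("index", Paths.get("/path/to/jieba/data"), true);
try (TokenStream ts = analyzer.tokenStream("content", new StringReader("我来到北京清华大学"))) {
    CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
    ts.reset();
    while (ts.incrementToken()) {
        System.out.println(term.toString());
    }
    ts.end();
}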
@Override
public void onResponse(final BulkResponse response) {
    if (response.hasFailures()) {
        logger.error("Failed to write a result on {}/{}: {}", index, type, response.buildFailureMessage());
    } else {
        logger.info("Wrote {} results in {}/{}.", currentQueue.size(), index, type);
    }
}
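// A minimal sketch of wiring a listener like the one above into a bulk call,
// assuming the ES 1.x client API; the document content is illustrative.
client.prepareBulk()
        .add(client.prepareIndex(index, type).setSource("field", "value"))
        .execute(new ActionListener<BulkResponse>() {
            @Override
            public void onResponse(BulkResponse response) {
                // delegate to the handler shown above
            }

            @Override
            public void onFailure(Throwable e) {
                logger.error("Bulk request failed on {}/{}", e, index, type);
            }
        });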