@Override
public GridFSFindIterable sort(@Nullable final Bson sort) {
    underlying.sort(sort);
    return this;
}
private MongoCursor<Document> getCursor(final int startChunkIndex) {
    FindIterable<Document> findIterable;
    Document filter = new Document("files_id", fileId)
            .append("n", new Document("$gte", startChunkIndex));
    if (clientSession != null) {
        findIterable = chunksCollection.find(clientSession, filter);
    } else {
        findIterable = chunksCollection.find(filter);
    }
    return findIterable.batchSize(batchSize).sort(new Document("n", 1)).iterator();
}
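// A minimal sketch of how such a chunk cursor might be consumed, assuming the
// surrounding GridFS download stream defines the fields used above; the buffer
// handling is illustrative, not the driver's API. The ascending sort on "n"
// is what guarantees the chunks arrive in file order.
try (MongoCursor<Document> chunks = getCursor(0)) {
    while (chunks.hasNext()) {
        Document chunk = chunks.next();
        byte[] bytes = chunk.get("data", org.bson.types.Binary.class).getData();
        // append bytes to the output in chunk ("n") order
    }
}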
/**
 * Obtain the current position of the oplog, and record it in the source.
 */
protected void recordCurrentOplogPosition() {
    primaryClient.execute("get oplog position", primary -> {
        MongoCollection<Document> oplog = primary.getDatabase("local").getCollection("oplog.rs");
        Document last = oplog.find().sort(new Document("$natural", -1)).limit(1).first(); // may be null
        source.offsetStructForEvent(replicaSet.replicaSetName(), last);
    });
}
@Signature
public WrapMongoIterable sort(BasicDBObject sort) {
    if (getWrappedObject() instanceof FindIterable) {
        return new WrapMongoIterable(this.__env__, ((FindIterable) getWrappedObject()).sort(sort));
    } else {
        return this;
    }
}
collection.find(query).sort(sort).limit(recordcount);
final ImmutableSet.Builder<String> modifiedAlertConditions = ImmutableSet.builder();
for (Document document : collection.find().sort(ascending(FIELD_CREATED_AT))) {
Document firstEvent = oplog.find().sort(new Document("$natural", 1)).limit(1).first(); // may be null
    return SourceInfo.extractEventTimestamp(firstEvent);
});
@Override
public void upgrade() {
    if (clusterConfigService.get(MigrationCompleted.class) != null) {
        LOG.debug("Migration already done.");
        return;
    }

    // Do not overwrite an existing default index config
    boolean defaultDone = clusterConfigService.get(DefaultIndexSetConfig.class) != null;

    final ImmutableSet.Builder<String> builder = ImmutableSet.builder();
    final FindIterable<Document> documents = collection.find(exists(FIELD_DEFAULT)).sort(ascending(FIELD_CREATION_DATE));
    for (final Document document : documents) {
        final ObjectId id = document.getObjectId(FIELD_ID);
        final String idString = id.toHexString();
        final boolean isDefault = firstNonNull(document.getBoolean(FIELD_DEFAULT), false);

        if (!defaultDone && isDefault) {
            defaultDone = true;
            clusterConfigService.write(DefaultIndexSetConfig.create(idString));
        }

        final long matchedCount = collection.updateOne(eq(FIELD_ID, id), unset(FIELD_DEFAULT)).getMatchedCount();
        if (matchedCount > 0) {
            LOG.info("Removed <default> field from index set <{}> ({})", document.getString(FIELD_TITLE), idString);
            builder.add(idString);
        } else {
            LOG.error("Couldn't remove <default> field from index set <{}> ({})", document.getString(FIELD_TITLE), idString);
        }
    }

    clusterConfigService.write(MigrationCompleted.create(builder.build()));
}
/**
 * Use the given primary to read the oplog.
 *
 * @param primary the connection to the replica set's primary node; may not be null
 */
protected void readOplog(MongoClient primary) {
    BsonTimestamp oplogStart = source.lastOffsetTimestamp(replicaSet.replicaSetName());
    logger.info("Reading oplog for '{}' primary {} starting at {}", replicaSet, primary.getAddress(), oplogStart);

    // Include none of the cluster-internal operations and only those events since the previous timestamp ...
    MongoCollection<Document> oplog = primary.getDatabase("local").getCollection("oplog.rs");
    Bson filter = Filters.and(Filters.gt("ts", oplogStart), // start just after our last position
                              Filters.exists("fromMigrate", false)); // skip internal movements across shards
    FindIterable<Document> results = oplog.find(filter)
                                          .sort(new Document("$natural", 1)) // force forwards collection scan
                                          .oplogReplay(true) // tells Mongo to not rely on indexes
                                          .cursorType(CursorType.TailableAwait); // tail and await new data

    // Read as much of the oplog as we can ...
    ServerAddress primaryAddress = primary.getAddress();
    try (MongoCursor<Document> cursor = results.iterator()) {
        while (running.get() && cursor.hasNext()) {
            if (!handleOplogEvent(primaryAddress, cursor.next())) {
                // Something happened, and we're supposed to stop reading
                return;
            }
        }
    }
}
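// Design note: CursorType.TailableAwait works only on capped collections (the oplog
// is capped); hasNext() then blocks until new entries arrive instead of returning
// false at the end of the data. A minimal standalone sketch of the same pattern,
// with a hypothetical "client" and a "process" placeholder:
MongoCollection<Document> tailed = client.getDatabase("local").getCollection("oplog.rs");
try (MongoCursor<Document> tail = tailed.find()
                                        .sort(new Document("$natural", 1))
                                        .cursorType(CursorType.TailableAwait)
                                        .iterator()) {
    while (tail.hasNext()) {
        process(tail.next()); // handle one oplog entry
    }
}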
@SuppressWarnings("resource") @Override public List<T> call() throws Exception { @Nullable Bson query = criteria != null ? convertToBson(criteria) : null; FindIterable<T> cursor = collection().find(query); if (!exclusion.isNil()) { cursor.projection(convertToBson(exclusion)); } if (!ordering.isNil()) { cursor.sort(convertToBson(ordering)); } cursor.skip(skip); if (limit != 0) { cursor.limit(limit); if (limit <= LARGE_BATCH_SIZE) { // if limit specified and is smaller than reasonable large batch size // then we force batch size to be the same as limit, // but negative, this force cursor to close right after result is sent cursor.batchSize(-limit); } } // close properly try (MongoCursor<T> iterator = cursor.iterator()) { return ImmutableList.copyOf(iterator); } } });
cursorToUse = cursorToUse.sort(sort);
it.sort(sort);
find = find.sort(Document.parse(context.getProperty(SORT).evaluateAttributeExpressions(input).getValue()));
.getCollection("oplog.rs") .find(filter) .sort(new Document("$natural", 1)) .oplogReplay(true) // tells Mongo to not rely on indexes .noCursorTimeout(true) // don't timeout waiting for events
@Override
public Stream<Map<String, Object>> find(Map<String, Object> query, Map<String, Object> project,
                                        Map<String, Object> sort, Long skip, Long limit) {
    FindIterable<Document> documents = query == null ? collection.find() : collection.find(new Document(query));
    if (project != null) {
        documents = documents.projection(new Document(project));
    }
    if (sort != null) {
        documents = documents.sort(new Document(sort));
    }
    // guard against null before unboxing, consistent with the other nullable parameters
    if (skip != null && skip != 0) {
        documents = documents.skip(skip.intValue());
    }
    if (limit != null && limit != 0) {
        documents = documents.limit(limit.intValue());
    }
    return asStream(documents);
}
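// A hedged usage sketch for the method above. "store" is a made-up instance of the
// implementing class, and Map.<String, Object>of(...) (Java 9+) is used only to
// build the loosely-typed query/projection/sort maps the signature asks for:
Stream<Map<String, Object>> results = store.find(
        Map.<String, Object>of("status", "ACTIVE"),     // query
        Map.<String, Object>of("name", 1, "status", 1), // projection
        Map.<String, Object>of("createdAt", -1),        // sort: newest first
        0L,                                             // skip
        10L);                                           // limit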
private MongoCollectionResult find(MongoQueryOptions mongoQueryOptions,
                                   final MongoCollectionResult mongoCollectionResult,
                                   com.mongodb.client.MongoCollection<Document> collection) {
    Document filter = mongoQueryOptions.getFilter();
    Document projection = mongoQueryOptions.getProjection();
    Document sort = mongoQueryOptions.getSort();

    FindIterable<Document> cursor = collection.find(filter);
    if (!MongoQueryOptions.EMPTY_DOCUMENT.equals(projection)) {
        cursor.projection(projection);
    }
    if (!MongoQueryOptions.EMPTY_DOCUMENT.equals(sort)) {
        cursor.sort(sort);
    }
    int resultLimit = mongoQueryOptions.getResultLimit();
    if (resultLimit > 0) {
        cursor.limit(resultLimit);
    }
    cursor.forEach((Block<Document>) mongoCollectionResult::add);
    return mongoCollectionResult;
}
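// Design note: in the 3.x driver, MongoIterable.forEach takes a com.mongodb.Block,
// and the cast disambiguates the method reference from Iterable.forEach(Consumer).
// That overload was deprecated in 3.12 and removed in the 4.x driver, where
// forEach(Consumer) is the only variant and the cast is no longer needed.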
private long getNextLongVersion(MongoCollection<Document> collection, String aggregateRootId) {
    FindIterable<Document> one = collection.find(new Document("rid", aggregateRootId))
                                           .sort(new Document("v", -1))
                                           .limit(1)
                                           .projection(new Document("v", 1));
    final Document first = one.first();
    if (first == null) {
        return 0;
    }
    return first.getLong("v") + 1;
}
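// A hedged usage sketch of the method above: read-then-append versioning for an
// event-sourced aggregate. "eventCollection" and the "order-42" id are made up;
// the field names "rid" and "v" come from the snippet itself.
long nextVersion = getNextLongVersion(eventCollection, "order-42");
eventCollection.insertOne(new Document("rid", "order-42").append("v", nextVersion));
// The read and the insert are not atomic, so this pattern is typically paired with
// a unique index on (rid, v): a concurrent writer claiming the same version fails
// on insert and can retry with a fresh read.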
@Override
public Stream<? extends DomainEventData<?>> findSnapshots(MongoCollection<Document> snapshotCollection,
                                                          String aggregateIdentifier) {
    FindIterable<Document> cursor =
            snapshotCollection.find(eq(eventConfiguration.aggregateIdentifierProperty(), aggregateIdentifier))
                              .sort(orderBy(descending(eventConfiguration.sequenceNumberProperty())));
    return stream(cursor.spliterator(), false).map(this::extractSnapshot);
}
@Override
public List<JobInfo> findByType(final String type) {
    return collection()
            .find(byType(type))
            .maxTime(mongoProperties.getDefaultReadTimeout(), TimeUnit.MILLISECONDS)
            .sort(orderByStarted(DESCENDING))
            .map(this::decode)
            .into(new ArrayList<>());
}
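// A hedged usage sketch; "repository" and the "import" job type are made up. The
// maxTime(...) call above bounds server-side execution, so this either returns the
// jobs ordered by most recent start or fails fast once the read timeout elapses:
List<JobInfo> importJobs = repository.findByType("import");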