/** Delegates iteration to the wrapped iterable's cursor; caller is responsible for closing it. */
@Override public MongoCursor<GridFSFile> iterator() { return underlying.iterator(); }
/**
 * Collects the table names recorded in the schema collection of the given database.
 *
 * @param schemaName the MongoDB database name to inspect; may not be null
 * @return the set of values stored under {@code TABLE_NAME_KEY} in the schema collection
 * @throws TableNotFoundException declared for callers; not thrown directly here
 */
private Set<String> getTableMetadataNames(String schemaName) throws TableNotFoundException {
    MongoDatabase db = client.getDatabase(schemaName);
    HashSet<String> names = new HashSet<>();
    // try-with-resources closes the cursor deterministically; the original leaked it.
    try (MongoCursor<Document> cursor = db.getCollection(schemaCollection)
            .find().projection(new Document(TABLE_NAME_KEY, true)).iterator()) {
        while (cursor.hasNext()) {
            names.add(cursor.next().getString(TABLE_NAME_KEY));
        }
    }
    return names;
}
/**
 * Opens a cursor over this file's chunks, beginning at the given chunk index.
 * Chunks are filtered by file id, returned in ascending {@code n} order, and
 * fetched with the configured batch size. Uses the client session when one is set.
 */
private MongoCursor<Document> getCursor(final int startChunkIndex) {
    Document filter = new Document("files_id", fileId)
            .append("n", new Document("$gte", startChunkIndex));
    FindIterable<Document> chunks = clientSession != null
            ? chunksCollection.find(clientSession, filter)
            : chunksCollection.find(filter);
    return chunks.batchSize(batchSize).sort(new Document("n", 1)).iterator();
}
/**
 * Copy the collection, sending to the recorder a record for each document.
 *
 * @param primary the connection to the replica set's primary node; may not be null
 * @param collectionId the identifier of the collection to be copied; may not be null
 * @param timestamp the timestamp in milliseconds at which the copy operation was started
 * @return number of documents that were copied
 * @throws InterruptedException if the thread was interrupted while the copy operation was running
 */
protected long copyCollection(MongoClient primary, CollectionId collectionId, long timestamp) throws InterruptedException {
    RecordsForCollection recordMaker = recordMakers.forCollection(collectionId);
    MongoCollection<Document> source =
            primary.getDatabase(collectionId.dbName()).getCollection(collectionId.name());
    long copied = 0;
    // Cursor is closed by try-with-resources; stop early if the connector is shut down.
    try (MongoCursor<Document> docs = source.find().iterator()) {
        while (running.get() && docs.hasNext()) {
            Document doc = docs.next();
            logger.trace("Found existing doc in {}: {}", collectionId, doc);
            copied += recordMaker.recordObject(collectionId, doc, timestamp);
        }
    }
    return copied;
}
/**
 * Runs the query described by the split against its collection, projecting only
 * the requested columns, and returns an open cursor over the results.
 * A non-zero {@code cursorBatchSize} is applied to the cursor; callers must close it.
 */
public MongoCursor<Document> execute(MongoSplit split, List<MongoColumnHandle> columns) {
    Document projection = new Document();
    columns.forEach(column -> projection.append(column.getName(), 1));
    FindIterable<Document> find = getCollection(split.getSchemaTableName())
            .find(buildQuery(split.getTupleDomain()))
            .projection(projection);
    if (cursorBatchSize != 0) {
        find.batchSize(cursorBatchSize);
    }
    return find.iterator();
}
/**
 * Fetches at most one document matching the query from the configured collection.
 *
 * @param query the filter to apply; may not be null
 * @param projection optional field projection; ignored when null
 * @return the first matching document, or null if none matched
 */
private Document findOne(Document query, Document projection) {
    // Parameterize the collection (original used a raw MongoCollection type).
    MongoCollection<Document> col = controllerService.getDatabase(databaseName).getCollection(collection);
    FindIterable<Document> find = col.find(query);
    if (projection != null) {
        find = find.projection(projection);
    }
    // try-with-resources closes the cursor even if hasNext()/next() throws;
    // the original only closed it on the success path.
    try (MongoCursor<Document> it = find.iterator()) {
        return it.hasNext() ? it.next() : null;
    }
}
}
cursor = findIterable.iterator();
/**
 * Perform the given operation on all of the documents inside the named collection in the named database, if the database and
 * collection both exist. The operation is called once for each document, so if the collection exists but is empty then the
 * function will not be called.
 *
 * @param client the MongoDB client; may not be null
 * @param dbName the name of the database; may not be null
 * @param collectionName the name of the collection; may not be null
 * @param documentOperation the operation to perform; may not be null
 */
public static void onCollectionDocuments(MongoClient client, String dbName, String collectionName,
                                         BlockingConsumer<Document> documentOperation) {
    onCollection(client, dbName, collectionName, collection -> {
        try (MongoCursor<Document> cursor = collection.find().iterator()) {
            while (cursor.hasNext()) {
                try {
                    documentOperation.accept(cursor.next());
                } catch (InterruptedException e) {
                    // Restore the interrupt flag so callers can observe it.
                    // The original called Thread.interrupted(), which *clears* the flag
                    // and silently swallows the interruption.
                    Thread.currentThread().interrupt();
                    break;
                }
            }
        }
    });
}
MongoCursor<Document> cursor = null; try { cursor = collection.find().limit(batchSize).iterator(); for (; cursor.hasNext(); length++) { Document document = cursor.next();
/**
 * Looks up the identifier of the instance currently owning the lock for the given transaction.
 *
 * @param transactionXid the transaction whose lock owner is queried; may not be null
 * @return the owner's identifier, or null when no lock record exists or the query fails
 */
private String getTransactionOwnerInMongoDB(TransactionXid transactionXid) {
    byte[] global = transactionXid.getGlobalTransactionId();
    String instanceId = ByteUtils.byteArrayToString(global);
    try {
        String application = CommonUtils.getApplication(this.endpoint);
        // Database name is the application id with non-word characters normalized.
        String databaseName = application.replaceAll("\\W", "_");
        MongoDatabase mdb = this.mongoClient.getDatabase(databaseName);
        MongoCollection<Document> collection = mdb.getCollection(CONSTANTS_TB_LOCKS);
        // try-with-resources closes the cursor deterministically; the original leaked it.
        try (MongoCursor<Document> cursor =
                collection.find(Filters.eq(CONSTANTS_FD_GLOBAL, instanceId)).iterator()) {
            return cursor.hasNext() ? cursor.next().getString("identifier") : null;
        }
    } catch (RuntimeException rex) {
        logger.error("Error occurred while querying the lock-owner of transaction(gxid= {}).", instanceId, rex);
        return null;
    }
}
public void recover(TransactionRecoveryCallback callback) { MongoCursor<Document> transactionCursor = null; try { String application = CommonUtils.getApplication(this.endpoint); String databaseName = application.replaceAll("\\W", "_"); MongoDatabase mdb = this.mongoClient.getDatabase(databaseName); MongoCollection<Document> transactions = mdb.getCollection(CONSTANTS_TB_TRANSACTIONS); FindIterable<Document> transactionItr = transactions.find(Filters.eq("coordinator", true)); for (transactionCursor = transactionItr.iterator(); transactionCursor.hasNext();) { Document document = transactionCursor.next(); boolean error = document.getBoolean("error"); String targetApplication = document.getString("system"); long expectVersion = document.getLong("version"); long actualVersion = this.versionManager.getInstanceVersion(targetApplication); if (error == false && actualVersion > 0 && actualVersion <= expectVersion) { continue; // ignore } callback.recover(this.reconstructTransactionArchive(document)); } } catch (RuntimeException error) { logger.error("Error occurred while recovering transaction.", error); } catch (Exception error) { logger.error("Error occurred while recovering transaction.", error); } finally { IOUtils.closeQuietly(transactionCursor); } }
/**
 * Wires a fully mocked repository: every {@code find} on the mocked collection
 * yields the mocked cursor kept in {@code this.cursor} for the tests to stub.
 */
@Before
@SuppressWarnings("unchecked")
public void setUp() throws Exception {
    MongoCollection<Entity> collection = mock(MongoCollection.class);
    MongoDatabase db = mock(MongoDatabase.class);
    when(db.getCollection(anyString(), any(Class.class))).thenReturn(collection);
    when(collection.withCodecRegistry(any(CodecRegistry.class))).thenReturn(collection);

    this.repository = new EntityRepository(RepositorySetup.builder()
            .database(db)
            .executor(MoreExecutors.newDirectExecutorService())
            .gson(new GsonBuilder().registerTypeAdapterFactory(new GsonAdaptersEntity()).create())
            .build());

    FindIterable<Entity> findResult = mock(FindIterable.class);
    when(collection.find(any(Bson.class))).thenReturn(findResult);
    this.cursor = mock(MongoCursor.class);
    when(findResult.iterator()).thenReturn(this.cursor);
}
return iterable.iterator();
for (transactionCursor = transactionItr.iterator(); transactionCursor.hasNext();) { Document document = transactionCursor.next(); boolean error = document.getBoolean("error");
/** * Use the given primary to read the oplog. * * @param primary the connection to the replica set's primary node; may not be null */ protected void readOplog(MongoClient primary) { BsonTimestamp oplogStart = source.lastOffsetTimestamp(replicaSet.replicaSetName()); logger.info("Reading oplog for '{}' primary {} starting at {}", replicaSet, primary.getAddress(), oplogStart); // Include none of the cluster-internal operations and only those events since the previous timestamp ... MongoCollection<Document> oplog = primary.getDatabase("local").getCollection("oplog.rs"); Bson filter = Filters.and(Filters.gt("ts", oplogStart), // start just after our last position Filters.exists("fromMigrate", false)); // skip internal movements across shards FindIterable<Document> results = oplog.find(filter) .sort(new Document("$natural", 1)) // force forwards collection scan .oplogReplay(true) // tells Mongo to not rely on indexes .cursorType(CursorType.TailableAwait); // tail and await new data // Read as much of the oplog as we can ... ServerAddress primaryAddress = primary.getAddress(); try (MongoCursor<Document> cursor = results.iterator()) { while (running.get() && cursor.hasNext()) { if (!handleOplogEvent(primaryAddress, cursor.next())) { // Something happened, and we're supposed to stop reading return; } } } }
/**
 * Loads and reconstructs the compensable transaction stored under the given xid.
 *
 * @param xid the global transaction id to look up; may not be null
 * @return the reconstructed transaction, or null when no record exists
 * @throws TransactionException with {@code XAER_RMERR} when the lookup or reconstruction fails
 */
private Transaction getTransactionFromMongoDB(TransactionXid xid) throws TransactionException {
    TransactionRecovery compensableRecovery = this.beanFactory.getCompensableRecovery();
    CompensableLogger compensableLogger = this.beanFactory.getCompensableLogger();
    try {
        String application = CommonUtils.getApplication(this.endpoint);
        String databaseName = application.replaceAll("\\W", "_");
        MongoDatabase mdb = this.mongoClient.getDatabase(databaseName);
        MongoCollection<Document> transactions = mdb.getCollection(CONSTANTS_TB_TRANSACTIONS);
        byte[] global = xid.getGlobalTransactionId();
        String globalKey = ByteUtils.byteArrayToString(global);
        FindIterable<Document> transactionItr = transactions.find(Filters.eq(CONSTANTS_FD_GLOBAL, globalKey));
        // try-with-resources replaces the original finally/IOUtils.closeQuietly.
        try (MongoCursor<Document> transactionCursor = transactionItr.iterator()) {
            if (!transactionCursor.hasNext()) {
                return null;
            }
            Document document = transactionCursor.next();
            MongoCompensableLogger mongoCompensableLogger = (MongoCompensableLogger) compensableLogger;
            TransactionArchive archive = mongoCompensableLogger.reconstructTransactionArchive(document);
            return compensableRecovery.reconstruct(archive);
        }
    } catch (Exception error) {
        // The original had separate RuntimeException/Exception catches with identical
        // bodies; a single catch covers both.
        logger.error("Error occurred while getting transaction.", error);
        throw new TransactionException(XAException.XAER_RMERR);
    }
}
transactionCursor = transactionItr.iterator(); if (transactionCursor.hasNext() == false) { return null;
/**
 * Executes the configured find query and materializes every matching document.
 * Applies the optional criteria, projection (exclusion), ordering, skip and limit
 * that were captured when this callable was built.
 */
@SuppressWarnings("resource") @Override public List<T> call() throws Exception {
    // Null criteria means "match everything"; the driver accepts a null filter.
    @Nullable Bson query = criteria != null ? convertToBson(criteria) : null;
    FindIterable<T> cursor = collection().find(query);
    if (!exclusion.isNil()) {
        cursor.projection(convertToBson(exclusion));
    }
    if (!ordering.isNil()) {
        cursor.sort(convertToBson(ordering));
    }
    cursor.skip(skip);
    if (limit != 0) {
        cursor.limit(limit);
        if (limit <= LARGE_BATCH_SIZE) {
            // if limit specified and is smaller than reasonable large batch size
            // then we force batch size to be the same as limit,
            // but negative, this force cursor to close right after result is sent
            cursor.batchSize(-limit);
        }
    }
    // close properly
    try (MongoCursor<T> iterator = cursor.iterator()) {
        return ImmutableList.copyOf(iterator);
    }
} });
/**
 * Runs the query produced by the callback against the named collection and feeds
 * each resulting document to the handler. The optional preparer may customize the
 * iterable (e.g. batch size, read preference) before iteration begins.
 *
 * @param collectionCallback produces the FindIterable to execute; may not be null
 * @param preparer optional hook to tune the iterable; ignored when null
 * @param callbackHandler receives each document in cursor order; may not be null
 * @param collectionName the collection to query; may not be null
 */
private void executeQueryInternal(CollectionCallback<FindIterable<Document>> collectionCallback,
        @Nullable CursorPreparer preparer, DocumentCallbackHandler callbackHandler, String collectionName) {
    try {
        FindIterable<Document> iterable = collectionCallback
                .doInCollection(getAndPrepareCollection(doGetDatabase(), collectionName));
        if (preparer != null) {
            iterable = preparer.prepare(iterable);
        }
        // try-with-resources replaces the original null-check/finally close with
        // identical semantics (cursor is closed even when the handler throws).
        try (MongoCursor<Document> cursor = iterable.iterator()) {
            while (cursor.hasNext()) {
                callbackHandler.processDocument(cursor.next());
            }
        }
    } catch (RuntimeException e) {
        throw potentiallyConvertRuntimeException(e, exceptionTranslator);
    }
}