/**
 * Checks whether an index with the given name exists on the sessions collection.
 *
 * @param name the index name to look for; may not be null
 * @return true if an index with exactly that name exists, false otherwise
 */
private boolean existsIdx(final String name) {
    for (final Document doc : sessions.listIndexes()) {
        // Comparison flipped from the original doc.getString("name").equals(name):
        // guards against an NPE if an index document has no "name" field.
        if (name.equals(doc.getString("name"))) {
            return true;
        }
    }
    return false;
}
/**
 * Builds a {@link MongoColumnHandle} from a column metadata document.
 *
 * @param columnMeta document holding the column's name, type signature and hidden flag
 * @return the resolved column handle
 */
private MongoColumnHandle buildColumnHandle(Document columnMeta) {
    String columnName = columnMeta.getString(FIELDS_NAME_KEY);
    boolean isHidden = columnMeta.getBoolean(FIELDS_HIDDEN_KEY, false);
    // Resolve the persisted type signature string into a concrete Type.
    Type columnType = typeManager.getType(TypeSignature.parseTypeSignature(columnMeta.getString(FIELDS_TYPE_KEY)));
    return new MongoColumnHandle(columnName, columnType, isHidden);
}
/**
 * Returns the names of all tables recorded in the schema collection of the given database.
 *
 * @param schemaName the MongoDB database name to inspect
 * @return the set of table names found in the schema collection
 */
private Set<String> getTableMetadataNames(String schemaName) throws TableNotFoundException {
    MongoDatabase db = client.getDatabase(schemaName);
    HashSet<String> names = new HashSet<>();
    // try-with-resources: the original never closed the cursor (MongoCursor is Closeable),
    // leaking the server-side cursor on every call.
    try (MongoCursor<Document> cursor = db.getCollection(schemaCollection)
            .find().projection(new Document(TABLE_NAME_KEY, true)).iterator()) {
        while (cursor.hasNext()) {
            names.add(cursor.next().getString(TABLE_NAME_KEY));
        }
    }
    return names;
}
public static List<MongoIndex> parse(ListIndexesIterable<Document> indexes) { ImmutableList.Builder<MongoIndex> builder = ImmutableList.builder(); for (Document index : indexes) { // TODO: v, ns, sparse fields Document key = (Document) index.get("key"); String name = index.getString("name"); boolean unique = index.getBoolean("unique", false); if (key.containsKey("_fts")) { // Full Text Search continue; } builder.add(new MongoIndex(name, parseKey(key), unique)); } return builder.build(); }
/**
 * The content type of the file.
 *
 * @return the content type of the file
 * @throws MongoGridFSException if no content type is stored for this file
 * @deprecated content type information should be stored in the metadata document instead.
 */
@Deprecated
public String getContentType() {
    // Guard clause: fail fast when no contentType is recorded.
    if (extraElements == null || !extraElements.containsKey("contentType")) {
        throw new MongoGridFSException("No contentType data for this GridFS file");
    }
    return extraElements.getString("contentType");
}
// Determine whether the unique Grok-pattern name index already exists.
boolean indexExists = false;
for (Document document : collection.listIndexes()) {
    if (MongoDbGrokPatternService.INDEX_NAME.equals(document.getString("name"))
            && document.getBoolean("unique")) {
        indexExists = true;
        break;
        // NOTE(review): everything from here on appears to be a separate, truncated block
        // fused into this one — it is unreachable after the break and re-declares `document`.
        // Confirm against the full source file; this looks like an extraction artifact.
        for (Document document : collection.find()) {
            final ObjectId id = document.getObjectId("_id");
            final String name = document.getString("name");
            final String pattern = document.getString("pattern");
            // Patterns whose name is already known are flagged as duplicates.
            if (grokPatterns.contains(name)) {
                LOG.info("Marking duplicate Grok pattern <{}> for removal: {}\t{}", id, name, pattern);
// Walk every branch-transaction document, counting processed documents in `length`.
// (Fragment: the loop body is truncated below the `continue`.)
for (; cursor.hasNext(); length++) {
    Document document = cursor.next();
    String globalValue = document.getString(CONSTANTS_FD_GLOBAL);
    String branchValue = document.getString(CONSTANTS_FD_BRANCH);
    // Xids are persisted as strings; decode them back into raw byte arrays.
    byte[] global = ByteUtils.stringToByteArray(globalValue);
    byte[] branch = ByteUtils.stringToByteArray(branchValue);
    TransactionXid branchXid = xidFactory.createBranchXid(globalXid, branch);
    String resourceId = document.getString("resource_id");
    // Skip records that carry no resource id — nothing to reconstruct for them.
    if (StringUtils.isBlank(resourceId)) {
        continue;
@Override public void upgrade() { if (clusterConfigService.get(MigrationCompleted.class) != null) { LOG.debug("Migration already done."); return; } // Do not overwrite an existing default index config boolean defaultDone = clusterConfigService.get(DefaultIndexSetConfig.class) != null; final ImmutableSet.Builder<String> builder = ImmutableSet.builder(); final FindIterable<Document> documents = collection.find(exists(FIELD_DEFAULT)).sort(ascending(FIELD_CREATION_DATE)); for (final Document document : documents) { final ObjectId id = document.getObjectId(FIELD_ID); final String idString = id.toHexString(); final boolean isDefault = firstNonNull(document.getBoolean(FIELD_DEFAULT), false); if (!defaultDone && isDefault) { defaultDone = true; clusterConfigService.write(DefaultIndexSetConfig.create(idString)); } final long modifiedCount = collection.updateOne(eq(FIELD_ID, id), unset(FIELD_DEFAULT)).getMatchedCount(); if (modifiedCount > 0) { LOG.info("Removed <default> field from index set <{}> ({})", document.getString(FIELD_TITLE), idString); builder.add(idString); } else { LOG.error("Couldn't remove <default> field from index set <{}> ({})", document.getString(FIELD_TITLE), idString); } } clusterConfigService.write(MigrationCompleted.create(builder.build())); }
// For each shard document in the config database, derive its replica-set spec.
// (Fragment: the closing of the lambda is truncated.)
MongoUtil.onCollectionDocuments(client, CONFIG_DATABASE_NAME, shardsCollection, doc -> {
    logger.info("Checking shard details from configuration replica set {}", seedAddresses);
    String shardName = doc.getString("_id");
    // NOTE(review): "host" presumably has the form "<rsName>/<host1>,<host2>,..." and
    // replicaSetUsedIn() extracts the leading replica-set name — confirm against MongoUtil.
    String hostStr = doc.getString("host");
    String replicaSetName = MongoUtil.replicaSetUsedIn(hostStr);
    replicaSetSpecs.add(new ReplicaSet(hostStr, replicaSetName, shardName));
/**
 * Get a {@link Struct} representation of the source {@link #partition(String) partition} and
 * {@link #lastOffset(String) offset} information. The Struct complies with the {@link #schema}
 * for the MongoDB connector.
 *
 * @param replicaSetName the name of the replica set name for which the new offset is to be obtained; may not be null
 * @param oplogEvent the replica set oplog event that was last read; may be null if the position is the start of
 *            the oplog
 * @return the source partition and offset {@link Struct}; never null
 * @see #schema()
 */
public Struct offsetStructForEvent(String replicaSetName, Document oplogEvent) {
    Position newPosition = INITIAL_POSITION;
    String eventNamespace = "";
    if (oplogEvent != null) {
        BsonTimestamp timestamp = extractEventTimestamp(oplogEvent);
        // 'h' is the event's operation id; 'ns' its namespace.
        Long operationId = oplogEvent.getLong("h");
        newPosition = new Position(timestamp, operationId);
        eventNamespace = oplogEvent.getString("ns");
    }
    positionsByReplicaSetName.put(replicaSetName, newPosition);
    return offsetStructFor(replicaSetName, eventNamespace, newPosition, isInitialSyncOngoing(replicaSetName));
}
/**
 * Looks up the identifier of the instance currently holding the lock for the given transaction.
 *
 * @param transactionXid the global transaction id to query; may not be null
 * @return the lock owner's identifier, or null if no lock record exists or the query fails
 */
private String getTransactionOwnerInMongoDB(TransactionXid transactionXid) {
    byte[] global = transactionXid.getGlobalTransactionId();
    String instanceId = ByteUtils.byteArrayToString(global);
    try {
        String application = CommonUtils.getApplication(this.endpoint);
        // Database names may not contain special characters; normalize them to '_'.
        String databaseName = application.replaceAll("\\W", "_");
        MongoDatabase mdb = this.mongoClient.getDatabase(databaseName);
        MongoCollection<Document> collection = mdb.getCollection(CONSTANTS_TB_LOCKS);
        FindIterable<Document> findIterable = collection.find(Filters.eq(CONSTANTS_FD_GLOBAL, instanceId));
        // try-with-resources: the original never closed the cursor, leaking it on every call.
        try (MongoCursor<Document> cursor = findIterable.iterator()) {
            if (cursor.hasNext()) {
                Document document = cursor.next();
                return document.getString("identifier");
            }
            return null;
        }
    } catch (RuntimeException rex) {
        logger.error("Error occurred while querying the lock-owner of transaction(gxid= {}).", instanceId, rex);
        return null;
    }
}
public void recover(TransactionRecoveryCallback callback) { MongoCursor<Document> transactionCursor = null; try { String application = CommonUtils.getApplication(this.endpoint); String databaseName = application.replaceAll("\\W", "_"); MongoDatabase mdb = this.mongoClient.getDatabase(databaseName); MongoCollection<Document> transactions = mdb.getCollection(CONSTANTS_TB_TRANSACTIONS); FindIterable<Document> transactionItr = transactions.find(Filters.eq("coordinator", true)); for (transactionCursor = transactionItr.iterator(); transactionCursor.hasNext();) { Document document = transactionCursor.next(); boolean error = document.getBoolean("error"); String targetApplication = document.getString("system"); long expectVersion = document.getLong("version"); long actualVersion = this.versionManager.getInstanceVersion(targetApplication); if (error == false && actualVersion > 0 && actualVersion <= expectVersion) { continue; // ignore } callback.recover(this.reconstructTransactionArchive(document)); } } catch (RuntimeException error) { logger.error("Error occurred while recovering transaction.", error); } catch (Exception error) { logger.error("Error occurred while recovering transaction.", error); } finally { IOUtils.closeQuietly(transactionCursor); } }
/** * Generate and record one or more source records to describe the given event. * * @param oplogEvent the event; may not be null * @param timestamp the timestamp at which this operation is occurring * @return the number of source records that were generated; will be 0 or more * @throws InterruptedException if the calling thread was interrupted while waiting to submit a record to * the blocking consumer */ public int recordEvent(Document oplogEvent, long timestamp) throws InterruptedException { final Struct sourceValue = source.offsetStructForEvent(replicaSetName, oplogEvent); final Map<String, ?> offset = source.lastOffset(replicaSetName); Document patchObj = oplogEvent.get("o", Document.class); // Updates have an 'o2' field, since the updated object in 'o' might not have the ObjectID ... Object o2 = oplogEvent.get("o2"); String objId = o2 != null ? idObjToJson(o2) : idObjToJson(patchObj); assert objId != null; Operation operation = operationLiterals.get(oplogEvent.getString("op")); return createRecords(sourceValue, offset, operation, objId, patchObj, timestamp); }
boolean error = document.getBoolean("error"); String targetApplication = document.getString("created"); long expectVersion = document.getLong("version"); long actualVersion = this.versionManager.getInstanceVersion(targetApplication);
// Rebuild the compensable-transaction archive fields from the persisted document.
// (Fragment: the body of the trailing if is truncated.)
String propagatedBy = document.getString("propagated_by");
boolean compensable = document.getBoolean("compensable");
boolean coordinator = document.getBoolean("coordinator");
String global = document.getString(CONSTANTS_FD_GLOBAL);
// The global xid is persisted as a string; decode it back into raw bytes.
byte[] globalByteArray = ByteUtils.stringToByteArray(global);
TransactionXid globalXid = compensableXidFactory.createGlobalXid(globalByteArray);
archive.setXid(globalXid);
String textVariables = document.getString("variables");
byte[] variablesByteArray = null;
// Variables may be blank or the literal string "null"; both mean "no variables stored".
if (StringUtils.isNotBlank(textVariables) && StringUtils.equals(textVariables, "null") == false) {
// Rebuild a Collation from its persisted document form, starting from the locale.
// (Fragment: the closing brace of the if is truncated.)
Collation collation = Collation.of(source.getString("locale"));
if (source.containsKey("strength")) {
    collation = collation.strength(source.getInteger("strength"));
    // NOTE(review): caseFirst/alternate/maxVariable are applied inside the "strength"
    // branch with no presence checks of their own — confirm these keys always accompany
    // "strength" in the stored documents; otherwise they are silently dropped (or null
    // is passed) when "strength" is absent. Fragment is truncated here.
    collation = collation.caseFirst(source.getString("caseFirst"));
    collation = collation.alternate(source.getString("alternate"));
    collation = collation.maxVariable(source.getString("maxVariable"));