public ObjectId store(InputStream content, @Nullable String filename, @Nullable String contentType,
        @Nullable Document metadata) {

    Assert.notNull(content, "InputStream must not be null!");

    return getGridFs().uploadFromStream(filename, content, computeUploadOptionsFor(contentType, metadata));
}
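A minimal usage sketch for the store method above, assuming a configured Spring Data GridFsTemplate available as gridFsTemplate; the filename, content type, and metadata values are illustrative, and imports are elided as in the surrounding snippets:

ObjectId storeReport(GridFsTemplate gridFsTemplate) throws IOException {
    // store a PDF together with its content type and a custom metadata document
    try (InputStream in = Files.newInputStream(Paths.get("report.pdf"))) {
        return gridFsTemplate.store(in, "report.pdf", "application/pdf",
                new Document("owner", "reports-service"));
    }
}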
public GridFSFindIterable find(Query query) {

    Assert.notNull(query, "Query must not be null!");

    Document queryObject = getMappedQuery(query.getQueryObject());
    Document sortObject = getMappedQuery(query.getSortObject());

    return getGridFs().find(queryObject).sort(sortObject);
}
public void delete(Query query) {

    for (GridFSFile gridFSFile : find(query)) {
        getGridFs().delete(((BsonObjectId) gridFSFile.getId()).getValue());
    }
}
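A hedged usage sketch for query-based deletion against the same template, assumed here as gridFsTemplate; the filename criterion is illustrative:

// delete every GridFS file whose filename matches; a no-op when nothing matches
gridFsTemplate.delete(Query.query(Criteria.where("filename").is("report.pdf")));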
private void deleteAllExcept(GridFSBucket bucket, String filename, ObjectId objectID) {
    List<ObjectId> idsToDelete = new ArrayList<>();
    bucket.find(Filters.eq(FILENAME_TAG, filename)).forEach((Consumer<GridFSFile>) file -> {
        // collect only the ids we actually intend to delete, skipping the one to keep
        if (!file.getObjectId().equals(objectID)) {
            idsToDelete.add(file.getObjectId());
        }
    });
    LOGGER.debug("number of files to delete: " + idsToDelete.size());
    idsToDelete.forEach(id -> {
        LOGGER.debug("delete: " + id);
        bucket.delete(id);
    });
}
@Override
protected Binary getBinary(InputStream in) throws IOException {
    try {
        // save the file to GridFS
        String inputName = "tmp-" + System.nanoTime();
        ObjectId id = gridFSBucket.uploadFromStream(inputName, in);
        // now we know length and digest
        GridFSFile inputFile = gridFSBucket.find(Filters.eq(METADATA_PROPERTY_FILENAME, inputName)).first();
        String digest = inputFile.getMD5();
        // if the digest is already known then reuse it instead
        GridFSFile dbFile = gridFSBucket.find(Filters.eq(METADATA_PROPERTY_FILENAME, digest)).first();
        if (dbFile == null) {
            // no existing file, set its filename as the digest
            gridFSBucket.rename(id, digest);
        } else {
            // file already existed, no need for the temporary one
            gridFSBucket.delete(id);
        }
        return new GridFSBinary(digest, blobProviderId);
    } finally {
        in.close();
    }
}
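Note that GridFSFile.getMD5() used above was deprecated in the 3.x Java driver and removed in 4.x, so on newer drivers the digest has to be computed client-side. A sketch under that assumption, reusing gridFSBucket; the helper name and temp-file naming are illustrative, and the digest-reuse check is omitted for brevity:

String uploadWithClientSideDigest(GridFSBucket gridFSBucket, InputStream in)
        throws IOException, NoSuchAlgorithmException {
    MessageDigest md5 = MessageDigest.getInstance("MD5");
    // the DigestInputStream updates the digest while the driver consumes the stream
    try (DigestInputStream digestIn = new DigestInputStream(in, md5)) {
        ObjectId id = gridFSBucket.uploadFromStream("tmp-" + System.nanoTime(), digestIn);
        String digest = String.format("%032x", new BigInteger(1, md5.digest()));
        gridFSBucket.rename(id, digest);
        return digest;
    }
}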
protected void saveDataToGridFS(byte[] data, String fileid) {
    Bson fileQuery = Filters.regex("filename", getId() + ".*");
    try {
        final ArrayList<GridFSFile> es = new ArrayList<>();
        gridFS.find(fileQuery).into(es);
        for (GridFSFile e : es) {
            gridFS.delete(e.getObjectId());
        }
    } catch (Exception e) {
        log.error("failed to delete old gridfsfile", e);
    }
    gridFS.uploadFromStream(getId() + fileid, new ByteArrayInputStream(data));
}
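The regex above treats getId() as a pattern, so an id containing regex metacharacters could match unintended filenames. A hedged hardening sketch; whether ids can actually contain such characters here is an assumption:

// escape the id so it matches literally; \Q...\E quoting is understood by MongoDB's regex engine
Bson fileQuery = Filters.regex("filename", "^" + Pattern.quote(getId()) + ".*");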
@Override
public Binary getBinary(Blob blob) throws IOException {
    if (!(blob instanceof FileBlob)) {
        return super.getBinary(blob); // just open the stream and call getBinary(InputStream)
    }
    // we already have a file so can compute the length and digest efficiently
    File file = blob.getFile();
    String digest;
    try (InputStream in = new FileInputStream(file)) {
        digest = DigestUtils.md5Hex(in);
    }
    // if the digest is not already known then save to GridFS
    GridFSFile dbFile = gridFSBucket.find(Filters.eq(METADATA_PROPERTY_FILENAME, digest)).first();
    if (dbFile == null) {
        try (InputStream in = new FileInputStream(file)) {
            gridFSBucket.uploadFromStream(digest, in);
        }
    }
    return new GridFSBinary(digest, blobProviderId);
}
@Override
public InputStream getAssociatedDocumentStream(String uniqueId, String fileName) {
    GridFSBucket gridFS = createGridFSConnection();
    GridFSFile file = gridFS.find(new Document(ASSOCIATED_METADATA + "." + FILE_UNIQUE_ID_KEY,
            getGridFsId(uniqueId, fileName))).first();
    if (file == null) {
        return null;
    }
    InputStream is = gridFS.openDownloadStream(file.getObjectId());
    Document metadata = file.getMetadata();
    if (metadata != null && metadata.containsKey(COMPRESSED_FLAG)) {
        boolean compressed = (boolean) metadata.remove(COMPRESSED_FLAG);
        if (compressed) {
            is = new InflaterInputStream(is);
        }
    }
    return is;
}
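The matching upload path is not part of this snippet; a sketch of what it presumably looks like, assuming the same COMPRESSED_FLAG metadata key, with the raw-content stream name illustrative:

// compress on the way in and record the flag so readers know to inflate
GridFSUploadOptions options = new GridFSUploadOptions()
        .metadata(new Document(COMPRESSED_FLAG, true));
try (InputStream compressed = new DeflaterInputStream(rawContent)) {
    gridFS.uploadFromStream(getGridFsId(uniqueId, fileName), compressed, options);
}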
GridFSFile file = gridFS.find(fileQuery).limit(1).first();
if (file != null) {
    final ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
    gridFS.downloadToStream(file.getObjectId(), outputStream);
    deSerializeData(outputStream.toByteArray());
}
public GridFsResource getResource(GridFSFile file) {

    Assert.notNull(file, "GridFSFile must not be null!");

    return new GridFsResource(file, getGridFs().openDownloadStream(file.getObjectId()));
}
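A hedged usage sketch, again assuming a gridFsTemplate bean; findOne returns null when nothing matches, so the guard matters:

GridFSFile file = gridFsTemplate.findOne(Query.query(Criteria.where("filename").is("report.pdf")));
if (file != null) {
    GridFsResource resource = gridFsTemplate.getResource(file);
    try (InputStream in = resource.getInputStream()) {
        byte[] bytes = StreamUtils.copyToByteArray(in); // org.springframework.util.StreamUtils
    }
}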
List<StorageMetadata> list = new ArrayList<>();
if (!containerExists(bucket)) {
    LOGGER.debug("container " + bucket.getBucketName() + " does not exist, returning an empty list");
    return list;
}
if (bucket.find(Filters.eq(FILENAME_TAG, bucketDirectory.getObjectHandle().getName())).iterator().hasNext()) {
    if (listRecursiveFlag.equals(ListRecursiveFlag.TRUE)) {
        String pattern = "^" + directoryname + ".*";
        GridFSFindIterable gridFSFiles = bucket.find(regex(FILENAME_TAG, pattern, "i"));
        gridFSFiles.forEach((Consumer<GridFSFile>) file -> bucketPaths.add(
                new BucketPath(bucketDirectory.getObjectHandle().getContainer(), file.getFilename())));
    }
}
gridFSBucket.downloadToStream(file.getId(), exchange.getOutputStream());
@Override
public void deleteAllDocuments() {
    GridFSBucket gridFS = createGridFSConnection();
    gridFS.drop();

    MongoDatabase db = mongoClient.getDatabase(database);
    MongoCollection<Document> coll = db.getCollection(rawCollectionName);
    coll.deleteMany(new Document());
}
@Override
public void deleteAssociatedDocuments(String uniqueId) {
    GridFSBucket gridFS = createGridFSConnection();
    gridFS.find(new Document(ASSOCIATED_METADATA + "." + DOCUMENT_UNIQUE_ID_KEY, uniqueId))
            .forEach((Block<com.mongodb.client.gridfs.model.GridFSFile>) gridFSFile ->
                    gridFS.delete(gridFSFile.getObjectId()));
}
private Optional<Media> finder(Bson query) {
    GridFSFile file = getGridFs().find(query).first();
    if (file == null) {
        return Optional.empty();
    }
    InputStream inputStream = getGridFs().openDownloadStream(file.getId());
    Document metadata = file.getMetadata();
private AssociatedDocument loadGridFSToAssociatedDocument(GridFSBucket gridFS, GridFSFile file,
        FetchType fetchType) throws IOException {
    AssociatedDocument.Builder aBuilder = AssociatedDocument.newBuilder();
    aBuilder.setFilename(file.getFilename());

    Document metadata = file.getMetadata();

    boolean compressed = false;
    if (metadata.containsKey(COMPRESSED_FLAG)) {
        compressed = (boolean) metadata.remove(COMPRESSED_FLAG);
    }

    long timestamp = (long) metadata.remove(TIMESTAMP);

    aBuilder.setCompressed(compressed);
    aBuilder.setTimestamp(timestamp);
    aBuilder.setDocumentUniqueId((String) metadata.remove(DOCUMENT_UNIQUE_ID_KEY));
    for (String field : metadata.keySet()) {
        aBuilder.addMetadata(Metadata.newBuilder().setKey(field).setValue((String) metadata.get(field)));
    }

    if (FetchType.FULL.equals(fetchType)) {
        ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
        gridFS.downloadToStream(file.getObjectId(), byteArrayOutputStream);
        byte[] bytes = byteArrayOutputStream.toByteArray();
        if (compressed) {
            bytes = CommonCompression.uncompressZlib(bytes);
        }
        aBuilder.setDocument(ByteString.copyFrom(bytes));
    }
    aBuilder.setIndexName(indexName);
    return aBuilder.build();
}
@Override
public void deleteContainer(BucketDirectory bucketDirectory) {
    BucketPathUtil.checkContainerName(bucketDirectory.getObjectHandle().getContainer());
    GridFSBuckets.create(database, bucketDirectory.getObjectHandle().getContainer()).drop();
}
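Each container is modeled here as a named GridFS bucket, so dropping the bucket removes all of its data. A minimal sketch with an illustrative bucket name:

// drop() removes both backing collections, "<name>.files" and "<name>.chunks"
GridFSBucket bucket = GridFSBuckets.create(database, "user-container");
bucket.drop();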