/**
 * Returns a new empty {@link BlobPath} with no path components.
 */
public static BlobPath cleanPath() { return new BlobPath(); }
/**
 * Deletes a blob with the given name, ignoring the case where the blob does not exist.
 *
 * @param blobName
 *          The name of the blob to delete.
 * @throws IOException if the blob exists but could not be deleted.
 */
default void deleteBlobIgnoringIfNotExists(String blobName) throws IOException {
    try {
        deleteBlob(blobName);
    } catch (final NoSuchFileException ignored) {
        // a missing blob means there is nothing left to delete, so this exception is intentionally ignored
    }
}
/**
 * Reads blob content from the input stream and writes it to the container in a new blob with the given name,
 * using an atomic write operation if the implementation supports it. When the BlobContainer implementation
 * does not provide a specific implementation of writeBlobAtomic(String, InputStream, long), then
 * the {@link #writeBlob(String, InputStream, long, boolean)} method is used.
 *
 * This method assumes the container does not already contain a blob of the same blobName. If a blob by the
 * same name already exists, the operation will fail and an {@link IOException} will be thrown.
 *
 * @param blobName
 *          The name of the blob to write the contents of the input stream to.
 * @param inputStream
 *          The input stream from which to retrieve the bytes to write to the blob.
 * @param blobSize
 *          The size of the blob to be written, in bytes. It is implementation dependent whether
 *          this value is used in writing the blob to the repository.
 * @param failIfAlreadyExists
 *          whether to throw a FileAlreadyExistsException if the given blob already exists
 * @throws FileAlreadyExistsException if failIfAlreadyExists is true and a blob by the same name already exists
 * @throws IOException if the input stream could not be read, or the target blob could not be written to.
 */
default void writeBlobAtomic(final String blobName, final InputStream inputStream, final long blobSize,
                             boolean failIfAlreadyExists) throws IOException {
    // default fallback: delegate to the plain (non-atomic) write
    writeBlob(blobName, inputStream, blobSize, failIfAlreadyExists);
}
/**
 * Verifies that this node can access the repository. For read-only repositories the check is a
 * metadata read; otherwise the node must be able to read the "master.dat" marker blob (written by
 * the master, see {@code startVerification}) and write its own node-specific data blob.
 * Any failure is surfaced as a {@link RepositoryVerificationException}.
 */
@Override
public void verify(String seed, DiscoveryNode localNode) {
    assertSnapshotOrGenericThread();
    if (isReadOnly()) {
        try {
            // read-only: writing is not possible, so reading the latest index blob id
            // is the only way to prove the repository path is reachable from this node
            latestIndexBlobId();
        } catch (IOException e) {
            throw new RepositoryVerificationException(metadata.name(), "path " + basePath() +
                " is not accessible on node " + localNode, e);
        }
    } else {
        BlobContainer testBlobContainer = blobStore().blobContainer(basePath().add(testBlobPrefix(seed)));
        if (testBlobContainer.blobExists("master.dat")) {
            try {
                // prove this node can also write to the shared store
                BytesArray bytes = new BytesArray(seed);
                try (InputStream stream = bytes.streamInput()) {
                    testBlobContainer.writeBlob("data-" + localNode.getId() + ".dat", stream, bytes.length(), true);
                }
            } catch (IOException exp) {
                throw new RepositoryVerificationException(metadata.name(), "store location [" + blobStore() +
                    "] is not accessible on the node [" + localNode + "]", exp);
            }
        } else {
            // the marker blob written by the master is not visible here: the store is either
            // not shared with the master or not readable by this node
            throw new RepositoryVerificationException(metadata.name(), "a file written by master to the store [" + blobStore() + "] cannot be accessed on the node [" + localNode + "]. " +
                "This might indicate that the store [" + blobStore() + "] is not shared between this node and the master node or " +
                "that permissions on the store don't allow reading files written by the master node");
        }
    }
}
/**
 * Creates a context whose blob container is rooted at
 * {@code basePath()/indices/<indexId>/<snapshotShardId>}.
 * Note: {@code shardId} (the local shard) is stored on the context, while
 * {@code snapshotShardId} determines the path within the repository.
 */
Context(SnapshotId snapshotId, IndexId indexId, ShardId shardId, ShardId snapshotShardId) {
    this.snapshotId = snapshotId;
    this.shardId = shardId;
    blobContainer = blobStore().blobContainer(basePath().add("indices").add(indexId.getId())
        .add(Integer.toString(snapshotShardId.getId())));
}
/**
 * Starts repository verification on the master. For writable repositories this atomically writes
 * a "master.dat" marker blob under a seed-specific test container and returns the seed, which the
 * data nodes then use in {@code verify} to locate and read the marker back. For read-only
 * repositories it only reads the latest index blob id and returns "read-only".
 */
@Override
public String startVerification() {
    try {
        if (isReadOnly()) {
            // It's readonly - so there is not much we can do here to verify it apart from reading the blob store metadata
            latestIndexBlobId();
            return "read-only";
        } else {
            String seed = UUIDs.randomBase64UUID();
            byte[] testBytes = Strings.toUTF8Bytes(seed);
            BlobContainer testContainer = blobStore().blobContainer(basePath().add(testBlobPrefix(seed)));
            String blobName = "master.dat";
            BytesArray bytes = new BytesArray(testBytes);
            try (InputStream stream = bytes.streamInput()) {
                // atomic + failIfAlreadyExists so a partially-written or stale marker is never observed
                testContainer.writeBlobAtomic(blobName, stream, bytes.length(), true);
            }
            return seed;
        }
    } catch (IOException exp) {
        throw new RepositoryVerificationException(metadata.name(), "path " + basePath() + " is not accessible on master node", exp);
    }
}
/**
 * Checks whether a blob for the given logical name exists in the container.
 *
 * @param blobContainer container to probe
 * @param name          logical name, resolved to the actual blob name via {@link #blobName}
 * @return true if the resolved blob exists
 * @throws IOException if the existence check fails
 */
public boolean exists(BlobContainer blobContainer, String name) throws IOException {
    final String resolvedName = blobName(name);
    return blobContainer.blobExists(resolvedName);
}
/**
 * Opens a stream over the blob that holds the given part (slice) of the multi-part file.
 */
@Override
protected InputStream openSlice(long slice) throws IOException {
    return container.readBlob(info.partName(slice));
}
}
/** * Checks if snapshot file already exists in the list of blobs * * @param fileInfo file to check * @param blobs list of blobs * @return true if file exists in the list of blobs */ private boolean snapshotFileExistsInBlobs(BlobStoreIndexShardSnapshot.FileInfo fileInfo, Map<String, BlobMetaData> blobs) { BlobMetaData blobMetaData = blobs.get(fileInfo.name()); if (blobMetaData != null) { return blobMetaData.length() == fileInfo.length(); } else if (blobs.containsKey(fileInfo.partName(0))) { // multi part file sum up the size and check int part = 0; long totalSize = 0; while (true) { blobMetaData = blobs.get(fileInfo.partName(part++)); if (blobMetaData == null) { break; } totalSize += blobMetaData.length(); } return totalSize == fileInfo.length(); } // no file, not exact and not multipart return false; }
/**
 * Maintains a single lazy instance of {@link BlobContainer}, initialized on first use
 * via double-checked locking (unsynchronized read, then re-check under {@code lock}).
 */
protected BlobContainer blobContainer() {
    assertSnapshotOrGenericThread();
    BlobContainer blobContainer = this.blobContainer.get();
    if (blobContainer == null) {
        synchronized (lock) {
            // re-check under the lock: another thread may have initialized it meanwhile
            blobContainer = this.blobContainer.get();
            if (blobContainer == null) {
                blobContainer = blobStore().blobContainer(basePath());
                this.blobContainer.set(blobContainer);
            }
        }
    }
    return blobContainer;
}
@Override protected void doClose() { BlobStore store; // to close blobStore if blobStore initialization is started during close synchronized (lock) { store = blobStore.get(); } if (store != null) { try { store.close(); } catch (Exception t) { logger.warn("cannot close blob store", t); } } }
/**
 * Writes the given bytes into the repository root container as a single atomic blob write.
 *
 * @param blobName            name of the blob to write
 * @param bytesRef            bytes to write
 * @param failIfAlreadyExists whether an existing blob with the same name is an error
 * @throws IOException if the write fails
 */
private void writeAtomic(final String blobName, final BytesReference bytesRef, boolean failIfAlreadyExists) throws IOException {
    try (InputStream bytesStream = bytesRef.streamInput()) {
        blobContainer().writeBlobAtomic(blobName, bytesStream, bytesRef.length(), failIfAlreadyExists);
    }
}
/**
 * Reads the index metadata for the given index as captured by the given snapshot,
 * stored under {@code basePath()/indices/<indexId>}.
 */
@Override
public IndexMetaData getSnapshotIndexMetaData(final SnapshotId snapshotId, final IndexId index) throws IOException {
    final BlobContainer indexContainer =
        blobStore().blobContainer(basePath().add("indices").add(index.getId()));
    return indexMetaDataFormat.read(indexContainer, snapshotId.getUUID());
}
/**
 * Returns a new {@link BlobPath} with the given component appended;
 * this instance is left unmodified.
 */
public BlobPath add(String path) {
    final List<String> updated = new ArrayList<>(this.paths.size() + 1);
    updated.addAll(this.paths);
    updated.add(path);
    return new BlobPath(Collections.unmodifiableList(updated));
}
/**
 * Deletes the blob for the given logical name from the container.
 *
 * @param blobContainer container to delete from
 * @param name          logical name, resolved to the actual blob name via {@link #blobName}
 * @throws IOException if the deletion fails
 */
public void delete(BlobContainer blobContainer, String name) throws IOException {
    final String resolvedName = blobName(name);
    blobContainer.deleteBlob(resolvedName);
}
/**
 * Writes blob with resolving the blob name using {@link #blobName} method.
 * <p>
 * The blob will be compressed and checksum will be written if required.
 *
 * @param obj           object to be serialized
 * @param blobContainer blob container
 * @param name          blob name
 */
public void write(T obj, BlobContainer blobContainer, String name) throws IOException {
    final String blobName = blobName(name);
    writeTo(obj, blobName, bytesArray -> {
        try (InputStream stream = bytesArray.streamInput()) {
            // failIfAlreadyExists=true: never silently overwrite an existing blob
            blobContainer.writeBlob(blobName, stream, bytesArray.length(), true);
        }
    });
}
/**
 * Writes blob in atomic manner with resolving the blob name using {@link #blobName} method.
 * <p>
 * The blob will be compressed and checksum will be written if required.
 *
 * Atomic move might be very inefficient on some repositories. It also cannot override existing files.
 *
 * @param obj           object to be serialized
 * @param blobContainer blob container
 * @param name          blob name
 */
public void writeAtomic(T obj, BlobContainer blobContainer, String name) throws IOException {
    final String blobName = blobName(name);
    writeTo(obj, blobName, bytesArray -> {
        try (InputStream stream = bytesArray.streamInput()) {
            // failIfAlreadyExists=true: atomic writes cannot replace an existing blob
            blobContainer.writeBlobAtomic(blobName, stream, bytesArray.length(), true);
        }
    });
}
/**
 * Best-effort deletion of the index metadata blob for the given snapshot/index pair;
 * an {@link IOException} is logged at WARN level and otherwise ignored.
 */
private void deleteIndexMetaDataBlobIgnoringErrors(final SnapshotInfo snapshotInfo, final IndexId indexId) {
    final SnapshotId snapshotId = snapshotInfo.snapshotId();
    final BlobPath indexPath = basePath().add("indices").add(indexId.getId());
    final BlobContainer indexMetaDataBlobContainer = blobStore().blobContainer(indexPath);
    try {
        indexMetaDataFormat.delete(indexMetaDataBlobContainer, snapshotId.getUUID());
    } catch (IOException ex) {
        logger.warn(() -> new ParameterizedMessage("[{}] failed to delete metadata for index [{}]",
            snapshotId, indexId.getName()), ex);
    }
}