/**
 * This is a BWC layer to ensure we update the snapshots' metadata with the corresponding hashes before we compare them.
 * The new logic for StoreFileMetaData reads the entire {@code .si} and {@code segments.n} files to strengthen the
 * comparison of the files on a per-segment / per-commit level.
 */
private static void maybeRecalculateMetadataHash(final BlobContainer blobContainer,
                                                 final BlobStoreIndexShardSnapshot.FileInfo fileInfo,
                                                 Store.MetadataSnapshot snapshot) throws Exception {
    final StoreFileMetaData metadata;
    if (fileInfo != null && (metadata = snapshot.get(fileInfo.physicalName())) != null) {
        if (metadata.hash().length > 0 && fileInfo.metadata().hash().length == 0) {
            // we have a hash - check if our repo has a hash too, otherwise we have to calculate it;
            // we might have multiple parts even though the file is small... make sure we read all of it.
            try (InputStream stream = new PartSliceStream(blobContainer, fileInfo)) {
                BytesRefBuilder builder = new BytesRefBuilder();
                Store.MetadataSnapshot.hashFile(builder, stream, fileInfo.length());
                BytesRef hash = fileInfo.metadata().hash(); // reset the file info's metadata hash
                assert hash.length == 0;
                hash.bytes = builder.bytes();
                hash.offset = 0;
                hash.length = builder.length();
            }
        }
    }
}
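// A hedged call-site sketch, not code from the source above: `snapshotFiles`,
// `blobContainer` and `localMetadata` are hypothetical names. It shows why the
// BWC shim runs first: legacy snapshot entries get their hashes filled in so a
// subsequent per-segment comparison (e.g. Store.MetadataSnapshot#recoveryDiff)
// can match .si / segments_N files against the local store.
for (BlobStoreIndexShardSnapshot.FileInfo fileInfo : snapshotFiles) {
    try {
        // fill in the missing hash on the snapshot side when the local side carries one
        maybeRecalculateMetadataHash(blobContainer, fileInfo, localMetadata);
    } catch (Exception e) {
        // if the blob cannot be re-read, leave the hash empty; the file then
        // fails the per-segment match and is treated as different
    }
}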
/**
 * Serializes file info into JSON
 *
 * @param file    file info
 * @param builder XContent builder
 * @param params  parameters
 */
public static void toXContent(FileInfo file, XContentBuilder builder, ToXContent.Params params) throws IOException {
    builder.startObject();
    builder.field(NAME, file.name);
    builder.field(PHYSICAL_NAME, file.metadata.name());
    builder.field(LENGTH, file.metadata.length());
    if (file.metadata.checksum().equals(UNKNOWN_CHECKSUM) == false) {
        builder.field(CHECKSUM, file.metadata.checksum());
    }
    if (file.partSize != null) {
        builder.field(PART_SIZE, file.partSize.getBytes());
    }
    if (file.metadata.writtenBy() != null) {
        builder.field(WRITTEN_BY, file.metadata.writtenBy());
    }
    if (file.metadata.hash() != null && file.metadata.hash().length > 0) {
        BytesRef br = file.metadata.hash();
        builder.field(META_HASH, br.bytes, br.offset, br.length);
    }
    builder.endObject();
}
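// A hedged usage sketch (assumes the older XContentBuilder API this snippet
// appears to target; `fileInfo` is a hypothetical FileInfo instance):
XContentBuilder jsonBuilder = XContentFactory.jsonBuilder();
BlobStoreIndexShardSnapshot.FileInfo.toXContent(fileInfo, jsonBuilder, ToXContent.EMPTY_PARAMS);
String json = jsonBuilder.string(); // builder.string() predates Strings.toString(builder)
// Expected shape, assuming the usual values of the field-name constants:
// {"name":"__0","physical_name":"_0.si","length":363,"checksum":"...","meta_hash":"..."}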
/**
 * This is a BWC layer to ensure we update the snapshots' metadata with the corresponding hashes before we compare them.
 * The new logic for StoreFileMetaData reads the entire {@code .si} and {@code segments.n} files to strengthen the
 * comparison of the files on a per-segment / per-commit level.
 */
private static void maybeRecalculateMetadataHash(final BlobContainer blobContainer, final FileInfo fileInfo,
                                                 Store.MetadataSnapshot snapshot) throws Throwable {
    final StoreFileMetaData metadata;
    if (fileInfo != null && (metadata = snapshot.get(fileInfo.physicalName())) != null) {
        if (metadata.hash().length > 0 && fileInfo.metadata().hash().length == 0) {
            // we have a hash - check if our repo has a hash too, otherwise we have to calculate it;
            // we might have multiple parts even though the file is small... make sure we read all of it.
            try (final InputStream stream = new PartSliceStream(blobContainer, fileInfo)) {
                BytesRefBuilder builder = new BytesRefBuilder();
                Store.MetadataSnapshot.hashFile(builder, stream, fileInfo.length());
                BytesRef hash = fileInfo.metadata().hash(); // reset the file info's metadata hash
                assert hash.length == 0;
                hash.bytes = builder.bytes();
                hash.offset = 0;
                hash.length = builder.length();
            }
        }
    }
}
/**
 * Serializes file info into JSON
 *
 * @param file    file info
 * @param builder XContent builder
 * @param params  parameters
 */
public static void toXContent(FileInfo file, XContentBuilder builder, ToXContent.Params params) throws IOException {
    builder.startObject();
    builder.field(NAME, file.name);
    builder.field(PHYSICAL_NAME, file.metadata.name());
    builder.field(LENGTH, file.metadata.length());
    if (file.metadata.checksum().equals(UNKNOWN_CHECKSUM) == false) {
        builder.field(CHECKSUM, file.metadata.checksum());
    }
    if (file.partSize != null) {
        builder.field(PART_SIZE, file.partSize.getBytes());
    }
    if (file.metadata.writtenBy() != null) {
        builder.field(WRITTEN_BY, file.metadata.writtenBy());
    }
    if (file.metadata.hash() != null && file.metadata.hash().length > 0) {
        builder.field(META_HASH, file.metadata.hash());
    }
    builder.endObject();
}
/**
 * Serializes file info into JSON
 *
 * @param file    file info
 * @param builder XContent builder
 * @param params  parameters
 */
public static void toXContent(FileInfo file, XContentBuilder builder, ToXContent.Params params) throws IOException {
    builder.startObject();
    builder.field(Fields.NAME, file.name);
    builder.field(Fields.PHYSICAL_NAME, file.metadata.name());
    builder.field(Fields.LENGTH, file.metadata.length());
    if (file.metadata.checksum() != null) {
        builder.field(Fields.CHECKSUM, file.metadata.checksum());
    }
    if (file.partSize != null) {
        builder.field(Fields.PART_SIZE, file.partSize.bytes());
    }
    if (file.metadata.writtenBy() != null) {
        builder.field(Fields.WRITTEN_BY, file.metadata.writtenBy());
    }
    if (file.metadata.hash() != null && file.metadata.hash().length > 0) {
        builder.field(Fields.META_HASH, file.metadata.hash());
    }
    builder.endObject();
}