/**
 * Stamps an outgoing image PUT request with verification headers: the
 * file's byte length and, when a stored digest exists on disk for the
 * file, its MD5.
 *
 * @param connection open connection whose request properties are set
 * @param file image file being uploaded
 * @throws IOException if reading the stored MD5 for the file fails
 */
static void setVerificationHeadersForPut(HttpURLConnection connection,
    File file) throws IOException {
  connection.setRequestProperty(Util.CONTENT_LENGTH,
      Long.toString(file.length()));
  // The digest header is optional: a stored MD5 may not exist for the file.
  MD5Hash storedDigest = MD5FileUtils.readStoredMd5ForFile(file);
  if (storedDigest != null) {
    connection.setRequestProperty(Util.MD5_HEADER, storedDigest.toString());
  }
}
/**
 * Adds verification headers to an image GET response: the content length
 * and, when a stored digest exists for the file, its MD5.
 *
 * @param response servlet response whose headers are set
 * @param file file whose length/digest are advertised
 * @throws IOException if reading the stored MD5 for the file fails
 */
public static void setVerificationHeadersForGet(HttpServletResponse response,
    File file) throws IOException {
  response.setHeader(Util.CONTENT_LENGTH, Long.toString(file.length()));
  // MD5 is best-effort: only sent when a stored digest is present.
  MD5Hash storedDigest = MD5FileUtils.readStoredMd5ForFile(file);
  if (storedDigest != null) {
    response.setHeader(Util.MD5_HEADER, storedDigest.toString());
  }
}
/**
 * Renders this signature as its fields joined by {@code FIELD_SEPARATOR},
 * in order: layoutVersion, namespaceID, cTime, editsTime, checkpointTime,
 * imageDigest, checkpointState.
 */
@Override
public String toString() {
  // StringBuilder.append handles each field's conversion directly, which
  // removes the redundant String.valueOf wrappers and is safe even if
  // FIELD_SEPARATOR is a char (no accidental numeric addition).
  return new StringBuilder()
      .append(layoutVersion).append(FIELD_SEPARATOR)
      .append(namespaceID).append(FIELD_SEPARATOR)
      .append(cTime).append(FIELD_SEPARATOR)
      .append(editsTime).append(FIELD_SEPARATOR)
      .append(checkpointTime).append(FIELD_SEPARATOR)
      // .toString() kept (not String.valueOf) so a null digest/state still
      // fails fast exactly as the original did.
      .append(imageDigest.toString()).append(FIELD_SEPARATOR)
      .append(checkpointState.toString())
      .toString();
}
/**
 * Sets image-download verification headers on the response: the content
 * length always, and the MD5 digest when one is stored alongside the file.
 *
 * @param response servlet response to decorate
 * @param file file being served
 * @throws IOException if the stored MD5 cannot be read
 */
public static void setVerificationHeadersForGet(HttpServletResponse response,
    File file) throws IOException {
  response.setHeader(TransferFsImage.CONTENT_LENGTH,
      Long.toString(file.length()));
  MD5Hash storedDigest = MD5FileUtils.readStoredMd5ForFile(file);
  if (storedDigest != null) {
    // Only present when a .md5 sidecar exists for this image.
    response.setHeader(TransferFsImage.MD5_HEADER, storedDigest.toString());
  }
}
/**
 * Populates verification headers for an image GET: content length, plus the
 * file's stored MD5 digest if one is available.
 *
 * @param response servlet response receiving the headers
 * @param file file whose metadata is advertised
 * @throws IOException if reading the stored MD5 fails
 */
public static void setVerificationHeadersForGet(HttpServletResponse response,
    File file) throws IOException {
  long length = file.length();
  response.setHeader(TransferFsImage.CONTENT_LENGTH, String.valueOf(length));
  // Digest header is omitted when no stored MD5 exists for the file.
  MD5Hash digest = MD5FileUtils.readStoredMd5ForFile(file);
  if (digest != null) {
    response.setHeader(TransferFsImage.MD5_HEADER, digest.toString());
  }
}
/**
 * Derives a stable identifier for a trace file by MD5-hashing a string built
 * from the file's modification time, fully-qualified path, owner, and length.
 *
 * @param input path of the trace file
 * @return hex MD5 digest identifying this version of the trace
 * @throws IOException if the file's status cannot be read
 */
protected static String getTraceSignature(String input) throws IOException {
  Path tracePath = new Path(input);
  FileSystem fs = tracePath.getFileSystem(new Configuration());
  FileStatus stat = fs.getFileStatus(tracePath);
  Path qualified = fs.makeQualified(stat.getPath());
  // Field order is fixed: it defines the signature, so do not reorder.
  StringBuilder traceId = new StringBuilder()
      .append(stat.getModificationTime())
      .append(qualified.toString())
      .append(stat.getOwner())
      .append(stat.getLen());
  return MD5Hash.digest(traceId.toString()).toString();
}
/**
 * Sets the verification request properties for an image upload: the file
 * length always, and the stored MD5 digest when one is available.
 *
 * @param connection connection performing the PUT
 * @param file image file being sent
 * @throws IOException if the stored MD5 cannot be read
 */
static void setVerificationHeadersForPut(HttpURLConnection connection,
    File file) throws IOException {
  connection.setRequestProperty(TransferFsImage.CONTENT_LENGTH,
      Long.toString(file.length()));
  // A missing stored digest simply means the header is not sent.
  MD5Hash storedDigest = MD5FileUtils.readStoredMd5ForFile(file);
  if (storedDigest != null) {
    connection.setRequestProperty(TransferFsImage.MD5_HEADER,
        storedDigest.toString());
  }
}
/**
 * Attaches verification headers to an image PUT: content length, plus the
 * file's stored MD5 digest if present on disk.
 *
 * @param connection connection performing the upload
 * @param file file being uploaded
 * @throws IOException if reading the stored MD5 fails
 */
static void setVerificationHeadersForPut(HttpURLConnection connection,
    File file) throws IOException {
  long length = file.length();
  connection.setRequestProperty(TransferFsImage.CONTENT_LENGTH,
      String.valueOf(length));
  MD5Hash digest = MD5FileUtils.readStoredMd5ForFile(file);
  if (digest != null) {
    // Receiver uses this to verify the transferred image's integrity.
    connection.setRequestProperty(TransferFsImage.MD5_HEADER,
        digest.toString());
  }
}
/** * Map the HDFS based distributed cache file path from original cluster to * a unique file name on the simulated cluster. * <br> Unique distributed file names on simulated cluster are generated * using original cluster's <li>file path, <li>timestamp and <li> the * job-submitter for private distributed cache file. * <br> This implies that if on original cluster, a single HDFS file * considered as two private distributed cache files for two jobs of * different users, then the corresponding simulated jobs will have two * different files of the same size in public distributed cache, one for each * user. Both these simulated jobs will not share these distributed cache * files, thus leading to the same load as seen in the original cluster. * @param file distributed cache file path * @param timeStamp time stamp of dist cachce file * @param isPublic true if this distributed cache file is a public * distributed cache file * @param user job submitter on original cluster * @return the mapped path on simulated cluster */ private String mapDistCacheFilePath(String file, String timeStamp, boolean isPublic, String user) { String id = file + timeStamp; if (!isPublic) { // consider job-submitter for private distributed cache file id = id.concat(user); } return new Path(distCachePath, MD5Hash.digest(id).toString()).toUri() .getPath(); }
/**
 * Computes the MD5 digest of a file's contents.
 *
 * @param file input file to hash
 * @return the digest rendered as a hex string
 * @throws IOException if the file cannot be read
 */
public static String getFileMD5(File file) throws IOException {
  MD5Hash digest = MD5FileUtils.computeMd5ForFile(file);
  return digest.toString();
}
/**
 * Write last checkpoint time and version file into the storage directory.
 *
 * The version file should always be written last. Missing or corrupted
 * version file indicates that the checkpoint is not valid.
 *
 * @param props properties to populate for the version file
 * @param sd storage directory
 * @throws IOException if the image file cannot be read or digested
 */
protected void setFields(Properties props, StorageDirectory sd
    ) throws IOException {
  super.setFields(props, sd);
  boolean uState = getDistributedUpgradeState();
  int uVersion = getDistributedUpgradeVersion();
  // Upgrade properties are recorded only while an upgrade is in progress
  // (state set and version differing from the current layout version).
  if (uState && uVersion != getLayoutVersion()) {
    props.setProperty("distributedUpgradeState", Boolean.toString(uState));
    props.setProperty("distributedUpgradeVersion",
        Integer.toString(uVersion));
  }
  if (this.newImageDigest) {
    // Fix: the original leaked this FileInputStream — MD5Hash.digest does
    // not close the stream it reads, so close it explicitly here.
    FileInputStream imageIn =
        new FileInputStream(getImageFile(sd, NameNodeFile.IMAGE));
    try {
      this.setImageDigest(MD5Hash.digest(imageIn));
    } finally {
      imageIn.close();
    }
  }
  props.setProperty(MESSAGE_DIGEST_PROPERTY,
      this.getImageDigest().toString());
  writeCheckpointTime(sd);
}
// NOTE(review): fragment — the enclosing method is outside this chunk.
// Perturbs the first byte of the digest so it no longer matches the image,
// then records the altered digest in the version-file properties —
// presumably to simulate a corrupted image digest in a test; confirm
// against the enclosing method.
bytes[0] += 1;
md5 = new MD5Hash(bytes);
props.setProperty(FSImage.MESSAGE_DIGEST_PROPERTY, md5.toString());
// NOTE(review): this span is extraction-garbled and not syntactically valid
// Java — at least two statements have lost their left-hand side (the bare
// "= MD5Hash.digest(...)" lines, which presumably assigned to md5hash), and
// a stray "} else {" and an orphaned "new FileInfo(...))" argument survive
// from a structure that was cut apart. Tokens are preserved byte-for-byte
// below; do not treat this as runnable code — recover the original from
// version control before editing.
fileInfo.put(pathURI, new FileInfo(md5hash.toString(),
    md5hash.getFileLength(), 0));
} else {
// (missing LHS — presumably "md5hash =")
= MD5Hash.digest(new FileInputStream(tmp.toUri().getPath()));
newPath = copyRemoteFiles(fs, filesDir, tmp, job, replication,
    md5hash.toString());
fileInfo.put(pathURI, new FileInfo(md5hash.toString(),
    md5hash.getFileLength(), 0));
// (missing LHS — presumably "md5hash =")
= MD5Hash.digest(new FileInputStream(tmp.toUri().getPath()));
newPath = copyRemoteFiles(fs, libjarsDir, tmp, job, replication,
    md5hash.toString());
DistributedCache.addSharedArchiveToClassPath(newPath, job);
// (orphaned constructor argument from a cut statement)
new FileInfo(md5hash.toString(), md5hash.getFileLength(), 0));
fileInfo.put(pathURI, new FileInfo(md5hash.toString(),
    md5hash.getFileLength(), 0));
// NOTE(review): fragment — the enclosing method and the matching close
// brace are outside this chunk. Writes the MD5 of the block checksum into
// the checksum buffer, and captures its hex form only when debug logging
// is enabled (avoids the toString cost otherwise) — presumably for a later
// debug log line; confirm against the enclosing method.
md5.write(getBlockChecksumBuf());
if (LOG.isDebugEnabled()) {
  blockChecksumForDebug = md5.toString();