/**
 * Simple major.minor versioning scheme. Any incompatible changes should be across major
 * versions. Minor version differences are allowed -- meaning we should be able to read
 * dbs that are either earlier *or* later on the minor version.
 */
public static void checkVersion(DB db, StoreVersion newversion, ObjectMapper mapper)
    throws IOException {
  byte[] stored = db.get(StoreVersion.KEY);
  if (stored != null) {
    // A version record already exists: refuse to cross a major-version boundary.
    StoreVersion onDisk = mapper.readValue(stored, StoreVersion.class);
    if (onDisk.major != newversion.major) {
      throw new IOException("cannot read state DB with version " + onDisk + ", incompatible " +
        "with current version " + newversion);
    }
  }
  // Fresh DB, or majors match: (re)write the current version so the minor stays current.
  storeVersion(db, newversion, mapper);
}
private void loadSecretsFromDb() throws IOException { secretsFile = initRecoveryDb(SECRETS_RECOVERY_FILE_NAME); // Make sure this is protected in case its not in the NM recovery dir FileSystem fs = FileSystem.getLocal(_conf); fs.mkdirs(new Path(secretsFile.getPath()), new FsPermission((short) 0700)); db = LevelDBProvider.initLevelDB(secretsFile, CURRENT_VERSION, mapper); logger.info("Recovery location is: " + secretsFile.getPath()); if (db != null) { logger.info("Going to reload spark shuffle data"); DBIterator itr = db.iterator(); itr.seek(APP_CREDS_KEY_PREFIX.getBytes(StandardCharsets.UTF_8)); while (itr.hasNext()) { Map.Entry<byte[], byte[]> e = itr.next(); String key = new String(e.getKey(), StandardCharsets.UTF_8); if (!key.startsWith(APP_CREDS_KEY_PREFIX)) { break; } String id = parseDbAppKey(key); ByteBuffer secret = mapper.readValue(e.getValue(), ByteBuffer.class); logger.info("Reloading tokens for app: " + id); secretManager.registerApp(id, secret); } } }
// Verify/record the DB's store version; throws IOException on a major-version mismatch.
checkVersion(tmpDb, version, mapper);
private void createSecretManager() throws IOException { secretManager = new ShuffleSecretManager(); secretsFile = initRecoveryDb(SECRETS_RECOVERY_FILE_NAME); // Make sure this is protected in case its not in the NM recovery dir FileSystem fs = FileSystem.getLocal(_conf); fs.mkdirs(new Path(secretsFile.getPath()), new FsPermission((short)0700)); db = LevelDBProvider.initLevelDB(secretsFile, CURRENT_VERSION, mapper); logger.info("Recovery location is: " + secretsFile.getPath()); if (db != null) { logger.info("Going to reload spark shuffle data"); DBIterator itr = db.iterator(); itr.seek(APP_CREDS_KEY_PREFIX.getBytes(StandardCharsets.UTF_8)); while (itr.hasNext()) { Map.Entry<byte[], byte[]> e = itr.next(); String key = new String(e.getKey(), StandardCharsets.UTF_8); if (!key.startsWith(APP_CREDS_KEY_PREFIX)) { break; } String id = parseDbAppKey(key); ByteBuffer secret = mapper.readValue(e.getValue(), ByteBuffer.class); logger.info("Reloading tokens for app: " + id); secretManager.registerApp(id, secret); } } }
// Verify/record the DB's store version; throws IOException on a major-version mismatch.
checkVersion(tmpDb, version, mapper);
private void createSecretManager() throws IOException { secretManager = new ShuffleSecretManager(); secretsFile = initRecoveryDb(SECRETS_RECOVERY_FILE_NAME); // Make sure this is protected in case its not in the NM recovery dir FileSystem fs = FileSystem.getLocal(_conf); fs.mkdirs(new Path(secretsFile.getPath()), new FsPermission((short)0700)); db = LevelDBProvider.initLevelDB(secretsFile, CURRENT_VERSION, mapper); logger.info("Recovery location is: " + secretsFile.getPath()); if (db != null) { logger.info("Going to reload spark shuffle data"); DBIterator itr = db.iterator(); itr.seek(APP_CREDS_KEY_PREFIX.getBytes(StandardCharsets.UTF_8)); while (itr.hasNext()) { Map.Entry<byte[], byte[]> e = itr.next(); String key = new String(e.getKey(), StandardCharsets.UTF_8); if (!key.startsWith(APP_CREDS_KEY_PREFIX)) { break; } String id = parseDbAppKey(key); ByteBuffer secret = mapper.readValue(e.getValue(), ByteBuffer.class); logger.info("Reloading tokens for app: " + id); secretManager.registerApp(id, secret); } } }
/**
 * Simple major.minor versioning scheme. Any incompatible changes should be across major
 * versions. Minor version differences are allowed -- meaning we should be able to read
 * dbs that are either earlier *or* later on the minor version.
 *
 * @param db the LevelDB instance whose stored version is validated
 * @param newversion the version of the currently running code
 * @param mapper used to (de)serialize the {@code StoreVersion} record
 * @throws IOException if the stored major version differs from {@code newversion.major}
 */
public static void checkVersion(DB db, StoreVersion newversion, ObjectMapper mapper)
    throws IOException {
  byte[] bytes = db.get(StoreVersion.KEY);
  if (bytes == null) {
    // Fresh DB with no version record yet: write ours.
    storeVersion(db, newversion, mapper);
  } else {
    StoreVersion version = mapper.readValue(bytes, StoreVersion.class);
    if (version.major != newversion.major) {
      throw new IOException("cannot read state DB with version " + version + ", incompatible " +
        "with current version " + newversion);
    }
    // Majors match; overwrite so the recorded minor version tracks the running code.
    storeVersion(db, newversion, mapper);
  }
}
// Verify/record the DB's store version; throws IOException on a major-version mismatch.
checkVersion(tmpDb, version, mapper);
/**
 * Test-visible constructor allowing the directory cleaner to be injected. Builds the
 * shuffle index cache (bounded by entry count) and, when a registered-executor recovery
 * file yields a LevelDB instance, reloads previously registered executors from it.
 */
@VisibleForTesting
ExternalShuffleBlockResolver(
    TransportConf conf,
    File registeredExecutorFile,
    Executor directoryCleaner) throws IOException {
  this.conf = conf;
  this.registeredExecutorFile = registeredExecutorFile;
  // Maximum number of shuffle index files kept in memory; tunable via configuration.
  int maxCachedIndexFiles = conf.getInt("spark.shuffle.service.index.cache.entries", 1024);
  CacheLoader<File, ShuffleIndexInformation> loader =
      new CacheLoader<File, ShuffleIndexInformation>() {
        @Override
        public ShuffleIndexInformation load(File file) throws IOException {
          return new ShuffleIndexInformation(file);
        }
      };
  shuffleIndexCache = CacheBuilder.newBuilder()
      .maximumSize(maxCachedIndexFiles)
      .build(loader);
  db = LevelDBProvider.initLevelDB(this.registeredExecutorFile, CURRENT_VERSION, mapper);
  if (db != null) {
    executors = reloadRegisteredExecutors(db);
  } else {
    executors = Maps.newConcurrentMap();
  }
  this.directoryCleaner = directoryCleaner;
}
/**
 * Simple major.minor versioning scheme. Any incompatible changes should be across major
 * versions. Minor version differences are allowed -- meaning we should be able to read
 * dbs that are either earlier *or* later on the minor version.
 */
public static void checkVersion(DB db, StoreVersion newversion, ObjectMapper mapper)
    throws IOException {
  byte[] stored = db.get(StoreVersion.KEY);
  if (stored != null) {
    // A version record already exists: refuse to cross a major-version boundary.
    StoreVersion onDisk = mapper.readValue(stored, StoreVersion.class);
    if (onDisk.major != newversion.major) {
      throw new IOException("cannot read state DB with version " + onDisk + ", incompatible " +
        "with current version " + newversion);
    }
  }
  // Fresh DB, or majors match: (re)write the current version so the minor stays current.
  storeVersion(db, newversion, mapper);
}
// Verify/record the DB's store version; throws IOException on a major-version mismatch.
checkVersion(tmpDb, version, mapper);
/**
 * Test-visible constructor allowing the directory cleaner to be injected. Builds the
 * shuffle index cache (bounded by total index size in bytes) and, when a
 * registered-executor recovery file yields a LevelDB instance, reloads previously
 * registered executors from it.
 */
@VisibleForTesting
ExternalShuffleBlockResolver(
    TransportConf conf,
    File registeredExecutorFile,
    Executor directoryCleaner) throws IOException {
  this.conf = conf;
  this.registeredExecutorFile = registeredExecutorFile;
  // Total byte budget for cached shuffle index data; tunable via configuration.
  String cacheSizeSetting = conf.get("spark.shuffle.service.index.cache.size", "100m");
  CacheLoader<File, ShuffleIndexInformation> loader =
      new CacheLoader<File, ShuffleIndexInformation>() {
        @Override
        public ShuffleIndexInformation load(File file) throws IOException {
          return new ShuffleIndexInformation(file);
        }
      };
  // Each entry is weighed by its in-memory index size so the cache is byte-bounded.
  Weigher<File, ShuffleIndexInformation> bySize =
      new Weigher<File, ShuffleIndexInformation>() {
        @Override
        public int weigh(File file, ShuffleIndexInformation indexInfo) {
          return indexInfo.getSize();
        }
      };
  shuffleIndexCache = CacheBuilder.newBuilder()
      .maximumWeight(JavaUtils.byteStringAsBytes(cacheSizeSetting))
      .weigher(bySize)
      .build(loader);
  db = LevelDBProvider.initLevelDB(this.registeredExecutorFile, CURRENT_VERSION, mapper);
  if (db != null) {
    executors = reloadRegisteredExecutors(db);
  } else {
    executors = Maps.newConcurrentMap();
  }
  this.directoryCleaner = directoryCleaner;
}
/**
 * Simple major.minor versioning scheme. Any incompatible changes should be across major
 * versions. Minor version differences are allowed -- meaning we should be able to read
 * dbs that are either earlier *or* later on the minor version.
 *
 * @param db the LevelDB instance whose stored version is validated
 * @param newversion the version of the currently running code
 * @param mapper used to (de)serialize the {@code StoreVersion} record
 * @throws IOException if the stored major version differs from {@code newversion.major}
 */
public static void checkVersion(DB db, StoreVersion newversion, ObjectMapper mapper)
    throws IOException {
  byte[] bytes = db.get(StoreVersion.KEY);
  if (bytes == null) {
    // Fresh DB with no version record yet: write ours.
    storeVersion(db, newversion, mapper);
  } else {
    StoreVersion version = mapper.readValue(bytes, StoreVersion.class);
    if (version.major != newversion.major) {
      throw new IOException("cannot read state DB with version " + version + ", incompatible " +
        "with current version " + newversion);
    }
    // Majors match; overwrite so the recorded minor version tracks the running code.
    storeVersion(db, newversion, mapper);
  }
}
/**
 * Test-visible constructor allowing the directory cleaner to be injected. Builds the
 * shuffle index cache (bounded by total index size in bytes) and, when a
 * registered-executor recovery file yields a LevelDB instance, reloads previously
 * registered executors from it.
 */
@VisibleForTesting
ExternalShuffleBlockResolver(
    TransportConf conf,
    File registeredExecutorFile,
    Executor directoryCleaner) throws IOException {
  this.conf = conf;
  this.registeredExecutorFile = registeredExecutorFile;
  // Total byte budget for cached shuffle index data; tunable via configuration.
  String cacheSizeSetting = conf.get("spark.shuffle.service.index.cache.size", "100m");
  CacheLoader<File, ShuffleIndexInformation> loader =
      new CacheLoader<File, ShuffleIndexInformation>() {
        @Override
        public ShuffleIndexInformation load(File file) throws IOException {
          return new ShuffleIndexInformation(file);
        }
      };
  // Each entry is weighed by its in-memory index size so the cache is byte-bounded.
  Weigher<File, ShuffleIndexInformation> bySize =
      new Weigher<File, ShuffleIndexInformation>() {
        @Override
        public int weigh(File file, ShuffleIndexInformation indexInfo) {
          return indexInfo.getSize();
        }
      };
  shuffleIndexCache = CacheBuilder.newBuilder()
      .maximumWeight(JavaUtils.byteStringAsBytes(cacheSizeSetting))
      .weigher(bySize)
      .build(loader);
  db = LevelDBProvider.initLevelDB(this.registeredExecutorFile, CURRENT_VERSION, mapper);
  if (db != null) {
    executors = reloadRegisteredExecutors(db);
  } else {
    executors = Maps.newConcurrentMap();
  }
  this.directoryCleaner = directoryCleaner;
}