fsOps.deleteIfExists(versionFile.toFile()); //So if we fail we are forced to try again LOG.debug("Removing destination file {} in preparation for move", dest); fsOps.deleteIfExists(dest.toFile()); if (type.needsExtraction()) { Path extractionTemp = topologyBasicBlobsRootDir.resolve(type.getTempExtractionDir(newVersion)); Path extractionDest = topologyBasicBlobsRootDir.resolve(type.getExtractionDir()); LOG.debug("Removing extraction dest {} in preparation for extraction", extractionDest); fsOps.deleteIfExists(extractionDest.toFile()); if (fsOps.fileExists(extractionTemp)) { fsOps.moveDirectoryPreferAtomic(extractionTemp.toFile(), extractionDest.toFile()); fsOps.setupStormCodeDir(owner, topologyBasicBlobsRootDir.toFile()); File sharedMemoryDirFinalLocation = new File(topologyBasicBlobsRootDir.toFile(), "shared_by_topology"); sharedMemoryDirFinalLocation.mkdirs(); fsOps.setupWorkerArtifactsDir(owner, sharedMemoryDirFinalLocation);
/**
 * Prepare the container to run. By default this creates the needed directories/links in the
 * local file system. PREREQUISITE: All needed blobs and topology jars/configs have been
 * downloaded and placed in the appropriate locations.
 *
 * @throws IOException on any error
 */
protected void setup() throws IOException {
    _type.assertFull();
    if (!_ops.doRequiredTopoFilesExist(_conf, _topologyId)) {
        LOG.info("Missing topology storm code, so can't launch worker with assignment {} for this supervisor {} on port {} with id {}",
                 _assignment, _supervisorId, _port, _workerId);
        throw new IllegalStateException("Not all needed files are here!!!!");
    }
    LOG.info("Setting up {}:{}", _supervisorId, _workerId);

    // Per-worker scratch directories (pids, tmp, heartbeats).
    _ops.forceMkdir(new File(ConfigUtils.workerPidsRoot(_conf, _workerId)));
    _ops.forceMkdir(new File(ConfigUtils.workerTmpRoot(_conf, _workerId)));
    _ops.forceMkdir(new File(ConfigUtils.workerHeartbeatsRoot(_conf, _workerId)));

    // The artifacts dir is keyed by topology/port; create and chown it only on first use.
    File artifactsDir = new File(ConfigUtils.workerArtifactsRoot(_conf, _topologyId, _port));
    if (!_ops.fileExists(artifactsDir)) {
        _ops.forceMkdir(artifactsDir);
        _ops.setupWorkerArtifactsDir(_assignment.get_owner(), artifactsDir);
    }

    String workerUser = getWorkerUser();
    writeLogMetadata(workerUser);
    saveWorkerUser(workerUser);
    createArtifactsLink();
    createBlobstoreLinks();
}
/**
 * Dump a string to a file, creating any missing parent directories first.
 *
 * @param location where to write to
 * @param data     the data to write
 * @throws IOException on any error
 */
public void dump(File location, String data) throws IOException {
    File parent = location.getParentFile();
    // getParentFile() returns null for a bare file name with no parent path;
    // guard against that before checking existence, otherwise this NPEs.
    if (parent != null && !parent.exists()) {
        forceMkdir(parent);
    }
    try (Writer w = getWriter(location)) {
        w.write(data);
    }
}
/**
 * Delete every entry directly under the topology blobs root directory whose
 * file name begins with the given base name, if any exist.
 *
 * @param baseName prefix identifying the entries to remove
 * @throws IOException on any error
 */
private void removeAll(String baseName) throws IOException {
    try (DirectoryStream<Path> entries = fsOps.newDirectoryStream(topologyBasicBlobsRootDir)) {
        for (Path entry : entries) {
            if (entry.getFileName().toString().startsWith(baseName)) {
                fsOps.deleteIfExists(entry.toFile());
            }
        }
    }
}
/**
 * Create symlink from the containers directory/artifacts to the artifacts directory.
 *
 * @throws IOException on any error
 */
protected void createArtifactsLink() throws IOException {
    _type.assertFull();
    if (_symlinksDisabled) {
        // Symlink creation is turned off for this deployment; nothing to do.
        return;
    }
    File containerDir = new File(ConfigUtils.workerRoot(_conf, _workerId));
    File artifactsDir = new File(ConfigUtils.workerArtifactsRoot(_conf, _topologyId, _port));
    if (_ops.fileExists(containerDir)) {
        LOG.debug("Creating symlinks for worker-id: {} topology-id: {} to its port artifacts directory",
                  _workerId, _topologyId);
        _ops.createSymlink(new File(containerDir, "artifacts"), artifactsDir);
    }
}
when(ops.doRequiredTopoFilesExist(superConf, topoId)).thenReturn(true); when(ops.fileExists(workerArtifacts)).thenReturn(true); when(ops.fileExists(workerRoot)).thenReturn(true); when(ops.getWriter(logMetadataFile)).thenReturn(yamlDump); verify(ops).forceMkdir(new File(workerRoot, "pids")); verify(ops).forceMkdir(new File(workerRoot, "tmp")); verify(ops).forceMkdir(new File(workerRoot, "heartbeats")); verify(ops).fileExists(workerArtifacts); verify(ops).getWriter(logMetadataFile); verify(ops).dump(workerUserFile, user); verify(ops).createSymlink(new File(workerRoot, "artifacts"), workerArtifacts); verify(ops, never()).createSymlink(new File(workerRoot, "resources"), new File(distRoot, "resources"));
if (!localResourceList.isEmpty()) { File userDir = getLocalUserFileCacheDir(topoOwner); if (!fsOps.fileExists(userDir)) { fsOps.forceMkdir(userDir); fsOps.setupBlobPermissions(userDir, topoOwner); if (!symlinksDisabled) { for (LocalizedResource localizedResource : localizedResources) { fsOps.createSymlink(new File(stormroot, symlinkName), rsrcFilePath);
when(ops.doRequiredTopoFilesExist(superConf, topoId)).thenReturn(true); when(ops.fileExists(workerArtifacts)).thenReturn(true); when(ops.fileExists(workerRoot)).thenReturn(true); when(ops.getWriter(logMetadataFile)).thenReturn(yamlDump); verify(ops).deleteIfExists(eq(new File(workerPidsRoot, String.valueOf(pid))), eq(user), any(String.class)); verify(iso).releaseResourcesForWorker(workerId); verify(ops).deleteIfExists(eq(new File(workerRoot, "pids")), eq(user), any(String.class)); verify(ops).deleteIfExists(eq(new File(workerRoot, "tmp")), eq(user), any(String.class)); verify(ops).deleteIfExists(eq(new File(workerRoot, "heartbeats")), eq(user), any(String.class)); verify(ops).deleteIfExists(eq(workerRoot), eq(user), any(String.class)); verify(ops).deleteIfExists(workerUserFile);
/**
 * Delete a file or a directory and all of the children. If it exists.
 *
 * <p>This default implementation ignores {@code user} and {@code logPrefix} and simply
 * delegates to {@link #deleteIfExists(File)}; subclasses that support deleting as a
 * different user are expected to override it.
 *
 * @param path what to delete
 * @param user who to delete it as if doing it as someone else is supported
 * @param logPrefix if an external process needs to be launched to delete the object what prefix to include in the logs
 * @throws IOException on any error.
 */
public void deleteIfExists(File path, String user, String logPrefix) throws IOException {
    //by default no need to do this as a different user
    deleteIfExists(path);
}
@Override public Void call() throws Exception { try { if (_fsOps.fileExists(_stormRoot)) { if (!_fsOps.supportsAtomicDirectoryMove()) { LOG.warn("{} may have partially downloaded blobs, recovering", _topologyId); _fsOps.deleteIfExists(_stormRoot); } else { LOG.warn("{} already downloaded blobs, skipping", _topologyId); try { downloadBaseBlobs(tr); _fsOps.moveDirectoryPreferAtomic(tr, _stormRoot); _fsOps.setupStormCodeDir(owner, _stormRoot); deleteAll = false; } finally { if (deleteAll) { LOG.warn("Failed to download basic resources for topology-id {}", _topologyId); _fsOps.deleteIfExists(tr); _fsOps.deleteIfExists(_stormRoot);
verify(ops).fileExists(userDir); verify(ops).forceMkdir(userDir); verify(ops).createSymlink(new File(stormRoot, simpleLocalName), new File(simpleCurrentLocalFile)); } finally { try {
when(ops.doRequiredTopoFilesExist(superConf, topoId)).thenReturn(true); when(ops.slurp(stormcode)).thenReturn(serializedState);
ops = AdvancedFSOps.make(conf); assert (port > 0); _topologyId = assignment.get_topology_id(); if (!_ops.doRequiredTopoFilesExist(_conf, _topologyId)) { LOG.info( "Missing topology storm code, so can't launch worker with assignment {} for this supervisor {} on port {} with id {}",
if (!Files.exists(topologyBasicBlobsRootDir)) { Files.createDirectories(topologyBasicBlobsRootDir); fsOps.setupStormCodeDir(owner, topologyBasicBlobsRootDir.toFile()); extractDirFromJar(urlConnection.getJarFileURL().getFile(), ServerConfigUtils.RESOURCES_SUBDIR, extractionDest); } else { fsOps.copyDirectory(new File(url.getFile()), extractionDest.toFile()); v -> { Path path = topologyBasicBlobsRootDir.resolve(type.getTempFileName(v)); fsOps.forceMkdir(path.getParent()); return path; }, fsOps::getOutputStream);
/**
 * Determine which user certain operations should be performed as.
 *
 * @return the user that some operations should be done as
 * @throws IOException on any error
 */
protected String getWorkerUser() throws IOException {
    LOG.info("GET worker-user for {}", _workerId);
    // First choice: the persisted worker-user file for this worker.
    File userFile = new File(ConfigUtils.workerUserFile(_conf, _workerId));
    if (_ops.fileExists(userFile)) {
        return _ops.slurpString(userFile).trim();
    }
    // Second choice: the owner recorded in the assignment.
    if (_assignment != null && _assignment.is_set_owner()) {
        return _assignment.get_owner();
    }
    // Local mode: just run as the current JVM user.
    if (ConfigUtils.isLocalMode(_conf)) {
        return System.getProperty("user.name");
    }
    // Last resort: the filesystem owner of the worker artifacts root.
    File artifactsRoot = new File(ConfigUtils.workerArtifactsRoot(_conf));
    if (artifactsRoot.exists()) {
        return Files.getOwner(artifactsRoot.toPath()).getName();
    }
    throw new IllegalStateException("Could not recover the user for " + _workerId);
}
/**
 * Convenience constructor: delegates to the main constructor using the default
 * {@link AdvancedFSOps} for this configuration and the supervisor local dir
 * derived from {@code conf}.
 *
 * @param conf            the supervisor configuration
 * @param metricsRegistry registry the localizer reports its metrics to
 * @throws IOException on any error
 */
public AsyncLocalizer(Map<String, Object> conf, StormMetricsRegistry metricsRegistry) throws IOException {
    this(conf, AdvancedFSOps.make(conf), ConfigUtils.supervisorLocalDir(conf), metricsRegistry);
}
@Override protected void downloadBaseBlobs(File tmproot) throws Exception { _fsOps.forceMkdir(tmproot); String stormCodeKey = ConfigUtils.masterStormCodeKey(_topologyId); String stormConfKey = ConfigUtils.masterStormConfKey(_topologyId); BlobStore blobStore = Utils.getNimbusBlobStore(_conf, null); try { try (OutputStream codeOutStream = _fsOps.getOutputStream(codePath)){ blobStore.readBlobTo(stormCodeKey, codeOutStream, null); try (OutputStream confOutStream = _fsOps.getOutputStream(confPath)) { blobStore.readBlobTo(stormConfKey, confOutStream, null); Utils.extractDirFromJar(urlConnection.getJarFileURL().getFile(), ConfigUtils.RESOURCES_SUBDIR, new File(targetDir)); } else { _fsOps.copyDirectory(new File(url.getFile()), new File(targetDir, ConfigUtils.RESOURCES_SUBDIR));
/**
 * Recovering a full container should pick up the worker id already recorded
 * in local state for the assigned port.
 */
@Test
public void testRecovery() throws Exception {
    final String topoId = "test_topology";
    final String workerId = "myWorker";
    final int supervisorPort = 6628;
    final int port = 8080;

    LocalAssignment assignment = new LocalAssignment();
    assignment.set_topology_id(topoId);

    // Local state already maps this worker to the port we are recovering.
    Map<String, Integer> approvedWorkers = new HashMap<>();
    approvedWorkers.put(workerId, port);
    LocalState localState = mock(LocalState.class);
    when(localState.getApprovedWorkers()).thenReturn(approvedWorkers);

    Map<String, Object> superConf = new HashMap<>();
    AdvancedFSOps ops = mock(AdvancedFSOps.class);
    when(ops.doRequiredTopoFilesExist(superConf, topoId)).thenReturn(true);

    MockBasicContainer container =
        new MockBasicContainer(ContainerType.RECOVER_FULL, superConf, "SUPERVISOR", supervisorPort,
                               port, assignment, null, localState, null, new StormMetricsRegistry(),
                               new HashMap<>(), ops, "profile");

    assertEquals(workerId, container._workerId);
}
when(ops.doRequiredTopoFilesExist(superConf, topoId)).thenReturn(true); when(ops.slurpString(workerArtifactsPid)).thenReturn(String.valueOf(pid));
try (Writer writer = _ops.getWriter(file)) { yaml.dump(data, writer);