// Creates a REST-based cluster client for the given YARN application report.
// NOTE(review): descriptor, numberTaskManagers, slotsPerTaskManager and perJobCluster are
// intentionally ignored here — the REST client only needs the Flink configuration and the
// application id; confirm this matches the overridden method's contract.
@Override protected ClusterClient<ApplicationId> createYarnClusterClient( AbstractYarnClusterDescriptor descriptor, int numberTaskManagers, int slotsPerTaskManager, ApplicationReport report, Configuration flinkConfiguration, boolean perJobCluster) throws Exception { return new RestClusterClient<>( flinkConfiguration, report.getApplicationId()); } }
/**
 * Logs the terminal state of a finished YARN application and, when the application
 * failed, its diagnostics plus a hint on retrieving the aggregated logs.
 *
 * @param appReport report of the finished application
 */
private void logApplicationReport(ApplicationReport appReport) {
    // Parameterized logging avoids building the message when the level is disabled.
    LOG.info("Application {} finished with state {} and final state {} at {}",
        appReport.getApplicationId(),
        appReport.getYarnApplicationState(),
        appReport.getFinalApplicationStatus(),
        appReport.getFinishTime());
    if (appReport.getYarnApplicationState() == YarnApplicationState.FAILED) {
        LOG.warn("Application failed. Diagnostics {}", appReport.getDiagnostics());
        LOG.warn("If log aggregation is activated in the Hadoop cluster, we recommend to retrieve "
            + "the full application log using this command:"
            + System.lineSeparator()
            + "\tyarn logs -applicationId {}"
            + System.lineSeparator()
            + "(It sometimes takes a few seconds until the logs are aggregated)",
            appReport.getApplicationId());
    }
}
@VisibleForTesting Optional<ApplicationId> getReconnectableApplicationId() throws YarnException, IOException { List<ApplicationReport> applicationReports = this.yarnClient.getApplications(APPLICATION_TYPES, RECONNECTABLE_APPLICATION_STATES); if (applicationReports == null || applicationReports.isEmpty()) { return Optional.absent(); } // Try to find an application with a matching application name for (ApplicationReport applicationReport : applicationReports) { if (this.applicationName.equals(applicationReport.getName())) { return Optional.of(applicationReport.getApplicationId()); } } return Optional.absent(); }
if (applicationReport.isPresent()) { messageBuilder.append("\n"); messageBuilder.append("\tApplication ID: ").append(applicationReport.get().getApplicationId()).append("\n"); messageBuilder.append("\tApplication attempt ID: ") .append(applicationReport.get().getCurrentApplicationAttemptId()).append("\n");
/**
 * Queries the ResourceManager for the ids of all applications carrying the given tag
 * that started within the window [timestamp, now].
 *
 * @param tag       the YARN application tag to match
 * @param timestamp lower bound (epoch millis) on the application start time
 * @return the set of matching application ids (possibly empty)
 * @throws RuntimeException wrapping any RM communication failure
 */
private Set<ApplicationId> getYarnChildJobs(String tag, long timestamp) {
    Set<ApplicationId> childYarnJobs = new HashSet<>();
    LOG.info(String.format("Querying RM for tag = %s, starting with ts = %s", tag, timestamp));

    GetApplicationsRequest gar = GetApplicationsRequest.newInstance();
    gar.setScope(ApplicationsRequestScope.OWN);
    gar.setStartRange(timestamp, System.currentTimeMillis());
    gar.setApplicationTags(Collections.singleton(tag));
    try {
        ApplicationClientProtocol proxy = ClientRMProxy.createRMProxy(conf, ApplicationClientProtocol.class);
        GetApplicationsResponse apps = proxy.getApplications(gar);
        for (ApplicationReport appReport : apps.getApplicationList()) {
            childYarnJobs.add(appReport.getApplicationId());
        }
    } catch (IOException | YarnException e) {
        // Both failure modes are handled identically; multi-catch preserves the cause.
        throw new RuntimeException("Exception occurred while finding child jobs", e);
    }
    return childYarnJobs;
}
}
this.numberTaskManagers = numberTaskManagers; this.slotsPerTaskManager = slotsPerTaskManager; this.appId = appReport.getApplicationId(); this.trackingURL = appReport.getTrackingUrl(); this.newlyCreatedCluster = newlyCreatedCluster;
/**
 * Returns the ids of all YARN applications owned by the current user that carry the
 * given tag. Non-admin callers only see applications additionally tagged with their
 * own user id.
 *
 * @param conf Hadoop configuration used to create the ResourceManager proxy
 * @param tag  the application tag identifying child jobs
 * @return the set of matching application ids (possibly empty)
 * @throws IOException   if the RM proxy cannot be created or queried
 * @throws YarnException if the ResourceManager rejects the request
 */
public static Set<ApplicationId> getChildYarnJobs(Configuration conf, String tag) throws IOException, YarnException {
    Set<ApplicationId> childYarnJobs = new HashSet<>();
    GetApplicationsRequest gar = GetApplicationsRequest.newInstance();
    gar.setScope(ApplicationsRequestScope.OWN);
    gar.setApplicationTags(Collections.singleton(tag));

    ApplicationClientProtocol proxy = ClientRMProxy.createRMProxy(conf, ApplicationClientProtocol.class);
    GetApplicationsResponse apps = proxy.getApplications(gar);
    for (ApplicationReport appReport : apps.getApplicationList()) {
        // Non-admin users may only collect applications tagged with their own user id.
        if (isAdmin() || appReport.getApplicationTags().contains(
                QueryState.USERID_TAG + "=" + SessionState.get().getUserName())) {
            childYarnJobs.add(appReport.getApplicationId());
        }
    }

    if (childYarnJobs.isEmpty()) {
        LOG.info("No child applications found");
    } else {
        LOG.info("Found child YARN applications: " + StringUtils.join(childYarnJobs, ","));
    }
    return childYarnJobs;
}
/**
 * Sleep a bit between the tests (we are re-using the YARN cluster for the tests).
 *
 * <p>Polls for up to ten seconds until no application on the cluster is in a running
 * state, then fails the test if any application is still running.
 */
@After
public void sleep() throws IOException, YarnException {
    final Deadline deadline = Deadline.now().plus(Duration.ofSeconds(10));

    boolean isAnyJobRunning = hasRunningApplications();
    while (deadline.hasTimeLeft() && isAnyJobRunning) {
        try {
            Thread.sleep(500);
        } catch (InterruptedException e) {
            // Restore the interrupt flag before failing so the status is observable upstream.
            Thread.currentThread().interrupt();
            Assert.fail("Should not happen");
        }
        isAnyJobRunning = hasRunningApplications();
    }

    if (isAnyJobRunning) {
        final List<String> runningApps = yarnClient.getApplications().stream()
            .filter(YarnTestBase::isApplicationRunning)
            .map(app -> "App " + app.getApplicationId() + " is in state " + app.getYarnApplicationState() + '.')
            .collect(Collectors.toList());
        // Applications may have finished between the last poll and this snapshot.
        if (!runningApps.isEmpty()) {
            Assert.fail("There is at least one application on the cluster that is not finished." + runningApps);
        }
    }
}

/** Returns true when at least one application on the test cluster is still running. */
private boolean hasRunningApplications() throws IOException, YarnException {
    return yarnClient.getApplications().stream().anyMatch(YarnTestBase::isApplicationRunning);
}
ApplicationId id = app.getApplicationId(); yc.killApplication(id);
applicationId = appReport.getApplicationId().toString();
/**
 * Scan all clusters to recover the soft state.
 *
 * <p>Rebuilds the instance map from the application reports of every known cluster and
 * publishes the refreshed map to the listener.
 */
@VisibleForTesting
void scanAll() throws IOException, YarnException {
    final ConcurrentHashMap<UUID, InstanceInfo> discovered = new ConcurrentHashMap<>();

    for (ClusterInfo cluster : clusters.values()) {
        final List<ApplicationReport> reports =
            cluster.client().getApplications(Collections.singleton(ATHENAX_APPLICATION_TYPE));
        for (ApplicationReport report : reports) {
            final InstanceInfo info = Utils.extractInstanceInfo(cluster.name(), report);
            if (info != null) {
                discovered.put(info.metadata().uuid(), info);
            } else {
                LOG.warn("Failed to retrieve instance info for {}:{}", cluster.name(), report.getApplicationId());
            }
        }
    }

    LOG.info("Inspected {} active instances", discovered.size());
    instances.set(discovered);
    listener.onUpdatedInstances(discovered);
}
}
/**
 * Builds an {@code InstanceInfo} from a YARN application report, or returns null when
 * the report's application tags carry no recognizable instance metadata.
 */
static InstanceInfo extractInstanceInfo(String clusterName, ApplicationReport report) {
    final InstanceMetadata metadata = getMetadata(report.getApplicationTags());
    if (metadata == null) {
        return null;
    }

    final ApplicationResourceUsageReport usage = report.getApplicationResourceUsageReport();
    final ApplicationId applicationId = report.getApplicationId();
    final InstanceStatus status = new InstanceStatus()
        .allocatedVCores((long) usage.getUsedResources().getVirtualCores())
        .allocatedMB((long) usage.getUsedResources().getMemory())
        .clusterId(clusterName)
        .applicationId(applicationId.toString())
        .startedTime(report.getStartTime())
        .runningContainers((long) usage.getNumUsedContainers())
        .trackingUrl(report.getTrackingUrl())
        .state(InstanceStatus.StateEnum.fromValue(report.getYarnApplicationState().toString()));
    return new InstanceInfo(clusterName, applicationId, metadata, status);
}
}
// Maps a non-null YARN application report to its application id string.
@Override public String apply(final ApplicationReport input) { Preconditions.checkNotNull(input, "YARN application should be filled"); return input.getApplicationId().toString(); } });
/**
 * Orders application reports by the numeric sequence id of their application id.
 *
 * <p>Uses {@link Integer#compare(int, int)} instead of subtraction: subtracting two
 * ints can overflow and flip the sign, violating the comparator contract.
 */
@Override
public int compare(ApplicationReport o1, ApplicationReport o2) {
    return Integer.compare(o1.getApplicationId().getId(), o2.getApplicationId().getId());
}
/**
 * Logs the application's tracking URL and the ResourceManager web address so the
 * job (and, once started, TensorBoard via the proxy) can be followed from a browser.
 *
 * @param report report of the submitted YARN application
 */
private void logTrackingAndRMUrls(ApplicationReport report) {
    final String trackingUrl = report.getTrackingUrl();
    LOG.info("URL to track running application (will proxy to TensorBoard once it has started): "
        + trackingUrl);

    final String rmUrl = Utils.buildRMUrl(yarnConf, report.getApplicationId().toString());
    LOG.info("ResourceManager web address for application: " + rmUrl);
}
public static JobStatus[] fromYarnApps(List<ApplicationReport> applications, Configuration conf) { List<JobStatus> jobStatuses = new ArrayList<JobStatus>(); for (ApplicationReport application : applications) { // each applicationReport has its own jobFile org.apache.hadoop.mapreduce.JobID jobId = TypeConverter.fromYarn(application.getApplicationId()); jobStatuses.add(TypeConverter.fromYarn(application, MRApps.getJobFile(conf, application.getUser(), jobId))); } return jobStatuses.toArray(new JobStatus[jobStatuses.size()]); }
/**
 * Stops a recording on the current application via the recordings agent and prints
 * the result as JSON.
 *
 * <p>{@code args[1]} is the operation id; an optional {@code args[2]} selects the port.
 */
@Override
public void execute(String[] args, ConsoleReader reader) throws Exception {
    final String opId = args[1];
    final String port = (args.length == 3) ? args[2] : null;
    printJson(recordingsAgent.stopRecording(currentApp.getApplicationId().toString(), opId, port));
}
/**
 * Lists all "DATAFLOW"-typed applications known to the YARN client as
 * {@code CloudAppInstanceInfo} summaries (id, name, state, original tracking URL).
 *
 * @return one info entry per submitted application (possibly empty)
 */
public Collection<CloudAppInstanceInfo> getSubmittedApplications() {
    List<CloudAppInstanceInfo> appIds = new ArrayList<>();
    for (ApplicationReport report : yarnClient.listApplications("DATAFLOW")) {
        appIds.add(new CloudAppInstanceInfo(
            report.getApplicationId().toString(),
            report.getName(),
            report.getYarnApplicationState().toString(),
            report.getOriginalTrackingUrl()));
    }
    return appIds;
}
// Stubs the collaborating mocks so the finder sees a single RUNNING MAPREDUCE application
// whose application id renders as yarnApplicationId, the job-id finder yields mrJobId,
// and the workflow action reports the given external id.
private void setupMocks(final String mrJobId, final String wfExternalId, final String yarnApplicationId) throws HadoopAccessorException, IOException, URISyntaxException, InterruptedException, YarnException { when(hadoopJobIdFinder.find()).thenReturn(mrJobId); when(applicationReport.getApplicationType()).thenReturn("MAPREDUCE"); when(workflowActionBean.getWfId()).thenReturn("workflowId"); when(workflowActionBean.getExternalId()).thenReturn(wfExternalId); when(applicationReport.getYarnApplicationState()).thenReturn(YarnApplicationState.RUNNING); when(applicationId.toString()).thenReturn(yarnApplicationId); when(applicationReport.getApplicationId()).thenReturn(applicationId); when(reader.read()).thenReturn(Lists.newArrayList(applicationReport)); }
/**
 * getLastYarnId on a single-element list must return that element's application id string.
 */
@Test
public void testGetLastYarnIdOnOneElementSuccess() {
    final String expectedId = "application_1534164756526_0000";
    when(applicationReport.getApplicationId()).thenReturn(applicationId);
    when(applicationId.toString()).thenReturn(expectedId);

    final String actualId =
        yarnApplicationIdFinder.getLastYarnId(Collections.singletonList(applicationReport));

    assertEquals("last YARN id should be the only element in the list", expectedId, actualId);
}