@Override protected void serviceInit(Configuration conf) throws Exception { Configuration config = new XLearningConfiguration(conf); config.setBoolean(Dispatcher.DISPATCHER_EXIT_ON_ERROR_KEY, true); // This is required for WebApps to use https if enabled. XLearningWebAppUtil.initialize(getConfig()); try { doSecureLogin(conf); } catch (IOException ie) { throw new YarnRuntimeException("History Server Failed to login", ie); } jobHistoryService = new JobHistory(); historyContext = (HistoryContext) jobHistoryService; stateStore = createStateStore(conf); this.jhsDTSecretManager = createJHSSecretManager(conf, stateStore); clientService = createHistoryClientService(); aggLogDelService = new AggregatedLogDeletionService(); addService(stateStore); addService(new HistoryServerSecretManagerService()); addService(clientService); addService(aggLogDelService); super.serviceInit(config); }
@Override public Void run() throws IOException { jobHistoryService.refreshJobRetentionSettings(); return null; } });
/**
 * Re-reads the retention configuration and restarts the history cleaner
 * so new max-age / interval values take effect immediately. Logs a
 * warning and does nothing unless the service is in the STARTED state.
 */
public void refreshJobRetentionSettings() {
  if (getServiceState() != STATE.STARTED) {
    LOG.warn("Failed to execute refreshJobRetentionSettings : Job History service is not started");
    return;
  }
  conf = createConf();
  long retentionMs = conf.getLong(JHAdminConfig.MR_HISTORY_MAX_AGE_MS,
      JHAdminConfig.DEFAULT_MR_HISTORY_MAX_AGE);
  hsManager.setMaxHistoryAge(retentionMs);
  // Cancel any scheduled cleaner (letting an in-flight run finish)
  // before rescheduling with the refreshed settings.
  if (futureHistoryCleaner != null) {
    futureHistoryCleaner.cancel(false);
  }
  futureHistoryCleaner = null;
  scheduleHistoryCleaner();
}
/**
 * Initializes the job-history service: placeholder app/attempt ids, the
 * intermediate-to-done move interval, the history file manager and the
 * history storage backend.
 *
 * @param conf service configuration
 * @throws Exception if the file manager or storage fails to initialize
 */
@Override
protected void serviceInit(Configuration conf) throws Exception {
  LOG.info("JobHistory Init");
  this.conf = conf;
  // Placeholder ids; the history server is not tied to a real application.
  this.appID = ApplicationId.newInstance(0, 0);
  this.appAttemptID = RecordFactoryProvider.getRecordFactory(conf)
      .newRecordInstance(ApplicationAttemptId.class);
  moveThreadInterval = conf.getLong(
      JHAdminConfig.MR_HISTORY_MOVE_INTERVAL_MS,
      JHAdminConfig.DEFAULT_MR_HISTORY_MOVE_INTERVAL_MS);
  hsManager = createHistoryFileManager();
  hsManager.init(conf);
  try {
    hsManager.initExisting();
  } catch (IOException e) {
    // FIX: corrected "intialize" typo in the error message.
    throw new YarnRuntimeException("Failed to initialize existing directories", e);
  }
  storage = createHistoryStorage();
  // The storage implementation may itself be a Service; honor its lifecycle.
  if (storage instanceof Service) {
    ((Service) storage).init(conf);
  }
  storage.setHistoryFileManager(hsManager);
  super.serviceInit(conf);
}
@Override public Map<JobId, Job> getAllJobs(ApplicationId appID) { if (LOG.isDebugEnabled()) { LOG.debug("Called getAllJobs(AppId): " + appID); } // currently there is 1 to 1 mapping between app and job id org.apache.hadoop.mapreduce.JobID oldJobID = TypeConverter.fromYarn(appID); Map<JobId, Job> jobs = new HashMap<JobId, Job>(); JobId jobID = TypeConverter.toYarn(oldJobID); jobs.put(jobID, getJob(jobID)); return jobs; }
/**
 * Schedules the periodic history cleaner if enabled by configuration.
 * The first run is delayed by getInitDelaySecs() seconds; the task then
 * repeats at the configured cleaner interval.
 */
private void scheduleHistoryCleaner() {
  boolean startCleanerService = conf.getBoolean(
      JHAdminConfig.MR_HISTORY_CLEANER_ENABLE, true);
  if (startCleanerService) {
    cleanerInterval = conf.getLong(
        JHAdminConfig.MR_HISTORY_CLEANER_INTERVAL_MS,
        JHAdminConfig.DEFAULT_MR_HISTORY_CLEANER_INTERVAL_MS);
    // FIX: use the uppercase 'L' long suffix; lowercase 'l' (1000l) is
    // easily misread as the digit '1'.
    futureHistoryCleaner = scheduledExecutor.scheduleAtFixedRate(
        new HistoryCleaner(),
        getInitDelaySecs() * 1000L, cleanerInterval,
        TimeUnit.MILLISECONDS);
  }
}
/**
 * Refreshes the loaded-job cache of the underlying history storage.
 * Supported only when the storage is a CachedHistoryStorage and the
 * service has been started.
 *
 * @throws UnsupportedOperationException if the storage is not cache-backed
 */
public void refreshLoadedJobCache() {
  if (getServiceState() != STATE.STARTED) {
    LOG.warn("Failed to execute refreshLoadedJobCache: JobHistory service is not started");
    return;
  }
  if (!(storage instanceof CachedHistoryStorage)) {
    throw new UnsupportedOperationException(storage.getClass().getName()
        + " is expected to be an instance of "
        + CachedHistoryStorage.class.getName());
  }
  ((CachedHistoryStorage) storage).refreshLoadedJobCache();
}
/**
 * Admin operation: refreshes the history server's loaded-job cache after
 * an ACL check, audit-logging both failure and success.
 *
 * @throws IOException if the caller fails the admin ACL check
 * @throws UnsupportedOperationException if storage does not support caching
 */
@Override
public void refreshLoadedJobCache() throws IOException {
  final String operation = "refreshLoadedJobCache";
  UserGroupInformation user = checkAcls(operation);
  try {
    jobHistoryService.refreshLoadedJobCache();
  } catch (UnsupportedOperationException e) {
    // Audit the failure, then propagate to the caller.
    HSAuditLogger.logFailure(user.getShortUserName(), operation,
        adminAcl.toString(), HISTORY_ADMIN_SERVER, e.getMessage());
    throw e;
  }
  HSAuditLogger.logSuccess(user.getShortUserName(), operation,
      HISTORY_ADMIN_SERVER);
}
@Override protected void serviceStart() throws Exception { hsManager.start(); if (storage instanceof Service) { ((Service) storage).start(); } scheduledExecutor = new ScheduledThreadPoolExecutor(2, new ThreadFactoryBuilder().setNameFormat("Log Scanner/Cleaner #%d") .build()); scheduledExecutor.scheduleAtFixedRate(new MoveIntermediateToDoneRunnable(), moveThreadInterval, moveThreadInterval, TimeUnit.MILLISECONDS); // Start historyCleaner scheduleHistoryCleaner(); super.serviceStart(); }
/**
 * Applies freshly loaded retention settings and reschedules the history
 * cleaner. Only effective while the service is STARTED; otherwise a
 * warning is logged and nothing changes.
 */
public void refreshJobRetentionSettings() {
  if (getServiceState() != STATE.STARTED) {
    LOG.warn("Failed to execute refreshJobRetentionSettings : Job History service is not started");
    return;
  }
  conf = createConf();
  long maxAgeMillis = conf.getLong(JHAdminConfig.MR_HISTORY_MAX_AGE_MS,
      JHAdminConfig.DEFAULT_MR_HISTORY_MAX_AGE);
  hsManager.setMaxHistoryAge(maxAgeMillis);
  // Drop the old cleaner schedule (without interrupting a running pass)
  // and re-create it under the new configuration.
  if (futureHistoryCleaner != null) {
    futureHistoryCleaner.cancel(false);
  }
  futureHistoryCleaner = null;
  scheduleHistoryCleaner();
}
/**
 * Initializes the job-history service: placeholder app/attempt ids, the
 * intermediate-to-done move interval, the history file manager and the
 * history storage backend.
 *
 * @param conf service configuration
 * @throws Exception if the file manager or storage fails to initialize
 */
@Override
protected void serviceInit(Configuration conf) throws Exception {
  LOG.info("JobHistory Init");
  this.conf = conf;
  // Placeholder ids; the history server is not tied to a real application.
  this.appID = ApplicationId.newInstance(0, 0);
  this.appAttemptID = RecordFactoryProvider.getRecordFactory(conf)
      .newRecordInstance(ApplicationAttemptId.class);
  moveThreadInterval = conf.getLong(
      JHAdminConfig.MR_HISTORY_MOVE_INTERVAL_MS,
      JHAdminConfig.DEFAULT_MR_HISTORY_MOVE_INTERVAL_MS);
  hsManager = createHistoryFileManager();
  hsManager.init(conf);
  try {
    hsManager.initExisting();
  } catch (IOException e) {
    // FIX: corrected "intialize" typo in the error message.
    throw new YarnRuntimeException("Failed to initialize existing directories", e);
  }
  storage = createHistoryStorage();
  // The storage implementation may itself be a Service; honor its lifecycle.
  if (storage instanceof Service) {
    ((Service) storage).init(conf);
  }
  storage.setHistoryFileManager(hsManager);
  super.serviceInit(conf);
}
@Override public Map<JobId, Job> getAllJobs(ApplicationId appID) { if (LOG.isDebugEnabled()) { LOG.debug("Called getAllJobs(AppId): " + appID); } // currently there is 1 to 1 mapping between app and job id org.apache.hadoop.mapreduce.JobID oldJobID = TypeConverter.fromYarn(appID); Map<JobId, Job> jobs = new HashMap<JobId, Job>(); JobId jobID = TypeConverter.toYarn(oldJobID); jobs.put(jobID, getJob(jobID)); return jobs; }
/**
 * Schedules the periodic history cleaner if enabled by configuration.
 * The first run is delayed by getInitDelaySecs() seconds; the task then
 * repeats at the configured cleaner interval.
 */
private void scheduleHistoryCleaner() {
  boolean startCleanerService = conf.getBoolean(
      JHAdminConfig.MR_HISTORY_CLEANER_ENABLE, true);
  if (startCleanerService) {
    cleanerInterval = conf.getLong(
        JHAdminConfig.MR_HISTORY_CLEANER_INTERVAL_MS,
        JHAdminConfig.DEFAULT_MR_HISTORY_CLEANER_INTERVAL_MS);
    // FIX: use the uppercase 'L' long suffix; lowercase 'l' (1000l) is
    // easily misread as the digit '1'.
    futureHistoryCleaner = scheduledExecutor.scheduleAtFixedRate(
        new HistoryCleaner(),
        getInitDelaySecs() * 1000L, cleanerInterval,
        TimeUnit.MILLISECONDS);
  }
}
/**
 * Asks the history storage to refresh its loaded-job cache. Requires a
 * started service and a CachedHistoryStorage-backed storage.
 *
 * @throws UnsupportedOperationException if the storage is not cache-backed
 */
public void refreshLoadedJobCache() {
  if (getServiceState() != STATE.STARTED) {
    LOG.warn("Failed to execute refreshLoadedJobCache: JobHistory service is not started");
    return;
  }
  if (!(storage instanceof CachedHistoryStorage)) {
    throw new UnsupportedOperationException(storage.getClass().getName()
        + " is expected to be an instance of "
        + CachedHistoryStorage.class.getName());
  }
  ((CachedHistoryStorage) storage).refreshLoadedJobCache();
}
/**
 * Admin operation: refreshes the history server's loaded-job cache after
 * an ACL check, audit-logging both failure and success.
 *
 * @throws IOException if the caller fails the admin ACL check
 * @throws UnsupportedOperationException if storage does not support caching
 */
@Override
public void refreshLoadedJobCache() throws IOException {
  final String operation = "refreshLoadedJobCache";
  UserGroupInformation user = checkAcls(operation);
  try {
    jobHistoryService.refreshLoadedJobCache();
  } catch (UnsupportedOperationException e) {
    // Audit the failure, then propagate to the caller.
    HSAuditLogger.logFailure(user.getShortUserName(), operation,
        adminAcl.toString(), HISTORY_ADMIN_SERVER, e.getMessage());
    throw e;
  }
  HSAuditLogger.logSuccess(user.getShortUserName(), operation,
      HISTORY_ADMIN_SERVER);
}
@Override protected void serviceStart() throws Exception { hsManager.start(); if (storage instanceof Service) { ((Service) storage).start(); } scheduledExecutor = new ScheduledThreadPoolExecutor(2, new ThreadFactoryBuilder().setNameFormat("Log Scanner/Cleaner #%d") .build()); scheduledExecutor.scheduleAtFixedRate(new MoveIntermediateToDoneRunnable(), moveThreadInterval, moveThreadInterval, TimeUnit.MILLISECONDS); // Start historyCleaner scheduleHistoryCleaner(); super.serviceStart(); }
/**
 * Reloads retention configuration and reschedules the history cleaner so
 * the new settings take effect. A warning is logged and the call is a
 * no-op unless the service is STARTED.
 */
public void refreshJobRetentionSettings() {
  if (getServiceState() != STATE.STARTED) {
    LOG.warn("Failed to execute refreshJobRetentionSettings : Job History service is not started");
    return;
  }
  conf = createConf();
  long configuredMaxAge = conf.getLong(JHAdminConfig.MR_HISTORY_MAX_AGE_MS,
      JHAdminConfig.DEFAULT_MR_HISTORY_MAX_AGE);
  hsManager.setMaxHistoryAge(configuredMaxAge);
  // Cancel the existing schedule (allowing any in-flight run to finish)
  // and rebuild it from the refreshed configuration.
  if (futureHistoryCleaner != null) {
    futureHistoryCleaner.cancel(false);
  }
  futureHistoryCleaner = null;
  scheduleHistoryCleaner();
}
/**
 * Initializes the job-history service: placeholder app/attempt ids, the
 * intermediate-to-done move interval, the history file manager and the
 * history storage backend.
 *
 * @param conf service configuration
 * @throws Exception if the file manager or storage fails to initialize
 */
@Override
protected void serviceInit(Configuration conf) throws Exception {
  LOG.info("JobHistory Init");
  this.conf = conf;
  // Placeholder ids; the history server is not tied to a real application.
  this.appID = ApplicationId.newInstance(0, 0);
  this.appAttemptID = RecordFactoryProvider.getRecordFactory(conf)
      .newRecordInstance(ApplicationAttemptId.class);
  moveThreadInterval = conf.getLong(
      JHAdminConfig.MR_HISTORY_MOVE_INTERVAL_MS,
      JHAdminConfig.DEFAULT_MR_HISTORY_MOVE_INTERVAL_MS);
  hsManager = createHistoryFileManager();
  hsManager.init(conf);
  try {
    hsManager.initExisting();
  } catch (IOException e) {
    // FIX: corrected "intialize" typo in the error message.
    throw new YarnRuntimeException("Failed to initialize existing directories", e);
  }
  storage = createHistoryStorage();
  // The storage implementation may itself be a Service; honor its lifecycle.
  if (storage instanceof Service) {
    ((Service) storage).init(conf);
  }
  storage.setHistoryFileManager(hsManager);
  super.serviceInit(conf);
}
/**
 * Initializes the MapReduce JobHistoryServer and its child services, in
 * dependency order: state store, secret-manager service, job history,
 * client RPC service, aggregated-log deletion, and the HS admin server.
 *
 * @param conf the incoming configuration; wrapped in a YarnConfiguration
 * @throws Exception if secure login or any child-service init fails
 */
@Override
protected void serviceInit(Configuration conf) throws Exception {
  Configuration config = new YarnConfiguration(conf);
  config.setBoolean(Dispatcher.DISPATCHER_EXIT_ON_ERROR_KEY, true);
  // This is required for WebApps to use https if enabled.
  MRWebAppUtil.initialize(getConfig());
  try {
    doSecureLogin(conf);
  } catch(IOException ie) {
    // Wrap the failure, keeping the cause for diagnosis.
    throw new YarnRuntimeException("History Server Failed to login", ie);
  }
  jobHistoryService = new JobHistory();
  historyContext = (HistoryContext)jobHistoryService;
  stateStore = createStateStore(conf);
  this.jhsDTSecretManager = createJHSSecretManager(conf, stateStore);
  clientService = createHistoryClientService();
  aggLogDelService = new AggregatedLogDeletionService();
  hsAdminServer = new HSAdminServer(aggLogDelService, jobHistoryService);
  // Registration order is also init/start order for a CompositeService;
  // do not reorder these calls.
  addService(stateStore);
  addService(new HistoryServerSecretManagerService());
  addService(jobHistoryService);
  addService(clientService);
  addService(aggLogDelService);
  addService(hsAdminServer);
  super.serviceInit(config);
}
@Override public Map<JobId, Job> getAllJobs(ApplicationId appID) { if (LOG.isDebugEnabled()) { LOG.debug("Called getAllJobs(AppId): " + appID); } // currently there is 1 to 1 mapping between app and job id org.apache.hadoop.mapreduce.JobID oldJobID = TypeConverter.fromYarn(appID); Map<JobId, Job> jobs = new HashMap<JobId, Job>(); JobId jobID = TypeConverter.toYarn(oldJobID); jobs.put(jobID, getJob(jobID)); return jobs; }