/**
 * Returns the job group used to throttle build jobs, lazily creating it and
 * replacing it whenever the configured maximum number of concurrent builds
 * has changed since the group was built.
 * NOTE(review): assumes {@code description.getMaxConcurrentBuilds()} is the
 * desired thread cap for the group -- confirm against the enclosing class.
 */
private JobGroup getBuildJobGroup() {
    // Reuse the cached group only while its thread cap still matches the description.
    if (buildJobGroup != null
            && buildJobGroup.getMaxThreads() == description.getMaxConcurrentBuilds()) {
        return buildJobGroup;
    }
    buildJobGroup = new JobGroup(getClass().getName(), description.getMaxConcurrentBuilds(), 0);
    return buildJobGroup;
}
// Fragment (enclosing loop/switch not visible): cancels the whole job group and
// exits the enclosing construct. Presumably one job's failure/cancellation means
// the remaining group members should stop too -- confirm against the full method.
jobGroup.cancel(); break;
// Fragment (cut mid-expression): builds a progress label for the child folder, then
// attaches the crawler job to the shared group only while the group has capacity
// (maxThreads == 0 meaning "unbounded" per the JobGroup API -- confirm), and queues it.
NLS.bind(DataTransferMessages.SmartImportJob_crawling, childFolder.getLocation().toString()), childFolder, res); if (crawlerJobGroup.getMaxThreads() == 0 || crawlerJobGroup.getActiveJobs().size() < crawlerJobGroup.getMaxThreads()) { crawlerJob.setJobGroup(crawlerJobGroup); jobs.add(crawlerJob);
// Fragment (enclosing join logic not visible): rejects a blocking (timeout == 0)
// join on a job that belongs to the caller's own thread-limited group, since the
// joiner would occupy a group slot while waiting -- a potential deadlock.
if (currentJob != null) { JobGroup jobGroup = currentJob.getJobGroup(); if (timeout == 0 && jobGroup != null && jobGroup.getMaxThreads() != 0 && jobGroup == job.getJobGroup()) throw new IllegalStateException("Joining on a job belonging to the same group is not allowed"); //$NON-NLS-1$
/**
 * Builds a new instance of the job.
 *
 * @param rootDirectory
 *            the root directory to import and analyze
 * @param workingSets
 *            working sets to assign to imported projects, or {@code null} for
 *            none
 * @param configureProjects
 *            whether we want to configure projects (natures etc...) according
 *            to their metadata
 * @param recursiveChildrenDetection
 *            whether to recurse for detection of nested projects
 */
public SmartImportJob(File rootDirectory, Set<IWorkingSet> workingSets, boolean configureProjects,
        boolean recursiveChildrenDetection) {
    super(rootDirectory.getAbsolutePath());
    this.workspaceRoot = ResourcesPlugin.getWorkspace().getRoot();
    this.rootDirectory = rootDirectory;
    // null is a documented convenience for "no working sets"; normalize to an empty array.
    if (workingSets != null) {
        this.workingSets = workingSets.toArray(new IWorkingSet[workingSets.size()]);
    } else {
        this.workingSets = new IWorkingSet[0];
    }
    this.configureProjects = configureProjects;
    this.deepChildrenDetection = recursiveChildrenDetection;
    // Synchronized wrappers: these maps are filled concurrently by the crawler jobs below.
    this.report = Collections.synchronizedMap(new HashMap<IProject, List<ProjectConfigurator>>());
    this.errors = Collections.synchronizedMap(new HashMap<IPath, Exception>());
    // Per the JobGroup API, maxThreads == 0 means no thread limit; seed 1 anticipates
    // at least one crawler job joining the group.
    this.crawlerJobGroup = new JobGroup(DataTransferMessages.SmartImportJob_detectAndConfigureProjects, 0, 1);
}
// Fragment (enclosing method not visible): under the scheduler lock, a job whose
// group is mid-cancellation is vetoed from running (shouldRun = false) before the
// internal representation is consulted further.
synchronized (lock) { JobGroup jobGroup = job.getJobGroup(); if (jobGroup != null && jobGroup.getState() == JobGroup.CANCELING) shouldRun = false; InternalJob internal = job;
// Fragment, duplicate of the same-group join guard: a blocking (timeout == 0) join
// from inside the target job's own thread-limited group is disallowed because the
// waiting joiner would hold a group slot -- a potential deadlock.
if (currentJob != null) { JobGroup jobGroup = currentJob.getJobGroup(); if (timeout == 0 && jobGroup != null && jobGroup.getMaxThreads() != 0 && jobGroup == job.getJobGroup()) throw new IllegalStateException("Joining on a job belonging to the same group is not allowed"); //$NON-NLS-1$
/**
 * Gathers the refactoring units for the configured Java elements and fans the
 * work out to a group of worker jobs, one queue shared by all workers. The
 * worker count is derived from the amount of work and the available cores.
 * Jobs are scheduled fire-and-forget; this method does not wait for them.
 *
 * @param monitor progress monitor forwarded to the collection phase
 * @return always {@link Status#OK_STATUS}
 * @throws Exception propagated from collecting the refactoring units
 */
private IStatus run0(IProgressMonitor monitor) throws Exception {
    if (javaElements.isEmpty()) {
        return Status.OK_STATUS;
    }
    final Queue<RefactoringUnit> workQueue = collectRefactoringUnits(javaElements, monitor);
    final int availableCores = Runtime.getRuntime().availableProcessors();
    final int workerCount = computeNbWorkers(workQueue.size(), availableCores);
    // Group caps concurrency at workerCount; seed equals the number of jobs scheduled.
    final JobGroup workerGroup = new JobGroup("Job name", workerCount, workerCount);
    for (int worker = 0; worker < workerCount; worker++) {
        final Job refactoringJob =
                new ApplyRefactoringsJob(workQueue, clone(refactoringRulesToApply), environment);
        refactoringJob.setJobGroup(workerGroup);
        refactoringJob.setUser(true);
        refactoringJob.schedule();
    }
    return Status.OK_STATUS;
}
// Fragment, duplicate of the scheduler veto: while holding the lock, a job is
// prevented from running (shouldRun = false) if its group is in the CANCELING state.
synchronized (lock) { JobGroup jobGroup = job.getJobGroup(); if (jobGroup != null && jobGroup.getState() == JobGroup.CANCELING) shouldRun = false; InternalJob internal = job;
// Fragment, duplicate: cancels the entire job group and leaves the enclosing
// loop/switch -- presumably propagating one job's cancellation to its siblings.
jobGroup.cancel(); break;