ProportionalCapacityPreemptionPolicy

How to use
ProportionalCapacityPreemptionPolicy
in
org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity

Best Java code snippets using org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy (Showing top 20 results out of 315)

origin: org.apache.hadoop/hadoop-yarn-server-resourcemanager

// Fragment of containerBasedPreemptOrKill(): sync killable containers from
// the scheduler, clone the queue hierarchy for the partition being examined,
// then preempt (or kill) the selected containers and drop stale candidates.
syncKillableContainersFromScheduler();
cloneQueues(root, Resources
    .clone(nlm.getResourceByLabel(partitionToLookAt, clusterResources)),
    partitionToLookAt);
this.leafQueueNames = ImmutableSet.copyOf(getLeafQueueNames(
    getQueueByPartition(CapacitySchedulerConfiguration.ROOT,
        RMNodeLabelsManager.NO_LABEL)));
logToCSV(new ArrayList<>(leafQueueNames));
preemptOrkillSelectedContainerAfterWait(toPreemptPerSelector, currentTime);
cleanupStaledPreemptionCandidates(currentTime);
origin: com.github.jiayuhan-it/hadoop-yarn-server-resourcemanager

@Override
public void editSchedule() {
 CSQueue root = scheduler.getRootQueue();
 Resource clusterResources = Resources.clone(scheduler.getClusterResource());
 clusterResources = getNonLabeledResources(clusterResources);
 setNodeLabels(scheduler.getRMContext().getNodeLabelManager()
   .getNodeLabels());
 containerBasedPreemptOrKill(root, clusterResources);
}
origin: ch.cern.hadoop/hadoop-yarn-server-resourcemanager

public ProportionalCapacityPreemptionPolicy(Configuration config,
  RMContext context, CapacityScheduler scheduler, Clock clock) {
 init(config, context, scheduler);
 this.clock = clock;
}
origin: org.apache.hadoop/hadoop-yarn-server-resourcemanager

@Override
public synchronized void editSchedule() {
 updateConfigIfNeeded();
 long startTs = clock.getTime();
 CSQueue root = scheduler.getRootQueue();
 Resource clusterResources = Resources.clone(scheduler.getClusterResource());
 containerBasedPreemptOrKill(root, clusterResources);
 if (LOG.isDebugEnabled()) {
  LOG.debug("Total time used=" + (clock.getTime() - startTs) + " ms.");
 }
}
origin: com.github.jiayuhan-it/hadoop-yarn-server-resourcemanager

/**
 * This method recursively computes the ideal assignment of resources to each
 * level of the hierarchy. This ensures that leaves that are over-capacity but
 * with parents within capacity will not be preempted. Preemptions are allowed
 * within each subtree according to local over/under capacity.
 *
 * @param root the root of the cloned queue hierarchy
 * @param totalPreemptionAllowed maximum amount of preemption allowed
 * @return a list of leaf queues updated with preemption targets
 */
private List<TempQueue> recursivelyComputeIdealAssignment(
  TempQueue root, Resource totalPreemptionAllowed) {
 List<TempQueue> leafs = new ArrayList<TempQueue>();
 if (root.getChildren() != null &&
   root.getChildren().size() > 0) {
  // compute ideal distribution at this level
  computeIdealResourceDistribution(rc, root.getChildren(),
    totalPreemptionAllowed, root.idealAssigned);
  // compute recursively for lower levels and build the list of leaves
  for(TempQueue t : root.getChildren()) {
   leafs.addAll(recursivelyComputeIdealAssignment(t, totalPreemptionAllowed));
  }
 } else {
  // we are in a leaf: nothing to do, just return self
  return Collections.singletonList(root);
 }
 return leafs;
}
origin: org.apache.hadoop/hadoop-yarn-server-resourcemanager

@Test
public void testIgnore() {
 ProportionalCapacityPreemptionPolicy policy =
   buildPolicy(Q_DATA_FOR_IGNORE);
 policy.editSchedule();
 // don't correct imbalances without demand
 verify(mDisp, never()).handle(isA(ContainerPreemptEvent.class));
}
origin: com.github.jiayuhan-it/hadoop-yarn-server-resourcemanager

// Fragment of containerBasedPreemptOrKill() (older, non-partitioned API):
// clone the queue hierarchy, compute the ideal assignment, then select the
// containers to preempt and log the queue state.
tRoot = cloneQueues(root, clusterResources);
    percentageClusterPreemptionAllowed);
List<TempQueue> queues =
    recursivelyComputeIdealAssignment(tRoot, totalPreemptionAllowed);
    getContainersToPreempt(queues, clusterResources);
logToCSV(queues);
origin: org.apache.hadoop/hadoop-yarn-server-resourcemanager

// Test fragment: after editSchedule(), verify the ideal assignment the
// policy computed for queue partitions "a" and "b".
    isPreemptionToBalanceEnabled);
buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig, true);
policy.editSchedule();
    IsPreemptionRequestFor(getAppAttemptId(2))));
assertEquals(60, policy.getQueuePartitions().get("a")
    .get("").getIdealAssigned().getMemorySize());
assertEquals(60, policy.getQueuePartitions().get("a")
    .get("").getIdealAssigned().getVirtualCores());
assertEquals(40, policy.getQueuePartitions().get("b")
    .get("").getIdealAssigned().getMemorySize());
assertEquals(40, policy.getQueuePartitions().get("b")
    .get("").getIdealAssigned().getVirtualCores());
origin: org.apache.hadoop/hadoop-yarn-server-resourcemanager

@Before
public void setup() {
 super.setup();
 policy = new ProportionalCapacityPreemptionPolicy(rmContext, cs, mClock);
}
origin: com.github.jiayuhan-it/hadoop-yarn-server-resourcemanager

// Fragment of preemptFrom(): copy the application's live containers, sort
// them into preemption order, and skip containers on labeled nodes.
    new ArrayList<RMContainer>(app.getLiveContainers());
sortContainers(containers);
if (isLabeledContainer(c)) {
  continue;
origin: org.apache.hadoop/hadoop-yarn-server-resourcemanager

@Test
public void testRefreshPreemptionProperties() throws Exception {
 ProportionalCapacityPreemptionPolicy policy =
   buildPolicy(Q_DATA_FOR_IGNORE);
 assertEquals(
   CapacitySchedulerConfiguration.DEFAULT_PREEMPTION_MONITORING_INTERVAL,
   policy.getMonitoringInterval());
 assertEquals(
   CapacitySchedulerConfiguration.DEFAULT_PREEMPTION_OBSERVE_ONLY,
   policy.isObserveOnly());
 CapacitySchedulerConfiguration newConf =
   new CapacitySchedulerConfiguration(conf);
 long newMonitoringInterval = 5000;
 boolean newObserveOnly = true;
 newConf.setLong(
   CapacitySchedulerConfiguration.PREEMPTION_MONITORING_INTERVAL,
   newMonitoringInterval);
 newConf.setBoolean(CapacitySchedulerConfiguration.PREEMPTION_OBSERVE_ONLY,
   newObserveOnly);
 when(mCS.getConfiguration()).thenReturn(newConf);
 policy.editSchedule();
 assertEquals(newMonitoringInterval, policy.getMonitoringInterval());
 assertEquals(newObserveOnly, policy.isObserveOnly());
}
origin: org.apache.hadoop/hadoop-yarn-server-resourcemanager

// Fragment of cloneQueues(): clone each child queue for the partition,
// register the cloned queue hierarchy, and return it.
TempQueuePerPartition subq = cloneQueues(c, partitionResource,
    partitionToLookAt);
addTempQueuePartition(ret);
return ret;
origin: com.github.jiayuhan-it/hadoop-yarn-server-resourcemanager

// Fragment of cloneQueues(): aggregate the preemptable surplus of all
// child queues while cloning them.
Resource childrensPreemptable = Resource.newInstance(0, 0);
for (CSQueue c : root.getChildQueues()) {
  TempQueue subq = cloneQueues(c, clusterResources);
  Resources.addTo(childrensPreemptable, subq.preemptableExtra);
  ret.addChild(subq);
}
origin: ch.cern.hadoop/hadoop-yarn-server-resourcemanager

@Test
public void testContainerOrdering(){
 List<RMContainer> containers = new ArrayList<RMContainer>();
 ApplicationAttemptId appAttId = ApplicationAttemptId.newInstance(
   ApplicationId.newInstance(TS, 10), 0);
 // create a set of containers
 RMContainer rm1 = mockContainer(appAttId, 5, mock(Resource.class), 3);
 RMContainer rm2 = mockContainer(appAttId, 3, mock(Resource.class), 3);
 RMContainer rm3 = mockContainer(appAttId, 2, mock(Resource.class), 2);
 RMContainer rm4 = mockContainer(appAttId, 1, mock(Resource.class), 2);
 RMContainer rm5 = mockContainer(appAttId, 4, mock(Resource.class), 1);
 // insert them in non-sorted order
 containers.add(rm3);
 containers.add(rm2);
 containers.add(rm1);
 containers.add(rm5);
 containers.add(rm4);
 // sort them
 ProportionalCapacityPreemptionPolicy.sortContainers(containers);
 // verify the "priority"-first, "reverse container-id"-second
 // ordering is enforced correctly
 assert containers.get(0).equals(rm1);
 assert containers.get(1).equals(rm2);
 assert containers.get(2).equals(rm3);
 assert containers.get(3).equals(rm4);
 assert containers.get(4).equals(rm5);
}

origin: ch.cern.hadoop/hadoop-yarn-server-resourcemanager

// Fragment of computeIdealResourceDistribution(): distribute unassigned
// resources first among queues with a non-zero guarantee, then among
// zero-guarantee queues (ignoring guarantees in the second pass).
computeFixpointAllocation(rc, tot_guarant, nonZeroGuarQueues, unassigned,
    false);
computeFixpointAllocation(rc, tot_guarant, zeroGuarQueues, unassigned,
    true);
origin: ch.cern.hadoop/hadoop-yarn-server-resourcemanager

@Test
public void testObserveOnly() {
 int[][] qData = new int[][]{
  //  /   A   B   C
  { 100, 40, 40, 20 },  // abs
  { 100, 100, 100, 100 },  // maxCap
  { 100, 90, 10,  0 },  // used
  {  80, 10, 20, 50 },  // pending
  {   0,  0,  0,  0 },  // reserved
  {   2,  1,  1,  0 },  // apps
  {  -1,  1,  1,  0 },  // req granularity
  {   3,  0,  0,  0 },  // subqueues
 };
 conf.setBoolean(OBSERVE_ONLY, true);
 ProportionalCapacityPreemptionPolicy policy = buildPolicy(qData);
 policy.editSchedule();
 // verify even severe imbalance not affected
 verify(mDisp, never()).handle(isA(ContainerPreemptEvent.class));
}
origin: org.apache.hadoop/hadoop-yarn-server-resourcemanager

// Test fragment: verify the per-queue ideal assignments after editSchedule().
policy.editSchedule();
    getAppAttemptId(3))));
assertEquals(16, policy.getQueuePartitions().get("a")
    .get("").getIdealAssigned().getMemorySize());
assertEquals(42, policy.getQueuePartitions().get("b")
    .get("").getIdealAssigned().getMemorySize());
assertEquals(42, policy.getQueuePartitions().get("c")
    .get("").getIdealAssigned().getMemorySize());
origin: org.apache.hadoop/hadoop-yarn-server-resourcemanager

@Before
public void setup() {
 super.setup();
 rc = new DominantResourceCalculator();
 when(cs.getResourceCalculator()).thenReturn(rc);
 policy = new ProportionalCapacityPreemptionPolicy(rmContext, cs, mClock);
}
org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy

Javadoc

This class implements a SchedulingEditPolicy that is designed to be paired with the CapacityScheduler. At every invocation of editSchedule() it computes the ideal amount of resources assigned to each queue in the hierarchy. Overcapacity is distributed among queues in a weighted fair manner, where the weight is the queue's guaranteed capacity. Based on this ideal assignment it determines whether preemption is required and selects a set of containers from each application that will be killed if the corresponding amount of resources is not freed up by the application. If not in observeOnly mode, it triggers preemption requests via a ContainerPreemptEvent that the ResourceManager delivers to the application (or executes itself). If the resource deficit persists long enough, the policy escalates to forced termination of containers (again by generating ContainerPreemptEvent).
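
For orientation, the policy is enabled as a scheduling monitor through the ResourceManager configuration. A minimal sketch is shown below; the two CapacitySchedulerConfiguration constants are the same ones exercised in testRefreshPreemptionProperties above, and the numeric values are illustrative only, not recommendations.

// Sketch: register ProportionalCapacityPreemptionPolicy as the RM's
// scheduling edit policy and tune its preemption behaviour.
Configuration conf = new YarnConfiguration();
conf.setBoolean(YarnConfiguration.RM_SCHEDULER_ENABLE_MONITORS, true);
conf.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES,
    ProportionalCapacityPreemptionPolicy.class.getCanonicalName());
// Optional tuning (illustrative values):
conf.setLong(CapacitySchedulerConfiguration.PREEMPTION_MONITORING_INTERVAL, 3000L);
conf.setBoolean(CapacitySchedulerConfiguration.PREEMPTION_OBSERVE_ONLY, false);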

Most used methods

  • cloneQueues
    This method walks a tree of CSQueue and clones the portion of the state relevant for preemption in TempQueue(s).
  • containerBasedPreemptOrKill
    This method selects and tracks containers to be preemption candidates. If a container stays in the target list longer than the configured wait time, it is killed.
  • init
  • logToCSV
  • sortContainers
    Compare by reversed priority order first, and then reversed containerId order
  • <init>
  • computeFixpointAllocation
    Given a set of queues, compute the fix-point distribution of unassigned resources among them. As pending requests of queues are satisfied, remaining resources are redistributed to the queues that still have demand (see the standalone sketch after this list).
  • computeIdealResourceDistribution
    This method computes (for a single level in the tree, passed as a List) the ideal assignment of resources among the queues at that level.
  • editSchedule
  • getContainersToPreempt
    Based on a resource preemption target, drop reservations of containers and, if necessary, select containers for preemption from applications in each over-capacity queue.
  • getMostUnderservedQueues
  • getNonLabeledResources
    This method returns all non labeled resources.
  • getResourceCalculator
  • isLabeledContainer
  • preemptAMContainers
  • preemptFrom
  • recursivelyComputeIdealAssignment
  • resetCapacity
  • setNodeLabels
  • addTempQueuePartition
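
The weighted fair distribution described for computeFixpointAllocation can be pictured with a small standalone sketch. The helper below is hypothetical (it is not part of the Hadoop API); it only illustrates the idea of handing out unassigned capacity in proportion to each queue's guarantee, stopping a queue once its pending demand is met and redistributing the leftovers.

// Hypothetical illustration of weighted-fair distribution of unassigned
// resources: weights are the queues' guarantees, and a queue drops out of
// the distribution once its demand is satisfied. Not Hadoop API code.
import java.util.*;

public class FixpointSketch {
  static Map<String, Long> distribute(long unassigned,
      Map<String, Long> guaranteed, Map<String, Long> demand) {
    Map<String, Long> assigned = new HashMap<>();
    guaranteed.keySet().forEach(q -> assigned.put(q, 0L));
    Set<String> active = new HashSet<>(guaranteed.keySet());
    while (unassigned > 0 && !active.isEmpty()) {
      long totalWeight = active.stream().mapToLong(guaranteed::get).sum();
      if (totalWeight == 0) break;
      long distributed = 0;
      for (Iterator<String> it = active.iterator(); it.hasNext(); ) {
        String q = it.next();
        // share proportional to the queue's guarantee, capped by its demand
        long share = unassigned * guaranteed.get(q) / totalWeight;
        long room = demand.get(q) - assigned.get(q);
        long grant = Math.min(share, room);
        assigned.merge(q, grant, Long::sum);
        distributed += grant;
        if (assigned.get(q) >= demand.get(q)) {
          it.remove();  // queue is satisfied; leftovers go to the others
        }
      }
      if (distributed == 0) break;  // nothing more can be placed
      unassigned -= distributed;
    }
    return assigned;
  }

  public static void main(String[] args) {
    Map<String, Long> guar = Map.of("a", 40L, "b", 40L, "c", 20L);
    Map<String, Long> pend = Map.of("a", 10L, "b", 200L, "c", 50L);
    // "a" caps out at its demand of 10; the surplus flows to "b" and "c"
    // in proportion to their guarantees: {a=10, b=60, c=30}.
    System.out.println(distribute(100, guar, pend));
  }
}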

Popular in Java

  • Updating database using SQL prepared statement
  • scheduleAtFixedRate (Timer)
  • getSupportFragmentManager (FragmentActivity)
  • getContentResolver (Context)
  • File (java.io)
    An "abstract" representation of a file system entity identified by a pathname. The pathname may be a
  • InputStream (java.io)
    A readable source of bytes.Most clients will use input streams that read data from the file system (
  • Calendar (java.util)
    Calendar is an abstract base class for converting between a Date object and a set of integer fields
  • List (java.util)
    An ordered collection (also known as a sequence). The user of this interface has precise control ove
  • Queue (java.util)
    A collection designed for holding elements prior to processing. Besides basic java.util.Collection o
  • JOptionPane (javax.swing)
  • Top plugins for WebStorm
Tabnine Logo
  • Products

    Search for Java codeSearch for JavaScript code
  • IDE Plugins

    IntelliJ IDEAWebStormVisual StudioAndroid StudioEclipseVisual Studio CodePyCharmSublime TextPhpStormVimGoLandRubyMineEmacsJupyter NotebookJupyter LabRiderDataGripAppCode
  • Company

    About UsContact UsCareers
  • Resources

    FAQBlogTabnine AcademyTerms of usePrivacy policyJava Code IndexJavascript Code Index
Get Tabnine for your IDE now