.collect(Collectors.groupingBy((o) -> o.getAgentId().getValue())) .entrySet().stream() .filter((e) -> e.getValue().size() > 0) slaveAndRackHelper.getRackIdOrDefault(offersList.get(0)), slaveId, offersList.get(0).getHostname(), slaveAndRackHelper.getTextAttributes(offersList.get(0)), slaveAndRackHelper.getReservedSlaveAttributes(offersList.get(0)));
LOG.debug("Received offer ID {} with roles {} from {} ({}) for {} cpu(s), {} memory, {} ports, and {} disk", offer.getId().getValue(), rolesInfo, offer.getHostname(), offer.getAgentId().getValue(), MesosUtils.getNumCpus(offer), MesosUtils.getMemory(offer), MesosUtils.getNumPorts(offer), MesosUtils.getDisk(offer)); mesosSchedulerClient.decline(Collections.singletonList(offer.getId())); offersToCheck.remove(offer); LOG.debug("Will decline offer {}, slave {} is not currently in a state to launch tasks", offer.getId().getValue(), offer.getHostname()); if (cachedOffers.containsKey(o.getId().getValue())) { offerCache.returnOffer(cachedOffers.remove(o.getId().getValue())); } else { offerCache.cacheOffer(start, o); offersAcceptedFromSlave.removeAll(leftoverOffers); offersAcceptedFromSlave.stream() .filter((offer) -> cachedOffers.containsKey(offer.getId().getValue())) .map((o) -> cachedOffers.remove(o.getId().getValue())) .forEach(offerCache::useOffer); acceptedOffers.addAll(offersAcceptedFromSlave.stream().map(Offer::getId).collect(Collectors.toList())); } else { offerHolder.getOffers().forEach((o) -> { if (cachedOffers.containsKey(o.getId().getValue())) { offerCache.returnOffer(cachedOffers.remove(o.getId().getValue())); } else { offerCache.cacheOffer(start, o); .filter((o) -> !acceptedOffers.contains(o.getId()) && !cachedOffers.containsKey(o.getId().getValue())) .map(Offer::getId) .collect(Collectors.toList()));
taskIds.add(taskHolder.getTask().getTaskId()); toLaunch.add(taskHolder.getMesosTask()); LOG.debug("Launching {} with offer {}", taskHolder.getTask().getTaskId(), offers.get(0).getId()); LOG.trace("Launching {} mesos task: {}", taskHolder.getTask().getTaskId(), MesosUtils.formatForLogging(taskHolder.getMesosTask())); List<Long> ports = MesosUtils.getAllPorts(offer.getResourcesList()); boolean offerCanBeReclaimedFromUnusedResources = offer.getResourcesList().stream() offer.getId().getValue(), offer.getHostname(), MesosUtils.getNumCpus(offer), MesosUtils.getMemory(offer), MesosUtils.getDisk(offer) ); currentResources = MesosUtils.subtractResources(currentResources, offer.getResourcesList());
.collect(Collectors.groupingBy((o) -> o.getAgentId().getValue())) .entrySet().stream() .filter((e) -> e.getValue().size() > 0) slaveAndRackHelper.getRackIdOrDefault(offersList.get(0)), slaveId, offersList.get(0).getHostname(), slaveAndRackHelper.getTextAttributes(offersList.get(0)), slaveAndRackHelper.getReservedSlaveAttributes(offersList.get(0)));
LOG.debug("Received offer ID {} with roles {} from {} ({}) for {} cpu(s), {} memory, {} ports, and {} disk", offer.getId().getValue(), rolesInfo, offer.getHostname(), offer.getAgentId().getValue(), MesosUtils.getNumCpus(offer), MesosUtils.getMemory(offer), MesosUtils.getNumPorts(offer), MesosUtils.getDisk(offer)); mesosSchedulerClient.decline(Collections.singletonList(offer.getId())); offersToCheck.remove(offer); LOG.debug("Will decline offer {}, slave {} is not currently in a state to launch tasks", offer.getId().getValue(), offer.getHostname()); if (cachedOffers.containsKey(o.getId().getValue())) { offerCache.returnOffer(cachedOffers.remove(o.getId().getValue())); } else { offerCache.cacheOffer(start, o); offersAcceptedFromSlave.removeAll(leftoverOffers); offersAcceptedFromSlave.stream() .filter((offer) -> cachedOffers.containsKey(offer.getId().getValue())) .map((o) -> cachedOffers.remove(o.getId().getValue())) .forEach(offerCache::useOffer); acceptedOffers.addAll(offersAcceptedFromSlave.stream().map(Offer::getId).collect(Collectors.toList())); } else { offerHolder.getOffers().forEach((o) -> { if (cachedOffers.containsKey(o.getId().getValue())) { offerCache.returnOffer(cachedOffers.remove(o.getId().getValue())); } else { offerCache.cacheOffer(start, o); .filter((o) -> !acceptedOffers.contains(o.getId()) && !cachedOffers.containsKey(o.getId().getValue())) .map(Offer::getId) .collect(Collectors.toList()));
bldr.addResources(MesosUtils.getDiskResource(desiredTaskResources.getDiskMb(), requiredRole)); bldr.setAgentId(offerHolder.getOffers().get(0).getAgentId());
Protos.Event.Offers.newBuilder() .addAllOffers(Collections.singletonList( org.apache.mesos.v1.Protos.Offer.newBuilder() .setHostname(hostname) .setId(org.apache.mesos.v1.Protos.OfferID.newBuilder().setValue(offerId))
List<CachedOffer> cachedOffersFromHolder = offerHolder.getOffers().stream().map((o) -> offerIdToCachedOffer.get(o.getId().getValue())).collect(Collectors.toList()); List<CachedOffer> unusedCachedOffers = unusedOffers.stream().map((o) -> offerIdToCachedOffer.get(o.getId().getValue())).collect(Collectors.toList()); unusedCachedOffers.forEach((cachedOffer) -> { offerIdToCachedOffer.remove(cachedOffer.getOfferId());
final AgentID agentId = offer.getAgentId(); final List<OfferID> ids = newArrayList(offer.getId()); final Map<String, List<Resource>> resources = offer.getResourcesList() .stream() .collect(groupingBy(Resource::getName));
taskIds.add(taskHolder.getTask().getTaskId()); toLaunch.add(taskHolder.getMesosTask()); LOG.debug("Launching {} with offer {}", taskHolder.getTask().getTaskId(), offers.get(0).getId()); LOG.trace("Launching {} mesos task: {}", taskHolder.getTask().getTaskId(), MesosUtils.formatForLogging(taskHolder.getMesosTask())); List<Long> ports = MesosUtils.getAllPorts(offer.getResourcesList()); boolean offerCanBeReclaimedFromUnusedResources = offer.getResourcesList().stream() offer.getId().getValue(), offer.getHostname(), MesosUtils.getNumCpus(offer), MesosUtils.getMemory(offer), MesosUtils.getDisk(offer) ); currentResources = MesosUtils.subtractResources(currentResources, offer.getResourcesList());
Protos.Event.Offers.newBuilder() .addAllOffers(Collections.singletonList( org.apache.mesos.v1.Protos.Offer.newBuilder() .setHostname(hostname) .setId(org.apache.mesos.v1.Protos.OfferID.newBuilder().setValue(offerId))
return Offer.newBuilder() .setId(OfferID.newBuilder().setValue("offer" + r.nextInt(1000)).build()) .setFrameworkId(frameworkId)
when(slaveAndRackHelper.getRackIdOrDefault(offer)).thenReturn("DEFAULT"); offer = Offer.newBuilder() .setAgentId(AgentID.newBuilder().setValue("1")) .setId(OfferID.newBuilder().setValue("1")) 1, "DEFAULT", offer.getAgentId().getValue(), offer.getHostname(), Collections.emptyMap(), Collections.emptyMap());
/**
 * Registers the slave and rack referenced by an incoming offer.
 *
 * A slave seen for the first time is either recorded as new or, when its host has
 * been marked inactive, immediately pushed toward decommission. The offer's rack is
 * checked/registered afterwards regardless of the slave result.
 *
 * @param offer the Mesos offer to inspect
 * @return the slave check result (e.g. {@code CheckResult.NEW} for a first sighting)
 */
public CheckResult checkOffer(Offer offer) {
  final String agentId = offer.getAgentId().getValue();
  final String rackId = slaveAndRackHelper.getRackIdOrDefault(offer);
  final String hostname = slaveAndRackHelper.getMaybeTruncatedHost(offer);
  final Map<String, String> attributes = slaveAndRackHelper.getTextAttributes(offer);

  final SingularitySlave slave = new SingularitySlave(agentId, hostname, rackId, attributes, Optional.absent());
  final CheckResult slaveResult = check(slave, slaveManager);

  if (slaveResult == CheckResult.NEW) {
    if (inactiveSlaveManager.isInactive(slave.getHost())) {
      // Previously-inactive hosts are not allowed to quietly rejoin the cluster.
      LOG.info("Slave {} on inactive host {} attempted to rejoin. Marking as decommissioned.", slave, hostname);
      slaveManager.changeState(
          slave,
          MachineState.STARTING_DECOMMISSION,
          Optional.of(String.format("Slave %s on inactive host %s attempted to rejoin cluster.", agentId, hostname)),
          Optional.absent());
    } else {
      LOG.info("Offer revealed a new slave {}", slave);
    }
  }

  final SingularityRack rack = new SingularityRack(rackId);
  if (check(rack, rackManager) == CheckResult.NEW) {
    LOG.info("Offer revealed a new rack {}", rack);
  }

  return slaveResult;
}
/**
 * Builds and persists a pending task plus its fully-wired {@link SingularityTask}
 * for tests.
 *
 * @param request       the request the task belongs to
 * @param deploy        the deploy the task belongs to
 * @param launchTime    launch timestamp used in the task id
 * @param instanceNo    instance number; also used to derive slave/host names when
 *                      {@code separateHosts} is set
 * @param separateHosts when true, each instance gets its own slave/host name
 * @param runId         optional run id for the pending task
 * @return the constructed task (the pending task is saved as a side effect)
 */
protected SingularityTask prepTask(SingularityRequest request, SingularityDeploy deploy, long launchTime, int instanceNo, boolean separateHosts, Optional<String> runId) {
  SingularityPendingTask pendingTask = buildPendingTask(request, deploy, launchTime, instanceNo, runId);
  SingularityTaskRequest taskRequest = new SingularityTaskRequest(request, deploy, pendingTask);

  // Separate-host placement derives per-instance slave/host names; otherwise the
  // default offer host is reused for every instance.
  Offer offer = separateHosts
      ? createOffer(125, 1024, 2048, String.format("slave%s", instanceNo), String.format("host%s", instanceNo))
      : createOffer(125, 1024, 2048);

  SingularityTaskId taskId = new SingularityTaskId(request.getId(), deploy.getId(), launchTime, instanceNo, offer.getHostname(), "rack1");
  TaskID taskIdProto = TaskID.newBuilder().setValue(taskId.toString()).build();

  TaskInfo taskInfo = TaskInfo.newBuilder()
      .setAgentId(offer.getAgentId())
      .setExecutor(ExecutorInfo.newBuilder().setExecutorId(ExecutorID.newBuilder().setValue("executorID")))
      .setTaskId(taskIdProto)
      .setName("name")
      .build();

  SingularityTask task = new SingularityTask(
      taskRequest,
      taskId,
      Collections.singletonList(mesosProtosUtils.offerFromProtos(offer)),
      mesosProtosUtils.taskFromProtos(taskInfo),
      Optional.of("rack1"));

  taskManager.savePendingTask(pendingTask);
  return task;
}
// Verifies that rescinded cached offers are removed from the offer cache: after both
// cached offers are rescinded, the scheduler poller cannot launch anything until fresh
// offers arrive.
@Test public void testOfferCacheRescindOffers() {
  configuration.setCacheOffers(true);
  configuration.setOfferCacheSize(2);
  List<Offer> offers2 = resourceOffers(); // cached as well
  // Rescind both cached offers so the cache should be empty again.
  sms.rescind(offers2.get(0).getId());
  sms.rescind(offers2.get(1).getId());
  initRequest();
  initFirstDeploy();
  requestResource.postRequest(request.toBuilder().setSlavePlacement(Optional.of(SlavePlacement.SEPARATE)).setInstances(Optional.of(2)).build(), singularityUser);
  schedulerPoller.runActionOnPoll();
  // No cached offers remain, so nothing can have been launched yet.
  Assert.assertEquals(0, taskManager.getActiveTasks().size());
  resourceOffers();
  int numTasks = taskManager.getActiveTasks().size();
  Assert.assertEquals(2, numTasks);
  startAndDeploySecondRequest();
  // Polling without new offers launches nothing for the second request...
  schedulerPoller.runActionOnPoll();
  Assert.assertEquals(numTasks, taskManager.getActiveTasks().size());
  // ...but fresh offers allow it to launch.
  resourceOffers();
  Assert.assertTrue(taskManager.getActiveTasks().size() > numTasks);
}
/**
 * Verifies that explicitly requested (literal) ports are always included in the
 * selected ports resource, both when they sit outside and inside the offered ranges,
 * and that the total port count is the dynamic count plus the literal count.
 */
@Test
public void testLiteralHostPortSelection() {
  // Requested literal ports (50, 51) occupy their own range, distinct from the
  // ranges available for the single dynamically assigned port.
  assertLiteralPortsSelected(new String[] {"23:24", "25:25", "31:32", "50:51"}, 1, Arrays.asList(50L, 51L));

  // Requested literal ports (25, 27) fall inside the only offered range, which must
  // also supply the four dynamic ports.
  assertLiteralPortsSelected(new String[] {"23:28"}, 4, Arrays.asList(25L, 27L));
}

// Builds a ports resource from the given offer ranges and asserts the requested
// literal ports are present and the total count is numPorts + requestedPorts.size().
private void assertLiteralPortsSelected(String[] ranges, int numPorts, List<Long> requestedPorts) {
  Resource resource = MesosUtils.getPortsResource(numPorts, buildOffer(ranges).getResourcesList(), requestedPorts);
  Assert.assertTrue(MesosUtils.getAllPorts(Collections.singletonList(resource)).containsAll(requestedPorts));
  Assert.assertEquals(numPorts + requestedPorts.size(), MesosUtils.getNumPorts(Collections.singletonList(resource)));
}
/**
 * Converts an offer's attributes to a flat string map, skipping the rack id
 * attribute (which is handled separately).
 *
 * Text attributes map to their raw value, scalars to their decimal string form,
 * and ranges to the {@code toString()} of the range list. Attributes of any other
 * kind are omitted.
 *
 * @param offer the offer whose attributes to convert
 * @return a mutable map of attribute name to stringified value
 */
public Map<String, String> getTextAttributes(Offer offer) {
  final Map<String, String> textAttributes = new HashMap<>();
  for (Attribute attribute : offer.getAttributesList()) {
    // The rack id attribute is excluded here; it is surfaced via the rack helpers.
    if (attribute.getName().equals(rackIdAttributeKey)) {
      continue;
    }
    final String name = attribute.getName();
    if (attribute.hasText()) {
      textAttributes.put(name, attribute.getText().getValue());
    } else if (attribute.hasScalar()) {
      textAttributes.put(name, Double.toString(attribute.getScalar().getValue()));
    } else if (attribute.hasRanges()) {
      textAttributes.put(name, attribute.getRanges().getRangeList().toString());
    }
  }
  return textAttributes;
}
// Convenience overload: memory from the offer's resource list, with no role filter
// (absent role) — delegates to getMemory(List<Resource>, Optional<String>).
public static double getMemory(Offer offer) { return getMemory(offer.getResourcesList(), Optional.<String>absent()); }
// Convenience overload: applies the hostname-truncation rules (see the String
// overload) to the offer's hostname.
public String getMaybeTruncatedHost(Offer offer) { return getMaybeTruncatedHost(offer.getHostname()); }