ConsistentHash<Node> hash = new ConsistentHash<Node>(NODE_HASH); hash.addAll(hashSource); for (Node n : hash.list(fullDisplayName)) { final Computer c = n.toComputer(); if (c == null || c.isOffline()) {
/**
 * Adds a new node to the ring, replicated the default number of times.
 *
 * @param node the node to insert.
 */
public synchronized void add(T node) {
    this.add(node, defaultReplication);
}
/** * Compresses a string into an integer with MD5. */ private int md5(String s) { MD5 md5 = new MD5(); md5.update(s.getBytes()); byte[] digest = new byte[16]; md5.digest(digest); // 16 bytes -> 4 bytes for (int i=0; i<4; i++) digest[i] ^= digest[i+4]+digest[i+8]+digest[i+12]; return (b2i(digest[0])<< 24)|(b2i(digest[1])<<16)|(b2i(digest[2])<< 8)|b2i(digest[3]); }
@Override public Mapping map(Task task, MappingWorksheet ws) { // build consistent hash for each work chunk List<ConsistentHash<ExecutorChunk>> hashes = new ArrayList<ConsistentHash<ExecutorChunk>>(ws.works.size()); for (int i=0; i<ws.works.size(); i++) { ConsistentHash<ExecutorChunk> hash = new ConsistentHash<ExecutorChunk>(new Hash<ExecutorChunk>() { public String hash(ExecutorChunk node) { return node.getName(); } }); // Build a Map to pass in rather than repeatedly calling hash.add() because each call does lots of expensive work List<ExecutorChunk> chunks = ws.works(i).applicableExecutorChunks(); Map<ExecutorChunk, Integer> toAdd = Maps.newHashMapWithExpectedSize(chunks.size()); for (ExecutorChunk ec : chunks) { toAdd.put(ec, ec.size()*100); } hash.addAll(toAdd); hashes.add(hash); } // do a greedy assignment Mapping m = ws.new Mapping(); assert m.size()==ws.works.size(); // just so that you the reader of the source code don't get confused with the for loop index if (assignGreedily(m,task,hashes,0)) { assert m.isCompletelyValid(); return m; } else return null; }
/**
 * Promotes a queued item to buildable, dispatching flyweight tasks
 * directly onto an online node when flyweight support is enabled.
 *
 * <p>Flyweight tasks bypass the normal buildables queue: a consistent hash
 * over all nodes (weighted by executor count x100) picks a stable node
 * preference order for this task, and the first online node that matches
 * the task's assigned label receives the work immediately. If no such node
 * is found, the item falls through to regular scheduling.
 *
 * @param p the queue item to make buildable; always recorded in
 *          {@code buildables} unless a flyweight dispatch succeeded.
 */
private void makeBuildable(BuildableItem p) {
    if(Hudson.FLYWEIGHT_SUPPORT && p.task instanceof FlyweightTask && !ifBlockedByHudsonShutdown(p.task)) {
        // Hash nodes by name so the same task prefers the same nodes across calls.
        ConsistentHash<Node> hash = new ConsistentHash<Node>(new Hash<Node>() {
            public String hash(Node node) {
                return node.getNodeName();
            }
        });
        Hudson h = Hudson.getInstance();
        // Weight each node by its executor count (x100 replicas) so larger
        // nodes get proportionally more of the ring; the master is included.
        hash.add(h, h.getNumExecutors()*100);
        for (Node n : h.getNodes())
            hash.add(n, n.getNumExecutors()*100);
        Label lbl = p.task.getAssignedLabel();
        // Walk nodes in this task's hash preference order; take the first usable one.
        for (Node n : hash.list(p.task.getFullDisplayName())) {
            Computer c = n.toComputer();
            if (c==null || c.isOffline())    continue;   // node not usable right now
            if (lbl!=null && !lbl.contains(n))  continue; // label restriction not satisfied
            c.startFlyWeightTask(new WorkUnitContext(p).createWorkUnit(p.task));
            pendings.add(p);
            return; // dispatched directly; do not queue as buildable
        }
        // If execution gets here, we couldn't schedule it anywhere,
        // so do the scheduling like other normal jobs.
    }
    buildables.put(p.task, p);
}
@Override public Mapping map(Task task, MappingWorksheet ws) { // build consistent hash for each work chunk List<ConsistentHash<ExecutorChunk>> hashes = new ArrayList<ConsistentHash<ExecutorChunk>>(ws.works.size()); for (int i=0; i<ws.works.size(); i++) { ConsistentHash<ExecutorChunk> hash = new ConsistentHash<ExecutorChunk>(new Hash<ExecutorChunk>() { public String hash(ExecutorChunk node) { return node.getName(); } }); for (ExecutorChunk ec : ws.works(i).applicableExecutorChunks()) hash.add(ec,ec.size()*100); hashes.add(hash); } // do a greedy assignment Mapping m = ws.new Mapping(); assert m.size()==ws.works.size(); // just so that you the reader of the source code don't get confused with the for loop index if (assignGreedily(m,task,hashes,0)) { assert m.isCompletelyValid(); return m; } else return null; }
/**
 * Inserts {@code node} into the ring with the given number of replica
 * points, or removes it entirely when {@code replica} is zero.
 *
 * <p>Each replica point is placed at {@code md5(seed + ':' + i)}, where the
 * seed comes from the configured {@code hash} function, so a node's points
 * are stable across calls.
 *
 * @param node    the node to add or remove.
 * @param replica number of replica points; 0 removes the node.
 */
private synchronized void addInternal(T node, int replica) {
    if (replica == 0) {
        // Zero replicas means the node leaves the ring entirely.
        items.remove(node);
        return;
    }
    String seed = hash.hash(node);
    Point[] points = new Point[replica];
    for (int i = 0; i < replica; i++) {
        points[i] = new Point(md5(seed + ':' + i), node);
    }
    items.put(node, points);
}
/**
 * Recursively assigns an executor chunk to work chunk {@code i} and all
 * subsequent work chunks, backtracking when a candidate leads to a dead end.
 *
 * @param m      the mapping being built up; mutated in place, and slot
 *               {@code i} is reset to {@code null} before reporting failure.
 * @param task   the task whose affinity key seeds the hash-ring lookup.
 * @param hashes one consistent hash of candidate executors per work chunk.
 * @param i      index of the work chunk to assign next.
 * @return true if work chunks {@code i..hashes.size()-1} were all assigned.
 */
private boolean assignGreedily(Mapping m, Task task, List<ConsistentHash<ExecutorChunk>> hashes, int i) {
    if (i==hashes.size())   return true;    // fully assigned

    // Suffix the key with the chunk index (omitted for chunk 0) so different
    // chunks of the same task probe different, but stable, ring positions.
    String key = task.getAffinityKey() + (i>0 ? String.valueOf(i) : "");

    // Candidates come back in this task's hash preference order.
    for (ExecutorChunk ec : hashes.get(i).list(key)) {
        // let's attempt this assignment
        m.assign(i, ec);

        if (m.isPartiallyValid() && assignGreedily(m, task, hashes, i+1))
            return true;    // successful greedy allocation

        // otherwise 'ec' wasn't a good fit for us. try next.
    }

    // every attempt failed; clear this slot before backtracking to the caller
    m.assign(i, null);
    return false;
}
};
/**
 * Promotes a queued item to buildable, dispatching flyweight tasks
 * directly onto an online node when flyweight support is enabled.
 *
 * <p>A consistent hash over all nodes (weighted by executor count x100)
 * yields a stable node preference order for this task; the first online,
 * label-compatible node receives the work unit immediately and the item is
 * recorded as pending. Otherwise the item is queued like a normal job.
 *
 * @param p the queue item to make buildable.
 */
private void makeBuildable(BuildableItem p) {
    if(Hudson.FLYWEIGHT_SUPPORT && p.task instanceof FlyweightTask && !ifBlockedByHudsonShutdown(p.task)) {
        // Hash nodes by name so the same task prefers the same nodes across calls.
        ConsistentHash<Node> hash = new ConsistentHash<Node>(new Hash<Node>() {
            public String hash(Node node) {
                return node.getNodeName();
            }
        });
        Hudson h = Hudson.getInstance();
        // The master participates too, weighted like any other node.
        hash.add(h, h.getNumExecutors()*100);
        for (Node n : h.getNodes())
            hash.add(n, n.getNumExecutors()*100);
        Label lbl = p.task.getAssignedLabel();
        for (Node n : hash.list(p.task.getFullDisplayName())) {
            Computer c = n.toComputer();
            if (c==null || c.isOffline())    continue;   // skip unusable nodes
            if (lbl!=null && !lbl.contains(n))  continue; // skip label mismatches
            c.startFlyWeightTask(new WorkUnitContext(p).createWorkUnit(p.task));
            pendings.add(p);
            return; // dispatched directly; nothing more to do
        }
        // If execution gets here, we couldn't schedule it anywhere,
        // so do the scheduling like other normal jobs.
    }
    buildables.put(p.task, p);
}
@Override public Mapping map(Task task, MappingWorksheet ws) { // build consistent hash for each work chunk List<ConsistentHash<ExecutorChunk>> hashes = new ArrayList<ConsistentHash<ExecutorChunk>>(ws.works.size()); for (int i=0; i<ws.works.size(); i++) { ConsistentHash<ExecutorChunk> hash = new ConsistentHash<ExecutorChunk>(new Hash<ExecutorChunk>() { public String hash(ExecutorChunk node) { return node.getName(); } }); for (ExecutorChunk ec : ws.works(i).applicableExecutorChunks()) hash.add(ec,ec.size()*100); hashes.add(hash); } // do a greedy assignment Mapping m = ws.new Mapping(); assert m.size()==ws.works.size(); // just so that you the reader of the source code don't get confused with the for loop index if (assignGreedily(m,task,hashes,0)) { assert m.isCompletelyValid(); return m; } else return null; }
/**
 * Registers {@code node} on the ring with {@code replica} points, replacing
 * any previous registration; a replica count of zero deletes the node.
 *
 * <p>Point positions are derived from {@code md5(seed + ':' + i)} with the
 * seed produced by the configured {@code hash} function, so placement is
 * deterministic for a given node.
 *
 * @param node    the node whose ring entry is updated.
 * @param replica number of points to place; 0 removes the node.
 */
private synchronized void addInternal(T node, int replica) {
    if (replica == 0) {
        // A node with no replicas is simply dropped from the ring.
        items.remove(node);
        return;
    }
    final String seed = hash.hash(node);
    final Point[] ringPoints = new Point[replica];
    for (int idx = 0; idx < replica; idx++) {
        ringPoints[idx] = new Point(md5(seed + ':' + idx), node);
    }
    items.put(node, ringPoints);
}
/**
 * Recursively assigns an executor chunk to work chunk {@code i} and all
 * subsequent work chunks, backtracking when a candidate leads to a dead end.
 *
 * @param m      the mapping being built up; mutated in place, and slot
 *               {@code i} is reset to {@code null} before reporting failure.
 * @param task   the task whose full display name seeds the hash-ring lookup.
 * @param hashes one consistent hash of candidate executors per work chunk.
 * @param i      index of the work chunk to assign next.
 * @return true if work chunks {@code i..hashes.size()-1} were all assigned.
 */
private boolean assignGreedily(Mapping m, Task task, List<ConsistentHash<ExecutorChunk>> hashes, int i) {
    if (i==hashes.size())   return true;    // fully assigned

    // Suffix the key with the chunk index (omitted for chunk 0) so different
    // chunks of the same task probe different, but stable, ring positions.
    String key = task.getFullDisplayName() + (i>0 ? String.valueOf(i) : "");

    // Candidates come back in this task's hash preference order.
    for (ExecutorChunk ec : hashes.get(i).list(key)) {
        // let's attempt this assignment
        m.assign(i, ec);

        if (m.isPartiallyValid() && assignGreedily(m, task, hashes, i+1))
            return true;    // successful greedy allocation

        // otherwise 'ec' wasn't a good fit for us. try next.
    }

    // every attempt failed; clear this slot before backtracking to the caller
    m.assign(i, null);
    return false;
}
};
/**
 * Chooses an executor for every work chunk via consistent hashing.
 *
 * <p>For each work chunk, a consistent hash of its applicable executor
 * chunks is built (weighted by {@code ec.size()*100}); a greedy
 * backtracking search then assigns one executor per chunk.
 *
 * @return a completely valid mapping, or {@code null} if no assignment exists.
 */
@Override
public Mapping map(Task task, MappingWorksheet ws) {
    // build consistent hash for each work chunk
    List<ConsistentHash<ExecutorChunk>> hashes = new ArrayList<ConsistentHash<ExecutorChunk>>(ws.works.size());
    for (int i=0; i<ws.works.size(); i++) {
        ConsistentHash<ExecutorChunk> hash = new ConsistentHash<ExecutorChunk>(new Hash<ExecutorChunk>() {
            public String hash(ExecutorChunk node) {
                return node.getName();
            }
        });
        // Build a Map to pass in rather than repeatedly calling hash.add()
        // because each call does lots of expensive work
        List<ExecutorChunk> chunks = ws.works(i).applicableExecutorChunks();
        Map<ExecutorChunk, Integer> toAdd = Maps.newHashMapWithExpectedSize(chunks.size());
        for (ExecutorChunk ec : chunks) {
            toAdd.put(ec, ec.size()*100);   // weight scales with chunk size (x100)
        }
        hash.addAll(toAdd);
        hashes.add(hash);
    }

    // do a greedy assignment
    Mapping m = ws.new Mapping();
    assert m.size()==ws.works.size();   // just so that you the reader of the source code don't get confused with the for loop index

    if (assignGreedily(m,task,hashes,0)) {
        assert m.isCompletelyValid();
        return m;
    } else
        return null;
}
/**
 * Promotes a queued item to buildable, dispatching flyweight tasks
 * directly onto an online node when flyweight support is enabled.
 *
 * <p>All nodes (including the master) are placed on a consistent hash ring
 * weighted by executor count x100; the task's full display name selects a
 * stable preference order, and the first online node satisfying the task's
 * label gets the work unit immediately. With no suitable node, the item is
 * queued like any other job.
 *
 * @param p the queue item to make buildable.
 */
private void makeBuildable(BuildableItem p) {
    if(Hudson.FLYWEIGHT_SUPPORT && p.task instanceof FlyweightTask && !ifBlockedByHudsonShutdown(p.task)) {
        // Hash nodes by name so the same task prefers the same nodes across calls.
        ConsistentHash<Node> hash = new ConsistentHash<Node>(new Hash<Node>() {
            public String hash(Node node) {
                return node.getNodeName();
            }
        });
        Hudson h = Hudson.getInstance();
        // The master participates too, weighted like any other node.
        hash.add(h, h.getNumExecutors()*100);
        for (Node n : h.getNodes())
            hash.add(n, n.getNumExecutors()*100);
        Label lbl = p.task.getAssignedLabel();
        for (Node n : hash.list(p.task.getFullDisplayName())) {
            Computer c = n.toComputer();
            if (c==null || c.isOffline())    continue;   // skip unusable nodes
            if (lbl!=null && !lbl.contains(n))  continue; // skip label mismatches
            c.startFlyWeightTask(new WorkUnitContext(p).createWorkUnit(p.task));
            pendings.add(p);
            return; // dispatched directly; nothing more to do
        }
        // If execution gets here, we couldn't schedule it anywhere,
        // so do the scheduling like other normal jobs.
    }
    buildables.put(p.task, p);
}