/**
 * Attempts to transition this worker from STOP_SIGNALLED to STOPPED.
 *
 * @return true only if the current state was a stop state and we won the CAS
 */
private boolean stop()
{
    // not stop-signalled: nothing to do
    if (!get().isStop())
        return false;
    return compareAndSet(Work.STOP_SIGNALLED, Work.STOPPED);
}
// NOTE(review): this line looks like a fragment of the worker's run() loop
// (spin/park/assign handling) whose enclosing method and braces are missing from
// this view -- e.g. `continue`/`break` and the unused `boolean shutdown` only make
// sense inside the full loop. Confirm against the complete file before editing.
if (isSpinning() && !selfAssign()) doWaitSpin(); continue; if (stop()) while (isStopped()) LockSupport.park(); assigned = get().assigned; if (assigned == null) continue; set(Work.WORKING); boolean shutdown; while (true) if (!selfAssign()) startSpinning(); if (get().assigned != null) assigned = get().assigned; set(Work.WORKING); if (assign(Work.STOPPED, true)) break;
/**
 * Attempts to CAS this worker from its current state into {@code work}, retrying
 * while the current state still permits assignment.
 *
 * The post-CAS bookkeeping order matters: spin count is decremented first, then
 * stop states are registered in the descheduled map, then a stopped worker is
 * unparked if it received real work.
 *
 * @param work the new state to install
 * @param self true when the worker is assigning work to itself
 * @return true if the state was successfully installed, false if the current
 *         state forbids assignment
 */
boolean assign(Work work, boolean self)
{
    Work state = get();
    while (state.canAssign(self))
    {
        // CAS lost a race: reload the state and re-check assignability
        if (!compareAndSet(state, work))
        {
            state = get();
            continue;
        }
        // if we were spinning, exit the state (decrement the count); this is valid even if we are already spinning,
        // as the assigning thread will have incremented the spinningCount
        if (state.isSpinning())
            stopSpinning();
        // if we're being descheduled, place ourselves in the descheduled collection
        if (work.isStop())
            pool.descheduled.put(workerId, this);
        // if we're currently stopped, and the new state is not a stop signal
        // (which we can immediately convert to stopped), unpark the worker
        if (state.isStopped() && (!work.isStop() || !stop()))
            LockSupport.unpark(thread);
        return true;
    }
    return false;
}
void schedule(Work work) { // we try to hand-off our work to the spinning queue before the descheduled queue, even though we expect it to be empty // all we're doing here is hoping to find a worker without work to do, but it doesn't matter too much what we find; // we atomically set the task so even if this were a collection of all workers it would be safe, and if they are both // empty we schedule a new thread Map.Entry<Long, SEPWorker> e; while (null != (e = spinning.pollFirstEntry()) || null != (e = descheduled.pollFirstEntry())) if (e.getValue().assign(work, false)) return; if (!work.isStop()) new SEPWorker(workerId.incrementAndGet(), work, this); }
private boolean selfAssign() { // if we aren't permitted to assign in this state, fail if (!get().canAssign(true)) return false; for (SEPExecutor exec : pool.executors) { if (exec.takeWorkPermit(true)) { Work work = new Work(exec); // we successfully started work on this executor, so we must either assign it to ourselves or ... if (assign(work, true)) return true; // ... if we fail, schedule it to another worker pool.schedule(work); // and return success as we must have already been assigned a task assert get().assigned != null; return true; } } return false; }
/** @return whether this worker's current state is a spinning state */
private boolean isSpinning()
{
    Work current = get();
    return current.isSpinning();
}
/**
 * Creates a worker bound to {@code pool} and immediately starts its daemon
 * thread in the given initial state.
 *
 * The initial state is published via set() before start(), so the new thread
 * observes it; note that {@code this} escapes to the running thread before the
 * constructor returns.
 *
 * NOTE(review): a sibling constructor in this file builds the thread as a
 * FastThreadLocalThread rather than a plain Thread -- confirm which variant is
 * intended; they should probably agree.
 */
SEPWorker(Long workerId, Work initialState, SharedExecutorPool pool)
{
    this.pool = pool;
    this.workerId = workerId;
    thread = new Thread(this, pool.poolName + "-Worker-" + workerId);
    thread.setDaemon(true);
    set(initialState);
    thread.start();
}
/**
 * Parks this worker for a randomized interval while registered in the pool's
 * spinning collection, then accounts the time spun against the pool-wide
 * stopCheck budget and possibly stops a worker via maybeStop().
 *
 * The prevStopCheck/soleSpinnerSpinTime bookkeeping at the end detects whether
 * any other thread contributed to stopCheck while we spun.
 */
private void doWaitSpin()
{
    // pick a random sleep interval based on the number of threads spinning, so that
    // we should always have a thread about to wake up, but most threads are sleeping
    long sleep = 10000L * pool.spinningCount.get();
    sleep = Math.min(1000000, sleep);
    sleep *= Math.random();
    sleep = Math.max(10000, sleep);
    long start = System.nanoTime();
    // place ourselves in the spinning collection; if we clash with another thread just exit
    Long target = start + sleep;
    if (pool.spinning.putIfAbsent(target, this) != null)
        return;
    LockSupport.parkNanos(sleep);
    // remove ourselves (if haven't been already) - we should be at or near the front, so should be cheap-ish
    pool.spinning.remove(target, this);
    // finish timing and grab spinningTime (before we finish timing so it is under rather than overestimated)
    long end = System.nanoTime();
    long spin = end - start;
    long stopCheck = pool.stopCheck.addAndGet(spin);
    maybeStop(stopCheck, end);
    // if stopCheck advanced by exactly our own spin, no other thread contributed:
    // we were the sole spinner for this interval
    if (prevStopCheck + spin == stopCheck)
        soleSpinnerSpinTime += spin;
    else
        soleSpinnerSpinTime = 0;
    prevStopCheck = stopCheck;
}
/**
 * Decides, based on the pool-wide stopCheck accumulator, whether the pool has
 * been spinning excessively and a worker should be stop-signalled.
 *
 * Three cases: stopCheck has caught up with the present (stop a worker and push
 * it back into the past via CAS); this worker has been the sole spinner for
 * longer than stopCheckInterval (stop only itself); or stopCheck has fallen too
 * far behind the present (CAS it forward so future spins can influence it).
 *
 * @param stopCheck the accumulator value observed by the caller
 * @param now       the caller's System.nanoTime() reading
 */
private void maybeStop(long stopCheck, long now)
{
    long delta = now - stopCheck;
    if (delta <= 0)
    {
        // if stopCheck has caught up with present, we've been spinning too much, so if we can atomically
        // set it to the past again, we should stop a worker
        if (pool.stopCheck.compareAndSet(stopCheck, now - stopCheckInterval))
        {
            // try and stop ourselves;
            // if we've already been assigned work stop another worker
            if (!assign(Work.STOP_SIGNALLED, true))
                pool.schedule(Work.STOP_SIGNALLED);
        }
    }
    else if (soleSpinnerSpinTime > stopCheckInterval && pool.spinningCount.get() == 1)
    {
        // permit self-stopping
        assign(Work.STOP_SIGNALLED, true);
    }
    else
    {
        // if stop check has gotten too far behind present, update it so new spins can affect it
        while (delta > stopCheckInterval * 2 && !pool.stopCheck.compareAndSet(stopCheck, now - stopCheckInterval))
        {
            stopCheck = pool.stopCheck.get();
            delta = now - stopCheck;
        }
    }
}
// NOTE(review): this line looks like a fragment of the worker's run() loop
// (spin/park/assign handling) whose enclosing method and braces are missing from
// this view -- e.g. `continue`/`break` and the unused `boolean shutdown` only make
// sense inside the full loop. Confirm against the complete file before editing.
if (isSpinning() && !selfAssign()) doWaitSpin(); continue; if (stop()) while (isStopped()) LockSupport.park(); assigned = get().assigned; if (assigned == null) continue; set(Work.WORKING); boolean shutdown; while (true) if (!selfAssign()) startSpinning(); if (get().assigned != null) assigned = get().assigned; set(Work.WORKING); if (assign(Work.STOPPED, true)) break;
/**
 * Attempts to CAS this worker from its current state into {@code work}, retrying
 * while the current state still permits assignment.
 *
 * The post-CAS bookkeeping order matters: spin count is decremented first, then
 * stop states are registered in the descheduled map, then a stopped worker is
 * unparked if it received real work.
 *
 * @param work the new state to install
 * @param self true when the worker is assigning work to itself
 * @return true if the state was successfully installed, false if the current
 *         state forbids assignment
 */
boolean assign(Work work, boolean self)
{
    Work state = get();
    while (state.canAssign(self))
    {
        // CAS lost a race: reload the state and re-check assignability
        if (!compareAndSet(state, work))
        {
            state = get();
            continue;
        }
        // if we were spinning, exit the state (decrement the count); this is valid even if we are already spinning,
        // as the assigning thread will have incremented the spinningCount
        if (state.isSpinning())
            stopSpinning();
        // if we're being descheduled, place ourselves in the descheduled collection
        if (work.isStop())
            pool.descheduled.put(workerId, this);
        // if we're currently stopped, and the new state is not a stop signal
        // (which we can immediately convert to stopped), unpark the worker
        if (state.isStopped() && (!work.isStop() || !stop()))
            LockSupport.unpark(thread);
        return true;
    }
    return false;
}
void schedule(Work work) { // we try to hand-off our work to the spinning queue before the descheduled queue, even though we expect it to be empty // all we're doing here is hoping to find a worker without work to do, but it doesn't matter too much what we find; // we atomically set the task so even if this were a collection of all workers it would be safe, and if they are both // empty we schedule a new thread Map.Entry<Long, SEPWorker> e; while (null != (e = spinning.pollFirstEntry()) || null != (e = descheduled.pollFirstEntry())) if (e.getValue().assign(work, false)) return; if (!work.isStop()) new SEPWorker(workerId.incrementAndGet(), work, this); }
private boolean selfAssign() { // if we aren't permitted to assign in this state, fail if (!get().canAssign(true)) return false; for (SEPExecutor exec : pool.executors) { if (exec.takeWorkPermit(true)) { Work work = new Work(exec); // we successfully started work on this executor, so we must either assign it to ourselves or ... if (assign(work, true)) return true; // ... if we fail, schedule it to another worker pool.schedule(work); // and return success as we must have already been assigned a task assert get().assigned != null; return true; } } return false; }
/** @return whether this worker's current state is a stopped state */
private boolean isStopped()
{
    Work current = get();
    return current.isStopped();
}
/**
 * Creates a worker bound to {@code pool} and immediately starts its daemon
 * thread (a Netty FastThreadLocalThread) in the given initial state.
 *
 * The initial state is published via set() before start(), so the new thread
 * observes it; note that {@code this} escapes to the running thread before the
 * constructor returns.
 */
SEPWorker(Long workerId, Work initialState, SharedExecutorPool pool)
{
    this.pool = pool;
    this.workerId = workerId;
    thread = new FastThreadLocalThread(this, pool.poolName + "-Worker-" + workerId);
    thread.setDaemon(true);
    set(initialState);
    thread.start();
}
/**
 * Parks this worker for a randomized interval while registered in the pool's
 * spinning collection, then accounts the time spun against the pool-wide
 * stopCheck budget and possibly stops a worker via maybeStop().
 *
 * The prevStopCheck/soleSpinnerSpinTime bookkeeping at the end detects whether
 * any other thread contributed to stopCheck while we spun.
 */
private void doWaitSpin()
{
    // pick a random sleep interval based on the number of threads spinning, so that
    // we should always have a thread about to wake up, but most threads are sleeping
    long sleep = 10000L * pool.spinningCount.get();
    sleep = Math.min(1000000, sleep);
    sleep *= Math.random();
    sleep = Math.max(10000, sleep);
    long start = System.nanoTime();
    // place ourselves in the spinning collection; if we clash with another thread just exit
    Long target = start + sleep;
    if (pool.spinning.putIfAbsent(target, this) != null)
        return;
    LockSupport.parkNanos(sleep);
    // remove ourselves (if haven't been already) - we should be at or near the front, so should be cheap-ish
    pool.spinning.remove(target, this);
    // finish timing and grab spinningTime (before we finish timing so it is under rather than overestimated)
    long end = System.nanoTime();
    long spin = end - start;
    long stopCheck = pool.stopCheck.addAndGet(spin);
    maybeStop(stopCheck, end);
    // if stopCheck advanced by exactly our own spin, no other thread contributed:
    // we were the sole spinner for this interval
    if (prevStopCheck + spin == stopCheck)
        soleSpinnerSpinTime += spin;
    else
        soleSpinnerSpinTime = 0;
    prevStopCheck = stopCheck;
}
/**
 * Decides, based on the pool-wide stopCheck accumulator, whether the pool has
 * been spinning excessively and a worker should be stop-signalled.
 *
 * Three cases: stopCheck has caught up with the present (stop a worker and push
 * it back into the past via CAS); this worker has been the sole spinner for
 * longer than stopCheckInterval (stop only itself); or stopCheck has fallen too
 * far behind the present (CAS it forward so future spins can influence it).
 *
 * @param stopCheck the accumulator value observed by the caller
 * @param now       the caller's System.nanoTime() reading
 */
private void maybeStop(long stopCheck, long now)
{
    long delta = now - stopCheck;
    if (delta <= 0)
    {
        // if stopCheck has caught up with present, we've been spinning too much, so if we can atomically
        // set it to the past again, we should stop a worker
        if (pool.stopCheck.compareAndSet(stopCheck, now - stopCheckInterval))
        {
            // try and stop ourselves;
            // if we've already been assigned work stop another worker
            if (!assign(Work.STOP_SIGNALLED, true))
                pool.schedule(Work.STOP_SIGNALLED);
        }
    }
    else if (soleSpinnerSpinTime > stopCheckInterval && pool.spinningCount.get() == 1)
    {
        // permit self-stopping
        assign(Work.STOP_SIGNALLED, true);
    }
    else
    {
        // if stop check has gotten too far behind present, update it so new spins can affect it
        while (delta > stopCheckInterval * 2 && !pool.stopCheck.compareAndSet(stopCheck, now - stopCheckInterval))
        {
            stopCheck = pool.stopCheck.get();
            delta = now - stopCheck;
        }
    }
}
// NOTE(review): this line looks like a fragment of the worker's run() loop
// (spin/park/assign handling) whose enclosing method and braces are missing from
// this view -- e.g. `continue`/`break` and the unused `boolean shutdown` only make
// sense inside the full loop. Confirm against the complete file before editing.
if (isSpinning() && !selfAssign()) doWaitSpin(); continue; if (stop()) while (isStopped()) LockSupport.park(); assigned = get().assigned; if (assigned == null) continue; set(Work.WORKING); boolean shutdown; while (true) if (!selfAssign()) startSpinning(); if (get().assigned != null) assigned = get().assigned; set(Work.WORKING); if (assign(Work.STOPPED, true)) break;
/**
 * Attempts to CAS this worker from its current state into {@code work}, retrying
 * while the current state still permits assignment.
 *
 * The post-CAS bookkeeping order matters: spin count is decremented first, then
 * stop states are registered in the descheduled map, then a stopped worker is
 * unparked if it received real work.
 *
 * @param work the new state to install
 * @param self true when the worker is assigning work to itself
 * @return true if the state was successfully installed, false if the current
 *         state forbids assignment
 */
boolean assign(Work work, boolean self)
{
    Work state = get();
    while (state.canAssign(self))
    {
        // CAS lost a race: reload the state and re-check assignability
        if (!compareAndSet(state, work))
        {
            state = get();
            continue;
        }
        // if we were spinning, exit the state (decrement the count); this is valid even if we are already spinning,
        // as the assigning thread will have incremented the spinningCount
        if (state.isSpinning())
            stopSpinning();
        // if we're being descheduled, place ourselves in the descheduled collection
        if (work.isStop())
            pool.descheduled.put(workerId, this);
        // if we're currently stopped, and the new state is not a stop signal
        // (which we can immediately convert to stopped), unpark the worker
        if (state.isStopped() && (!work.isStop() || !stop()))
            LockSupport.unpark(thread);
        return true;
    }
    return false;
}