/** {@inheritDoc} */
@Override public Serializable execute() {
    assert taskSes != null;

    try {
        // Block (up to 20s) until the attribute reaches the expected final
        // value; waitForAttribute returns false on timeout.
        boolean attr = taskSes.waitForAttribute(TEST_ATTR_KEY, SETS_ATTR_COUNT, 20000);

        assert attr : "Failed to wait for attribute value.";
    }
    catch (InterruptedException e) {
        throw new IgniteException("Got interrupted while waiting for attribute to be set.", e);
    }

    // Re-read and verify the final value actually stored in the session.
    Integer res = taskSes.getAttribute(TEST_ATTR_KEY);

    assert res != null && res.equals(SETS_ATTR_COUNT) : "Unexpected result [res=" + res +
        ", expected=" + SETS_ATTR_COUNT + ']';

    log.info("Session attribute order was correct for job [res=" + res +
        ", expected=" + SETS_ATTR_COUNT + ']');

    // Echo the final value back so the other side can observe it.
    taskSes.setAttribute(TEST_ATTR_KEY, SETS_ATTR_COUNT);

    return null;
}
}
/**
 * {@inheritDoc}
 * <p>
 * Always declines failover: logs a warning describing the failed job and
 * returns {@code null} so the job is not retried on another node.
 */
@Override public ClusterNode failover(FailoverContext ctx, List<ClusterNode> top) {
    Object failedJob = ctx.getJobResult().getJob();
    String taskName = ctx.getTaskSession().getTaskName();
    Object sesId = ctx.getTaskSession().getId();

    U.warn(log, "Returning 'null' node for failed job (failover will not happen) [job=" +
        failedJob + ", task=" + taskName + ", sessionId=" + sesId + ']');

    return null;
}
/** {@inheritDoc} */
@Override public ComputeJobResultPolicy result(ComputeJobResult res, List<ComputeJobResult> received) {
    if (res.getException() != null)
        throw new IgniteException("Job resulted in error: " + res, res.getException());

    // Sibling count must track the number of jobs sent so far.
    assert ses.getJobSiblings().size() == jobCnt;

    if (jobCnt < JOB_COUNT) {
        // Send one more job from the result callback; the new sibling must be
        // visible in the session immediately after send.
        mapper.send(new TestJob(++jobCnt));

        assert ses.getJobSiblings().size() == jobCnt;
    }

    return ComputeJobResultPolicy.WAIT;
}
/**
 * {@inheritDoc}
 * <p>
 * Exercises every checkpoint operation (save, load, remove) exactly once so
 * that all checkpoint events are generated, then returns a marker string.
 */
@Override public String execute() {
    assert taskSes != null;

    String key = "testAllCheckpoint";

    taskSes.saveCheckpoint(key, "CheckpointTestState");
    taskSes.loadCheckpoint(key);
    taskSes.removeCheckpoint(key);

    return "GridAllCheckpointEventsSuccess-test-all-checkpoint-event-success.";
}
}
/** {@inheritDoc} */
@Override public ClusterNode getBalancedNode(ComputeTaskSession ses, List<ClusterNode> top, ComputeJob job) {
    A.notNull(ses, "ses", top, "top");

    if (isPerTask) {
        // Note that every session operates from single thread which
        // allows us to use concurrent map and avoid synchronization.
        // NOTE(review): the get-then-put below is race-free ONLY under that
        // single-thread-per-session assumption — confirm it still holds.
        RoundRobinPerTaskLoadBalancer taskBalancer = perTaskBalancers.get(ses.getId());

        if (taskBalancer == null)
            perTaskBalancers.put(ses.getId(), taskBalancer = new RoundRobinPerTaskLoadBalancer());

        return taskBalancer.getBalancedNode(top);
    }

    // Non-per-task mode: single shared round-robin balancer.
    return balancer.getBalancedNode(top);
}
// NOTE(review): fragment of a larger method — the trailing request-construction
// expression is cut off mid-argument-list; code left byte-identical.
assert ses != null;

final UUID taskNodeId = ses.getTaskNodeId();

// Per-request response topic, made unique via the topicIdGen counter.
Object topic = TOPIC_JOB_SIBLINGS.topic(ses.getId(), topicIdGen.getAndIncrement());

// Local destination gets the topic object directly; remote gets it marshalled.
new GridJobSiblingsRequest(ses.getId(), loc ? topic : null, loc ? null : U.marshal(marsh, topic)),
/**
 * {@inheritDoc}
 * <p>
 * Gives the session attribute a short time to propagate, then verifies that
 * the attribute is visible with the expected value.
 *
 * @return Always {@code null}.
 * @throws IgniteException If the reducing thread is interrupted while sleeping.
 */
@Override public Object reduce(List<ComputeJobResult> results) {
    try {
        Thread.sleep(100);
    }
    catch (InterruptedException e) {
        // Restore the interrupt flag before converting to an unchecked exception.
        Thread.currentThread().interrupt();

        // Fixed duplicated word ("while while") in the error message.
        throw new IgniteException("Got interrupted while sleeping.", e);
    }

    Serializable ser = taskSes.getAttribute(TEST_ATTR_KEY);

    assert ser != null;
    assert attrVal.equals(ser);

    return null;
}
}
/** {@inheritDoc} */
@Override public Serializable execute() {
    // Attribute "wait type" under test, passed as the first task argument.
    WaitAttributeType m = argument(0);

    // Attributes set by the task side (prefix "fut") must already be visible.
    checkSessionAttributes(taskSes, "fut", m);

    IgniteUuid jobId = jobCtx.getJobId();

    // Publish this job's own attribute set, keyed by its job ID.
    for (int i = 0; i < ATTR_NUM; i ++) {
        String key = createKey(jobId.toString(), m, i);
        String val = createValue(jobId.toString(), m, i);

        taskSes.setAttribute(key, val);
    }

    // Check that attributes just set are present.
    checkSessionAttributes(taskSes, jobId.toString(), m);

    Collection<ComputeJobSibling> siblings = taskSes.getJobSiblings();

    if (log.isInfoEnabled())
        log.info("Got siblings from job [size=" + siblings.size() + ", siblings=" + siblings + ']');

    // Check attributes from siblings.
    for (ComputeJobSibling sibling : taskSes.getJobSiblings()) {
        if (!sibling.getJobId().equals(jobId))
            checkSessionAttributes(taskSes, sibling.getJobId().toString(), m);
    }

    try {
        // Wait indefinitely (timeout 0) for the task to signal completion.
        taskSes.waitForAttribute("done", true, 0);
    }
    catch (InterruptedException e) {
        throw new IgniteException("Got interrupted while waiting for 'done' attribute.", e);
    }

    return null;
}
}
// NOTE(review): disjoint statements excerpted from a test-driver method — the
// enclosing scopes are cut at both edges; code left byte-identical.
fut.getTaskSession().mapFuture().get();

info("Task job siblings [size=" + ses.getJobSiblings().size() + ", siblings=" + ses.getJobSiblings() + ']');

String val = createValue("fut", type, i);

ses.setAttribute(key, val);

for (ComputeJobSibling sibling : ses.getJobSiblings()) {
    info("Checking session attributes for sibling: " + sibling);

// Signal the jobs waiting on the 'done' attribute.
ses.setAttribute("done", true);
// Callable reporting the task name taken from the injected session.
// NOTE(review): the anonymous-class header is outside this view.
@Override public String call() {
    return ses.getTaskName();
}
};
/** {@inheritDoc} */ @Override public Serializable execute() { assert taskSes != null; Object arg = argument(0); assert arg != null; Serializable ser = taskSes.getAttribute(arg); assert ser != null; int val = (Integer)ser + 1; // Generate garbage. for (int i = 0; i < 10; i++) taskSes.setAttribute(arg, i); // Set final value. taskSes.setAttribute(arg, val); if (log.isDebugEnabled()) log.debug("Set session attribute [name=" + arg + ", value=" + val + ']'); return val; } }
/**
 * {@inheritDoc}
 * <p>
 * Waits up to 20 seconds for the test attribute to reach its final value and
 * returns it. A timed-out wait now fails fast instead of silently returning a
 * possibly stale attribute value.
 *
 * @return Value of {@code TEST_ATTR_KEY} from the session.
 * @throws IgniteException If interrupted, or if the attribute did not reach the
 *      expected value in time.
 */
@Override public Serializable reduce(List<ComputeJobResult> results) {
    try {
        if (taskSes.waitForAttribute(TEST_ATTR_KEY, SETS_ATTR_COUNT, 20000)) {
            log.info("Successfully waited for attribute [key=" + TEST_ATTR_KEY +
                ", val=" + SETS_ATTR_COUNT + ']');
        }
        else
            // Previously a timeout was silently ignored; surface it explicitly.
            throw new IgniteException("Timed out waiting for attribute [key=" + TEST_ATTR_KEY +
                ", expected=" + SETS_ATTR_COUNT + ']');
    }
    catch (InterruptedException e) {
        // Restore the interrupt flag before rethrowing as unchecked.
        Thread.currentThread().interrupt();

        throw new IgniteException("Got interrupted while waiting for attribute to be set.", e);
    }

    return taskSes.getAttribute(TEST_ATTR_KEY);
}
}
/**
 * {@inheritDoc}
 * <p>
 * Verifies that the checkpoint saved under {@code CP_KEY} is still present
 * once all jobs have completed.
 */
@Override public Void reduce(List<ComputeJobResult> results) {
    Object cp = ses.loadCheckpoint(CP_KEY);

    assert cp != null;

    return null;
}
}
/**
 * Job body: signals that it has started, waits for all peers to start, then
 * registers a session attribute listener, sets an attribute and waits
 * (bounded) for the listener to fire.
 *
 * @return {@code 0} if the listener collected no attributes, {@code 1} otherwise.
 * @throws IgniteException If interrupted while waiting.
 */
@Override @SuppressWarnings({"UnconditionalWait"}) public Serializable execute() {
    assert taskSes != null;

    if (log.isInfoEnabled())
        log.info("Computing job [job=" + this + ", arg=" + argument(0) + ']');

    startSignal.countDown();

    try {
        // Idiomatic negation instead of '== false'.
        if (!startSignal.await(WAIT_TIME, TimeUnit.MILLISECONDS))
            fail();

        GridTaskSessionAttributeTestListener lsnr = new GridTaskSessionAttributeTestListener(log);

        taskSes.addAttributeListener(lsnr, false);

        if (log.isInfoEnabled())
            log.info("Set attribute 'testName'.");

        taskSes.setAttribute("testName", "testVal");

        // Bounded wait for the listener notification.
        synchronized (lsnr) {
            lsnr.wait(WAIT_TIME);
        }

        return lsnr.getAttributes().isEmpty() ? 0 : 1;
    }
    catch (InterruptedException e) {
        // Restore the interrupt flag before rethrowing as unchecked.
        Thread.currentThread().interrupt();

        throw new IgniteException("Failed to wait for listener due to interruption.", e);
    }
}
});
/**
 * First run: flips the checkpoint flag to {@code false} and rejects execution
 * to trigger failover; the failed-over run then reads {@code false} and
 * completes normally.
 */
@Override public Object execute() {
    X.println("Executing FailoverTestTask job on node " + ignite.configuration().getNodeId());

    Boolean cpVal = ses.loadCheckpoint(CP_KEY);

    assert cpVal != null;

    if (cpVal) {
        // Flip the flag BEFORE throwing so the retried job takes the other branch.
        ses.saveCheckpoint(CP_KEY, false);

        throw new ComputeExecutionRejectedException("Failing over the job.");
    }

    return null;
}
},
// Initial values must be readable (checked twice to verify repeatable reads).
assert GLOBAL_VAL.equals(taskSes.loadCheckpoint(GLOBAL_KEY));
assert SES_VAL.equals(taskSes.loadCheckpoint(SES_KEY));
assert GLOBAL_VAL.equals(taskSes.loadCheckpoint(GLOBAL_KEY));
assert SES_VAL.equals(taskSes.loadCheckpoint(SES_KEY));

// After overwrite each key must yield its own overwritten value
// (BUGFIX: keys were cross-matched against the other scope's value).
assert GLOBAL_VAL_OVERWRITTEN.equals(taskSes.loadCheckpoint(GLOBAL_KEY));
assert SES_VAL_OVERWRITTEN.equals(taskSes.loadCheckpoint(SES_KEY));

// Removing an existing checkpoint reports success
// (BUGFIX: these asserts were negated, contradicting the null loads below).
assert taskSes.removeCheckpoint(GLOBAL_KEY);
assert taskSes.removeCheckpoint(SES_KEY);

// Once removed, both loads must miss.
assert taskSes.loadCheckpoint(GLOBAL_KEY) == null;
assert taskSes.loadCheckpoint(SES_KEY) == null;
// NOTE(review): fragment — the loop/try bodies continue past this view;
// code left byte-identical.
for (int i = 0; i < EMIT_SEQUENCE_LENGTH; i++) {
    try {
        // Publish the sequence index under its own key, then pause so the
        // attribute updates are spread over time.
        taskSes.setAttribute(String.valueOf(i), i);

        sleep(executionDuration);

        Map<?, ?> attrs = taskSes.getAttributes();
// NOTE(review): fragment — the 'if' body continues past this view;
// code left byte-identical.
log.info("Executing job loaded by class loader: " + ldr.getClass().getName());

// Job is expected to run remotely: flag an error if we are on the task node.
if (argument(0) != null && ignite.configuration().getNodeId().equals(taskSes.getTaskNodeId())) {
    log.error("Remote job is executed on local node.");
/**
 * {@inheritDoc}
 * <p>
 * Optionally parks until cancelled, then returns the identity hash of the
 * session class loader so the caller can compare deployments.
 */
@Override public Integer execute() {
    assert ignite.configuration().getNodeId().equals(argument(0));

    if (sleep) {
        try {
            // Park "forever"; cancellation interrupts this sleep.
            Thread.sleep(Long.MAX_VALUE);
        }
        catch (InterruptedException e) {
            log.info("Job has been cancelled. Caught exception: " + e);

            // Preserve the interrupt status for upper layers.
            Thread.currentThread().interrupt();
        }
    }

    ClassLoader ldr = ses.getClassLoader();

    return System.identityHashCode(ldr);
}
}
// NOTE(review): fragment — loop and 'if' bodies continue past this view;
// code left byte-identical.
for (UUID id : waitCtx.getTaskSession().getTopology()) {
    if (id.equals(nodeId)) {
        found = true;