/** {@inheritDoc} */
@Override protected void body() throws InterruptedException {
    // Resolve addresses via a multicast request; only the first tuple element
    // (the resolved address list) is kept, the second element is discarded.
    addrs = requestAddresses(mcastAddr, sockAddr).get1();
}
/** {@inheritDoc} */
@Override public Collection<String> fields() {
    // Project the (name, index) tuples down to just the field names.
    Collection<String> names = new ArrayList<>(fields.size());

    for (T2<String, Integer> fld : fields)
        names.add(fld.get1());

    return names;
}
/** {@inheritDoc} */
@Override public Collection<String> fields() {
    // Collect only the first element (the field name) of each tuple.
    Collection<String> fieldNames = new ArrayList<>(fields.size());

    for (T2<String, Integer> tuple : fields)
        fieldNames.add(tuple.get1());

    return fieldNames;
}
/**
 * Starts grid with given configuration.
 *
 * @param cfg Grid configuration. This cannot be {@code null}.
 * @return Started grid.
 * @throws IgniteCheckedException If grid could not be started. This exception will be thrown
 *      also if named grid has already been started, since this method delegates with
 *      {@code failIfStarted == true}.
 */
public static Ignite start(IgniteConfiguration cfg) throws IgniteCheckedException {
    // failIfStarted == true: starting a grid whose name is already taken is an error.
    return start(cfg, null, true).get1();
}
/**
 * Starts a grid with given configuration. If the grid is already started and
 * {@code failIfStarted} is set to {@code true} an exception will be thrown.
 *
 * @param cfg Grid configuration. This cannot be {@code null}.
 * @param failIfStarted When flag is {@code true} and grid with specified name has been already started
 *      the exception is thrown. Otherwise the existing instance of the grid is returned.
 * @return Started grid or existing grid.
 * @throws IgniteCheckedException If grid could not be started. Also thrown if a grid with the
 *      same name has already been started and {@code failIfStarted} is {@code true}.
 */
public static Ignite start(IgniteConfiguration cfg, boolean failIfStarted) throws IgniteCheckedException {
    // Only the first tuple element (the grid instance) is relevant to callers.
    return start(cfg, null, failIfStarted).get1();
}
/** {@inheritDoc} */
@Nullable @Override public Map<PartitionHashRecord, List<PartitionEntryHashRecord>> reduce(
    List<ComputeJobResult> results) throws IgniteException {
    // Merge per-node results into a single map keyed by partition hash record.
    Map<PartitionHashRecord, List<PartitionEntryHashRecord>> reduced = new HashMap<>();

    for (ComputeJobResult jobRes : results) {
        T2<PartitionHashRecord, List<PartitionEntryHashRecord>> data = jobRes.getData();

        reduced.put(data.get1(), data.get2());
    }

    return reduced;
}
/**
 * Cancels thread execution and completes all queued notification futures
 * so that no waiter is left hanging after shutdown.
 */
@Override public synchronized void cancel() {
    super.cancel();

    // Drain the queue, completing each pending future.
    for (T2<GridFutureAdapter, Runnable> pending = queue.poll(); pending != null; pending = queue.poll())
        pending.get1().onDone();
}
/**
 * Builds a full counters map from a regular java map with per-partition counters.
 *
 * @param map Regular java map with counters (partition ID -> (initial counter, update counter)).
 * @param partsCnt Total cache partitions.
 * @return Full counters map.
 */
static CachePartitionFullCountersMap fromCountersMap(Map<Integer, T2<Long, Long>> map, int partsCnt) {
    CachePartitionFullCountersMap res = new CachePartitionFullCountersMap(partsCnt);

    for (Map.Entry<Integer, T2<Long, Long>> entry : map.entrySet()) {
        int part = entry.getKey();
        T2<Long, Long> cntrPair = entry.getValue();

        res.initialUpdCntrs[part] = cntrPair.get1();
        res.updCntrs[part] = cntrPair.get2();
    }

    return res;
}
}
/**
 * @param arg Tuple of the partition hash record and its conflicting entry hash records.
 */
private RetrieveConflictValuesJob(T2<PartitionHashRecord, List<PartitionEntryHashRecord>> arg) {
    entryHashRecords = arg.get2();
    partHashRecord = arg.get1();

    // Partition key is derived from the hash record assigned above.
    partKey = partHashRecord.partitionKey();
}
/**
 * Builds a partial local counters map from a regular java map.
 *
 * @param map Partition ID to partition counters map.
 * @param partsCnt Total cache partitions.
 * @return Partial local counters map.
 */
static CachePartitionPartialCountersMap fromCountersMap(Map<Integer, T2<Long, Long>> map, int partsCnt) {
    CachePartitionPartialCountersMap res = new CachePartitionPartialCountersMap(partsCnt);

    // TreeMap iteration yields entries in ascending partition order.
    for (Map.Entry<Integer, T2<Long, Long>> entry : new TreeMap<>(map).entrySet()) {
        T2<Long, Long> cntrs = entry.getValue();

        res.add(entry.getKey(), cntrs.get1(), cntrs.get2());
    }

    res.trim();

    return res;
}
/**
 * @param nodeIdx Node index.
 * @return Tuple with number of primary and backup keys.
 */
private T2<Integer, Integer> offheapKeysCount(int nodeIdx) {
    T2<List<Integer>, List<Integer>> keys = offheapKeys(nodeIdx);

    int primaryCnt = keys.get1().size();
    int backupCnt = keys.get2().size();

    return new T2<>(primaryCnt, backupCnt);
}
/**
 * @param nodeIdx Node index.
 * @return Tuple with number of primary and backup keys.
 */
private T2<Integer, Integer> swapKeysCount(int nodeIdx) {
    T2<List<Integer>, List<Integer>> keys = swapKeys(nodeIdx);

    int primaryCnt = keys.get1().size();
    int backupCnt = keys.get2().size();

    return new T2<>(primaryCnt, backupCnt);
}
/**
 * @param cls Message class.
 * @param nodeName Node name.
 * @return {@code True} if has blocked message.
 */
private boolean hasMessage(Class<?> cls, String nodeName) {
    for (T2<ClusterNode, GridIoMessage> blocked : blockedMsgs) {
        // Cheap class check first; skip non-matching messages early.
        if (blocked.get2().message().getClass() != cls)
            continue;

        if (nodeName.equals(blocked.get1().attribute(ATTR_IGNITE_INSTANCE_NAME)))
            return true;
    }

    return false;
}
/** {@inheritDoc} */
@Override protected Collection<? extends ComputeJob> split(int gridSize,
    T2<Factory<ComputeJobAdapter>, Factory<Object>> factoriesJobAndArg) throws IgniteException {
    Collection<ComputeJob> jobs = new HashSet<>();

    Factory<ComputeJobAdapter> jobFactory = factoriesJobAndArg.get1();
    Factory<Object> argFactory = factoriesJobAndArg.get2();

    // Create MAX_JOB_COUNT jobs, each with a freshly created argument.
    for (int cnt = 0; cnt < MAX_JOB_COUNT; cnt++) {
        ComputeJobAdapter job = jobFactory.create();

        job.setArguments(argFactory.create());

        jobs.add(job);
    }

    return jobs;
}
/**
 * Stops blocking and flushes all previously blocked messages to their
 * original destinations.
 */
void stopBlock() {
    synchronized (this) {
        blockNodeId = null;

        for (T2<ClusterNode, GridIoMessage> blocked : blockedMsgs) {
            log.info("Send blocked message: " + blocked.get2().message());

            super.sendMessage(blocked.get1(), blocked.get2());
        }
    }
}
}
/**
 * Verifies that starting a node with a TCP IGFS IPC endpoint on the default
 * port registers the expected number of IPC endpoints.
 *
 * @throws Exception If failed.
 */
@Test
public void testLoopbackEndpointsRegistration() throws Exception {
    IgniteConfiguration cfg = gridConfiguration();

    cfg.setFileSystemConfiguration(
        igfsConfiguration(IgfsIpcEndpointType.TCP, IgfsIpcEndpointConfiguration.DFLT_PORT, null)
    );

    G.start(cfg);

    T2<Integer, Integer> res = checkRegisteredIpcEndpoints();

    // One regular endpoint + one management endpoint.
    assertEquals(2, res.get1().intValue());
    assertEquals(0, res.get2().intValue());
}