/**
 * Merges the given counters into the local update-counters map, keeping for
 * each partition the tuple with the greater update counter (second element).
 *
 * @param cntrs Update counters.
 */
private void addUpdateCounters(Map<Integer, T2<Long, Long>> cntrs) {
    if (updateCntrs == null)
        updateCntrs = new HashMap<>();

    // For each partition keep whichever tuple has the larger update counter.
    for (Map.Entry<Integer, T2<Long, Long>> e : cntrs.entrySet())
        updateCntrs.merge(e.getKey(), e.getValue(),
            (cur, incoming) -> incoming.get2() > cur.get2() ? incoming : cur);
}
/** {@inheritDoc} */
@Nullable @Override public Map<PartitionHashRecord, List<PartitionEntryHashRecord>> reduce(
    List<ComputeJobResult> results) throws IgniteException {
    Map<PartitionHashRecord, List<PartitionEntryHashRecord>> total = new HashMap<>();

    // Each job result carries a (partition hash record, entry hash records) pair.
    for (ComputeJobResult jobRes : results) {
        T2<PartitionHashRecord, List<PartitionEntryHashRecord>> pair = jobRes.getData();

        total.put(pair.get1(), pair.get2());
    }

    return total;
}
/**
 * Builds a full counters map from a plain java map of per-partition counters.
 *
 * @param map Regular java map with counters.
 * @param partsCnt Total cache partitions.
 * @return Full counters map.
 */
static CachePartitionFullCountersMap fromCountersMap(Map<Integer, T2<Long, Long>> map, int partsCnt) {
    CachePartitionFullCountersMap res = new CachePartitionFullCountersMap(partsCnt);

    // First tuple element is the initial counter, second is the current one.
    map.forEach((part, cntrs) -> {
        res.initialUpdCntrs[part] = cntrs.get1();
        res.updCntrs[part] = cntrs.get2();
    });

    return res;
}
}
/**
 * Gets set of methods with given annotation.
 *
 * @param dep Deployment.
 * @param cls Class in which search for methods.
 * @param annCls Annotation.
 * @return Set of methods with given annotations.
 */
GridResourceMethod[] getMethodsWithAnnotation(@Nullable GridDeployment dep,
    Class<?> cls,
    Class<? extends Annotation> annCls) {
    T2<GridResourceField[], GridResourceMethod[]> members = descriptor(dep, cls).annotatedMembers(annCls);

    // No annotated members cached for this class.
    if (members == null)
        return GridResourceMethod.EMPTY_ARRAY;

    return members.get2();
}
/**
 * @param arg Tuple of the partition hash record and its entry hash records.
 */
private RetrieveConflictValuesJob(T2<PartitionHashRecord, List<PartitionEntryHashRecord>> arg) {
    this.partHashRecord = arg.get1();
    this.entryHashRecords = arg.get2();

    // Partition key is derived from the hash record itself.
    this.partKey = this.partHashRecord.partitionKey();
}
/**
 * @param map Partition ID to partition counters map.
 * @param partsCnt Total cache partitions.
 * @return Partial local counters map.
 */
static CachePartitionPartialCountersMap fromCountersMap(Map<Integer, T2<Long, Long>> map, int partsCnt) {
    CachePartitionPartialCountersMap res = new CachePartitionPartialCountersMap(partsCnt);

    // Entries must be added in ascending partition-ID order, hence the TreeMap.
    for (Map.Entry<Integer, T2<Long, Long>> e : new TreeMap<>(map).entrySet()) {
        T2<Long, Long> cntrs = e.getValue();

        res.add(e.getKey(), cntrs.get1(), cntrs.get2());
    }

    res.trim();

    return res;
}
/** {@inheritDoc} */
@Override public void releaseHistoryForPreloading() {
    // Idiom fix: only the reservation values are needed here, so iterate
    // values() instead of entrySet() (keys were fetched but never used).
    for (T2<Long, WALPointer> reservation : reservedForPreloading.values()) {
        try {
            // Second tuple element is the reserved WAL pointer.
            cctx.wal().release(reservation.get2());
        }
        catch (IgniteCheckedException ex) {
            U.error(log, "Could not release WAL reservation", ex);

            throw new IgniteException(ex);
        }
    }

    reservedForPreloading.clear();
}
// NOTE(review): fragment of an anonymous Callable whose enclosing expression
// starts before this view — applies the closure of the i0-th operation to the
// future result, then the surrounding future is awaited for up to 2 seconds.
@Override public Boolean call() throws Exception { return ops.get(i0).get2().apply(futRes); } }).get(2, SECONDS));
/**
 * @param nodeIdx Node index.
 * @return Tuple with number of primary and backup keys.
 */
private T2<Integer, Integer> offheapKeysCount(int nodeIdx) {
    T2<List<Integer>, List<Integer>> keys = offheapKeys(nodeIdx);

    int primaryCnt = keys.get1().size();
    int backupCnt = keys.get2().size();

    return new T2<>(primaryCnt, backupCnt);
}
/**
 * @param nodeIdx Node index.
 * @return Tuple with number of primary and backup keys.
 */
private T2<Integer, Integer> swapKeysCount(int nodeIdx) {
    T2<List<Integer>, List<Integer>> keys = swapKeys(nodeIdx);

    int primaryCnt = keys.get1().size();
    int backupCnt = keys.get2().size();

    return new T2<>(primaryCnt, backupCnt);
}
/**
 * @param cls Message class.
 * @param nodeName Node name.
 * @return {@code True} if has blocked message.
 */
private boolean hasMessage(Class<?> cls, String nodeName) {
    for (T2<ClusterNode, GridIoMessage> blocked : blockedMsgs) {
        // Skip messages of a different class before touching node attributes
        // (preserves the original short-circuit evaluation order).
        if (blocked.get2().message().getClass() != cls)
            continue;

        if (nodeName.equals(blocked.get1().attribute(ATTR_IGNITE_INSTANCE_NAME)))
            return true;
    }

    return false;
}
/** {@inheritDoc} */
@Override public void onDeferredDelete(GridCacheEntryEx entry, GridCacheVersion ver) {
    assert entry.isNear();

    try {
        // Adding to the bounded queue may evict an older (key, version) pair.
        T2<KeyCacheObject, GridCacheVersion> evictedPair = rmvQueue.add(new T2<>(entry.key(), ver));

        if (evictedPair == null)
            return;

        removeVersionedEntry(evictedPair.get1(), evictedPair.get2());
    }
    catch (InterruptedException ignore) {
        if (log.isDebugEnabled())
            log.debug("Failed to enqueue deleted entry [key=" + entry.key() + ", ver=" + ver + ']');

        // Restore the interrupt flag for callers up the stack.
        Thread.currentThread().interrupt();
    }
}
}
/** {@inheritDoc} */
@Override protected Collection<? extends ComputeJob> split(int gridSize,
    T2<Factory<ComputeJobAdapter>, Factory<Object>> factoriesJobAndArg) throws IgniteException {
    Collection<ComputeJob> jobs = new HashSet<>();

    // Hoist factory lookups out of the loop.
    Factory<ComputeJobAdapter> jobFactory = factoriesJobAndArg.get1();
    Factory<Object> argFactory = factoriesJobAndArg.get2();

    for (int cnt = 0; cnt < MAX_JOB_COUNT; cnt++) {
        ComputeJobAdapter job = jobFactory.create();

        job.setArguments(argFactory.create());

        jobs.add(job);
    }

    return jobs;
}
/** * @throws Exception If failed. */ @Test public void testLoopbackEndpointsRegistration() throws Exception { IgniteConfiguration cfg = gridConfiguration(); cfg.setFileSystemConfiguration( igfsConfiguration(IgfsIpcEndpointType.TCP, IgfsIpcEndpointConfiguration.DFLT_PORT, null) ); G.start(cfg); T2<Integer, Integer> res = checkRegisteredIpcEndpoints(); // One regular enpoint + one management endpoint. assertEquals(2, res.get1().intValue()); assertEquals(0, res.get2().intValue()); }
/**
 * @throws Exception If failed.
 */
@Test
public void testEncryptedCacheDestroy() throws Exception {
    T2<IgniteEx, IgniteEx> grids = startTestGrids(true);

    IgniteEx grid0 = grids.get1();
    IgniteEx grid1 = grids.get2();

    createEncryptedCache(grid0, grid1, cacheName(), null);

    checkEncryptedCaches(grid0, grid1);

    String encCacheName = cacheName();

    grid0.destroyCache(encCacheName);

    checkCacheDestroyed(grid1, encCacheName, null, true);

    // Restart without cleaning persistence: the cache must stay destroyed.
    stopAllGrids(true);

    grids = startTestGrids(false);

    checkCacheDestroyed(grids.get1(), encCacheName, null, true);
}