/**
 * Sets port range router will be allowed to try.
 * <p>
 * Note: zero-range means only user-specified port will be used.
 *
 * @param portRange Port range.
 * @see #DFLT_PORT_RANGE
 */
public void setPortRange(int portRange) {
    A.ensure(portRange >= 0, "portRange >= 0");

    this.portRange = portRange;
}
/**
 * Initializes tuple with given object count.
 *
 * @param cnt Count of objects to be stored in the tuple.
 */
public GridTupleV(int cnt) {
    A.ensure(cnt > 0, "cnt > 0");

    vals = new Object[cnt];
}
/**
 * Retrieves value at given index.
 *
 * @param i Index of the value to get (must be non-negative and less than internal array length).
 * @param <V> Value type.
 * @return Value at given index.
 */
@SuppressWarnings({"unchecked"})
public <V> V get(int i) {
    // Check both bounds: the original only checked the upper bound, so a
    // negative index surfaced as ArrayIndexOutOfBoundsException instead of a
    // precondition failure (cf. the 'idx >= 0 && idx < size()' pattern used
    // elsewhere in this codebase).
    A.ensure(i >= 0 && i < vals.length, "i >= 0 && i < vals.length");

    return (V)vals[i];
}
/**
 * Sets read stripe size. Defines number of file channels to be used concurrently. Default is equal to number of
 * CPU cores available to this JVM.
 *
 * @param readStripesNum Read stripe number ({@code -1} to use the default; otherwise must be a
 *      positive power of two).
 */
public void setReadStripesNumber(int readStripesNum) {
    // -1 means "use default". The original check also accepted 0 and
    // Integer.MIN_VALUE (both satisfy x & (x - 1) == 0), contradicting the
    // error message; require a strictly positive power of two otherwise.
    A.ensure(readStripesNum == -1 || (readStripesNum > 0 && (readStripesNum & (readStripesNum - 1)) == 0),
        "readStripesNum must be positive and power of two");

    this.readStripesNum = readStripesNum;
}
/** {@inheritDoc} */
@Override public GridCacheQuery<T> pageSize(int pageSize) {
    A.ensure(pageSize > 0, "pageSize > 0");

    this.pageSize = pageSize;

    return this;
}
/**
 * Constructs random eviction policy with maximum size.
 *
 * @param max Maximum allowed size of cache before entry will start getting evicted.
 */
public GridCacheRandomEvictionPolicy(int max) {
    A.ensure(max > 0, "max > 0");

    this.max = max;
}
/**
 * Constructs LRU eviction policy with maximum size.
 *
 * @param max Maximum allowed size of cache before entry will start getting evicted.
 */
public GridCacheLruEvictionPolicy(int max) {
    A.ensure(max > 0, "max > 0");

    this.max = max;
}
/**
 * Constructs FIFO eviction policy with maximum size. Empty entries are allowed.
 *
 * @param max Maximum allowed size of cache before entry will start getting evicted.
 */
public GridCacheFifoEvictionPolicy(int max) {
    A.ensure(max > 0, "max > 0");

    this.max = max;
}
/**
 * Creates identity hash set of given size.
 *
 * @param size Start size for the set (must be non-negative).
 */
public GridIdentityHashSet(int size) {
    // Validate BEFORE constructing the backing map: the original called
    // super(...) first, so a negative size surfaced as IdentityHashMap's own
    // IllegalArgumentException and the A.ensure check never ran.
    super(new IdentityHashMap<E, Object>(checkSize(size)));
}

/**
 * Validates that set size is non-negative.
 *
 * @param size Size to check.
 * @return The given size, if valid.
 */
private static int checkSize(int size) {
    A.ensure(size >= 0, "size >= 0");

    return size;
}
/**
 * Sets implementation of node load probe. By default {@link GridAdaptiveProcessingTimeLoadProbe}
 * is used which proportionally distributes load based on the average job execution
 * time on every node.
 *
 * @param probe Implementation of node load probe (must not be {@code null}).
 */
@GridSpiConfiguration(optional = true)
public void setLoadProbe(GridAdaptiveLoadProbe probe) {
    // Use the dedicated null-check helper, consistent with other setters in
    // this codebase (e.g. A.notNull(m, "m") in putAll).
    A.notNull(probe, "probe");

    this.probe = probe;
}
/** {@inheritDoc} */
@GridSpiConfiguration(optional = true)
@Override public void setWaitingJobsNumber(int waitJobsNum) {
    // Message fixed to reference the actual parameter name ('waitJobsNum',
    // not 'waitingJobsNum') so precondition failures point at the right argument.
    A.ensure(waitJobsNum >= 0, "waitJobsNum >= 0");

    this.waitJobsNum = waitJobsNum;
}
/**
 * Gets value at given index within internal list. Note that this method will iterate through
 * the list to get a value at the specified index.
 *
 * @param idx Index to get value at (must be non-negative and less than {@link #size()}).
 * @return Value at give index.
 */
public V get(int idx) {
    A.ensure(idx >= 0 && idx < size(), "idx >= 0 && idx < size()");

    return vals.get(idx);
}
/** {@inheritDoc} */
@GridSpiConfiguration(optional = true)
@Override public void setWaitJobsThreshold(int waitJobsThreshold) {
    A.ensure(waitJobsThreshold >= 0, "waitJobsThreshold >= 0");

    this.waitJobsThreshold = waitJobsThreshold;
}
/** {@inheritDoc} */
@Override public void putAll(Map<? extends V1, ? extends V2> m) {
    A.notNull(m, "m");
    A.ensure(m.size() <= 1, "m.size() <= 1");

    for (Map.Entry<? extends V1, ? extends V2> entry : m.entrySet())
        put(entry.getKey(), entry.getValue());
}
/** {@inheritDoc} */
@Override public void timeInterval(long timeInterval) {
    A.ensure(timeInterval >= 0, "timeInterval >= 0");

    if (guard.enterBusy()) {
        try {
            this.timeInterval = timeInterval;
        }
        finally {
            guard.leaveBusy();
        }
    }
    else
        throw new IllegalStateException("Continuous query can't be changed after it was executed.");
}
/** {@inheritDoc} */
@Override public void autoFlushFrequency(long autoFlushFreq) {
    A.ensure(autoFlushFreq >= 0, "autoFlushFreq >= 0");

    long prev = this.autoFlushFreq;

    if (autoFlushFreq == prev)
        return;

    this.autoFlushFreq = autoFlushFreq;

    // Register with the flush queue on 0 -> non-zero transitions,
    // deregister on transitions back to 0.
    if (prev == 0 && autoFlushFreq != 0)
        flushQ.add(this);
    else if (autoFlushFreq == 0)
        flushQ.remove(this);
}
/** {@inheritDoc} */
@Override public void addEvents(Collection<?> evts) throws GridException {
    A.ensure(!F.isEmpty(evts), "evts cannot be null or empty");

    addEventsToStage(firstStage, evts);
}
/** {@inheritDoc} */
@Override public R get(long timeout, TimeUnit unit) throws GridClientException {
    A.ensure(timeout >= 0, "timeout >= 0");

    try {
        if (!done && !tryAcquireSharedNanos(0, unit.toNanos(timeout)))
            throw new GridClientFutureTimeoutException("Failed to get future result due to waiting timed out.");
    }
    catch (InterruptedException e) {
        // Restore the interrupt flag before wrapping: the original swallowed
        // the thread's interrupt status, hiding the interruption from callers
        // further up the stack.
        Thread.currentThread().interrupt();

        throw new GridClientException("Operation was interrupted.", e);
    }

    return getResult();
}
/** {@inheritDoc} */
@Override public GridCompute withTimeout(long timeout) {
    A.ensure(timeout >= 0, "timeout >= 0");

    guard();

    try {
        ctx.task().setThreadContext(TC_TIMEOUT, timeout);
    }
    finally {
        unguard();
    }

    return this;
}
/** {@inheritDoc} */
@Override public GridNode mapPartitionToNode(int part) {
    A.ensure(part >= 0 && part < partitions(), "part >= 0 && part < total partitions");

    return F.first(cctx.affinity().nodes(part, topologyVersion()));
}