public DeviceLocal() { int numDevices = Nd4j.getAffinityManager().getNumberOfDevices(); for (int i = 0; i < numDevices; i++) { locksMap.add(new ReentrantReadWriteLock()); } }
/**
 * Removes the object stored for the device bound to the calling thread.
 * Guarded by that device's write lock so it cannot race concurrent readers.
 */
public void clear() {
    int deviceId = Nd4j.getAffinityManager().getDeviceForCurrentThread();
    // Hoist the lock lookup and acquire BEFORE the try block: the standard lock idiom.
    // The original acquired inside try, so a failed lock() would still reach the
    // finally-unlock and throw IllegalMonitorStateException, masking the real error.
    ReentrantReadWriteLock lock = locksMap.get(deviceId);
    lock.writeLock().lock();
    try {
        backingMap.remove(deviceId);
    } finally {
        lock.writeLock().unlock();
    }
}
}
private PerformanceTracker() { // we put in initial holders, one per device val nd = Nd4j.getAffinityManager().getNumberOfDevices(); for (int e = 0; e < nd; e++) { bandwidth.put(e, new AveragingTransactionsHolder()); operations.put(e, new AveragingTransactionsHolder()); } }
/**
 * Stores the given object for the device attached to the calling thread.
 *
 * @param object value to associate with the current device
 */
public void set(T object) {
    final int deviceId = Nd4j.getAffinityManager().getDeviceForCurrentThread();
    set(deviceId, object);
}
/** * This method duplicates array, and stores it to all devices * * @param array */ public void broadcast(INDArray array) { if (array == null) return; Nd4j.getExecutioner().commit(); int numDevices = Nd4j.getAffinityManager().getNumberOfDevices(); for (int i = 0; i < numDevices; i++) { // if current thread equal to this device - we just save it, without duplication if (Nd4j.getAffinityManager().getDeviceForCurrentThread() == i) { set(i, array); } else { set(i, Nd4j.getAffinityManager().replicateToDevice(i, array)); } } } }
/**
 * Returns the object associated with the calling thread's device, if any.
 *
 * @return the device-local value, or {@code null} when nothing was stored
 */
@Nullable
public T get() {
    final int deviceId = Nd4j.getAffinityManager().getDeviceForCurrentThread();
    return get(deviceId);
}
/**
 * Reads up to {@code num} images from HDFS and converts them into a single merged DataSet.
 * The label of each image is derived from the name of its parent directory and one-hot
 * encoded against the {@code labels} list.
 *
 * @param num maximum number of examples to read in this batch
 * @return the merged DataSet, or an empty DataSet when no files were consumed
 * @throws RuntimeException wrapping any I/O or conversion failure
 */
public DataSet convertDataSet(int num) {
    List<DataSet> dataSets = new ArrayList<>();
    FileSystem fs = CommonUtils.openHdfsConnect();
    try {
        int batchNumCount = 0;
        while (batchNumCount != num && fileIterator.hasNext()) {
            ++batchNumCount;
            String fullPath = fileIterator.next();
            // the parent directory name encodes the class label
            Writable labelText = new Text(FilenameUtils.getBaseName((new File(fullPath)).getParent()));
            INDArray label = Nd4j.zeros(1, labels.size()).putScalar(new int[] {0, labels.indexOf(labelText)}, 1);
            INDArray features;
            // try-with-resources: the original leaked the stream when asMatrix() threw
            try (InputStream imageios = fs.open(new Path(fullPath))) {
                features = asMatrix(imageios);
            }
            Nd4j.getAffinityManager().tagLocation(features, AffinityManager.Location.HOST);
            dataSets.add(new DataSet(features, label));
        }
    } catch (Exception e) {
        // preserve the full exception chain; the original threw e.getCause(), which
        // drops the outer exception and NPEs the message when no cause is set
        throw new RuntimeException(e);
    } finally {
        CommonUtils.closeHdfsConnect(fs);
    }

    return dataSets.isEmpty() ? new DataSet() : DataSet.merge(dataSets);
}
// Debug-only trace of this workspace's allocation pressure on the calling thread's
// device: current cycle allocation vs. the historical maximum cycle.
if (isDebug.get()) log.info("Workspace [{}] device_{}, current cycle: {}; max cycle: {}", id, Nd4j.getAffinityManager().getDeviceForCurrentThread(), cycleAllocations.get(), maxCycle.get());
/** * Setup the given byte buffer * for serialization (note that this is for uncompressed INDArrays) * 4 bytes int for rank * 4 bytes for data opType * shape buffer * data buffer * * @param arr the array to setup * @param allocated the byte buffer to setup * @param rewind whether to rewind the byte buffer or nt */ public static void doByteBufferPutUnCompressed(INDArray arr, ByteBuffer allocated, boolean rewind) { // ensure we send data to host memory Nd4j.getExecutioner().commit(); Nd4j.getAffinityManager().ensureLocation(arr, AffinityManager.Location.HOST); ByteBuffer buffer = arr.data().pointer().asByteBuffer().order(ByteOrder.nativeOrder()); ByteBuffer shapeBuffer = arr.shapeInfoDataBuffer().pointer().asByteBuffer().order(ByteOrder.nativeOrder()); //2 four byte ints at the beginning allocated.putInt(arr.rank()); //put data opType next so its self describing allocated.putInt(arr.data().dataType().ordinal()); allocated.put(shapeBuffer); allocated.put(buffer); if (rewind) allocated.rewind(); }
// Capture workspace identity, the memory manager, and the owning device at
// construction time; the device binding is fixed to the creating thread's device.
this.guid = Nd4j.getWorkspaceManager().getUUID(); this.memoryManager = Nd4j.getMemoryManager(); this.deviceId = Nd4j.getAffinityManager().getDeviceForCurrentThread();
// one training worker per available compute device
.workers(Nd4j.getAffinityManager().getNumberOfDevices())
// prefetch buffer scaled to 16 batches per device; one worker per device;
// scores averaged across workers every 10 iterations before reporting
.prefetchBuffer(16 * Nd4j.getAffinityManager().getNumberOfDevices()) .reportScoreAfterAveraging(true) .averagingFrequency(10) .workers(Nd4j.getAffinityManager().getNumberOfDevices()) .build();
// one training worker per available compute device
.workers(Nd4j.getAffinityManager().getNumberOfDevices())
/**
 * Creates a daemon training thread, names it after its worker index, and pins it
 * to a compute device in round-robin order.
 */
@Override
public Thread newThread(@NotNull Runnable r) {
    Thread thread = Executors.defaultThreadFactory().newThread(r);
    final int workerId = workerCounter.getAndIncrement();
    thread.setName("ParallelWrapper training thread " + workerId);
    thread.setDaemon(true);
    thread.setUncaughtExceptionHandler(handler);
    // round-robin workers across the available compute devices
    final int deviceCount = Nd4j.getAffinityManager().getNumberOfDevices();
    Nd4j.getAffinityManager().attachThreadToDevice(thread, workerId % deviceCount);
    return thread;
} });
/**
 * Initializes the per-device cache-size counters, one AtomicLong per device.
 */
public void init() {
    final int deviceCount = Nd4j.getAffinityManager().getNumberOfDevices();
    deviceCachedAmount = new ArrayList<>();
    for (int device = 0; device < deviceCount; device++) {
        deviceCachedAmount.add(new AtomicLong());
    }
}
public DeviceTADManager() { int numDevices = Nd4j.getAffinityManager().getNumberOfDevices(); for (int i = 0; i < numDevices; i++) { tadCache.add(i, new ConcurrentHashMap<TadDescriptor, Pair<DataBuffer, DataBuffer>>()); } }
/**
 * Creates a daemon training thread, names it by worker index, and attaches it
 * to a device chosen round-robin over the available devices.
 */
@Override
public Thread newThread(@NotNull Runnable r) {
    final int workerId = workerCounter.getAndIncrement();
    Thread worker = Executors.defaultThreadFactory().newThread(r);
    worker.setName("ParallelWrapper training thread " + workerId);
    worker.setDaemon(true);
    worker.setUncaughtExceptionHandler(handler);
    // distribute workers evenly across devices
    Nd4j.getAffinityManager().attachThreadToDevice(worker,
            workerId % Nd4j.getAffinityManager().getNumberOfDevices());
    return worker;
} });
public EventsProvider() { int numDev = Nd4j.getAffinityManager().getNumberOfDevices(); for (int i = 0; i < numDev; i++) { queue.add(new ConcurrentLinkedQueue<cudaEvent_t>()); } }
/**
 * Builds the configured MagicQueue. When no bucket count was set,
 * it defaults to one bucket per compute device.
 *
 * @return the constructed queue
 */
public MagicQueue build() {
    if (numberOfBuckets < 1) {
        // default: one bucket per device
        numberOfBuckets = Nd4j.getAffinityManager().getNumberOfDevices();
    }

    MagicQueue result = new MagicQueue(numberOfBuckets, capacity, type);
    result.mode = this.mode;
    return result;
}
}
/**
 * Looks up the value stored for the device the calling thread is attached to.
 *
 * @return the device-local value, or {@code null} when nothing was stored
 */
@Nullable
public T get() {
    int currentDevice = Nd4j.getAffinityManager().getDeviceForCurrentThread();
    return get(currentDevice);
}