/**
 * Atomically adjusts the reference count stored alongside the lock for the given file,
 * creating a fresh entry (new ReentrantReadWriteLock, count 0) if none exists yet.
 *
 * @param file the file whose lock-map entry should be updated
 * @param update function applied to the current count to produce the new count
 * @return the ReadWriteLock associated with the file
 */
private ReadWriteLock updateCount(final File file, final Function<Integer, Integer> update) {
    final String key = getMapKey(file);

    // Optimistic compare-and-swap loop: retry until the (lock, count) tuple is
    // replaced without a concurrent modification slipping in between.
    while (true) {
        final Tuple<ReadWriteLock, Integer> current =
                lockMap.computeIfAbsent(key, k -> new Tuple<>(new ReentrantReadWriteLock(), 0));
        final Tuple<ReadWriteLock, Integer> replacement =
                new Tuple<>(current.getKey(), update.apply(current.getValue()));
        if (lockMap.replace(key, current, replacement)) {
            return replacement.getKey();
        }
    }
}
/**
 * Releases the write lock held for the given file and decrements the reference count
 * kept alongside the lock in the map. When this caller holds the last reference, the
 * entry is removed entirely. In both branches the map mutation (remove/replace) must
 * succeed BEFORE the unlock, so concurrent acquirers never observe an unlocked lock
 * paired with a stale count; a failed mutation means another thread raced us and we retry.
 *
 * @param file the file whose write lock is being released
 * @throws IllegalMonitorStateException if no lock entry exists for the file
 */
public void releaseWriteLock(final File file) {
    final String key = getMapKey(file);
    boolean updated = false;
    while (!updated) {
        final Tuple<ReadWriteLock, Integer> tuple = lockMap.get(key);
        if (tuple == null) {
            // No entry: the caller never acquired the lock, or already fully released it.
            throw new IllegalMonitorStateException("Lock is not owned");
        }

        // If this is the only reference to the lock, remove it from the map and then unlock.
        if (tuple.getValue() <= 1) {
            updated = lockMap.remove(key, tuple);
            if (updated) {
                tuple.getKey().writeLock().unlock();
            }
        } else {
            // Others still reference this lock: just decrement the count; a failed
            // replace means a concurrent update changed the tuple, so loop and retry.
            final Tuple<ReadWriteLock, Integer> updatedTuple = new Tuple<>(tuple.getKey(), tuple.getValue() - 1);
            updated = lockMap.replace(key, tuple, updatedTuple);
            if (updated) {
                tuple.getKey().writeLock().unlock();
            }
        }
    }
}
/**
 * Removes every timing entry whose creation timestamp is older than the given cutoff,
 * along with its cached directory-canonicalization entry, and recomputes the earliest
 * creation timestamp still held.
 *
 * @param cutoff epoch milliseconds; entries created before this instant are purged
 */
@Override
public synchronized void purgeTimingInfo(final long cutoff) {
    // SLF4J accepts arguments directly; the explicit Object[] wrapping the original
    // used is redundant allocation. Log message text is intentionally unchanged.
    logger.debug("Purging any entries from Performance Tracker that is older than {}", new Date(cutoff));

    final Iterator<Map.Entry<Tuple<String, String>, TimingInfo>> itr = directoryToTimingInfo.entrySet().iterator();
    int purgedCount = 0;
    long earliestTimestamp = System.currentTimeMillis();
    while (itr.hasNext()) {
        final Map.Entry<Tuple<String, String>, TimingInfo> entry = itr.next();
        final TimingInfo timingInfo = entry.getValue();
        final long creationTime = timingInfo.getCreationTimestamp();
        if (creationTime < cutoff) {
            // Iterator.remove avoids ConcurrentModificationException while purging in place.
            itr.remove();
            purgedCount++;
            directoryCanonicalization.remove(entry.getKey().getKey());
        } else {
            // Entry survives: it may become the new earliest-held timestamp.
            earliestTimestamp = Math.min(earliestTimestamp, creationTime);
        }
    }

    this.earliestTimestamp = earliestTimestamp;
    logger.debug("Purged {} entries from Performance Tracker; now holding {} entries", purgedCount, directoryToTimingInfo.size());
}
/**
 * Collects every distinct component type referenced by the given versioned process group
 * and returns the temp-component instances whose classes carry the Restricted annotation.
 *
 * @param group the versioned process group to inspect
 * @param serviceFacade facade used to instantiate temp components for inspection
 * @return the set of restricted components found in the group
 * @throws NiFiCoreException if a component type cannot be instantiated
 */
public static Set<ConfigurableComponent> getRestrictedComponents(final VersionedProcessGroup group, final NiFiServiceFacade serviceFacade) {
    final Set<Tuple<String, BundleCoordinate>> componentTypes = new HashSet<>();
    populateComponentTypes(group, componentTypes);

    final Set<ConfigurableComponent> restrictedComponents = new HashSet<>();
    for (final Tuple<String, BundleCoordinate> componentType : componentTypes) {
        final ConfigurableComponent component = serviceFacade.getTempComponent(componentType.getKey(), componentType.getValue());
        if (component == null) {
            throw new NiFiCoreException("Could not create an instance of component " + componentType.getKey()
                    + " using bundle coordinates " + componentType.getValue());
        }

        // Restriction is declared via a class-level annotation on the component implementation.
        if (component.getClass().isAnnotationPresent(Restricted.class)) {
            restrictedComponents.add(component);
        }
    }

    return restrictedComponents;
}
/**
 * Formats a (database, table) tuple as a single table-name string by delegating
 * to the two-argument overload.
 *
 * @param tableName tuple whose key is the database name and value is the table name
 * @return the combined table-name string
 */
public static String toTableNameStr(Tuple<String, String> tableName) {
    final String databaseName = tableName.getKey();
    final String simpleName = tableName.getValue();
    return toTableNameStr(databaseName, simpleName);
}
/**
 * Drains provenance events from the shared queue and indexes them until the queue is
 * empty AND the producer has signaled completion via {@code finishedAdding}. Polling
 * uses a short timeout so the loop can re-check the termination condition.
 * Returns early (null) if the indexing failure count reaches the configured maximum.
 */
@Override
public Object call() throws IOException {
    while (!eventQueue.isEmpty() || !finishedAdding.get()) {
        try {
            final Tuple<StandardProvenanceEventRecord, Integer> tuple;
            try {
                // Short poll so we periodically re-evaluate the loop condition.
                tuple = eventQueue.poll(10, TimeUnit.MILLISECONDS);
            } catch (final InterruptedException ie) {
                // Restore the interrupt flag and keep draining; shutdown is governed
                // by the queue/finishedAdding condition, not by interruption.
                Thread.currentThread().interrupt();
                continue;
            }

            if (tuple == null) {
                continue;
            }

            // Key is the event record; value is its block index within the record file.
            indexingAction.index(tuple.getKey(), indexWriter.getIndexWriter(), tuple.getValue());
        } catch (final Throwable t) {
            logger.error("Failed to index Provenance Event for " + writerFile + " to " + indexingDirectory, t);
            // Give up entirely after too many consecutive-or-cumulative failures.
            if (indexingFailureCount.incrementAndGet() >= MAX_INDEXING_FAILURE_COUNT) {
                return null;
            }
        }
    }

    return null;
}
}; // closes the enclosing anonymous class instantiation
/**
 * Serializes the key/value pair and stores it in Redis only if the key is absent (SETNX).
 * When the value was actually set and a TTL is configured (not -1), the expiry is applied.
 *
 * @return true if the key was absent and the value was stored, false otherwise
 * @throws IOException if serialization or the Redis operation fails
 */
@Override
public <K, V> boolean putIfAbsent(final K key, final V value, final Serializer<K> keySerializer, final Serializer<V> valueSerializer) throws IOException {
    return withConnection(redisConnection -> {
        final Tuple<byte[], byte[]> kv = serialize(key, value, keySerializer, valueSerializer);
        final boolean wasSet = redisConnection.setNX(kv.getKey(), kv.getValue());

        // Only apply the TTL when this call actually created the entry.
        if (ttl != -1L && wasSet) {
            redisConnection.expire(kv.getKey(), ttl);
        }
        return wasSet;
    });
}
// Track the most recent index per storage directory: replace the entry when none
// exists yet or when this index's start time is later than the recorded one.
// NOTE(review): fragment — the enclosing method is not fully visible here.
if (tuple == null || startTime > tuple.getKey()) {
    latestIndexByStorageDir.put(storageDir, new Tuple<>(startTime, indexLoc));
// Refresh the stored Last-Modified state for this URL when the stored timestamp is stale.
Tuple<String, String> storedLastModifiedTuple = parseStateValue(workingMap.get(LAST_MODIFIED + ":" + url));
if (Long.parseLong(storedLastModifiedTuple.getKey()) < currentTime) {
    workingMap.put(LAST_MODIFIED + ":" + url, currentTime + ":" + receivedLastModified.getValue());
    changed = true;
// NOTE(review): this second declaration reuses the name 'storedLastModifiedTuple' even
// though it holds the ETag state; if both declarations share one scope this will not
// compile — verify against the full method and consider renaming to 'storedEtagTuple'.
Tuple<String, String> storedLastModifiedTuple = parseStateValue(workingMap.get(ETAG + ":" + url));
if (Long.parseLong(storedLastModifiedTuple.getKey()) < currentTime) {
    workingMap.put(ETAG + ":" + url, currentTime + ":" + receivedEtag.getValue());
    changed = true;
// Derive the transport protocol and routing name from the parsed property-key tuple.
route.protocol = SiteToSiteTransportProtocol.valueOf(protocolAndRoutingName.getKey().toUpperCase());
route.name = protocolAndRoutingName.getValue();
// Resolve each routing-config entry (config name -> property key) against the loaded
// properties. NOTE(review): fragment — the enclosing method is not fully visible here.
routeDefinition.getValue().forEach(routingConfigNameAndPropertyKey -> {
    final String routingConfigName = routingConfigNameAndPropertyKey.getKey();
    final String propertyKey = routingConfigNameAndPropertyKey.getValue();
    final String routingConfigValue = properties.getProperty(propertyKey);
// The tuple's key carries any exception raised while obtaining the reader; propagate it
// as a lookup failure, including the coordinates for context and preserving the cause.
final Exception exception = errOrReader.get().getKey();
if (exception != null) {
    throw new LookupFailureException(String.format("Failed to lookup with %s", coordinates), exception);
private SchemaVersionInfo getSchemaVersionInfo(final SchemaRegistryClient client, final SchemaVersionKey key) throws org.apache.nifi.schema.access.SchemaNotFoundException { try { // Try to fetch the SchemaVersionInfo from the cache. final Tuple<SchemaVersionInfo, Long> timestampedVersionInfo = schemaVersionByKeyCache.get(key); // Determine if the timestampedVersionInfo is expired boolean fetch = false; if (timestampedVersionInfo == null) { fetch = true; } else { final long minTimestamp = System.nanoTime() - versionInfoCacheNanos; fetch = timestampedVersionInfo.getValue() < minTimestamp; } // If not expired, use what we got from the cache if (!fetch) { return timestampedVersionInfo.getKey(); } // schema version info was expired or not found in cache. Fetch from schema registry final SchemaVersionInfo versionInfo = client.getSchemaVersionInfo(key); if (versionInfo == null) { throw new org.apache.nifi.schema.access.SchemaNotFoundException("Could not find schema with name '" + key.getSchemaName() + "' and version " + key.getVersion()); } // Store new version in cache. final Tuple<SchemaVersionInfo, Long> tuple = new Tuple<>(versionInfo, System.nanoTime()); schemaVersionByKeyCache.put(key, tuple); return versionInfo; } catch (final SchemaNotFoundException e) { throw new org.apache.nifi.schema.access.SchemaNotFoundException(e); } }
/**
 * Builds the OkHttpClient used for cluster node communication, applying the configured
 * connection/read timeouts, redirect following, a bounded connection pool, and TLS
 * when the properties yield an SSL socket factory / trust manager pair.
 *
 * @param properties NiFi properties supplying timeouts, pool size, and TLS configuration
 * @return the configured OkHttpClient
 */
private OkHttpClient createOkHttpClient(final NiFiProperties properties) {
    final String connectionTimeout = properties.getClusterNodeConnectionTimeout();
    final long connectionTimeoutMs = FormatUtils.getTimeDuration(connectionTimeout, TimeUnit.MILLISECONDS);
    final String readTimeout = properties.getClusterNodeReadTimeout();
    final long readTimeoutMs = FormatUtils.getTimeDuration(readTimeout, TimeUnit.MILLISECONDS);

    // Use the Builder directly instead of instantiating a throwaway OkHttpClient
    // just to call newBuilder() on it.
    final OkHttpClient.Builder okHttpClientBuilder = new OkHttpClient.Builder();
    okHttpClientBuilder.connectTimeout(connectionTimeoutMs, TimeUnit.MILLISECONDS);
    okHttpClientBuilder.readTimeout(readTimeoutMs, TimeUnit.MILLISECONDS);
    okHttpClientBuilder.followRedirects(true);
    final int connectionPoolSize = properties.getClusterNodeMaxConcurrentRequests();
    okHttpClientBuilder.connectionPool(new ConnectionPool(connectionPoolSize, 5, TimeUnit.MINUTES));

    // TLS is optional: only configured when the properties produce a factory/trust-manager pair.
    final Tuple<SSLSocketFactory, X509TrustManager> tuple = createSslSocketFactory(properties);
    if (tuple != null) {
        okHttpClientBuilder.sslSocketFactory(tuple.getKey(), tuple.getValue());
    }

    return okHttpClientBuilder.build();
}
// Cache hit that is still fresh: return the cached SchemaVersionInfo directly.
return timestampedVersionInfo.getKey();
/**
 * Validates that the input names an XSLT stylesheet which Saxon can compile.
 * The most recent (input, result) pair is cached so repeated validation of the
 * same value does not recompile the stylesheet.
 *
 * @param subject the property being validated
 * @param input path to the candidate stylesheet file
 * @param validationContext the validation context (unused)
 * @return a ValidationResult that is valid iff the stylesheet compiled cleanly
 */
@Override
public ValidationResult validate(final String subject, final String input, final ValidationContext validationContext) {
    // Fast path: reuse the cached outcome when the same input is validated again.
    final Tuple<String, ValidationResult> previous = this.cachedResult;
    if (previous != null && previous.getKey().equals(input)) {
        return previous.getValue();
    }

    final File stylesheet = new File(input);
    final TransformerFactory factory = new net.sf.saxon.TransformerFactoryImpl();
    final StreamSource styleSource = new StreamSource(stylesheet);

    // Attempt to compile the stylesheet; any failure becomes the validation explanation.
    String error = null;
    try {
        factory.newTransformer(styleSource);
    } catch (final Exception e) {
        error = e.toString();
    }

    final Tuple<String, ValidationResult> freshResult = new Tuple<>(input, new ValidationResult.Builder()
            .input(input)
            .subject(subject)
            .valid(error == null)
            .explanation(error)
            .build());
    this.cachedResult = freshResult;
    return freshResult.getValue();
}
} // closes the enclosing anonymous class instantiation
/**
 * Serializes the key/value pair and writes it to Redis as an upsert, applying the
 * configured TTL (in seconds).
 *
 * @throws IOException if serialization or the Redis operation fails
 */
@Override
public <K, V> void put(final K key, final V value, final Serializer<K> keySerializer, final Serializer<V> valueSerializer) throws IOException {
    withConnection(redisConnection -> {
        final Tuple<byte[], byte[]> entry = serialize(key, value, keySerializer, valueSerializer);
        final byte[] serializedKey = entry.getKey();
        final byte[] serializedValue = entry.getValue();
        redisConnection.set(serializedKey, serializedValue, Expiration.seconds(ttl), SetOption.upsert());
        return null;
    });
}
// Record the queue (tuple key is its Atlas object id) as an output of this flow path.
final AtlasObjectId queueId = queueTuple.getKey();
path.getOutputs().add(queueId);
/**
 * Builds an Atlas table Referenceable for the given cluster and (database, table) tuple.
 *
 * @param clusterName the cluster the table belongs to
 * @param tableName tuple whose key is the database name and value is the table name
 * @return a Referenceable of TYPE_TABLE with name, qualified name, and database set
 */
protected Referenceable createTableRef(String clusterName, Tuple<String, String> tableName) {
    final String databaseName = tableName.getKey();
    final String simpleTableName = tableName.getValue();

    final Referenceable ref = new Referenceable(TYPE_TABLE);
    ref.set(ATTR_NAME, simpleTableName);
    ref.set(ATTR_QUALIFIED_NAME, toQualifiedName(clusterName, toTableNameStr(tableName)));
    ref.set(ATTR_DB, createDatabaseRef(clusterName, databaseName));
    return ref;
}
// Build the complete flow-path lineage, collecting each created (flow path, refs) pair.
// NOTE(review): fragment — the enclosing method is not fully visible here.
createCompleteFlowPath(nifiFlow, lineagePath, createdFlowPaths);
for (Tuple<NiFiFlowPath, DataSetRefs> createdFlowPath : createdFlowPaths) {
    final NiFiFlowPath flowPath = createdFlowPath.getKey();