/**
 * Atomically applies {@code update} to the reference count stored alongside the lock for the
 * given file, retrying the compare-and-swap on {@code lockMap} until it succeeds. A missing
 * entry is initialized to a fresh {@link ReentrantReadWriteLock} with a count of zero before
 * the update function is applied.
 *
 * @param file the file whose lock entry should be updated
 * @param update function producing the new reference count from the current one
 * @return the {@link ReadWriteLock} associated with the file
 */
private ReadWriteLock updateCount(final File file, final Function<Integer, Integer> update) {
    final String key = getMapKey(file);

    while (true) {
        final Tuple<ReadWriteLock, Integer> current =
            lockMap.computeIfAbsent(key, k -> new Tuple<>(new ReentrantReadWriteLock(), 0));

        final Tuple<ReadWriteLock, Integer> replacement =
            new Tuple<>(current.getKey(), update.apply(current.getValue()));

        // replace(key, old, new) only succeeds if no other thread changed the entry since we
        // read it; on failure, loop and retry with a fresh snapshot.
        if (lockMap.replace(key, current, replacement)) {
            return replacement.getKey();
        }
    }
}
@SuppressWarnings("unchecked") private Map<String, Referenceable> fromReferenceable(Object _refs, Metrics metrics) { if (_refs == null) { return Collections.emptyMap(); } final Collection<Referenceable> refs = (Collection<Referenceable>) _refs; return refs.stream().map(ref -> { // This ref is created within this reporting cycle, and it may not have GUID assigned yet, if it is a brand new reference. // If cache has the Reference, then use it because instances in the cache are guaranteed to have GUID assigned. // Brand new Referenceables have to have all mandatory attributes. final String typeName = ref.getTypeName(); final Id id = ref.getId(); final String refQualifiedName = (String) ref.get(ATTR_QUALIFIED_NAME); final String typedRefQualifiedName = toTypedQualifiedName(typeName, refQualifiedName); final Referenceable refFromCacheIfAvailable = typedQualifiedNameToRef.computeIfAbsent(typedRefQualifiedName, k -> { if (id.isAssigned()) { // If this referenceable has Guid assigned, then add this one to cache. guidToQualifiedName.put(id._getId(), refQualifiedName); typedQualifiedNameToRef.put(typedRefQualifiedName, ref); } return ref; }); return new Tuple<>(refQualifiedName, refFromCacheIfAvailable); }).filter(tuple -> tuple.getValue() != null) .collect(toMap(Tuple::getKey, Tuple::getValue)); } }
/**
 * Splits a table reference of the form {@code "database.table"} — or a bare {@code "table"} —
 * into its database and table components. When no database prefix is present, the connected
 * database name is used.
 *
 * @param connectedDatabaseName database to fall back to when the name has no prefix
 * @param tableNameStr the raw table reference
 * @return a (database, table) tuple, or null if the reference has an unexpected format
 */
private static Tuple<String, String> parseTableName(String connectedDatabaseName, String tableNameStr) {
    final String[] parts = tableNameStr.split("\\.");
    switch (parts.length) {
        case 1:
            // No database prefix — fall back to the connected database.
            return new Tuple<>(connectedDatabaseName, parts[0]);
        case 2:
            return new Tuple<>(parts[0], parts[1]);
        default:
            logger.warn("Unexpected table name format: {}", tableNameStr);
            return null;
    }
}
/**
 * Removes every timing entry whose creation timestamp precedes {@code cutoff}, drops the
 * cached directory canonicalization for each purged entry, and refreshes the tracked
 * earliest remaining timestamp.
 *
 * @param cutoff epoch millis; entries created strictly before this instant are purged
 */
@Override
public synchronized void purgeTimingInfo(final long cutoff) {
    logger.debug("Purging any entries from Performance Tracker that is older than {}", new Object[] {new Date(cutoff)});

    int removed = 0;
    long oldestRemaining = System.currentTimeMillis();

    final Iterator<Map.Entry<Tuple<String, String>, TimingInfo>> entries = directoryToTimingInfo.entrySet().iterator();
    while (entries.hasNext()) {
        final Map.Entry<Tuple<String, String>, TimingInfo> entry = entries.next();
        final long created = entry.getValue().getCreationTimestamp();

        if (created < cutoff) {
            // Iterator.remove is required here — we are mutating the map mid-iteration.
            entries.remove();
            removed++;
            // The tuple's key is the directory; drop its cached canonicalization too.
            directoryCanonicalization.remove(entry.getKey().getKey());
        } else {
            oldestRemaining = Math.min(oldestRemaining, created);
        }
    }

    this.earliestTimestamp = oldestRemaining;
    logger.debug("Purged {} entries from Performance Tracker; now holding {} entries", new Object[] {removed, directoryToTimingInfo.size()});
}
/**
 * Collects the restricted components referenced (directly or transitively) by the given
 * versioned process group. A temp component instance is created for every referenced
 * component type, and those annotated with {@code @Restricted} are returned.
 *
 * @param group the versioned process group to inspect
 * @param serviceFacade facade used to instantiate temp components
 * @return the set of restricted components in use
 * @throws NiFiCoreException if a component instance cannot be created for a referenced type
 */
public static Set<ConfigurableComponent> getRestrictedComponents(final VersionedProcessGroup group, final NiFiServiceFacade serviceFacade) {
    final Set<Tuple<String, BundleCoordinate>> componentTypes = new HashSet<>();
    populateComponentTypes(group, componentTypes);

    final Set<ConfigurableComponent> restricted = new HashSet<>();
    for (final Tuple<String, BundleCoordinate> type : componentTypes) {
        // The tuple pairs the component's class name with its bundle coordinate.
        final ConfigurableComponent component = serviceFacade.getTempComponent(type.getKey(), type.getValue());
        if (component == null) {
            throw new NiFiCoreException("Could not create an instance of component " + type.getKey() + " using bundle coordinates " + type.getValue());
        }

        if (component.getClass().isAnnotationPresent(Restricted.class)) {
            restricted.add(component);
        }
    }

    return restricted;
}
/**
 * Splits a stored state value of the form {@code "timestamp:value"} at the first colon.
 *
 * @param mapValue the raw state string; assumed to contain at least one ':' — substring
 *                 throws StringIndexOutOfBoundsException otherwise (TODO confirm callers
 *                 always store values in this format)
 * @return a (timestamp, value) tuple
 */
protected static Tuple<String, String> parseStateValue(String mapValue) {
    final int delimiter = mapValue.indexOf(":");
    final String timestamp = mapValue.substring(0, delimiter);
    final String value = mapValue.substring(delimiter + 1);
    return new Tuple<>(timestamp, value);
}
}
// Compare the stored LAST_MODIFIED timestamp for this URL against the current cycle time
// and, if stale, overwrite it with the newly received Last-Modified value.
Tuple<String, String> storedLastModifiedTuple = parseStateValue(workingMap.get(LAST_MODIFIED + ":" + url));
if (Long.parseLong(storedLastModifiedTuple.getKey()) < currentTime) {
    workingMap.put(LAST_MODIFIED + ":" + url, currentTime + ":" + receivedLastModified.getValue());
    changed = true;

// NOTE(review): as excerpted, this re-declares `storedLastModifiedTuple` — presumably the two
// checks live in separate scopes in the full file, otherwise this is a duplicate-local compile
// error. The name is also misleading for an ETAG lookup (something like `storedEtagTuple`
// would be clearer); confirm against the full source.
Tuple<String, String> storedLastModifiedTuple = parseStateValue(workingMap.get(ETAG + ":" + url));
if (Long.parseLong(storedLastModifiedTuple.getKey()) < currentTime) {
    workingMap.put(ETAG + ":" + url, currentTime + ":" + receivedEtag.getValue());
    changed = true;
// Verify the value portion of the parsed ETag state entry is "1", then reset the test
// controller's transfer state for the next cycle of the test.
assertEquals("1",GetHTTP.parseStateValue(eTagStateValue).getValue());
controller.clearTransferState();
/**
 * Releases the write lock held for the given file, decrementing the lock entry's reference
 * count and removing the entry entirely when this was the last reference. The map update is
 * performed with conditional remove/replace operations in a retry loop so that concurrent
 * acquisitions/releases for the same file do not lose counts.
 *
 * @param file the file whose write lock is being released
 * @throws IllegalMonitorStateException if no lock entry exists for the file
 */
public void releaseWriteLock(final File file) {
    final String key = getMapKey(file);

    boolean updated = false;
    while (!updated) {
        final Tuple<ReadWriteLock, Integer> tuple = lockMap.get(key);
        if (tuple == null) {
            throw new IllegalMonitorStateException("Lock is not owned");
        }

        // If this is the only reference to the lock, remove it from the map and then unlock.
        if (tuple.getValue() <= 1) {
            // Conditional remove: only succeeds if the mapping still equals our snapshot;
            // on failure another thread changed the count and we retry with fresh state.
            updated = lockMap.remove(key, tuple);
            if (updated) {
                // Unlock only after the map update succeeded — order matters here.
                tuple.getKey().writeLock().unlock();
            }
        } else {
            // Other references remain: publish a tuple with a decremented count, then unlock.
            final Tuple<ReadWriteLock, Integer> updatedTuple = new Tuple<>(tuple.getKey(), tuple.getValue() - 1);
            updated = lockMap.replace(key, tuple, updatedTuple);
            if (updated) {
                tuple.getKey().writeLock().unlock();
            }
        }
    }
}
/**
 * Atomically sets the given key to the given value in Redis only if the key is not already
 * present (SETNX), applying the configured TTL when the value was actually written.
 *
 * @param key the cache key
 * @param value the cache value
 * @param keySerializer serializer used to produce the key bytes
 * @param valueSerializer serializer used to produce the value bytes
 * @return true if the key was absent and the value was set; false otherwise
 * @throws IOException if serialization or the Redis operation fails
 */
@Override
public <K, V> boolean putIfAbsent(final K key, final V value, final Serializer<K> keySerializer, final Serializer<V> valueSerializer) throws IOException {
    return withConnection(redisConnection -> {
        final Tuple<byte[], byte[]> kv = serialize(key, value, keySerializer, valueSerializer);
        final boolean wasSet = redisConnection.setNX(kv.getKey(), kv.getValue());

        // Only apply an expiration when the value was actually written and a TTL is configured
        // (ttl of -1 means "no expiration").
        if (wasSet && ttl != -1L) {
            redisConnection.expire(kv.getKey(), ttl);
        }

        return wasSet;
    });
}
/**
 * Returns the {@link TimingInfo} tracked for the given directory/filename pair, creating and
 * caching a new one on first access. The directory string is interned through the
 * canonicalization map so that equal directory strings share one canonical key instance.
 *
 * @param directory directory portion of the key
 * @param filename filename portion of the key
 * @return the existing or newly created TimingInfo for the pair
 */
@Override
public synchronized TimingInfo getTimingInfo(final String directory, final String filename) {
    final String canonicalDirectory = directoryCanonicalization.computeIfAbsent(directory, key -> directory);
    final Tuple<String, String> mapKey = new Tuple<>(canonicalDirectory, filename);
    return directoryToTimingInfo.computeIfAbsent(mapKey,
        k -> new TimingInfo(directory, filename, this, logger, maxDiskOperationMillis));
}
// Reads records from the incoming FlowFile content and routes each record to the writer(s)
// for the relationship(s) it matches, lazily creating one output FlowFile and
// RecordSetWriter per relationship on first use.
@Override
public void process(final InputStream in) throws IOException {
    try (final RecordReader reader = readerFactory.createRecordReader(originalAttributes, in, getLogger())) {
        final RecordSchema writeSchema = writerFactory.getSchema(originalAttributes, reader.getSchema());

        Record record;
        while ((record = reader.nextRecord()) != null) {
            // Determine which relationship(s) this record should be routed to.
            final Set<Relationship> relationships = route(record, writeSchema, original, context, flowFileContext);
            numRecords.incrementAndGet();

            for (final Relationship relationship : relationships) {
                final RecordSetWriter recordSetWriter;
                Tuple<FlowFile, RecordSetWriter> tuple = writers.get(relationship);
                if (tuple == null) {
                    // First record for this relationship: create its output FlowFile and writer.
                    // NOTE(review): the writer/output stream are cached in `writers`;
                    // presumably they are finished and closed elsewhere after processing
                    // completes — confirm in the enclosing caller.
                    FlowFile outFlowFile = session.create(original);
                    final OutputStream out = session.write(outFlowFile);

                    recordSetWriter = writerFactory.createWriter(getLogger(), writeSchema, out);
                    recordSetWriter.beginRecordSet();

                    tuple = new Tuple<>(outFlowFile, recordSetWriter);
                    writers.put(relationship, tuple);
                } else {
                    recordSetWriter = tuple.getValue();
                }

                recordSetWriter.write(record);
            }
        }
    } catch (final SchemaNotFoundException | MalformedRecordException e) {
        // Reader/schema problems are surfaced as a ProcessException with the cause preserved.
        throw new ProcessException("Could not parse incoming data", e);
    }
}
});
// The queue tuple's key is the queue's Atlas object id; record it as an output of this path.
final AtlasObjectId queueId = queueTuple.getKey();
path.getOutputs().add(queueId);
// NOTE(review): this excerpt appears garbled/incomplete as copied — the `else` has no visible
// matching `if`, statements follow an unconditional `return`, and `versionInfo`/`fetch` are
// defined outside this view. It reads like fragments of a timestamp-based cache lookup
// (re-fetch when the cached entry is older than versionInfoCacheNanos); confirm against the
// full source before changing anything here.
try {
    final Tuple<String,String> nameAndBranch = new Tuple<>(schemaName, branchName);
    final Tuple<SchemaVersionInfo, Long> timestampedVersionInfo = schemaVersionByNameCache.get(nameAndBranch);
} else {
    final long minTimestamp = System.nanoTime() - versionInfoCacheNanos;
    fetch = timestampedVersionInfo.getValue() < minTimestamp;
    return timestampedVersionInfo.getKey();
    final Tuple<SchemaVersionInfo, Long> tuple = new Tuple<>(versionInfo, System.nanoTime());
    schemaVersionByNameCache.put(nameAndBranch, tuple);
    return versionInfo;
// Drains the event queue, indexing each provenance event until the queue is empty AND the
// producer has signalled (via finishedAdding) that no more events will arrive.
@Override
public Object call() throws IOException {
    while (!eventQueue.isEmpty() || !finishedAdding.get()) {
        try {
            final Tuple<StandardProvenanceEventRecord, Integer> tuple;
            try {
                // Short poll so the outer loop can re-check finishedAdding periodically.
                tuple = eventQueue.poll(10, TimeUnit.MILLISECONDS);
            } catch (final InterruptedException ie) {
                // Restore the interrupt flag and keep draining rather than aborting outright.
                Thread.currentThread().interrupt();
                continue;
            }

            if (tuple == null) {
                continue;
            }

            // Tuple pairs the event record with its (integer) block/offset payload for indexing.
            indexingAction.index(tuple.getKey(), indexWriter.getIndexWriter(), tuple.getValue());
        } catch (final Throwable t) {
            logger.error("Failed to index Provenance Event for " + writerFile + " to " + indexingDirectory, t);

            // Give up entirely once the cumulative failure count reaches the cap, to avoid
            // spinning forever on a broken index.
            if (indexingFailureCount.incrementAndGet() >= MAX_INDEXING_FAILURE_COUNT) {
                return null;
            }
        }
    }

    return null;
}
};
/**
 * Serializes the given key and value into byte arrays using the supplied serializers,
 * reusing a single in-memory buffer for both.
 *
 * @param key the key to serialize
 * @param value the value to serialize
 * @param keySerializer serializer for the key
 * @param valueSerializer serializer for the value
 * @return a tuple of (key bytes, value bytes)
 * @throws IOException if either serializer fails
 */
private <K, V> Tuple<byte[],byte[]> serialize(final K key, final V value, final Serializer<K> keySerializer, final Serializer<V> valueSerializer) throws IOException {
    final ByteArrayOutputStream buffer = new ByteArrayOutputStream();

    keySerializer.serialize(key, buffer);
    final byte[] serializedKey = buffer.toByteArray();

    // Reset rather than allocate a second buffer — toByteArray() above already copied the bytes.
    buffer.reset();
    valueSerializer.serialize(value, buffer);
    final byte[] serializedValue = buffer.toByteArray();

    return new Tuple<>(serializedKey, serializedValue);
}