/**
 * Returns the estimated memory footprint of this object in bytes.
 *
 * @return estimated size in bytes, as reported by the underlying group-by hash
 */
public long getEstimatedSize()
{
    return groupByHash.getEstimatedSize();
}
/**
 * Returns the estimated retained size of this object in bytes.
 *
 * @return estimated size in bytes, delegated to the underlying hash
 */
public long getEstimatedSizeInBytes()
{
    return hash.getEstimatedSize();
}
/**
 * Returns the estimated memory usage of this object in bytes.
 *
 * @return estimated size in bytes of the underlying hash
 */
public long getEstimatedSize()
{
    return hash.getEstimatedSize();
}
/**
 * Computes the total in-memory size of this aggregation: the group-by hash
 * plus the estimated size of every aggregator.
 *
 * @return total estimated size in bytes
 */
public long getSizeInMemory()
{
    long total = groupByHash.getEstimatedSize();
    for (Aggregator aggregator : aggregators) {
        total += aggregator.getEstimatedSize();
    }
    return total;
}
/**
 * Returns the estimated retained size of this structure in bytes: the fixed
 * per-instance overhead, the tracked page bytes, the group-by hash, and the
 * bookkeeping arrays (grouped rows, page references, and free slots).
 *
 * @return estimated retained size in bytes
 */
public long getEstimatedSizeInBytes()
{
    long size = INSTANCE_SIZE;
    size += memorySizeInBytes;
    size += groupByHash.getEstimatedSize();
    size += groupedRows.sizeOf();
    size += pageReferences.sizeOf();
    size += emptyPageReferenceSlots.getEstimatedSizeInBytes();
    return size;
}
public boolean updateMemoryReservation() { // If memory is not available, once we return, this operator will be blocked until memory is available. localMemoryContext.setBytes(hash.getEstimatedSize()); // If memory is not available, inform the caller that we cannot proceed for allocation. return operatorContext.isWaitingForMemory().isDone(); }
/** * Update memory usage. * * @return true if the reservation is within the limit */ // TODO: update in the interface after the new memory tracking framework is landed (#9049) // Essentially we would love to have clean interfaces to support both pushing and pulling memory usage // The following implementation is a hybrid model, where the push model is going to call the pull model causing reentrancy private boolean updateMemoryReservation() { // Operator/driver will be blocked on memory after we call localUserMemoryContext.setBytes(). // If memory is not available, once we return, this operator will be blocked until memory is available. localUserMemoryContext.setBytes(groupByHash.getEstimatedSize()); // If memory is not available, inform the caller that we cannot proceed for allocation. return operatorContext.isWaitingForMemory().isDone(); }
groupByHash.getEstimatedSize() + referencedPagesSizeInBytes + rowHeapsSizeInBytes + // group-by hash plus tracked page/heap bytes (sum continues past this view)
/**
 * Returns the estimated memory footprint in bytes of the underlying hash.
 *
 * @return estimated size in bytes
 */
public long getEstimatedSize()
{
    return hash.getEstimatedSize();
}
/**
 * Returns the estimated retained size in bytes, delegated to the hash.
 *
 * @return estimated size in bytes
 */
public long getEstimatedSizeInBytes()
{
    return hash.getEstimatedSize();
}
/**
 * Returns the estimated size in bytes of the underlying group-by hash.
 *
 * @return estimated size in bytes
 */
public long getEstimatedSize()
{
    return groupByHash.getEstimatedSize();
}
/**
 * Reports the group-by hash's current size to the operator context, then
 * indicates whether this operator can accept more input.
 *
 * @return true while the operator is not finishing, the distinct limit has
 *         not been reached, and no output page is pending
 */
@Override
public boolean needsInput()
{
    // Keep the memory reservation in sync with the hash before answering.
    operatorContext.setMemoryReservation(groupByHash.getEstimatedSize());
    boolean limitReached = remainingLimit <= 0;
    return !finishing && !limitReached && outputPage == null;
}
/**
 * Adds a page to the underlying hash and, when an operator context is
 * present, refreshes the memory reservation to the hash's new size.
 *
 * @param page the page to index
 */
public void addPage(Page page)
{
    hash.addPage(page);
    if (operatorContext == null) {
        return;
    }
    operatorContext.setMemoryReservation(hash.getEstimatedSize());
}
}
if (groupByHash.isPresent()) {
    GroupByHash hash = groupByHash.get();
    // Snapshot the size before getGroupIds so only the delta it allocates is reserved below.
    long groupByHashSize = hash.getEstimatedSize();
    partitionIds = Optional.of(hash.getGroupIds(page));
    // Reserve just the growth caused by indexing this page, not the full hash size.
    operatorContext.reserveMemory(hash.getEstimatedSize() - groupByHashSize);
/**
 * Determines whether this aggregation's memory budget is exhausted.
 * <p>
 * Sums the group-by hash and all aggregator sizes, subtracts the operator's
 * pre-allocated memory (clamped at zero), and then reserves the result. For
 * a partial aggregation a failed reservation reports "full" (triggering a
 * flush); for a final aggregation the reservation blocks instead.
 *
 * @return true if a partial aggregation could not reserve the needed memory
 */
public boolean isFull()
{
    long memorySize = groupByHash.getEstimatedSize();
    for (Aggregator aggregator : aggregators) {
        memorySize += aggregator.getEstimatedSize();
    }
    // Pre-allocated memory does not count against the reservation; never go negative.
    memorySize = Math.max(0, memorySize - operatorContext.getOperatorPreAllocatedMemory().toBytes());
    if (partial) {
        // Partial aggregation flushes when the reservation cannot be satisfied.
        return !operatorContext.trySetMemoryReservation(memorySize);
    }
    // Final aggregation blocks on memory instead of flushing.
    operatorContext.setMemoryReservation(memorySize);
    return false;
}
/**
 * Consumes a page, copying only the positions that introduce a previously
 * unseen group (i.e. distinct rows), up to the remaining limit.
 * <p>
 * A position is distinct exactly when its group id equals the next id the
 * hash would assign ({@code nextDistinctId}), so seen rows are skipped.
 *
 * @param page the input page; {@link #needsInput()} must be true
 */
@Override
public void addInput(Page page)
{
    checkState(needsInput());
    // Keep the memory reservation in sync with the hash.
    operatorContext.setMemoryReservation(groupByHash.getEstimatedSize());
    pageBuilder.reset();
    GroupByIdBlock ids = groupByHash.getGroupIds(page);
    for (int position = 0; position < ids.getPositionCount(); position++) {
        if (ids.getGroupId(position) != nextDistinctId) {
            // Already-seen group: not a distinct row, skip it.
            continue;
        }
        pageBuilder.declarePosition();
        for (int channel = 0; channel < types.size(); channel++) {
            Type type = types.get(channel);
            type.appendTo(page.getBlock(channel), position, pageBuilder.getBlockBuilder(channel));
        }
        remainingLimit--;
        nextDistinctId++;
        if (remainingLimit == 0) {
            // Limit reached: stop scanning this page.
            break;
        }
    }
    if (!pageBuilder.isEmpty()) {
        outputPage = pageBuilder.build();
    }
}