/**
 * Recursively collects every {@link NodeId} reachable from {@code dag}, resetting each visited
 * bucket DAG to the empty tree as it goes.
 * <p>
 * If {@code dag} holds direct children, those are returned immediately; otherwise each bucket is
 * loaded (merging its root first if it's still {@code INITIALIZED}), drained recursively, and
 * cleared.
 * <p>
 * To be called by {@link #shrinkIfUnderflow(DAG)}
 *
 * @param dag the DAG whose children are to be collected
 * @return the set of all children found under {@code dag}, directly or through its buckets
 */
private Set<NodeId> getChildrenRecursiveAndClearBuckets(final DAG dag) {
    Set<NodeId> children = new HashSet<>();
    dag.forEachChild(children::add);
    if (!children.isEmpty()) {
        // leaf DAG: children are held directly, there are no buckets to drain
        return children;
    }
    final List<TreeId> bucketIds = dag.bucketList();
    for (TreeId bucketId : bucketIds) {
        DAG bucket = getOrCreateDAG(bucketId);
        if (bucket.getState() == STATE.INITIALIZED) {
            mergeRoot(bucket);
        }
        Set<NodeId> bucketChildren = getChildrenRecursiveAndClearBuckets(bucket);
        int pre = children.size();
        children.addAll(bucketChildren);
        int post = children.size();
        // sibling buckets must not share node ids; a collision indicates a corrupted DAG.
        // Include the sizes and the offending bucket in the message for diagnosability.
        Preconditions.checkState(pre + bucketChildren.size() == post,
                "duplicate node ids across buckets: expected %s children, got %s, at bucket %s",
                pre + bucketChildren.size(), post, bucketId);
        bucket.reset(RevTree.EMPTY_TREE_ID);
    }
    return children;
}
/**
 * Collects all {@link NodeId}s reachable from {@code dag}, resetting each traversed bucket DAG to
 * the empty tree along the way.
 * <p>
 * A DAG with direct children contributes them as-is; otherwise its buckets are visited one by one
 * (merging the bucket root first when still {@code INITIALIZED}) and drained recursively.
 * <p>
 * To be called by {@link #shrinkIfUnderflow(DAG)}
 *
 * @param dag the DAG to drain
 * @return every child node id found under {@code dag}
 */
private Set<NodeId> getChildrenRecursiveAndClearBuckets(final DAG dag) {
    final Set<NodeId> collected = new HashSet<>();
    dag.forEachChild(collected::add);
    if (collected.isEmpty()) {
        // no direct children: descend into each bucket, drain it, and clear it
        for (TreeId id : dag.bucketList()) {
            final DAG bucketDag = getOrCreateDAG(id);
            if (STATE.INITIALIZED == bucketDag.getState()) {
                mergeRoot(bucketDag);
            }
            collected.addAll(getChildrenRecursiveAndClearBuckets(bucketDag));
            bucketDag.reset(RevTree.EMPTY_TREE_ID);
        }
    }
    return collected;
}
private void shrinkIfUnderflow(final DAG dag) { if (dag.numBuckets() == 0) { return; } final long childCount = dag.getTotalChildCount(); // TODO: in the case of quadtrees would need to check if it's an unpromotables bucket and // use canonical's normalized size limit instead? final int depth = dag.getId().depthLength(); final int normalizedSizeLimit = normalizedSizeLimit(depth); if (childCount > normalizedSizeLimit) { return; } Set<NodeId> childrenRecursive = getChildrenRecursiveAndClearBuckets(dag); checkState(childrenRecursive.size() == childCount, "expected %s, got %s, at: %s", childCount, childrenRecursive.size(), dag); dag.clearBuckets(); childrenRecursive.forEach((id) -> dag.addChild(id)); }
private void shrinkIfUnderflow(final DAG dag) { if (dag.numBuckets() == 0) { return; } final long childCount = dag.getTotalChildCount(); // TODO: in the case of quadtrees would need to check if it's an unpromotables bucket and // use canonical's normalized size limit instead? final int depth = dag.getId().depthLength(); final int normalizedSizeLimit = normalizedSizeLimit(depth); if (childCount > normalizedSizeLimit) { return; } Set<NodeId> childrenRecursive = getChildrenRecursiveAndClearBuckets(dag); int collectedSize = childrenRecursive.size(); if (dag.getId().equals(failingDag)) { System.err.printf("expected: %d, collected: %d\n", childCount, collectedSize); } if (collectedSize != childCount) { throw new IllegalStateException(String.format("expected %s, got %s, at: %s", childCount, childrenRecursive.size(), dag)); } dag.clearBuckets(); childrenRecursive.forEach((id) -> dag.addChild(id)); }