IntHashSet getNodeTypes(int node) {
    IntHashSet result = types[node];
    if (result == null) {
        // Allocate the per-node set lazily, on first access.
        result = new IntHashSet();
        types[node] = result;
    }
    return result;
}
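// A minimal, self-contained sketch of the lazy per-node set idiom above,
// assuming HPPC's IntHashSet; the wrapper class and values are hypothetical.
import com.carrotsearch.hppc.IntHashSet;

public class LazyNodeSets {
    private final IntHashSet[] types;

    public LazyNodeSets(int nodeCount) {
        this.types = new IntHashSet[nodeCount]; // every slot starts out null
    }

    public IntHashSet getNodeTypes(int node) {
        IntHashSet result = types[node];
        if (result == null) {
            result = new IntHashSet(); // allocated only on first access
            types[node] = result;
        }
        return result;
    }

    public static void main(String[] args) {
        LazyNodeSets sets = new LazyNodeSets(8);
        sets.getNodeTypes(3).add(42); // the set for node 3 is created on demand
        System.out.println(sets.getNodeTypes(3).contains(42)); // true
    }
}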
public void addEdge(int from, int to) {
    int max = Math.max(from, to);
    // Grow both adjacency lists together so every node id up to max is valid.
    while (max >= successors.size()) {
        successors.add(new IntHashSet(1));
        predecessors.add(new IntHashSet(1));
    }
    successors.get(from).add(to);
    predecessors.get(to).add(from);
}
private int[] withoutCopies(int[] nodesWithCopies) {
    IntSet visited = new IntHashSet();
    int[] nodes = new int[nodesWithCopies.length];
    int sz = 0;
    for (int node : nodesWithCopies) {
        node = nodeOriginals.get(node);
        // add() returns true only for the first occurrence, so each
        // original node is kept once, in input order.
        if (visited.add(node)) {
            nodes[sz++] = node;
        }
    }
    return Arrays.copyOf(nodes, sz);
}
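// A standalone, hedged version of the order-preserving dedup above, assuming
// HPPC's IntHashSet (whose add() returns true only for a previously unseen key).
import java.util.Arrays;
import com.carrotsearch.hppc.IntHashSet;

public class Dedup {
    static int[] distinctPreservingOrder(int[] input) {
        IntHashSet seen = new IntHashSet(input.length);
        int[] out = new int[input.length]; // overallocate, trim later
        int sz = 0;
        for (int v : input) {
            if (seen.add(v)) { // keep first occurrences only
                out[sz++] = v;
            }
        }
        return Arrays.copyOf(out, sz);
    }

    public static void main(String[] args) {
        int[] result = distinctPreservingOrder(new int[] {3, 1, 3, 2, 1});
        System.out.println(Arrays.toString(result)); // [3, 1, 2]
    }
}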
public static RealtimeFeed empty(GtfsStorage staticGtfs) {
    return new RealtimeFeed(staticGtfs, Collections.emptyMap(), new IntHashSet(),
            new IntLongHashMap(), new IntLongHashMap(), Collections.emptyList(),
            Collections.emptyMap(), Collections.emptyMap(),
            staticGtfs.getOperatingDayPatterns(), staticGtfs.getWritableTimeZones());
}
private void addVariablePair(List<IntSet> target, Variable first, Variable second) {
    IntSet pairs = target.get(first.getIndex());
    if (pairs == null) {
        pairs = new IntHashSet();
        target.set(first.getIndex(), pairs);
    }
    pairs.add(second.getIndex());
}
public Graph build() {
    if (builtGraph == null) {
        IntSet[] incomingEdges = new IntSet[sz];
        for (int i = 0; i < sz; ++i) {
            incomingEdges[i] = new IntHashSet();
        }
        int[][] outgoingEdgeList = new int[sz][];
        for (int i = 0; i < addedEdges.size(); ++i) {
            IntSet edgeList = addedEdges.get(i);
            outgoingEdgeList[i] = edgeList != null ? edgeList.toArray() : new int[0];
            Arrays.sort(outgoingEdgeList[i]);
            for (int j : outgoingEdgeList[i]) {
                incomingEdges[j].add(i);
            }
        }
        for (int i = addedEdges.size(); i < sz; ++i) {
            outgoingEdgeList[i] = new int[0];
        }
        int[][] incomingEdgeList = new int[sz][];
        for (int i = 0; i < sz; ++i) {
            incomingEdgeList[i] = incomingEdges[i].toArray();
            Arrays.sort(incomingEdgeList[i]);
        }
        builtGraph = new GraphImpl(incomingEdgeList, outgoingEdgeList);
    }
    return builtGraph;
}
private IntSet nodesToCopy() {
    IntSet result = new IntHashSet();
    for (int node : nodes.toArray()) {
        if (node == head || (node != bodyStart && !dom.dominates(bodyStart, node))) {
            result.add(node);
        }
    }
    return result;
}
void schedulePropagation(Transition consumer, DependencyType type) {
    if (!consumer.destination.filter(type)) {
        return;
    }
    if (consumer.pendingTypes == null && propagationDepth < PROPAGATION_STACK_THRESHOLD
            && consumer.pointsToDomainOrigin() && consumer.destination.propagateCount < 20) {
        // Fast path: propagate immediately while recursion stays shallow.
        ++propagationDepth;
        consumer.consume(type);
        --propagationDepth;
    } else {
        // Slow path: queue the transition and accumulate pending type indices.
        if (consumer.pendingTypes == null) {
            pendingTransitions.add(consumer);
            consumer.pendingTypes = new IntHashSet(50);
        }
        consumer.pendingTypes.add(type.index);
    }
}
IntHashSet set = new IntHashSet(activeLandmarkIndices.length);
set.addAll(activeLandmarkIndices);
int existingLandmarkCounter = 0;
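// Context for the fragment above, as a hedged sketch: HPPC's IntHashSet takes a
// plain int[] through its varargs addAll, so pre-sizing the set with the array
// length avoids rehashing while the indices are copied in. The values below are
// made up for illustration.
int[] activeLandmarkIndices = {2, 5, 11};
IntHashSet set = new IntHashSet(activeLandmarkIndices.length);
int added = set.addAll(activeLandmarkIndices); // returns the number of new keys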
void schedulePropagation(Transition consumer, DependencyType[] types) {
    if (types.length == 0) {
        return;
    }
    if (types.length == 1) {
        schedulePropagation(consumer, types[0]);
        return;
    }
    if (consumer.pendingTypes == null && propagationDepth < PROPAGATION_STACK_THRESHOLD
            && consumer.pointsToDomainOrigin() && consumer.destination.propagateCount < 20) {
        ++propagationDepth;
        consumer.consume(types);
        --propagationDepth;
    } else {
        if (consumer.pendingTypes == null) {
            pendingTransitions.add(consumer);
            consumer.pendingTypes = new IntHashSet(Math.max(50, types.length));
        }
        consumer.pendingTypes.ensureCapacity(types.length + consumer.pendingTypes.size());
        for (DependencyType type : types) {
            consumer.pendingTypes.add(type.index);
        }
    }
}
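// A distilled sketch of the batching branch above (values hypothetical): HPPC's
// ensureCapacity pre-sizes the set for the whole incoming batch, so the loop of
// single add() calls does not trigger incremental rehashing.
IntHashSet pendingTypes = new IntHashSet(50);
int[] batchOfTypeIndices = {1, 4, 9}; // stand-in for the DependencyType indices
pendingTypes.ensureCapacity(batchOfTypeIndices.length + pendingTypes.size());
for (int index : batchOfTypeIndices) {
    pendingTypes.add(index);
}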
private void registerCopies(int[] originalNodes, int[] copies) {
    for (int i = 0; i < originalNodes.length; ++i) {
        int original = nodeOriginals.get(originalNodes[i]);
        int copy = copies[i];
        IntSet knownCopies = nodeCopies[original];
        if (knownCopies == null) {
            knownCopies = new IntHashSet();
            nodeCopies[original] = knownCopies;
        }
        if (knownCopies.add(copy)) {
            // First time this copy is seen: record its original node,
            // growing the nodeOriginals list as needed.
            while (nodeOriginals.size() <= copy) {
                nodeOriginals.add(-1);
            }
            nodeOriginals.set(copy, original);
        }
    }
}
private boolean findCondition() {
    IntSet tailNodes = new IntHashSet(program.basicBlockCount());
    for (int tailCandidate : cfg.incomingEdges(head)) {
        if (nodes.contains(tailCandidate)) {
            tailNodes.add(tailCandidate);
        }
    }
    bodyStart = dom.commonDominatorOf(tailNodes.toArray());
    int candidate = bodyStart;
    while (bodyStart != head) {
        int currentCandidate = candidate;
        if (Arrays.stream(exits.toArray()).anyMatch(exit -> dom.dominates(currentCandidate, exit))) {
            break;
        }
        bodyStart = candidate;
        candidate = dom.immediateDominatorOf(candidate);
    }
    return candidate != bodyStart;
}
private void propagatePhiUsageInformation() {
    IntDeque worklist = new IntArrayDeque();
    for (int receiverIndex : phisByReceiver.keys().toArray()) {
        if (usedPhis.get(receiverIndex)) {
            worklist.addLast(receiverIndex);
        }
    }
    IntSet visited = new IntHashSet();
    while (!worklist.isEmpty()) {
        int varIndex = worklist.removeFirst();
        if (!visited.add(varIndex)) {
            continue;
        }
        usedPhis.set(varIndex);
        Phi phi = phisByReceiver.get(varIndex);
        if (phi != null) {
            for (Incoming incoming : phi.getIncomings()) {
                if (!visited.contains(incoming.getValue().getIndex())) {
                    worklist.addLast(incoming.getValue().getIndex());
                }
            }
        }
    }
}
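// The method above and findNaturalLoop below share one worklist idiom:
// IntHashSet.add doubles as the "already visited" test. A generic, hedged
// sketch of that idiom (graph shape and names are hypothetical), assuming
// HPPC's IntArrayDeque and IntHashSet:
import com.carrotsearch.hppc.IntArrayDeque;
import com.carrotsearch.hppc.IntDeque;
import com.carrotsearch.hppc.IntHashSet;

public class Reachability {
    static IntHashSet reachable(int[][] successors, int start) {
        IntDeque worklist = new IntArrayDeque();
        IntHashSet visited = new IntHashSet();
        worklist.addLast(start);
        while (!worklist.isEmpty()) {
            int node = worklist.removeFirst();
            if (!visited.add(node)) {
                continue; // already expanded
            }
            for (int next : successors[node]) {
                if (!visited.contains(next)) {
                    worklist.addLast(next);
                }
            }
        }
        return visited;
    }

    public static void main(String[] args) {
        int[][] graph = {{1, 2}, {2}, {}, {0}}; // node 3 is unreachable from 0
        System.out.println(reachable(graph, 0)); // 0, 1, 2 in arbitrary order
    }
}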
private int[] findNaturalLoop(int head, int[] terminals) {
    IntSet loop = new IntHashSet();
    loop.add(head);
    IntegerStack stack = new IntegerStack(1);
    for (int pred : terminals) {
        stack.push(pred);
    }
    while (!stack.isEmpty()) {
        int node = stack.pop();
        if (!loop.add(node)) {
            continue;
        }
        // Walk predecessors; head is already in the set, so traversal stops there.
        for (int pred : graph.incomingEdges(node)) {
            stack.push(pred);
        }
    }
    return loop.toArray();
}
void convertToReducible(Graph cfg, int[] weight, GraphSplittingBackend backend) {
    this.backend = backend;
    nodeCopies = new IntHashSet[cfg.size()];
    nodeOriginals = new IntegerArray(cfg.size());
    for (int i = 0; i < cfg.size(); ++i) {
        nodeCopies[i] = new IntHashSet();
        nodeOriginals.add(i);
    }
    int[][] identityNodeMap = new int[cfg.size()][];
    for (int i = 0; i < identityNodeMap.length; ++i) {
        identityNodeMap[i] = new int[] { i };
    }
    this.cfg = cfg;
    totalNodeCount = cfg.size();
    handleLoops(new DJGraph(cfg, weight), identityNodeMap);
    this.backend = null;
}
@Test
public void testKeybasedGraphPartitioning() {
    Object[] options = {option(GraphDatabaseConfiguration.IDS_FLUSH), false,
            option(VertexIDAssigner.PLACEMENT_STRATEGY), PropertyPlacementStrategy.class.getName(),
            option(PropertyPlacementStrategy.PARTITION_KEY), "clusterId"};
    clopen(options);
    int[] groupDegrees = {5, 5, 5, 5, 5, 5, 5, 5};
    int numVertices = setupGroupClusters(groupDegrees, CommitMode.PER_VERTEX);
    IntSet partitionIds = new IntHashSet(numVertices); // to track the "spread" of partition ids
    for (int i = 0; i < groupDegrees.length; i++) {
        TitanVertex g = getOnlyVertex(tx.query().has("groupid", "group" + i));
        int partitionId = -1;
        for (TitanVertex v : g.query().direction(Direction.IN).labels("member").vertices()) {
            if (partitionId < 0) partitionId = getPartitionID(v);
            assertEquals(partitionId, getPartitionID(v));
            partitionIds.add(partitionId);
        }
    }
    assertTrue(partitionIds.size() > numPartitions / 2); // probabilistic test that might fail
}
/**
 * Makes edges that cross the specified border inaccessible, splitting a bigger
 * area into smaller subnetworks. This is important for the worldwide use case,
 * both to limit the maximum distance and to detect unreasonable routes faster.
 */
protected IntHashSet findBorderEdgeIds(SpatialRuleLookup ruleLookup) {
    AllEdgesIterator allEdgesIterator = graph.getAllEdges();
    NodeAccess nodeAccess = graph.getNodeAccess();
    IntHashSet inaccessible = new IntHashSet();
    while (allEdgesIterator.next()) {
        int adjNode = allEdgesIterator.getAdjNode();
        SpatialRule ruleAdj = ruleLookup.lookupRule(nodeAccess.getLatitude(adjNode), nodeAccess.getLongitude(adjNode));
        int baseNode = allEdgesIterator.getBaseNode();
        SpatialRule ruleBase = ruleLookup.lookupRule(nodeAccess.getLatitude(baseNode), nodeAccess.getLongitude(baseNode));
        if (ruleAdj != ruleBase) {
            // The endpoints fall under different spatial rules: the edge crosses a border.
            inaccessible.add(allEdgesIterator.getEdge());
        }
    }
    return inaccessible;
}
@Test
public void testKeyBasedGraphPartitioning() {
    Object[] options = {option(GraphDatabaseConfiguration.IDS_FLUSH), false,
            option(VertexIDAssigner.PLACEMENT_STRATEGY), PropertyPlacementStrategy.class.getName(),
            option(PropertyPlacementStrategy.PARTITION_KEY), "clusterId"};
    clopen(options);
    int[] groupDegrees = {5, 5, 5, 5, 5, 5, 5, 5};
    int numVertices = setupGroupClusters(groupDegrees, CommitMode.PER_VERTEX);
    IntSet partitionIds = new IntHashSet(numVertices); // to track the "spread" of partition ids
    for (int i = 0; i < groupDegrees.length; i++) {
        JanusGraphVertex g = getOnlyVertex(tx.query().has("groupid", "group" + i));
        int partitionId = -1;
        for (Object o : g.query().direction(Direction.IN).labels("member").vertices()) {
            JanusGraphVertex v = (JanusGraphVertex) o;
            if (partitionId < 0) partitionId = getPartitionID(v);
            assertEquals(partitionId, getPartitionID(v));
            partitionIds.add(partitionId);
        }
    }
    assertTrue(partitionIds.size() > numPartitions / 2); // probabilistic test that might fail
}
private void testPartitionSpread(boolean flush, boolean batchCommit) {
    Object[] options = {option(GraphDatabaseConfiguration.IDS_FLUSH), flush};
    clopen(options);
    int[] groupDegrees = {10, 15, 10, 17, 10, 4, 7, 20, 11};
    int numVertices = setupGroupClusters(groupDegrees, batchCommit ? CommitMode.BATCH : CommitMode.PER_VERTEX);
    IntSet partitionIds = new IntHashSet(numVertices); // to track the "spread" of partition ids
    for (int i = 0; i < groupDegrees.length; i++) {
        TitanVertex g = getOnlyVertex(tx.query().has("groupid", "group" + i));
        assertCount(groupDegrees[i], g.edges(Direction.OUT, "contain"));
        assertCount(groupDegrees[i], g.edges(Direction.IN, "member"));
        assertCount(groupDegrees[i], g.query().direction(Direction.OUT).edges());
        assertCount(groupDegrees[i], g.query().direction(Direction.IN).edges());
        assertCount(groupDegrees[i] * 2, g.query().edges());
        for (TitanVertex v : g.query().direction(Direction.IN).labels("member").vertices()) {
            int pid = getPartitionID(v);
            partitionIds.add(pid);
            assertEquals(g, getOnlyElement(v.query().direction(Direction.OUT).labels("member").vertices()));
            VertexList vlist = v.query().direction(Direction.IN).labels("contain").vertexIds();
            assertEquals(1, vlist.size());
            assertEquals(pid, idManager.getPartitionId(vlist.getID(0)));
            assertEquals(g, vlist.get(0));
        }
    }
    if (flush || !batchCommit) {
        // In these cases we expect significant spread across partitions.
        assertTrue(partitionIds.size() > numPartitions / 2); // probabilistic test that might fail
    } else {
        assertEquals(1, partitionIds.size()); // no spread in this case
    }
}
private void testPartitionSpread(boolean flush, boolean batchCommit) {
    Object[] options = {option(GraphDatabaseConfiguration.IDS_FLUSH), flush};
    clopen(options);
    int[] groupDegrees = {10, 15, 10, 17, 10, 4, 7, 20, 11};
    int numVertices = setupGroupClusters(groupDegrees, batchCommit ? CommitMode.BATCH : CommitMode.PER_VERTEX);
    IntSet partitionIds = new IntHashSet(numVertices); // to track the "spread" of partition ids
    for (int i = 0; i < groupDegrees.length; i++) {
        JanusGraphVertex g = getOnlyVertex(tx.query().has("groupid", "group" + i));
        assertCount(groupDegrees[i], g.edges(Direction.OUT, "contain"));
        assertCount(groupDegrees[i], g.edges(Direction.IN, "member"));
        assertCount(groupDegrees[i], g.query().direction(Direction.OUT).edges());
        assertCount(groupDegrees[i], g.query().direction(Direction.IN).edges());
        assertCount(groupDegrees[i] * 2, g.query().edges());
        for (Object o : g.query().direction(Direction.IN).labels("member").vertices()) {
            JanusGraphVertex v = (JanusGraphVertex) o;
            int pid = getPartitionID(v);
            partitionIds.add(pid);
            assertEquals(g, getOnlyElement(v.query().direction(Direction.OUT).labels("member").vertices()));
            VertexList vertexList = v.query().direction(Direction.IN).labels("contain").vertexIds();
            assertEquals(1, vertexList.size());
            assertEquals(pid, idManager.getPartitionId(vertexList.getID(0)));
            assertEquals(g, vertexList.get(0));
        }
    }
    if (flush || !batchCommit) {
        // In these cases we expect significant spread across partitions.
        assertTrue(partitionIds.size() > numPartitions / 2); // probabilistic test that might fail
    } else {
        assertEquals(1, partitionIds.size()); // no spread in this case
    }
}