/**
 * Re-hashes the record keys of every type key index, one parallel task per type.
 * Blocks until all tasks complete.
 *
 * @throws RuntimeException wrapping any failure from the parallel hashing tasks
 */
private void rehashKeys() {
    SimultaneousExecutor executor = new SimultaneousExecutor();
    for (final Map.Entry<String, HollowHistoryTypeKeyIndex> entry : typeKeyIndexes.entrySet()) {
        executor.execute(() -> entry.getValue().hashRecordKeys());
    }
    try {
        executor.awaitSuccessfulCompletion();
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt(); // restore interrupt status before rethrowing
        throw new RuntimeException(e);
    } catch (ExecutionException e) {
        throw new RuntimeException(e);
    }
}
/**
 * Updates every type key index against the supplied read state engine, one parallel
 * task per type.  Blocks until all tasks complete.
 *
 * @param latestStateEngine the state engine from which each type's read state is fetched
 * @param isDelta           whether this update is the result of a delta transition
 * @throws RuntimeException wrapping any failure from the parallel update tasks
 */
private void updateTypeIndexes(final HollowReadStateEngine latestStateEngine, final boolean isDelta) {
    SimultaneousExecutor executor = new SimultaneousExecutor();
    for (final Map.Entry<String, HollowHistoryTypeKeyIndex> entry : typeKeyIndexes.entrySet()) {
        executor.execute(() -> {
            HollowObjectTypeReadState typeState = (HollowObjectTypeReadState) latestStateEngine.getTypeState(entry.getKey());
            entry.getValue().update(typeState, isDelta);
        });
    }
    try {
        executor.awaitSuccessfulCompletion();
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt(); // restore interrupt status before rethrowing
        throw new RuntimeException(e);
    } catch (ExecutionException e) {
        throw new RuntimeException(e);
    }
}
/**
 * Builds, per type, the set of ordinals populated with different records between the two
 * states, then expands each set with ordinals referencing outside the closure via the
 * TransitiveSetTraverser before returning the map.
 */
private Map<String, BitSet> discoverChangedOrdinalsBetweenStates() {
    SimultaneousExecutor executor = new SimultaneousExecutor();
    Map<String, BitSet> ordinalsToExclude = new HashMap<>();

    for (HollowSchema schema : schemas) {
        String typeName = schema.getName();
        ordinalsToExclude.put(typeName, findOrdinalsPopulatedWithDifferentRecords(typeName, executor));
    }

    TransitiveSetTraverser.addReferencingOutsideClosure(from, ordinalsToExclude);
    return ordinalsToExclude;
}
/**
 * Parallel execution. Modifies the mutation ConcurrentHashMap in parallel based on a Callback.
 * <p>
 * Note: This could be replaced with Java 8 parallelStream and lambdas instead of the Callback interface.
 * </p>
 *
 * @param objList  the objects to process
 * @param callback invoked once per object, potentially on different threads
 * @throws RuntimeException wrapping any failure from the parallel tasks
 */
private void executeInParallel(Collection<Object> objList, final Callback callback) {
    SimultaneousExecutor executor = new SimultaneousExecutor(threadsPerCpu);
    for (final Object obj : objList) {
        executor.execute(() -> callback.call(obj));
    }
    try {
        executor.awaitSuccessfulCompletion();
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt(); // restore interrupt status before rethrowing
        throw new RuntimeException(e);
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
private void copyUnchangedDataToIntermediateState() { SimultaneousExecutor executor = new SimultaneousExecutor(); for(final HollowSchema schema : schemas) { executor.execute(new Runnable() {
/**
 * Runs the work required before diff results can be calculated: one task eagerly
 * populates the equal-ordinal map for every type, while one task per type calculates
 * its matches.  Blocks until all tasks finish, then marks the equality mapping prepared.
 *
 * @throws RuntimeException wrapping any failure from the preparation tasks
 */
private void prepareForDiffCalculation() {
    SimultaneousExecutor executor = new SimultaneousExecutor(1 + typeDiffs.size(), "hollow-diff-prepare");
    executor.execute(() -> {
        for (HollowTypeDiff typeDiff : typeDiffs.values()) {
            equalityMapping.getEqualOrdinalMap(typeDiff.getTypeName());
        }
    });
    for (final HollowTypeDiff typeDiff : typeDiffs.values()) {
        executor.execute(typeDiff::calculateMatches);
    }
    try {
        executor.awaitSuccessfulCompletion();
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt(); // restore interrupt status before rethrowing
        throw new RuntimeException(e);
    } catch (ExecutionException e) {
        throw new RuntimeException(e);
    }
    equalityMapping.markPrepared();
}
SimultaneousExecutor executor = new SimultaneousExecutor(1); Exception pipeException = null;
/** * Transition from the "adding records" phase of a cycle to the "writing" phase of a cycle. */ public void prepareForWrite() { if(!preparedForNextCycle) // this call should be a no-op if we are already prepared for write return; addTypeNamesWithDefinedHashCodesToHeader(); try { SimultaneousExecutor executor = new SimultaneousExecutor(); for(final Map.Entry<String, HollowTypeWriteState> typeStateEntry : writeStates.entrySet()) { executor.execute(new Runnable() { @Override public void run() { typeStateEntry.getValue().prepareForWrite(); } }); } executor.awaitSuccessfulCompletion(); } catch(Exception ex) { throw new HollowWriteStateException("Failed to prepare for write", ex); } preparedForNextCycle = false; }
/** * If a state was partially constructed after the last call to prepareForNextCycle(), this call * will remove all of those objects from the state engine and reset to the state it was in at the * last prepareForNextCycle() call. * <p> * This method can be called at any time, and will leave the state engine in the same state it was in immediately * after the last call to {@link #prepareForNextCycle()} */ public void resetToLastPrepareForNextCycle() { SimultaneousExecutor executor = new SimultaneousExecutor(); for(final Map.Entry<String, HollowTypeWriteState> typeStateEntry : writeStates.entrySet()) { executor.execute(new Runnable() { public void run() { typeStateEntry.getValue().resetToLastPrepareForNextCycle(); } }); } try { executor.awaitSuccessfulCompletion(); } catch(Exception ex) { throw new HollowWriteStateException("Unable to reset to the prior version of the write state", ex); } /// recreate a new randomized tag, to avoid any potential conflict with aborted versions nextStateRandomizedTag = mintNewRandomizedStateTag(); preparedForNextCycle = true; }
/** * Transition from the "writing" phase of a cycle to the "adding records" phase of the next cycle. */ public void prepareForNextCycle() { if(preparedForNextCycle) // this call should be a no-op if we are already prepared for the next cycle return; previousStateRandomizedTag = nextStateRandomizedTag; nextStateRandomizedTag = mintNewRandomizedStateTag(); try { SimultaneousExecutor executor = new SimultaneousExecutor(); for(final Map.Entry<String, HollowTypeWriteState> typeStateEntry : writeStates.entrySet()) { executor.execute(new Runnable() { @Override public void run() { typeStateEntry.getValue().prepareForNextCycle(); } }); } executor.awaitSuccessfulCompletion(); } catch(Exception ex) { throw new HollowWriteStateException("Failed to prepare for next cycle", ex); } preparedForNextCycle = true; restoredStates = null; }
/**
 * Remaps the ordinals of all historical states in parallel.  Work is partitioned by
 * striping: worker t handles historical states t, t + numThreads, t + 2*numThreads, ...
 *
 * @param remapper                   supplies the ordinal remapping applied to each state
 * @param remappedDataAccesses       output array; filled with the remapped data access per state
 * @param remappedKeyOrdinalMappings output array; filled with the remapped key ordinal mapping per state
 * @throws RuntimeException wrapping any failure from the remapping tasks
 */
private void remapHistoricalStateOrdinals(final DiffEqualityMappingOrdinalRemapper remapper, final HollowHistoricalStateDataAccess[] remappedDataAccesses, final HollowHistoricalStateKeyOrdinalMapping[] remappedKeyOrdinalMappings) {
    SimultaneousExecutor executor = new SimultaneousExecutor();
    final int numThreads = executor.getCorePoolSize();
    for (int i = 0; i < numThreads; i++) { // use the cached pool size rather than re-querying it each iteration
        final int threadNumber = i;
        executor.execute(() -> {
            for (int t = threadNumber; t < historicalStates.size(); t += numThreads) {
                HollowHistoricalState historicalStateToRemap = historicalStates.get(t);
                remappedDataAccesses[t] = creator.copyButRemapOrdinals(historicalStateToRemap.getDataAccess(), remapper);
                remappedKeyOrdinalMappings[t] = historicalStateToRemap.getKeyOrdinalMapping().remap(remapper);
            }
        });
    }
    try {
        executor.awaitSuccessfulCompletion();
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt(); // restore interrupt status before rethrowing
        throw new RuntimeException(e);
    } catch (ExecutionException e) {
        throw new RuntimeException(e);
    }
}
/**
 * Splits the input state engine into the configured number of output shards, copying
 * each shard's records in parallel (one task per shard).
 *
 * @throws RuntimeException wrapping any exception thrown by the shard copy tasks
 */
public void split() {
    prepareForNextCycle();

    SimultaneousExecutor executor = new SimultaneousExecutor(getNumberOfShards());
    for (int i = 0; i < getNumberOfShards(); i++) {
        final int shardNumber = i;
        executor.execute(() -> {
            HollowSplitterShardCopier copier = new HollowSplitterShardCopier(inputStateEngine, outputStateEngines[shardNumber], director, shardNumber);
            copier.copy();
        });
    }

    try {
        executor.awaitSuccessfulCompletion();
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt(); // restore interrupt status before rethrowing
        throw new RuntimeException(e);
    } catch (Exception e) {
        // Narrowed from catch(Throwable): Errors (e.g. OutOfMemoryError) now propagate unwrapped.
        throw new RuntimeException(e);
    }
}
@Test
public void test() throws Exception {
    // A JSON array of two objects.  The first object embeds escaped quotes and literal
    // brace characters inside string values to exercise the chunker's string/brace
    // state tracking.
    String jsonArray = "[ { \"f1\\\"\" : \"value1\", \"f2\" : { \"f1.1\" : \"hel}}{{{{lo \\\"w{orld\\\"\" } } , { \"obj2\" : \"f2.1\" } ]";
    JsonArrayChunker chunker = new JsonArrayChunker(new StringReader(jsonArray), new SimultaneousExecutor(), 4);
    chunker.initialize();

    // Each nextChunk() call should yield exactly one top-level array element, verbatim.
    String obj1 = IOUtils.toString(chunker.nextChunk());
    String obj2 = IOUtils.toString(chunker.nextChunk());

    Assert.assertEquals("{ \"f1\\\"\" : \"value1\", \"f2\" : { \"f1.1\" : \"hel}}{{{{lo \\\"w{orld\\\"\" } }", obj1);
    Assert.assertEquals("{ \"obj2\" : \"f2.1\" }", obj2);
}
private BitSet markTypeRecordsToRemove(HollowReadStateEngine priorStateEngine, final String type) { HollowTypeReadState priorReadState = priorStateEngine.getTypeState(type); HollowSchema schema = priorReadState.getSchema(); int populatedOrdinals = priorReadState.getPopulatedOrdinals().length(); if(schema.getSchemaType() == HollowSchema.SchemaType.OBJECT) { final HollowPrimaryKeyIndex idx = new HollowPrimaryKeyIndex(priorStateEngine, ((HollowObjectSchema) schema).getPrimaryKey()); ///TODO: Should we scan instead? Can we create this once and do delta updates? ThreadSafeBitSet typeRecordsToRemove = new ThreadSafeBitSet(ThreadSafeBitSet.DEFAULT_LOG2_SEGMENT_SIZE_IN_BITS, populatedOrdinals); SimultaneousExecutor executor = new SimultaneousExecutor(threadsPerCpu); for(final Map.Entry<RecordPrimaryKey, Object> entry : mutations.entrySet()) { executor.execute(() -> { if(entry.getKey().getType().equals(type)) { int priorOrdinal = idx.getMatchingOrdinal(entry.getKey().getKey()); if(priorOrdinal != -1) { if(entry.getValue() instanceof AddIfAbsent) ((AddIfAbsent)entry.getValue()).wasFound = true; else typeRecordsToRemove.set(priorOrdinal); } } }); } try { executor.awaitSuccessfulCompletion(); } catch(Exception e) { throw new RuntimeException(e); } return typeRecordsToRemove.toBitSet(); } return new BitSet(populatedOrdinals); }
/**
 * Computes a combined checksum over all types present in both state engines, using only
 * the schema fields common to both.  Per-type checksums are computed in parallel, then
 * folded together in sorted order so the result is deterministic.
 */
public static HollowChecksum forStateEngineWithCommonSchemas(HollowReadStateEngine stateEngine, HollowReadStateEngine commonSchemasWithState) {
    final Vector<TypeChecksum> typeChecksums = new Vector<TypeChecksum>();
    SimultaneousExecutor executor = new SimultaneousExecutor();

    for (final HollowTypeReadState typeState : stateEngine.getTypeStates()) {
        HollowTypeReadState otherTypeState = commonSchemasWithState.getTypeState(typeState.getSchema().getName());
        if (otherTypeState == null)
            continue; // type absent from the other state engine -- nothing to compare

        final HollowSchema commonSchemasWith = otherTypeState.getSchema();
        executor.execute(() -> {
            // Vector is synchronized, so concurrent adds from worker threads are safe.
            HollowChecksum cksum = typeState.getChecksum(commonSchemasWith);
            typeChecksums.add(new TypeChecksum(typeState.getSchema().getName(), cksum));
        });
    }

    try {
        executor.awaitSuccessfulCompletion();
    } catch (Exception e) {
        throw new RuntimeException(e);
    }

    Collections.sort(typeChecksums);

    HollowChecksum totalChecksum = new HollowChecksum();
    for (TypeChecksum cksum : typeChecksums) {
        totalChecksum.applyInt(cksum.getChecksum());
    }
    return totalChecksum;
}
SimultaneousExecutor executor = new SimultaneousExecutor();
/**
 * awaitSuccessfulCompletion() must propagate a failure when any submitted
 * Callable completes unsuccessfully.
 */
@Test
public void failsWhenAnyCallableThrowsException() throws Exception {
    SimultaneousExecutor executor = new SimultaneousExecutor();

    executor.submit(new StatusEnsuringCallable(false));
    executor.submit(new StatusEnsuringCallable(false));

    try {
        executor.awaitSuccessfulCompletion();
        fail("Should fail");
    } catch (final Exception expected) {
        // expected: at least one task reported failure
    }
}
/**
 * awaitSuccessfulCompletion() must throw when any executed Runnable fails,
 * even if the other jobs succeed.
 */
@Test
public void failsWhenAnyRunnableThrowsException() throws Exception {
    SimultaneousExecutor executor = new SimultaneousExecutor();

    // Three passing jobs plus a single failing one -- one failure is enough.
    for (boolean shouldFail : new boolean[] { false, false, true, false }) {
        executor.execute(new Job(shouldFail));
    }

    try {
        executor.awaitSuccessfulCompletion();
        fail("Should have thrown Exception");
    } catch (Exception expected) {
        // expected: the failing job's exception surfaces here
    }
}
/**
 * Repeatedly fills a SparseBitSet from multiple threads, each worker covering a
 * disjoint slice of [0, maxValue], and verifies the final cardinality.
 */
@Test
public void testEvenNumbersMultipleThread() {
    for (int iteration = 0; iteration < 10; iteration++) {
        int maxValue = 500000;
        sparseBitSet = new HollowSparseIntegerSet.SparseBitSet(maxValue);

        SimultaneousExecutor executor = new SimultaneousExecutor();
        int workers = executor.getMaximumPoolSize();
        int sliceSize = maxValue / workers;

        for (int w = 0; w < workers; w++) {
            int from = w * sliceSize;
            // The last worker absorbs any remainder up to maxValue.
            int to = (w == workers - 1) ? maxValue : (from + sliceSize) - 1;
            executor.submit(new Task(sparseBitSet, from, to));
        }
        executor.awaitUninterruptibly();

        HollowSparseIntegerSet.SparseBitSet.compact(sparseBitSet);
        Assert.assertTrue(sparseBitSet.cardinality() == 250001);
    }
}
/**
 * The executor remains usable for a second batch of jobs after awaiting
 * completion of the current tasks.
 */
@Test
public void canBeReused() throws Exception {
    SimultaneousExecutor executor = new SimultaneousExecutor();

    for (int i = 0; i < 4; i++) {
        executor.execute(new Job(false));
    }
    executor.awaitSuccessfulCompletionOfCurrentTasks();

    // Second batch submitted to the same executor instance.
    for (int i = 0; i < 4; i++) {
        executor.execute(new Job(false));
    }
    executor.awaitSuccessfulCompletion();
}