private Map<TopicPartition, OffsetAndMetadata> getZookeeperOffsets(
        KafkaZkClient zkClient, KafkaConsumer<String, byte[]> consumer) {
    Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
    List<PartitionInfo> partitions = consumer.partitionsFor(topicStr);
    for (PartitionInfo partition : partitions) {
        TopicPartition topicPartition = new TopicPartition(topicStr, partition.partition());
        Option<Object> optionOffset = zkClient.getConsumerOffset(groupId, topicPartition);
        if (optionOffset.nonEmpty()) {
            Long offset = (Long) optionOffset.get();
            OffsetAndMetadata offsetAndMetadata = new OffsetAndMetadata(offset);
            offsets.put(topicPartition, offsetAndMetadata);
        }
    }
    return offsets;
}
private Map<TopicPartition, OffsetAndMetadata> getZookeeperOffsets(
        KafkaZkClient zkClient, KafkaConsumer<String, byte[]> consumer, String topicStr) {
    Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
    List<PartitionInfo> partitions = consumer.partitionsFor(topicStr);
    for (PartitionInfo partition : partitions) {
        TopicPartition topicPartition = new TopicPartition(topicStr, partition.partition());
        Option<Object> optionOffset = zkClient.getConsumerOffset(groupId, topicPartition);
        if (optionOffset.nonEmpty()) {
            Long offset = (Long) optionOffset.get();
            OffsetAndMetadata offsetAndMetadata = new OffsetAndMetadata(offset);
            offsets.put(topicPartition, offsetAndMetadata);
        }
    }
    return offsets;
}
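// A hedged usage sketch of what a caller might do with the map returned above when migrating
// consumer offsets from ZooKeeper to Kafka; the surrounding migration flow is an assumption,
// not part of the snippet. KafkaConsumer.commitSync(Map<TopicPartition, OffsetAndMetadata>)
// is the standard client API for committing explicit offsets.
Map<TopicPartition, OffsetAndMetadata> zkOffsets = getZookeeperOffsets(zkClient, consumer, topicStr);
if (!zkOffsets.isEmpty()) {
    consumer.commitSync(zkOffsets); // persist the migrated positions to Kafka-managed offsets
}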
public static ExecutorService createThreadPool(final SparkSession spark,
        final ParquetStoreProperties storeProperties) {
    final int numberOfThreads;
    final Option<String> sparkDriverCores = spark.conf().getOption("spark.driver.cores");
    if (sparkDriverCores.nonEmpty()) {
        numberOfThreads = Integer.parseInt(sparkDriverCores.get());
    } else {
        numberOfThreads = storeProperties.getThreadsAvailable();
    }
    LOGGER.debug("Created thread pool of size {} to aggregate and sort data", numberOfThreads);
    return Executors.newFixedThreadPool(numberOfThreads);
}
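// A hedged usage sketch for the pool created above: sortAndAggregate and partition are
// hypothetical names used only to illustrate submitting work and releasing the pool;
// they are not defined in the snippet.
ExecutorService pool = createThreadPool(spark, storeProperties);
try {
    pool.submit(() -> sortAndAggregate(partition)); // hypothetical aggregate-and-sort job
} finally {
    pool.shutdown(); // stop accepting new tasks; queued work still completes
}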
/**
 * Elements to display when outputting, e.g., to a DOT file.
 *
 * @return Iterable of displayable elements
 */
@Override
public scala.collection.Iterable<String> displayableElements() {
    LinkedList<String> elements = new LinkedList<>();
    if (geographicalLocation.nonEmpty()) {
        elements.add("Lat=" + geographicalLocation.get()._1());
        elements.add("Long=" + geographicalLocation.get()._2());
    }
    if (isSecret.nonEmpty() && isSecret.get()) {
        elements.add("IsSecret=True");
    }
    // It is also possible to build a Scala collection directly in Java code, but building in Java and
    // converting at the very end is probably easier for Java developers than figuring out the Scala
    // collection classes and dealing with the awkward Java-interop syntax: e.g., the Scala code
    // "myArray += valToInsert" would become "myArray.$plus$eq(valToInsert);" in Java, which we'd rather avoid.
    return linkedListToScalaIterable(elements);
}
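// The linkedListToScalaIterable helper above is not shown in the snippet. A minimal sketch of how
// it could be written, assuming scala.collection.JavaConverters (Scala 2.12-era API) is on the
// classpath; the helper's name and placement here are the snippet's, the body is an assumption.
private static scala.collection.Iterable<String> linkedListToScalaIterable(LinkedList<String> elements) {
    // asScalaBufferConverter wraps the Java list; asScala() exposes it as a Scala Buffer,
    // which conforms to scala.collection.Iterable
    return scala.collection.JavaConverters.asScalaBufferConverter(elements).asScala();
}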
public final <T> Future<T> transactional(final Supplier<Future<T>> supplier) {
    if (currentTransaction.apply().nonEmpty()) {
        return supplier.get();
    } else {
        return currentTransaction.let(transactional(), () -> supplier.get());
    }
}
void setResponseClass(Class<?> responseClass) {
    if (responseClass == null || responseClass == Void.class) {
        return;
    }
    Option<Model> model = ModelConverters.read(responseClass, ModelConverters.typeMap());
    if (model.nonEmpty()) {
        this.responseClass = model.get().name();
    } else {
        this.responseClass = responseClass.getSimpleName();
    }
}
@Override
public boolean isParamAllowed(Parameter parameter, Operation operation, ApiDescription api,
        Map<String, List<String>> params, Map<String, String> cookies, Map<String, List<String>> headers) {
    if (parameter.paramAccess().nonEmpty()) {
        return !parameter.paramAccess().get().equals("internal");
    }
    return super.isParamAllowed(parameter, operation, api, params, cookies, headers);
}
@Override
public void preRestart(Throwable reason, Option<Object> message) throws Exception {
    logger.info("Restarting PageParsingActor and resending message '{}'", message);
    if (message.nonEmpty()) {
        getSelf().forward(message.get(), getContext());
    }
    super.preRestart(reason, message);
}
public scala.collection.immutable.Set<Acl> getAcls(final Resource resource) {
    final Option<scala.collection.immutable.Set<Acl>> acls = getAcls().get(resource);
    if (acls.nonEmpty()) {
        return acls.get();
    }
    return new scala.collection.immutable.HashSet<Acl>();
}
public static Optional<File> compile(File source, File sourceDirectory, File generatedDirectory,
        String formatterType, Collection<String> additionalImports, List<String> constructorAnnotations,
        Codec codec, boolean inclusiveDot) {
    Seq<String> scalaAdditionalImports =
            JavaConverters$.MODULE$.asScalaBufferConverter(new ArrayList<String>(additionalImports)).asScala();
    Seq<String> scalaConstructorAnnotations =
            JavaConverters$.MODULE$.asScalaBufferConverter(constructorAnnotations).asScala();
    Option<File> option = play.twirl.compiler.TwirlCompiler.compile(source, sourceDirectory,
            generatedDirectory, formatterType, scalaAdditionalImports, scalaConstructorAnnotations,
            codec, inclusiveDot);
    return Optional.ofNullable(option.nonEmpty() ? option.get() : null);
}
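// A hedged usage sketch for the wrapper above: the paths and template name are hypothetical and
// only illustrate the call shape. "play.twirl.api.HtmlFormat" is the formatter type Twirl uses
// for HTML templates; the Codec is obtained via the companion object, mirroring the
// JavaConverters$.MODULE$ style above.
Optional<File> generated = compile(
        new File("app/views/index.scala.html"),   // hypothetical template source
        new File("app/views"),                    // hypothetical source directory
        new File("target/twirl"),                 // hypothetical output directory
        "play.twirl.api.HtmlFormat",
        Collections.emptyList(),                  // no additional imports
        Collections.emptyList(),                  // no constructor annotations
        scala.io.Codec$.MODULE$.UTF8(),
        false);                                   // inclusiveDot disabled
generated.ifPresent(file -> System.out.println("Generated " + file.getAbsolutePath()));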
public void setResponseContainer(final Class<?> type) {
    Option<Model> model = ModelConverters.read(type, ModelConverters.typeMap());
    if (model.nonEmpty()) {
        setResponseContainer(model.get().name());
    } else {
        setResponseContainer(type.getSimpleName());
    }
}
Option<ApiListing> apiListingOption = reader.read(docRoot, controllerClass, swaggerConfig);
ApiListing apiListing = null;
if (apiListingOption.nonEmpty()) {
    apiListing = apiListingOption.get();
}
@Override
protected Seq<KoreanToken> perform(Seq<KoreanToken> tokens) {
    KoreanToken[] performed = new KoreanToken[tokens.length()];
    int i = 0;
    Iterator<KoreanToken> tokenIterator = tokens.iterator();
    while (tokenIterator.hasNext()) {
        KoreanToken token = tokenIterator.next();
        performed[i++] = token.stem().nonEmpty() ? stem(token) : token;
    }
    return JavaConverters.asScalaBuffer(Arrays.asList(performed)).toSeq();
}
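// The stem(token) helper above is not shown. A minimal sketch, assuming KoreanToken is the
// open-korean-text case class with constructor (text, pos, offset, length, stem, unknown);
// that field order is an assumption, so check the KoreanToken definition in your version.
private KoreanToken stem(KoreanToken token) {
    // Replace the surface text with the stem and clear the stem field on the new token
    return new KoreanToken(token.stem().get(), token.pos(), token.offset(), token.length(),
            scala.Option.empty(), token.unknown());
}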
/**
 * Transforms the tokenization output to a {@code List<KoreanTokenJava>}.
 *
 * @param tokens Korean tokens (output of {@code tokenize(CharSequence text)}).
 * @param keepSpace whether to keep space tokens in the output.
 * @return List of KoreanTokenJava.
 */
public static List<KoreanTokenJava> tokensToJavaKoreanTokenList(Seq<KoreanToken> tokens, boolean keepSpace) {
    Iterator<KoreanToken> tokenized = tokens.iterator();
    List<KoreanTokenJava> output = new LinkedList<>();
    while (tokenized.hasNext()) {
        KoreanToken token = tokenized.next();
        String stem = "";
        if (token.stem().nonEmpty()) {
            stem += token.stem().get();
        }
        if (keepSpace || token.pos() != KoreanPos.Space()) {
            output.add(new KoreanTokenJava(
                    token.text(),
                    KoreanPosJava.valueOf(token.pos().toString()),
                    token.offset(),
                    token.length(),
                    token.unknown(),
                    stem
            ));
        }
    }
    return output;
}
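// A hedged usage sketch: this assumes the method lives alongside a tokenize(CharSequence)
// wrapper, as in open-korean-text's OpenKoreanTextProcessorJava; the input sentence is
// illustrative, and the printed format depends on KoreanTokenJava.toString in your version.
Seq<KoreanToken> tokens = OpenKoreanTextProcessorJava.tokenize("한국어를 처리하는 예시입니다");
List<KoreanTokenJava> javaTokens = tokensToJavaKoreanTokenList(tokens, false); // drop space tokens
for (KoreanTokenJava token : javaTokens) {
    System.out.println(token); // e.g., text, POS tag, offset, and length per token
}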
@Test
public void testDoLoadAsyncWithAkkaSerializedSnapshot() throws IOException {
    SnapshotSerializer snapshotSerializer = new SnapshotSerializer((ExtendedActorSystem) system);

    String name = toSnapshotName(PERSISTENCE_ID, 1, 1000);
    try (FileOutputStream fos = new FileOutputStream(new File(SNAPSHOT_DIR, name))) {
        fos.write(snapshotSerializer.toBinary(new Snapshot("one")));
    }

    SnapshotMetadata metadata = new SnapshotMetadata(PERSISTENCE_ID, 1, 1000);

    TestKit probe = new TestKit(system);
    snapshotStore.tell(new LoadSnapshot(PERSISTENCE_ID, SnapshotSelectionCriteria.latest(),
            Long.MAX_VALUE), probe.getRef());

    LoadSnapshotResult result = probe.expectMsgClass(LoadSnapshotResult.class);
    Option<SelectedSnapshot> possibleSnapshot = result.snapshot();
    assertEquals("SelectedSnapshot present", TRUE, possibleSnapshot.nonEmpty());
    assertEquals("SelectedSnapshot metadata", metadata, possibleSnapshot.get().metadata());
    assertEquals("SelectedSnapshot snapshot", "one", possibleSnapshot.get().snapshot());
}
@Test
public void testDoLoadAsyncWithNoSnapshots() {
    TestKit probe = new TestKit(system);
    snapshotStore.tell(new LoadSnapshot(PERSISTENCE_ID, SnapshotSelectionCriteria.latest(),
            Long.MAX_VALUE), probe.getRef());

    LoadSnapshotResult result = probe.expectMsgClass(LoadSnapshotResult.class);
    Option<SelectedSnapshot> possibleSnapshot = result.snapshot();
    assertEquals("SelectedSnapshot present", FALSE, possibleSnapshot.nonEmpty());
}
@Test
public void testDoLoadAsyncWithRetry() throws IOException {
    createSnapshotFile(PERSISTENCE_ID, "one", 0, 1000);
    createSnapshotFile(PERSISTENCE_ID, null, 1, 2000);

    SnapshotMetadata metadata = new SnapshotMetadata(PERSISTENCE_ID, 0, 1000);

    TestKit probe = new TestKit(system);
    snapshotStore.tell(new LoadSnapshot(PERSISTENCE_ID, SnapshotSelectionCriteria.latest(),
            Long.MAX_VALUE), probe.getRef());

    LoadSnapshotResult result = probe.expectMsgClass(LoadSnapshotResult.class);
    Option<SelectedSnapshot> possibleSnapshot = result.snapshot();
    assertEquals("SelectedSnapshot present", TRUE, possibleSnapshot.nonEmpty());
    assertEquals("SelectedSnapshot metadata", metadata, possibleSnapshot.get().metadata());
    assertEquals("SelectedSnapshot snapshot", "one", possibleSnapshot.get().snapshot());
}
@Test
public void testDoLoadAsync() throws IOException {
    createSnapshotFile(PERSISTENCE_ID, "one", 0, 1000);
    createSnapshotFile(PERSISTENCE_ID, "two", 1, 2000);
    createSnapshotFile(PERSISTENCE_ID, "three", 1, 3000);

    createSnapshotFile(PREFIX_BASED_SHARD_PERSISTENCE_ID, "foo", 0, 1000);
    createSnapshotFile(PREFIX_BASED_SHARD_PERSISTENCE_ID, "bar", 1, 2000);
    createSnapshotFile(PREFIX_BASED_SHARD_PERSISTENCE_ID, "foobar", 1, 3000);

    createSnapshotFile("member-1-shard-default-oper", "foo", 0, 1000);
    createSnapshotFile("member-1-shard-toaster-oper", "foo", 0, 1000);

    new File(SNAPSHOT_DIR, "other").createNewFile();
    new File(SNAPSHOT_DIR, "other-1485349217290").createNewFile();

    SnapshotMetadata metadata3 = new SnapshotMetadata(PERSISTENCE_ID, 1, 3000);

    TestKit probe = new TestKit(system);
    snapshotStore.tell(new LoadSnapshot(PERSISTENCE_ID, SnapshotSelectionCriteria.latest(),
            Long.MAX_VALUE), probe.getRef());

    LoadSnapshotResult result = probe.expectMsgClass(LoadSnapshotResult.class);
    Option<SelectedSnapshot> possibleSnapshot = result.snapshot();
    assertEquals("SelectedSnapshot present", TRUE, possibleSnapshot.nonEmpty());
    assertEquals("SelectedSnapshot metadata", metadata3, possibleSnapshot.get().metadata());
    assertEquals("SelectedSnapshot snapshot", "three", possibleSnapshot.get().snapshot());

    snapshotStore.tell(new LoadSnapshot(PREFIX_BASED_SHARD_PERSISTENCE_ID,
            SnapshotSelectionCriteria.latest(), Long.MAX_VALUE), probe.getRef());

    result = probe.expectMsgClass(LoadSnapshotResult.class);
    possibleSnapshot = result.snapshot();

    SnapshotMetadata prefixBasedShardMetadata3 =
            new SnapshotMetadata(PREFIX_BASED_SHARD_PERSISTENCE_ID, 1, 3000);
    assertEquals("SelectedSnapshot present", TRUE, possibleSnapshot.nonEmpty());
    assertEquals("SelectedSnapshot metadata", prefixBasedShardMetadata3, possibleSnapshot.get().metadata());
    assertEquals("SelectedSnapshot snapshot", "foobar", possibleSnapshot.get().snapshot());
}