private static Seq<String> gaugeName(String name) {
    return scala.collection.JavaConversions.asScalaBuffer(Arrays.asList(name)).toList();
}
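JavaConversions performs these conversions through implicit views and has been deprecated since Scala 2.12. A minimal sketch of the same helper written against the explicit JavaConverters API instead (assuming imports of java.util.Arrays, scala.collection.Seq and scala.collection.JavaConverters; the method name is kept from the snippet above):

// Equivalent helper using the non-deprecated JavaConverters API.
private static Seq<String> gaugeName(String name) {
    return JavaConverters.asScalaBufferConverter(Arrays.asList(name)).asScala().toList();
}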
@Override
public Object deserialize(JsonNode n, ObjectMapper mapper) {
    List<Object> o = new ArrayList<Object>();
    try {
        logger.debug("using custom array deserializer");
        for (int i = 0; i < n.size(); i++) {
            o.add(parent.deserialize(n.get(i), mapper));
        }
    } catch (Exception e) {
        logger.error("exception deserializing Collection {}", e.getMessage());
        o = null;
    }
    if (o != null) {
        return scala.collection.JavaConversions.asScalaBuffer(o).toList();
    }
    return null;
}
private long flushMessages(List<Message> messages, long payloadSize) {
    if (messages.isEmpty()) {
        LOG.debug("No messages to flush, not trying to write an empty message set.");
        return -1L;
    }

    final ByteBufferMessageSet messageSet = new ByteBufferMessageSet(JavaConversions.asScalaBuffer(messages).toSeq());

    if (LOG.isDebugEnabled()) {
        LOG.debug("Trying to write ByteBufferMessageSet with size of {} bytes to journal", messageSet.sizeInBytes());
    }

    final LogAppendInfo appendInfo = kafkaLog.append(messageSet, true);
    long lastWriteOffset = appendInfo.lastOffset();

    if (LOG.isDebugEnabled()) {
        LOG.debug("Wrote {} messages to journal: {} bytes (payload {} bytes), log position {} to {}",
                messages.size(), messageSet.sizeInBytes(), payloadSize, appendInfo.firstOffset(), lastWriteOffset);
    }

    writtenMessages.mark(messages.size());
    return lastWriteOffset;
}
@SuppressWarnings("unchecked") @Override public Object deserialize(JsonNode n, ObjectMapper mapper) { org.apache.commons.lang3.tuple.Pair<String, Object> deserializeObject = TableDisplayDeSerializer.getDeserializeObject(parent, n, mapper); String subtype = deserializeObject.getLeft(); if (subtype != null && subtype.equals(TableDisplay.DICTIONARY_SUBTYPE)) { return JavaConverters.mapAsScalaMapConverter((Map<String, Object>) deserializeObject.getRight()).asScala().toMap(Predef.<Tuple2<String, Object>>conforms()); } else if (subtype != null && subtype.equals(TableDisplay.LIST_OF_MAPS_SUBTYPE)) { List<Map<String, Object>> rows = (List<Map<String, Object>>) deserializeObject.getRight(); List<Object> oo = new ArrayList<Object>(); for (Map<String, Object> row : rows) { oo.add(JavaConverters.mapAsScalaMapConverter(row).asScala().toMap(Predef.<Tuple2<String, Object>>conforms())); } return scala.collection.JavaConversions.collectionAsScalaIterable(oo); } else if (subtype != null && subtype.equals(TableDisplay.MATRIX_SUBTYPE)) { List<List<?>> matrix = (List<List<?>>) deserializeObject.getRight(); ArrayList<Object> ll = new ArrayList<Object>(); for (List<?> ob : matrix) { ll.add(scala.collection.JavaConversions.asScalaBuffer(ob).toList()); } return scala.collection.JavaConversions.asScalaBuffer(ll).toList(); } return deserializeObject.getRight(); }
public void start() {
    Duration[] defaultLatchIntervals = {Duration.apply(1, TimeUnit.MINUTES)};
    @SuppressWarnings("deprecation")
    AdminServiceFactory adminServiceFactory = new AdminServiceFactory(
            this.mPort,
            20,
            List$.MODULE$.<StatsFactory>empty(),
            Option.<String>empty(),
            List$.MODULE$.<Regex>empty(),
            Map$.MODULE$.<String, CustomHttpHandler>empty(),
            JavaConversions.asScalaBuffer(Arrays.asList(defaultLatchIntervals)).toList());
    RuntimeEnvironment runtimeEnvironment = new RuntimeEnvironment(this);
    adminServiceFactory.apply(runtimeEnvironment);
    try {
        Properties properties = new Properties();
        properties.load(this.getClass().getResource("build.properties").openStream());
        String buildRevision = properties.getProperty("build_revision", "unknown");
        LOG.info("build.properties build_revision: {}", buildRevision);
        StatsUtil.setLabel("secor.build_revision", buildRevision);
    } catch (Throwable t) {
        LOG.error("Failed to load properties from build.properties", t);
    }
}
public void pushToStream(String message) {
    int streamNo = (int) this.nextStream.incrementAndGet() % this.queues.size();
    AtomicLong offset = this.offsets.get(streamNo);
    BlockingQueue<FetchedDataChunk> queue = this.queues.get(streamNo);
    AtomicLong thisOffset = new AtomicLong(offset.incrementAndGet());
    List<Message> seq = Lists.newArrayList();
    seq.add(new Message(message.getBytes(Charsets.UTF_8)));
    ByteBufferMessageSet messageSet = new ByteBufferMessageSet(NoCompressionCodec$.MODULE$, offset,
            JavaConversions.asScalaBuffer(seq));
    FetchedDataChunk chunk = new FetchedDataChunk(messageSet,
            new PartitionTopicInfo("topic", streamNo, queue, thisOffset, thisOffset, new AtomicInteger(1), "clientId"),
            thisOffset.get());
    queue.add(chunk);
}
scala.collection.JavaConversions.asScalaBuffer(Arrays.asList("num_streams")).toList(); cacheStatReceiver.provideGauge(numCachedStreamsGaugeName, new Function0<Object>() { @Override scala.collection.JavaConversions.asScalaBuffer(Arrays.asList("num_hosts")).toList(); cacheStatReceiver.provideGauge(numCachedHostsGaugeName, new Function0<Object>() { @Override
/**
 * Read the number of partitions for the given topic from the specified ZooKeeper.
 *
 * @param zkUrl zookeeper connection url
 * @param topic topic name
 * @return the number of partitions of the given topic
 */
public static int getPartitionNumForTopic(String zkUrl, String topic) {
    ZkUtils zkUtils = ZkUtils.apply(zkUrl, ZK_SESSION_TIMEOUT_MS, ZK_CONNECTION_TIMEOUT_MS,
            JaasUtils.isZkSecurityEnabled());
    try {
        Seq<String> topics = scala.collection.JavaConversions.asScalaBuffer(Arrays.asList(topic));
        return zkUtils.getPartitionsForTopics(topics).apply(topic).size();
    } catch (NoSuchElementException e) {
        return 0;
    } finally {
        zkUtils.close();
    }
}
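A hypothetical call site for the helper above; the ZooKeeper address and topic name are illustrative. It also shows the behaviour of the NoSuchElementException branch, which maps an unknown topic to a partition count of 0:

// Illustrative usage only; "localhost:2181" and "page-views" are made-up values.
int partitions = getPartitionNumForTopic("localhost:2181", "page-views");
if (partitions == 0) {
    // the topic does not exist: getPartitionsForTopics had no entry for it
}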
public void provisionTopic(String topic) {
    if (_topicConsumerMap.containsKey(topic)) {
        // nothing to do
        return;
    } else {
        // provision topic
        AdminUtils.createTopic(_kafkaServerSuite.getZkClient(), topic, 1, 1, new Properties());
        List<KafkaServer> servers = new ArrayList<>();
        servers.add(_kafkaServerSuite.getKafkaServer());
        kafka.utils.TestUtils.waitUntilMetadataIsPropagated(
                scala.collection.JavaConversions.asScalaBuffer(servers), topic, 0, 5000);
        KafkaConsumerSuite consumerSuite = new KafkaConsumerSuite(_kafkaServerSuite.getZkConnectString(), topic);
        _topicConsumerMap.put(topic, consumerSuite);
    }
}
public KafkaTestBase(String topic) throws InterruptedException, RuntimeException {
    startServer();
    this.topic = topic;
    AdminUtils.createTopic(zkClient, topic, 1, 1, new Properties());
    List<KafkaServer> servers = new ArrayList<>();
    servers.add(kafkaServer);
    TestUtils.waitUntilMetadataIsPropagated(scala.collection.JavaConversions.asScalaBuffer(servers), topic, 0, 5000);

    Properties consumeProps = new Properties();
    consumeProps.put("zookeeper.connect", zkConnect);
    consumeProps.put("group.id", "testConsumer");
    consumeProps.put("zookeeper.session.timeout.ms", "10000");
    consumeProps.put("zookeeper.sync.time.ms", "10000");
    consumeProps.put("auto.commit.interval.ms", "10000");
    consumeProps.put("consumer.timeout.ms", "10000");

    consumer = Consumer.createJavaConsumerConnector(new ConsumerConfig(consumeProps));
    Map<String, Integer> topicCountMap = new HashMap<>();
    topicCountMap.put(this.topic, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
    List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(this.topic);
    stream = streams.get(0);
    iterator = stream.iterator();
}
public void provisionTopic(String topic) {
    if (_topicConsumerMap.containsKey(topic)) {
        // nothing to do
        return;
    } else {
        // provision topic
        AdminUtils.createTopic(ZkUtils.apply(_kafkaServerSuite.getZkClient(), false), topic, 1, 1, new Properties());
        List<KafkaServer> servers = new ArrayList<>();
        servers.add(_kafkaServerSuite.getKafkaServer());
        kafka.utils.TestUtils.waitUntilMetadataIsPropagated(
                scala.collection.JavaConversions.asScalaBuffer(servers), topic, 0, 5000);
        KafkaConsumerSuite consumerSuite = new KafkaConsumerSuite(_kafkaServerSuite.getZkConnectString(), topic);
        _topicConsumerMap.put(topic, consumerSuite);
    }
}
@Override
public TopicMetadataResponse send(TopicMetadataRequest request) {
    java.util.List<String> topics = request.topics();
    TopicMetadata[] topicMetadataArray = new TopicMetadata[topics.size()];

    for (int i = 0; i < topicMetadataArray.length; i++) {
        String topic = topics.get(i);
        if (!topic.equals(topicName)) {
            topicMetadataArray[i] = new TopicMetadata(topic, null, Errors.UNKNOWN_TOPIC_OR_PARTITION.code());
        } else {
            PartitionMetadata[] partitionMetadataArray = new PartitionMetadata[partitionCount];
            for (int j = 0; j < partitionCount; j++) {
                java.util.List<BrokerEndPoint> emptyJavaList = Collections.emptyList();
                List<BrokerEndPoint> emptyScalaList = JavaConversions.asScalaBuffer(emptyJavaList).toList();
                partitionMetadataArray[j] = new PartitionMetadata(j,
                        Some.apply(brokerArray[partitionLeaderIndices[j]]), emptyScalaList, emptyScalaList,
                        Errors.NONE.code());
            }
            Seq<PartitionMetadata> partitionsMetadata = List.fromArray(partitionMetadataArray);
            topicMetadataArray[i] = new TopicMetadata(topic, partitionsMetadata, Errors.NONE.code());
        }
    }

    Seq<BrokerEndPoint> brokers = List.fromArray(brokerArray);
    Seq<TopicMetadata> topicsMetadata = List.fromArray(topicMetadataArray);
    return new TopicMetadataResponse(new kafka.api.TopicMetadataResponse(brokers, topicsMetadata, -1));
}
protected static scala.collection.immutable.Map<String, Seq<String>> mapListToScala(Map<String, List<String>> data) {
    Map<String, Seq<String>> seqs = new HashMap<>();
    for (String key : data.keySet()) {
        seqs.put(key, JavaConversions.asScalaBuffer(data.get(key)));
    }
    return asScala(seqs);
}
protected Headers buildHeaders() {
    List<Tuple2<String, String>> list = new ArrayList<>();
    for (Map.Entry<String, String[]> entry : headers().entrySet()) {
        for (String value : entry.getValue()) {
            list.add(new Tuple2<>(entry.getKey(), value));
        }
    }
    return new Headers(JavaConversions.asScalaBuffer(list));
}
/**
 * Sets a form url encoded body on this request.
 */
public RequestBuilder bodyForm(Map<String, String> data) {
    Map<String, Seq<String>> seqs = new HashMap<>();
    for (Entry<String, String> entry : data.entrySet()) {
        seqs.put(entry.getKey(), JavaConversions.asScalaBuffer(Arrays.asList(entry.getValue())));
    }
    scala.collection.immutable.Map<String, Seq<String>> map = asScala(seqs);
    return body(new AnyContentAsFormUrlEncoded(map), "application/x-www-form-urlencoded");
}
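A hypothetical call site for the builder method above; the "/login" route and the form fields are made-up values, while method() and uri() are the usual RequestBuilder setters:

// Illustrative usage only.
Map<String, String> form = new HashMap<>();
form.put("username", "alice");
form.put("password", "secret");
RequestBuilder request = new RequestBuilder()
        .method("POST")
        .uri("/login")
        .bodyForm(form);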
/**
 * Sets the flash data for the request, encoded as a cookie.
 * @param data a key value mapping of flash data
 * @return the builder instance
 */
public RequestBuilder flash(Map<String, String> data) {
    play.api.mvc.Flash flash = new play.api.mvc.Flash(asScala(data));
    cookies(JavaConversions.asScalaBuffer(Arrays.asList(play.api.mvc.Flash$.MODULE$.encodeAsCookie(flash))));
    return this;
}
/**
 * Sets all parameters for the session.
 * @param data a key value mapping of the session data
 * @return the builder instance
 */
public RequestBuilder session(Map<String, String> data) {
    play.api.mvc.Session session = new play.api.mvc.Session(asScala(data));
    cookies(JavaConversions.asScalaBuffer(Arrays.asList(play.api.mvc.Session$.MODULE$.encodeAsCookie(session))));
    return this;
}
@Override
public boolean persist(List listEntity, EntityMetadata m, SparkClient sparkClient) {
    Seq s = scala.collection.JavaConversions.asScalaBuffer(listEntity).toList();
    ClassTag tag = scala.reflect.ClassTag$.MODULE$.apply(m.getEntityClazz());
    JavaRDD personRDD = sparkClient.sparkContext.parallelize(s, 1, tag).toJavaRDD();
    DataFrame df = sparkClient.sqlContext.createDataFrame(personRDD, m.getEntityClazz());
    String outputFilePath = getOutputFilePath(sparkClient.properties);
    String ext = (String) sparkClient.properties.get("format");
    FileType fileType = FileFormatConstants.extension.get(ext);
    switch (fileType) {
        case CSV:
            return writeDataInCsvFile(df, outputFilePath);
        case JSON:
            return writeDataInJsonFile(df, outputFilePath);
        default:
            throw new UnsupportedOperationException("Files of type " + ext + " are not yet supported.");
    }
}
@Override
public boolean persist(List listEntity, EntityMetadata m, SparkClient sparkClient) {
    try {
        Seq s = scala.collection.JavaConversions.asScalaBuffer(listEntity).toList();
        ClassTag tag = scala.reflect.ClassTag$.MODULE$.apply(m.getEntityClazz());
        JavaRDD personRDD = sparkClient.sparkContext.parallelize(s, 1, tag).toJavaRDD();
        DataFrame df = sparkClient.sqlContext.createDataFrame(personRDD, m.getEntityClazz());
        sparkClient.sqlContext.sql("use " + m.getSchema());
        if (logger.isDebugEnabled()) {
            logger.debug("Below are the tables registered with the hive context:");
            sparkClient.sqlContext.sql("show tables").show();
        }
        df.write().insertInto(m.getTableName());
        return true;
    } catch (Exception e) {
        throw new KunderaException("Cannot persist object(s)", e);
    }
}
@Override
public boolean persist(List listEntity, EntityMetadata m, SparkClient sparkClient) {
    try {
        Seq s = scala.collection.JavaConversions.asScalaBuffer(listEntity).toList();
        ClassTag tag = scala.reflect.ClassTag$.MODULE$.apply(m.getEntityClazz());
        JavaRDD personRDD = sparkClient.sparkContext.parallelize(s, 1, tag).toJavaRDD();
        CassandraJavaUtil.javaFunctions(personRDD)
                .writerBuilder(m.getSchema(), m.getTableName(), CassandraJavaUtil.mapToRow(m.getEntityClazz()))
                .saveToCassandra();
        return true;
    } catch (Exception e) {
        throw new KunderaException("Cannot persist object(s)", e);
    }
}
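The same write path can be sketched outside Kundera with a concretely typed RDD. The keyspace "test_ks", the table "person", the Person POJO, and the javaSparkContext and people variables are assumptions; javaFunctions, writerBuilder, mapToRow and saveToCassandra are the spark-cassandra-connector calls already used above:

// Minimal sketch, not Kundera's implementation; names are made up for illustration.
JavaRDD<Person> rdd = javaSparkContext.parallelize(people);
CassandraJavaUtil.javaFunctions(rdd)
        .writerBuilder("test_ks", "person", CassandraJavaUtil.mapToRow(Person.class))
        .saveToCassandra();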