@Override
public Iterable<Notification> getNotifications() {
    return JavaConversions.asJavaIterable( inner.notifications() );
}
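All of these examples go through scala.collection.JavaConversions, which was deprecated in Scala 2.12 and removed in 2.13. A minimal sketch of the same conversion through the Java-facing converter API, assuming Scala 2.13+ and that notifications() returns a scala.collection.Iterable (on 2.12, scala.collection.JavaConverters plays the same role):

import scala.jdk.javaapi.CollectionConverters;

@Override
public Iterable<Notification> getNotifications() {
    // CollectionConverters.asJava wraps the Scala Iterable in a java.lang.Iterable view.
    return CollectionConverters.asJava(inner.notifications());
}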
public Iterable<LogSegment> getSegments() {
    return JavaConversions.asJavaIterable(kafkaLog.logSegments());
}
/**
 * Returns the first valid offset in the entire journal.
 *
 * @return first offset
 */
public long getLogStartOffset() {
    final Iterable<LogSegment> logSegments = JavaConversions.asJavaIterable(kafkaLog.logSegments());
    final LogSegment segment = Iterables.getFirst(logSegments, null);
    if (segment == null) {
        return 0;
    }
    return segment.baseOffset();
}
public void toString( PrintWriter writer ) {
    inner.dumpToString( writer ); // legacy method - don't convert exceptions...
    for ( Notification notification : JavaConversions.asJavaIterable( inner.notifications() ) ) {
        writer.println( notification.getDescription() );
    }
}
@Override
public Iterable<U> buildUpdates(JavaPairRDD<K, M> newData) {
    return JavaConversions.asJavaIterable(scalaManager.buildUpdates(newData.rdd()));
}
@Override
public Integer call() throws Exception {
    loggerForCleaner.debug("Beginning log cleanup");
    int total = 0;
    final Timer.Context ctx = new Timer().time();
    for (final Log kafkaLog : JavaConversions.asJavaIterable(logManager.allLogs())) {
        if (kafkaLog.config().compact()) continue;
        loggerForCleaner.debug("Garbage collecting {}", kafkaLog.name());
        total += cleanupExpiredSegments(kafkaLog)
                + cleanupSegmentsToMaintainSize(kafkaLog)
                + cleanupSegmentsToRemoveCommitted(kafkaLog);
    }
    loggerForCleaner.debug("Log cleanup completed. {} files deleted in {} seconds",
            total, NANOSECONDS.toSeconds(ctx.stop()));
    return total;
}
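The timing idiom above depends on ctx.stop() returning elapsed nanoseconds. A minimal standalone sketch, assuming Timer is com.codahale.metrics.Timer (an assumption; the snippet does not show its imports):

import com.codahale.metrics.Timer;
import static java.util.concurrent.TimeUnit.NANOSECONDS;

public class TimingSketch {
    public static void main(String[] args) throws InterruptedException {
        final Timer.Context ctx = new Timer().time();
        Thread.sleep(1500);  // stand-in for the cleanup work
        // Timer.Context.stop() records the sample and returns the elapsed time in nanoseconds.
        System.out.println(NANOSECONDS.toSeconds(ctx.stop()) + " seconds");
    }
}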
private int cleanupSegmentsToRemoveCommitted(Log kafkaLog) {
    if (kafkaLog.numberOfSegments() <= 1) {
        loggerForCleaner.debug(
                "[cleanup-committed] The journal is already minimal at {} segment(s), not trying to remove more segments.",
                kafkaLog.numberOfSegments());
        return 0;
    }
    // We need to iterate through all segments to find the cutoff point for the committed offset.
    // Unfortunately, finding the largest offset contained in a segment is expensive (it involves reading the entire file),
    // so we have to get a global view.
    final long committedOffset = KafkaJournal.this.committedOffset.get();
    final HashSet<LogSegment> logSegments = Sets.newHashSet(
            JavaConversions.asJavaIterable(kafkaLog.logSegments(committedOffset, Long.MAX_VALUE))
    );
    loggerForCleaner.debug("[cleanup-committed] Keeping segments {}", logSegments);
    return kafkaLog.deleteOldSegments(new AbstractFunction1<LogSegment, Object>() {
        @Override
        public Object apply(LogSegment segment) {
            final boolean shouldDelete = !logSegments.contains(segment);
            if (shouldDelete) {
                loggerForCleaner.debug(
                        "[cleanup-committed] Should delete segment {} because it is prior to committed offset {}",
                        segment, committedOffset);
            }
            return shouldDelete;
        }
    });
}
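The AbstractFunction1&lt;LogSegment, Object&gt; above is how a Scala predicate (LogSegment => Boolean) is implemented from Java: scala.Boolean appears as Object in the generic signature seen from Java, so apply returns a boxed Boolean. A standalone sketch of the same idiom with a hypothetical String predicate:

import scala.runtime.AbstractFunction1;

public class ScalaPredicateSketch {
    // Implementing scala.Function1 from Java via the AbstractFunction1 runtime helper;
    // the boolean result is auto-boxed to java.lang.Boolean to satisfy the Object return type.
    static final AbstractFunction1<String, Object> IS_BLANK = new AbstractFunction1<String, Object>() {
        @Override
        public Object apply(String s) {
            return s.trim().isEmpty();
        }
    };

    public static void main(String[] args) {
        System.out.println(IS_BLANK.apply("  "));  // true
        System.out.println(IS_BLANK.apply("x"));   // false
    }
}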
@Inject
public Langs(play.api.i18n.Langs langs) {
    this.langs = langs;
    List<Lang> availables = new ArrayList<Lang>();
    for (play.api.i18n.Lang lang : JavaConversions.asJavaIterable(langs.availables())) {
        availables.add(new Lang(lang));
    }
    this.availables = Collections.unmodifiableList(availables);
}
public static void refresh() {
    if (null == CONTEXT)
        throw new IllegalStateException("The Spark context has not been created.");
    if (CONTEXT.isStopped())
        recreateStopped();
    final Set<String> keepNames = new HashSet<>();
    for (final RDD<?> rdd : JavaConversions.asJavaIterable(CONTEXT.persistentRdds().values())) {
        if (null != rdd.name()) {
            keepNames.add(rdd.name());
            NAME_TO_RDD.put(rdd.name(), rdd);
        }
    }
    // remove all stale names in the NAME_TO_RDD map
    NAME_TO_RDD.keySet().stream()
            .filter(key -> !keepNames.contains(key))
            .collect(Collectors.toList())
            .forEach(NAME_TO_RDD::remove);
}
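The collect-then-remove step at the end avoids mutating the map while iterating over its key set. Assuming NAME_TO_RDD is a plain java.util.Map (for example a ConcurrentHashMap), removeIf on the key set is an equivalent one-liner, since removal through keySet() writes through to the map:

// Equivalent stale-name cleanup in one pass.
NAME_TO_RDD.keySet().removeIf(key -> !keepNames.contains(key));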
@Override
protected Iterator<Object> getArrayElements(Object array) {
    if (array instanceof scala.collection.Iterable) {
        return JavaConversions.asJavaIterable((scala.collection.Iterable) array).iterator();
    }
    return (Iterator<Object>) super.getArrayElements(array);
}
/**
 * There is always only one string in zkHost.
 *
 * @param zkHost the ZooKeeper connect string(s); only the first entry is used
 * @return the connection strings of all brokers registered in the cluster
 */
public static Set<String> getBrokers(Set<String> zkHost) {
    ZkClient zkclient = new ZkClient(zkHost.iterator().next(), 30000, 30000, ZKStringSerializer$.MODULE$);
    Set<String> brokerHosts = new HashSet<String>();
    for (Broker b : JavaConversions.asJavaIterable(ZkUtils.getAllBrokersInCluster(zkclient))) {
        brokerHosts.add(b.connectionString());
    }
    zkclient.close();
    return brokerHosts;
}
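A hypothetical call site for the method above, assuming a single-entry set holding a ZooKeeper connect string (the host and port below are placeholders):

// Hypothetical usage sketch.
Set<String> zkHost = Collections.singleton("zk1.example.com:2181");
for (String broker : getBrokers(zkHost)) {
    System.out.println(broker);  // broker connection strings
}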
@Override
public String persistenceId() {
    for (String role : JavaConversions.asJavaIterable(Cluster.get(getContext().system()).selfRoles())) {
        if (role.startsWith("backend-")) {
            return role + "-master";
        }
    }
    return "master";
}