Refine search
public CategoryDataset build() { DefaultCategoryDataset ds = new DefaultCategoryDataset(); TreeSet<Row> rowSet = new TreeSet<Row>(rows); TreeSet<Column> colSet = new TreeSet<Column>(columns); Comparable[] _rows = rowSet.toArray(new Comparable[rowSet.size()]); Comparable[] _cols = colSet.toArray(new Comparable[colSet.size()]); // insert rows and columns in the right order for (Comparable r : _rows) ds.setValue(null, r, _cols[0]); for (Comparable c : _cols) ds.setValue(null, _rows[0], c); for( int i=0; i<values.size(); i++ ) ds.addValue( values.get(i), rows.get(i), columns.get(i) ); return ds; } }
/**
 * Flattens this limiter into the parcel: min/max bounds first, then the
 * selectable and disabled time lists as typed arrays (read back with
 * {@code readTypedArray} on the creator side).
 */
@Override
public void writeToParcel(Parcel out, int flags) {
    out.writeParcelable(mMinTime, flags);
    out.writeParcelable(mMaxTime, flags);
    Timepoint[] selectable = mSelectableTimes.toArray(new Timepoint[mSelectableTimes.size()]);
    Timepoint[] disabled = mDisabledTimes.toArray(new Timepoint[mDisabledTimes.size()]);
    out.writeTypedArray(selectable, flags);
    out.writeTypedArray(disabled, flags);
}
/** * Find the "best guess" middle value among comparables. If there is an even * number of total values, the lower of the two middle values will be returned. * @param <T> type of values processed by this method * @param items to compare * @return T at middle position * @throws NullPointerException if items is {@code null} * @throws IllegalArgumentException if items is empty or contains {@code null} values * @since 3.0.1 */ @SafeVarargs public static <T extends Comparable<? super T>> T median(final T... items) { Validate.notEmpty(items); Validate.noNullElements(items); final TreeSet<T> sort = new TreeSet<>(); Collections.addAll(sort, items); @SuppressWarnings("unchecked") //we know all items added were T instances final T result = (T) sort.toArray()[(sort.size() - 1) / 2]; return result; }
@Override public Long[] listAllVersions() throws IOException { if (!fileSystem.exists(basePath)) { return new Long[0]; // for the removed SegmentAppendTrieDictBuilder } FileStatus[] versionDirs = fileSystem.listStatus(basePath, new PathFilter() { @Override public boolean accept(Path path) { return path.getName().startsWith(VERSION_PREFIX); } }); TreeSet<Long> versions = new TreeSet<>(); for (int i = 0; i < versionDirs.length; i++) { Path path = versionDirs[i].getPath(); versions.add(Long.parseLong(path.getName().substring(VERSION_PREFIX.length()))); } return versions.toArray(new Long[versions.size()]); }
/** * Find the "best guess" middle value among comparables. If there is an even * number of total values, the lower of the two middle values will be returned. * @param <T> type of values processed by this method * @param comparator to use for comparisons * @param items to compare * @return T at middle position * @throws NullPointerException if items or comparator is {@code null} * @throws IllegalArgumentException if items is empty or contains {@code null} values * @since 3.0.1 */ @SafeVarargs public static <T> T median(final Comparator<T> comparator, final T... items) { Validate.notEmpty(items, "null/empty items"); Validate.noNullElements(items); Validate.notNull(comparator, "null comparator"); final TreeSet<T> sort = new TreeSet<>(comparator); Collections.addAll(sort, items); @SuppressWarnings("unchecked") //we know all items added were T instances final T result = (T) sort.toArray()[(sort.size() - 1) / 2]; return result; }
/**
 * Returns the sorted union of the cached child-node names and the names
 * reported by the backing store.
 *
 * @return sorted, de-duplicated child names
 * @throws BackingStoreException if the backing store cannot be reached
 */
@Override
public String[] childrenNames() throws BackingStoreException {
    synchronized (lock) {
        checkState();
        // Seed with the cached children, then merge in the SPI-reported ones;
        // TreeSet sorts and removes duplicates.
        TreeSet<String> merged = new TreeSet<String>(cachedNode.keySet());
        for (String name : childrenNamesSpi()) {
            merged.add(name);
        }
        return merged.toArray(new String[merged.size()]);
    }
}
/**
 * Returns a list of plugins that should be shown in the "available" tab, grouped by category.
 * A plugin with multiple categories will appear multiple times in the list.
 */
public PluginEntry[] getCategorizedAvailables() {
    TreeSet<PluginEntry> sorted = new TreeSet<PluginEntry>();
    for (Plugin plugin : getAvailables()) {
        String[] categories = plugin.categories;
        if (categories == null || categories.length == 0) {
            // Uncategorized plugins are filed under the default category.
            sorted.add(new PluginEntry(plugin, getCategoryDisplayName(null)));
        } else {
            // One entry per category, so the plugin shows up in each group.
            for (String category : categories) {
                sorted.add(new PluginEntry(plugin, getCategoryDisplayName(category)));
            }
        }
    }
    return sorted.toArray(new PluginEntry[sorted.size()]);
}
/**
 * Returns the sorted, de-duplicated coprocessor names loaded anywhere on this
 * region server: the common WAL's, every online region's, each region's own
 * WAL's, and the region-server host's coprocessors.
 * <p>
 * WAL lookups are best-effort: an IOException is logged at WARN (full details
 * at DEBUG) and that WAL's coprocessors are skipped rather than failing the
 * whole call.
 */
public String[] getRegionServerCoprocessors() {
    // TreeSet: stable alphabetical ordering, duplicates collapsed.
    TreeSet<String> coprocessors = new TreeSet<>();
    try {
        // null region info selects the common (default) WAL.
        coprocessors.addAll(getWAL(null).getCoprocessorHost().getCoprocessors());
    } catch (IOException exception) {
        LOG.warn("Exception attempting to fetch wal coprocessor information for the common wal; "
            + "skipping.");
        LOG.debug("Exception details for failure to fetch wal coprocessor information.", exception);
    }
    Collection<HRegion> regions = getOnlineRegionsLocalContext();
    for (HRegion region: regions) {
        coprocessors.addAll(region.getCoprocessorHost().getCoprocessors());
        try {
            // Each region may use a distinct WAL; collect its coprocessors too.
            coprocessors.addAll(getWAL(region.getRegionInfo()).getCoprocessorHost().getCoprocessors());
        } catch (IOException exception) {
            LOG.warn("Exception attempting to fetch wal coprocessor information for region " + region
                + "; skipping.");
            LOG.debug("Exception details for failure to fetch wal coprocessor information.", exception);
        }
    }
    coprocessors.addAll(rsHost.getCoprocessors());
    return coprocessors.toArray(new String[coprocessors.size()]);
}
/**
 * Builds the HAVING-clause query tree for this IN / NOT IN predicate.
 * The literal children become a sorted, de-duplicated value set joined with
 * tab-tab separators, and the operator is chosen by the NOT IN flag.
 *
 * @throws Pql2CompilationException when the predicate has no function operand
 */
@Override
public HavingQueryTree buildHavingQueryTree() {
    if (_function == null) {
        throw new Pql2CompilationException("IN predicate has no function");
    }
    // Collect literal children into a sorted, de-duplicated value set.
    TreeSet<String> literalValues = new TreeSet<>();
    for (AstNode child : getChildren()) {
        if (child instanceof LiteralAstNode) {
            literalValues.add(((LiteralAstNode) child).getValueAsString());
        }
    }
    String[] valueArray = literalValues.toArray(new String[literalValues.size()]);
    FilterOperator filterOperator = _isNotInClause ? FilterOperator.NOT_IN : FilterOperator.IN;
    return new HavingQueryTree(_function.buildAggregationInfo(),
        Collections.singletonList(StringUtil.join("\t\t", valueArray)), filterOperator, null);
}
}
/**
 * Initializes the builder from the given sorted word set.
 * Copies the words into the String array and sizes the working char buffer
 * from a heuristic estimate.
 */
public Builder(TreeSet wordSet) {
    int wordCount = wordSet.size();
    mWords = new String[wordCount];
    wordSet.toArray(mWords);
    /* Guess the approximate buffer size: assume an average word length of
     * 6 characters plus 100% structural overhead (12 chars per word), with
     * a 256-char floor.
     */
    int initialSize = wordCount * 12;
    mData = new char[initialSize < 256 ? 256 : initialSize];
}
/**
 * Initializes the builder from the given sorted word set.
 * With two or more words a working char buffer is allocated from a heuristic
 * size estimate; a single word needs no buffer, and an empty set is rejected.
 */
public Builder(TreeSet wordSet) {
    int wordCount = wordSet.size();
    mWords = new String[wordCount];
    wordSet.toArray(mWords);
    if (wordCount >= 2) {
        /* Guess the approximate buffer size: assume an average word length
         * of 6 characters, with overhead matching compression (about a
         * 1-to-1 ratio overall), and a 256-char floor.
         */
        int initialSize = wordCount * 6;
        mData = new char[initialSize < 256 ? 256 : initialSize];
    } else {
        if (wordCount == 0) {
            throw new IllegalArgumentException(); // empty set is not legal
        }
        /* 03-Jan-2006, TSa: Special case: just one entry; leave the char
         * array null and rely on the one-element String array alone.
         */
        mData = null;
    }
}
// Lazily loads and caches the split points shipped through the MapReduce
// distributed cache; synchronized so the cache is populated only once.
// Each line of the cut file is a Base64-encoded row key; TreeSet sorts and
// de-duplicates them before they are frozen into the array.
@SuppressFBWarnings(value = "PATH_TRAVERSAL_IN",
    justification = "path provided by distributed cache framework, not user input")
private synchronized Text[] getCutPoints() throws IOException {
    if (cutPointArray == null) {
        String cutFileName = conf.get(CUTFILE_KEY);
        Path[] cf = org.apache.accumulo.core.clientImpl.mapreduce.lib.DistributedCacheHelper
            .getLocalCacheFiles(conf);
        if (cf != null) {
            for (Path path : cf) {
                // Match on the file's base name only: the localized cache
                // path differs from the original cut-file path.
                if (path.toUri().getPath()
                    .endsWith(cutFileName.substring(cutFileName.lastIndexOf('/')))) {
                    TreeSet<Text> cutPoints = new TreeSet<>();
                    try (Scanner in = new Scanner(new BufferedReader(
                        new InputStreamReader(new FileInputStream(path.toString()), UTF_8)))) {
                        while (in.hasNextLine())
                            cutPoints.add(new Text(Base64.getDecoder().decode(in.nextLine())));
                    }
                    cutPointArray = cutPoints.toArray(new Text[cutPoints.size()]);
                    break;
                }
            }
        }
        if (cutPointArray == null)
            throw new FileNotFoundException(cutFileName + " not found in distributed cache");
    }
    return cutPointArray;
}
// Rebuild each of schema1's grouped arrays in SchemaComparator order.
// The shared TreeSet must be cleared and refilled between categories:
// reusing its previous contents would copy elements of the wrong type into
// the target arrays (ArrayStoreException) and drop each category's real
// entries. NOTE(review): assumes simpleTypes, attributes, complexTypes,
// elements and groups collections are in scope alongside imports and
// attributeGroups — confirm against the enclosing method.
TreeSet tmp = new TreeSet(SchemaComparator.getInstance());
tmp.addAll(imports);
schema1.imports = (Schema[]) tmp.toArray(new Schema[tmp.size()]);
tmp.clear();
tmp.addAll(simpleTypes);
schema1.simpleTypes = (SimpleType[]) tmp.toArray(new SimpleType[tmp.size()]);
tmp.clear();
tmp.addAll(attributeGroups);
schema1.attributeGroups = (AttributeGroup[]) tmp.toArray(new AttributeGroup[tmp.size()]);
tmp.clear();
tmp.addAll(attributes);
schema1.attributes = (Attribute[]) tmp.toArray(new Attribute[tmp.size()]);
tmp.clear();
tmp.addAll(complexTypes);
schema1.complexTypes = (ComplexType[]) tmp.toArray(new ComplexType[tmp.size()]);
tmp.clear();
tmp.addAll(elements);
schema1.elements = (Element[]) tmp.toArray(new Element[tmp.size()]);
tmp.clear();
tmp.addAll(groups);
schema1.groups = (Group[]) tmp.toArray(new Group[tmp.size()]);