Refine search
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

/**
 * Demonstrates finding the minimum and maximum of a char array by boxing the
 * chars into a {@code List<Character>} and using {@link Collections#min} /
 * {@link Collections#max}.
 */
public class MinMaxValue {
    public static void main(String[] args) {
        char[] a = {'3', '5', '1', '4', '2'};
        // Box the chars manually; this drops the commons-lang ArrayUtils
        // dependency and avoids the raw-typed List of the original.
        List<Character> b = new ArrayList<Character>(a.length);
        for (char c : a) {
            b.add(c);
        }
        System.out.println(Collections.min(b)); // lowest char by natural order: '1'
        System.out.println(Collections.max(b)); // highest char by natural order: '5'
    }
}
private void setupMapper(CubeSegment cubeSeg) throws IOException { // set the segment's offset info to job conf Map<Integer, Long> offsetStart = cubeSeg.getSourcePartitionOffsetStart(); Map<Integer, Long> offsetEnd = cubeSeg.getSourcePartitionOffsetEnd(); Integer minPartition = Collections.min(offsetStart.keySet()); Integer maxPartition = Collections.max(offsetStart.keySet()); job.getConfiguration().set(CONFIG_KAFKA_PARITION_MIN, minPartition.toString()); job.getConfiguration().set(CONFIG_KAFKA_PARITION_MAX, maxPartition.toString()); for(Integer partition: offsetStart.keySet()) { job.getConfiguration().set(CONFIG_KAFKA_PARITION_START + partition, offsetStart.get(partition).toString()); job.getConfiguration().set(CONFIG_KAFKA_PARITION_END + partition, offsetEnd.get(partition).toString()); } job.setMapperClass(KafkaFlatTableMapper.class); job.setInputFormatClass(KafkaInputFormat.class); job.setOutputKeyClass(BytesWritable.class); job.setOutputValueClass(Text.class); job.setOutputFormatClass(SequenceFileOutputFormat.class); job.setNumReduceTasks(0); }
/**
 * Rescales each weight in {@code wds.w} to the range [0, 100] and returns the
 * integer-truncated results. NOTE: as a side effect the normalized double
 * values are written back into {@code wds.w} (original behavior preserved).
 * When all weights are equal, every output value is 100 and {@code wds.w} is
 * left untouched.
 *
 * @param wds data set supplying the weights and the vertex count
 * @return one normalized integer per vertex
 */
public ArrayList<Integer> normalized(WDataSet wds){
    ArrayList<Integer> result = new ArrayList<Integer>();
    double max = Collections.max(wds.w);
    double min = Collections.min(wds.w);
    int nVerts = wds.graph.getNVerts();

    if (max == min) {
        // Degenerate case: zero range, every weight maps to 100.
        for (int i = 0; i < nVerts; i++) {
            result.add(100);
        }
        return result;
    }

    for (int i = 0; i < nVerts; i++) {
        double raw = wds.w.get(i);
        double scaled = (raw - min) / (max - min); // in [0, 1]
        wds.w.set(i, scaled);                      // side effect kept from original
        result.add((int) (100 * scaled));
    }
    return result;
}
private void fillEmptyTimestamps(Map<Long, Map<String, Number>> results) { final long minTimestamp = Collections.min(results.keySet()); final long maxTimestamp = Collections.max(results.keySet()); final MutableDateTime currentTime = new MutableDateTime(minTimestamp, DateTimeZone.UTC); while (currentTime.getMillis() < maxTimestamp) { final Map<String, Number> entry = results.get(currentTime.getMillis()); // advance timestamp by the interval currentTime.add(interval.getPeriod()); if (entry == null) { // synthesize a 0 value for this timestamp results.put(currentTime.getMillis(), EMPTY_RESULT); } } }
/**
 * Builds a Scan covering the row range, requested columns, and timestamp
 * range, and returns the open scanner (caller is responsible for closing it).
 *
 * Fix: removed the no-op statement {@code Arrays.asList(rowIndexes);} whose
 * result was discarded.
 *
 * @param ht table to scan
 * @param cf column family containing the requested columns
 * @param rowIndexes row indexes; only the min and max bound the scan
 * @param columnIndexes column indexes to add to the scan
 * @param versions timestamps; the scan time range is [min, max + 1)
 * @param maxVersions maximum versions to return per cell
 * @return a scanner over the selected rows/columns/versions
 * @throws IOException if the scanner cannot be opened
 */
private ResultScanner scan(Table ht, byte[] cf, Integer[] rowIndexes,
    Integer[] columnIndexes, Long[] versions, int maxVersions)
    throws IOException {
  byte[] startRow = Bytes.toBytes("row:" + Collections.min(Arrays.asList(rowIndexes)));
  // NOTE(review): this "+ 1" is STRING concatenation, not arithmetic — a max
  // index of 5 yields end row "row:51", not "row:6". Kept byte-for-byte since
  // the (exclusive) stop row still sorts after "row:5"; confirm before changing.
  byte[] endRow = Bytes.toBytes("row:" + Collections.max(Arrays.asList(rowIndexes)) + 1);
  Scan scan = new Scan(startRow, endRow);
  for (Integer colIdx : columnIndexes) {
    scan.addColumn(cf, Bytes.toBytes("column:" + colIdx));
  }
  scan.setMaxVersions(maxVersions);
  // Time range upper bound is exclusive, hence the (numeric) + 1 here.
  scan.setTimeRange(Collections.min(Arrays.asList(versions)),
      Collections.max(Arrays.asList(versions)) + 1);
  return ht.getScanner(scan);
}
/**
 * Loads pipeline instance history for the given pipeline, querying by the
 * [min, max] window of the supplied ids and attaching each instance's build
 * cause. An empty id list yields an empty model collection without hitting
 * the database.
 *
 * @param pipelineName pipeline to load history for
 * @param ids instance ids; only min and max bound the query window
 * @return the populated history models (possibly empty, never null)
 */
private PipelineInstanceModels loadHistory(String pipelineName, List<Long> ids) {
    if (ids.isEmpty()) {
        return PipelineInstanceModels.createPipelineInstanceModels();
    }
    Map<String, Object> queryArgs = arguments("pipelineName", pipelineName)
            .and("from", Collections.min(ids))
            .and("to", Collections.max(ids))
            .asMap();
    List<PipelineInstanceModel> rows = (List<PipelineInstanceModel>)
            getSqlMapClientTemplate().queryForList("getPipelineHistoryByName", queryArgs);
    PipelineInstanceModels history = PipelineInstanceModels.createPipelineInstanceModels(rows);
    for (PipelineInstanceModel model : history) {
        loadPipelineHistoryBuildCause(model);
    }
    return history;
}
/**
 * Fetches the flattened properties history for the id-limited set of
 * pipelines described by the given arguments.
 *
 * Fix: the original called {@link Collections#max}/{@link Collections#min}
 * unconditionally, throwing {@code NoSuchElementException} when the
 * "limitedPipelineIds" query returned no rows; an empty result now yields an
 * empty list (matching the empty-input guard used by loadHistory).
 *
 * @param arguments base query arguments; augmented with the id window
 * @return one row map per history entry, empty when no pipelines match
 */
@SuppressWarnings("unchecked")
private List<Map<String, Object>> flatHistory(IBatisUtil.IBatisArgument arguments) {
    List<Long> pipelineIds = getSqlMapClientTemplate().queryForList("limitedPipelineIds", arguments.asMap());
    if (pipelineIds.isEmpty()) {
        // Collections.min/max would throw on an empty list.
        return Collections.emptyList();
    }
    arguments.and("limitedPipelineIds", pipelineIds);
    long maxId = Collections.max(pipelineIds);
    long minId = Collections.min(pipelineIds);
    arguments = arguments.and("maxId", maxId).and("minId", minId);
    return (List<Map<String, Object>>) getSqlMapClientTemplate()
            .queryForList("getAllPropertiesHistory", arguments.asMap());
}
/**
 * Fetches all cell versions at (rowIdx, colIdx) whose timestamps fall within
 * [min(versions), max(versions)] by issuing a Get with an explicit time
 * range. (Note: despite what the original comment claimed, no TimestampFilter
 * is involved — the bounds come from setTimeRange.)
 *
 * @param ht table to read from
 * @param cf column family of the target column
 * @param rowIdx numeric suffix of the "row:" key
 * @param colIdx numeric suffix of the "column:" qualifier
 * @param versions timestamps whose min/max bound the request
 * @return the raw cells of the result, one per matching version
 * @throws IOException on a failed read
 */
private Cell[] getNVersions(Table ht, byte[] cf, int rowIdx, int colIdx,
    List<Long> versions) throws IOException {
  Get get = new Get(Bytes.toBytes("row:" + rowIdx));
  get.addColumn(cf, Bytes.toBytes("column:" + colIdx));
  get.setMaxVersions();
  long oldest = Collections.min(versions);
  long newest = Collections.max(versions);
  // The upper bound is exclusive, so add 1 to include the newest version.
  get.setTimeRange(oldest, newest + 1);
  return ht.get(get).rawCells();
}
/**
 * Removes up to {@code count} of the largest (or smallest) values from the
 * accumulator, always leaving at least one element behind.
 *
 * @param accumulator A non-null accumulator list.
 * @param count How many values to drop; 1 or more.
 * @param highest When true drop the largest values, otherwise the smallest.
 */
public static void drop(final List<WeightedValue> accumulator, final int count, final boolean highest) {
  int remaining = count;
  // Stop early once only a single value would be left.
  while (remaining-- > 0 && accumulator.size() > 1) {
    final WeightedValue victim = highest
        ? Collections.max(accumulator)
        : Collections.min(accumulator);
    accumulator.remove(victim);
  }
}
/** * Creates a histogram of the provided data. * * @param data list of observations * @param breaks number of breaks in the histogram * @return List of integer values size of the breaks */ public static List<Integer> getHistogram(List<Double> data, int breaks) { if (data.isEmpty()) { return Collections.emptyList(); } List<Integer> ret = new ArrayList<Integer>(breaks); for (int i = 0; i < breaks; i++) { ret.add(0); } double min = Collections.min(data); double range = Collections.max(data) - min + 1; double step = range / breaks; for (double point : data) { // Math.min necessary because rounding error -> AIOOBE int index = Math.min((int) ((point - min) / step), breaks - 1); ret.set(index, ret.get(index) + 1); } return ret; }
/** * Same as <code>getHistogram</code> but operates on <code>BigIntegers</code>. */ public static List<Integer> getHistogramBigInt(List<BigInteger> data, int breaks) { if (data.isEmpty()) { return Collections.emptyList(); } List<Integer> ret = new ArrayList<Integer>(breaks); for (int i = 0; i < breaks; i++) { ret.add(0); } BigInteger min = Collections.min(data); BigInteger max = Collections.max(data); BigInteger range = max.subtract(min).add(BigInteger.valueOf(1)); BigInteger step = range.divide(BigInteger.valueOf(breaks)); if (step.equals(BigInteger.ZERO)) { return Collections.emptyList(); // too small } for (BigInteger point : data) { int index = point.subtract(min).divide(step).intValue(); // Math.min necessary because rounding error -> AIOOBE index = Math.min(index, breaks - 1); ret.set(index, ret.get(index) + 1); } return ret; }