/**
 * Returns the value mapped to {@code key}, or {@code defaultValue} when absent;
 * straight delegation to the backing {@code map} (no extra synchronization).
 */
@Override
public V getOrDefault(final Object key, final V defaultValue) {
    return map.getOrDefault(key, defaultValue);
}

@Override
/**
 * Returns the value mapped to {@code key}, or {@code defaultValue} when absent.
 * The lookup is performed while holding the {@code sync} monitor, making this
 * the thread-safe counterpart of the plain delegating wrapper.
 */
@Override
public V getOrDefault(final Object key, final V defaultValue) {
    synchronized (sync) {
        return map.getOrDefault(key, defaultValue);
    }
}

@Override
/**
 * Returns the collection of ints associated with {@code key}.
 *
 * @param key the key to look up.
 * @return the mapped {@code IntCollection}, or the shared immutable
 *         {@code IntSets.EMPTY_SET} when the key is absent — never {@code null}.
 */
public IntCollection get(T key) {
    return map.getOrDefault(key, IntSets.EMPTY_SET);
}
/**
 * Log the performance of a model. The model is identified by a prefix. Use "best" if you are only
 * tracking the performance of the best model, by score. Use bestAUC if you are tracking the performance
 * of the model that got best AUC on the test set. Use final for the model created at the end of the
 * training process, irrespective of performance.
 *
 * @param prefix Identifies a specific model.
 * @param numExamplesUsed The number of training examples used to train the model so far. Note that
 *                        reused examples are counted again.
 * @param epoch The number of epochs used to train the model.
 * @param score The score obtained at numExamplesUsed and epoch for the model.
 */
public void log(String prefix, long numExamplesUsed, int epoch, double score) {
    // BUG FIX: the previous getOrDefault(prefix, new ObjectArrayList<>()) added the
    // Performance to a throwaway list that was never put into the map, silently
    // dropping the entry whenever the prefix had no list yet. computeIfAbsent
    // materializes and stores the per-prefix list on first use.
    // -1 is the placeholder AUC used by this overload (the AUC-aware overload passes a real value).
    log.computeIfAbsent(prefix, k -> new ObjectArrayList<>())
            .add(new Performance(numExamplesUsed, epoch, score, -1));
}
/**
 * Log the performance of a model. The model is identified by a prefix. Use "best" if you are only
 * tracking the performance of the best model, by score. Use bestAUC if you are tracking the performance
 * of the model that got best AUC on the test set. Use final for the model created at the end of the
 * training process, irrespective of performance.
 *
 * @param prefix Identifies a specific model.
 * @param numExamplesUsed The number of training examples seen by the model. Note that examples used
 *                        in training several times count several times.
 * @param epoch The number of epochs used to train the model.
 * @param score The score obtained at numExamplesUsed and epoch for the model.
 * @param auc The AUC on the test set, or NaN.
 */
public void log(String prefix, long numExamplesUsed, int epoch, double score, double auc) {
    // Track best-so-far values. Score is minimized (loss-like) while AUC is
    // maximized — NOTE(review): min-for-score mirrors the original code; confirm
    // the score really is loss-like against callers.
    bestScore = Math.min(bestScore, score);
    bestAUC = Math.max(bestAUC, auc);
    // computeIfAbsent replaces the getOrDefault/conditional-put dance: it stores
    // the per-prefix list on first use and returns the existing one otherwise.
    log.computeIfAbsent(prefix, k -> new ObjectArrayList<>())
            .add(new Performance(numExamplesUsed, epoch, score, auc));
}
/** * Log the performance of a model. The model is identified by a prefix. Use "best" if you are only * tracking the performance of the best model, by score. Use bestAUC if you are tracking the performance * of the model that got best AUC on the test set. Use final for the model created at the end of the * training process, irrespective of performance. * * @param prefix Identifies a specific model. * @param numExamplesUsed The number of training examples seen by the model. Note that examples used in training several times count several times. * @param epoch The number of epochs used to train the model. * @param metricValues values of performance metrics. */ public void logMetrics(String prefix, long numExamplesUsed, int epoch, double... metricValues) { ObjectArrayList<Performance> defaultValue = new ObjectArrayList<>(); for (int metricIndex = 0; metricIndex < metricValues.length; metricIndex++) { if (metricValues[metricIndex] == metricValues[metricIndex]) { // not NaN if (performanceLargeIsBest[metricIndex]) { bestPerformances[metricIndex] = Math.max(bestPerformances[metricIndex], metricValues[metricIndex]); } else { bestPerformances[metricIndex] = Math.min(bestPerformances[metricIndex], metricValues[metricIndex]); } } } log.getOrDefault(prefix, defaultValue).add(new Performance(numExamplesUsed, epoch, performanceNames, metricValues)); if (defaultValue.size() > 0) { log.put(prefix, defaultValue); } }