/** Perform prediction with a default scheduler. */
default CompletionStage<List<Prediction<InputT, ValueT>>> predict(
    final Duration timeout, final InputT... input) {
  return predict(timeoutScheduler().scheduler(), timeout, input);
}
/**
 * Returns a blocking {@link PreLoader}. Blocks until the model is loaded or a {@link Duration}
 * is met.
 *
 * @param supplier model supplier.
 * @param duration Amount of time that it should wait, if necessary, for the model to be loaded.
 * @param executor the executor to use for asynchronous execution.
 * @param <M> Underlying model instance.
 */
static <M extends Model<?>> PreLoader<M> preload(
    final ThrowableSupplier<M> supplier, final Duration duration, final Executor executor)
    throws InterruptedException, ExecutionException, TimeoutException {
  return preload(ModelLoader.load(supplier, executor), duration)::get;
}
@SuppressWarnings("unchecked") default <C extends PredictorBuilder<ModelT, InputT, VectorT, ValueT>> C with( final FeatureExtractor<ModelT, InputT, VectorT> featureExtractor) { return (C) with(modelLoader(), featureExtractor, predictFn()); }
@SuppressWarnings("unchecked") default <C extends PredictorBuilder<ModelT, InputT, VectorT, ValueT>> C with( final ModelLoader<ModelT> modelLoader) { return (C) with(modelLoader, featureExtractor(), predictFn()); }
@SuppressWarnings("unchecked") default <C extends PredictorBuilder<ModelT, InputT, VectorT, ValueT>> C with( final AsyncPredictFn<ModelT, InputT, VectorT, ValueT> predictFn) { return (C) with(modelLoader(), featureExtractor(), predictFn); }
/**
 * Returns a TensorFlow model loader based on a saved model.
 *
 * @param supplier {@link TensorFlowModel} supplier.
 * @param executor the executor to use for asynchronous execution.
 */
static TensorFlowLoader create(
    final ThrowableSupplier<TensorFlowModel> supplier, final Executor executor) {
  return ModelLoader.load(supplier, executor)::get;
}
}
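// Usage sketch (not from the library source): the same supplier/executor pattern applies to the
// graph-based and XGBoost loaders further below. `openMySavedModel` is a hypothetical method
// that builds a TensorFlowModel from a saved-model directory; the executor runs the load
// asynchronously.
ExecutorService executor = Executors.newSingleThreadExecutor();
ThrowableSupplier<TensorFlowModel> loadSavedModel = () -> openMySavedModel();
TensorFlowLoader loader = TensorFlowLoader.create(loadSavedModel, executor);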
@Override
public DefaultPredictorBuilder<ModelT, InputT, VectorT, ValueT> with(
    final ModelLoader<ModelT> modelLoader,
    final FeatureExtractor<ModelT, InputT, VectorT> featureExtractor,
    final AsyncPredictFn<ModelT, InputT, VectorT, ValueT> predictFn) {
  return create(modelLoader, featureExtractor, predictFn);
}
}
/**
 * Returns a blocking {@link PreLoader}. Blocks until the model is loaded or a {@link Duration}
 * is met.
 *
 * @param loader model loader.
 * @param duration Amount of time that it should wait, if necessary, for the model to be loaded.
 * @param <M> Underlying model instance.
 */
static <M extends Model<?>> PreLoader<M> preload(
    final ModelLoader<M> loader, final Duration duration)
    throws InterruptedException, ExecutionException, TimeoutException {
  return ModelLoader.loaded(loader.get(duration))::get;
}
/**
 * Returns a blocking {@link PreLoader}. Blocks until the model is loaded or a {@link Duration}
 * is met.
 *
 * @param loader model loader.
 * @param duration Amount of time that it should wait, if necessary, for the model to be loaded.
 * @param <M> Underlying model instance.
 */
static <M extends Model<?>> ModelLoader<M> preload(
    final ModelLoader<M> loader, final Duration duration)
    throws InterruptedException, ExecutionException, TimeoutException {
  return PreLoader.preload(loader, duration);
}
/** Create a new prediction result. */
public static <InputT, ValueT> Prediction<InputT, ValueT> create(
    final InputT input, final ValueT value) {
  return new AutoValue_Prediction<>(input, value);
}
}
/** Create a new feature extraction result. */
public static <InputT, ValueT> Vector<InputT, ValueT> create(
    final InputT input, final ValueT value) {
  return new AutoValue_Vector<>(input, value);
}
}
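// Usage sketch (not from the library source): the two factories above pair an input with its
// extracted feature vector and with its predicted value. The input, feature, and value types
// used here are arbitrary placeholders.
String input = "user-123";
Vector<String, float[]> vector = Vector.create(input, new float[] {0.1f, 0.7f});
Prediction<String, Double> prediction = Prediction.create(input, 0.42);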
/**
 * Creates a {@link ModelLoader} with an already loaded model.
 *
 * @param model Underlying model instance.
 */
static <M extends Model<?>> ModelLoader<M> loaded(final M model) {
  return ConsLoader.cons(model);
}
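// Usage sketch (not from the library source): wrapping an already constructed model, e.g. a stub
// in a unit test, so it can be handed to anything that expects a ModelLoader. `MyModel` is a
// hypothetical Model implementation.
MyModel myModel = new MyModel();
ModelLoader<MyModel> loader = ModelLoader.loaded(myModel);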
/** Timeout scheduler for predict functions. */
default PredictorTimeoutScheduler timeoutScheduler() {
  return DefaultPredictorTimeoutScheduler.create();
}
/** Creates a new model {@link Id}. */
public static Id create(final String value) {
  return new AutoValue_Model_Id(value);
}
}
/** Creates a new {@link DefaultPredictorTimeoutScheduler}. */
public static DefaultPredictorTimeoutScheduler create() {
  return new DefaultPredictorTimeoutScheduler();
}
/**
 * Returns a TensorFlow model loader based on a serialized TensorFlow {@link Graph}.
 *
 * @param supplier {@link TensorFlowGraphModel} supplier.
 * @param executor the executor to use for asynchronous execution.
 */
static TensorFlowGraphLoader create(
    final ThrowableSupplier<TensorFlowGraphModel> supplier, final Executor executor) {
  return ModelLoader.load(supplier, executor)::get;
}
}
/** Perform prediction with a default scheduler and a practically infinite timeout. */
default CompletionStage<List<Prediction<InputT, ValueT>>> predict(final InputT... input) {
  return predict(timeoutScheduler().scheduler(), Duration.ofDays(Integer.MAX_VALUE), input);
}
}
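// Usage sketch (not from the library source): `predictor` is a hypothetical, fully built
// Predictor<String, Double>. The two-argument overload bounds the prediction with an explicit
// timeout; the vararg-only overload effectively never times out.
CompletionStage<List<Prediction<String, Double>>> bounded =
    predictor.predict(Duration.ofSeconds(5), "user-123", "user-456");
CompletionStage<List<Prediction<String, Double>>> unbounded =
    predictor.predict("user-123", "user-456");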
/**
 * Returns a blocking {@link ModelLoader}. Blocks until the model is loaded or a {@link Duration}
 * is met.
 *
 * @param supplier model supplier.
 * @param duration Amount of time that it should wait, if necessary, for the model to be loaded.
 * @param executor the executor to use for asynchronous execution.
 * @param <M> Underlying model instance.
 */
static <M extends Model<?>> PreLoader<M> preload(
    final ThrowableSupplier<M> supplier, final Duration duration, final Executor executor)
    throws InterruptedException, ExecutionException, TimeoutException {
  return PreLoader.preload(supplier, duration, executor);
}
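// Usage sketch (not from the library source): block at startup for at most 30 seconds while the
// model loads, failing fast if it is not ready in time. `MyModel` and `buildMyModel()` are
// hypothetical stand-ins for a concrete Model implementation and its factory.
ExecutorService executor = Executors.newCachedThreadPool();
ThrowableSupplier<MyModel> loadMyModel = () -> buildMyModel();
PreLoader<MyModel> preLoaded;
try {
  preLoaded = PreLoader.preload(loadMyModel, Duration.ofSeconds(30), executor);
} catch (InterruptedException | ExecutionException | TimeoutException e) {
  throw new IllegalStateException("model did not load within 30 seconds", e);
}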
/**
 * Returns an XGBoost model loader given the serialized model stored in the model URI.
 *
 * @param supplier {@link XGBoostModel} supplier.
 * @param executor the executor to use for asynchronous execution.
 */
static XGBoostLoader create(
    final ThrowableSupplier<XGBoostModel> supplier, final Executor executor) {
  return ModelLoader.load(supplier, executor)::get;
}
}