/**
 * Convert parallel arrays of objects and annotations to a list of
 * {@link AnnotatedObject}.
 *
 * @param objs
 *            the objects
 * @param anns
 *            the annotation for each object (assumes 1 annotation per
 *            object).
 * @return the list
 * @throws IllegalArgumentException
 *             if the two arrays have different lengths
 */
public static <OBJECT, ANNOTATION> List<AnnotatedObject<OBJECT, ANNOTATION>> createList(OBJECT[] objs, ANNOTATION[] anns) {
	// Fail fast with a clear message rather than an opaque
	// ArrayIndexOutOfBoundsException part-way through the loop.
	if (objs.length != anns.length)
		throw new IllegalArgumentException(
				"objs and anns must have the same length: " + objs.length + " != " + anns.length);

	// Presize: the result has exactly one entry per object.
	final List<AnnotatedObject<OBJECT, ANNOTATION>> list =
			new ArrayList<AnnotatedObject<OBJECT, ANNOTATION>>(objs.length);

	for (int i = 0; i < objs.length; i++) {
		list.add(create(objs[i], anns[i]));
	}

	return list;
}
/**
 * Convert parallel arrays of objects and annotations to a list of
 * {@link AnnotatedObject}.
 *
 * @param objs
 *            the objects
 * @param anns
 *            the annotations for each object.
 * @return the list
 * @throws IllegalArgumentException
 *             if the two arrays have different lengths
 */
public static <OBJECT, ANNOTATION> List<AnnotatedObject<OBJECT, ANNOTATION>> createList(OBJECT[] objs, ANNOTATION[][] anns) {
	// Fail fast with a clear message rather than an opaque
	// ArrayIndexOutOfBoundsException part-way through the loop.
	if (objs.length != anns.length)
		throw new IllegalArgumentException(
				"objs and anns must have the same length: " + objs.length + " != " + anns.length);

	// One output entry per (object, annotation) pairing; sized for at
	// least one annotation per object.
	final List<AnnotatedObject<OBJECT, ANNOTATION>> list =
			new ArrayList<AnnotatedObject<OBJECT, ANNOTATION>>(objs.length);

	for (int i = 0; i < objs.length; i++) {
		for (final ANNOTATION a : anns[i])
			list.add(create(objs[i], a));
	}

	return list;
}
}
/**
 * Train for the given face patch without doing any face detection. It is
 * assumed that the given image will be a cropped/aligned image of the face
 * as is necessary for the given recogniser.
 *
 * @param face
 *            The detected face implementation
 * @param person
 *            The person to whom this face belongs
 * @return The face image
 */
public FACE train(final FACE face, final PERSON person) {
	// Wrap the face with its identity and hand it straight to the
	// underlying recogniser; detection is deliberately skipped.
	recogniser.train(AnnotatedObject.create(face, person));

	return face;
}
/**
 * Train the recogniser with a single example, returning the detected face.
 * If multiple faces are found, the biggest is chosen.
 * <p>
 * If you need more control, consider calling {@link #getDetector()} to get
 * a detector which you can apply to your image and {@link #getRecogniser()}
 * to get the recogniser which you can train with the detections directly.
 *
 * @param person
 *            the person
 * @param image
 *            the image with the persons face
 * @return the detected face
 */
public FACE train(final PERSON person, final FImage image) {
	final List<FACE> detections = this.detector.detectFaces(image);

	// Guard: nothing to train on.
	if (detections == null || detections.isEmpty()) {
		FaceRecognitionEngine.logger.warn("no face detected");
		return null;
	}

	// Pick the training face: the only detection, or the biggest of many.
	final FACE chosen;
	if (detections.size() == 1) {
		chosen = detections.get(0);
	} else {
		FaceRecognitionEngine.logger.warn("More than one face found. Choosing biggest.");
		chosen = DatasetFaceDetector.getBiggest(detections);
	}

	this.recogniser.train(AnnotatedObject.create(chosen, person));
	return chosen;
}
/** * Train the annotator on the given streams. The streams are annotated with * the appropriate annotation, and sample chunks (and therefore buffers) are * gathered from the streams into batches to train the annotator. * * @param streams * The annotated streams */ public void train(final List<IndependentPair<AudioStream, ANNOTATION>> streams) { // Convert all the incoming streams into AnnotatedObject instances // where the sample buffer for each final List<Annotated<SampleBuffer, ANNOTATION>> list = new ArrayList<Annotated<SampleBuffer, ANNOTATION>>(); for (final IndependentPair<AudioStream, ANNOTATION> stream : streams) { SampleChunk sc = null; while ((sc = stream.firstObject().nextSampleChunk()) != null) { final SampleBuffer sb = sc.getSampleBuffer(); final AnnotatedObject<SampleBuffer, ANNOTATION> a = AnnotatedObject.create(sb, stream.secondObject()); list.add(a); } } // Train the annotator for the streams this.annotator.train(list); }
/** * Train the annotator on the given streams. The streams are annotated with * the appropriate annotation, and sample chunks (and therefore buffers) are * gathered from the streams into batches to train the annotator. * * @param streams * The annotated streams */ public void train(final List<IndependentPair<AudioStream, ANNOTATION>> streams) { // Convert all the incoming streams into AnnotatedObject instances // where the sample buffer for each final List<Annotated<SampleBuffer, ANNOTATION>> list = new ArrayList<Annotated<SampleBuffer, ANNOTATION>>(); for (final IndependentPair<AudioStream, ANNOTATION> stream : streams) { SampleChunk sc = null; while ((sc = stream.firstObject().nextSampleChunk()) != null) { final SampleBuffer sb = sc.getSampleBuffer(); final AnnotatedObject<SampleBuffer, ANNOTATION> a = AnnotatedObject.create(sb, stream.secondObject()); list.add(a); } } // Train the annotator for the streams this.annotator.train(list); }