/**
 * Convert parallel arrays of objects and annotations to a list of
 * {@link AnnotatedObject}.
 *
 * @param objs
 *            the objects
 * @param anns
 *            the annotation for each object (assumes 1 annotation per
 *            object).
 * @return the list
 */
public static <OBJECT, ANNOTATION> List<AnnotatedObject<OBJECT, ANNOTATION>> createList(OBJECT[] objs,
        ANNOTATION[] anns)
{
    final List<AnnotatedObject<OBJECT, ANNOTATION>> list = new ArrayList<AnnotatedObject<OBJECT, ANNOTATION>>();

    for (int i = 0; i < objs.length; i++) {
        list.add(create(objs[i], anns[i]));
    }

    return list;
}
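// A minimal usage sketch of the parallel-array overload above; the arrays
// and labels here are hypothetical example data, not part of the library:
final String[] objects = { "apple", "banana", "pear" };
final String[] labels = { "fruit", "fruit", "fruit" };
final List<AnnotatedObject<String, String>> examples = AnnotatedObject.createList(objects, labels);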
/**
 * Create an {@link AnnotatedObject} with the given object and its
 * annotation.
 *
 * @param <OBJECT>
 *            Type of object.
 * @param <ANNOTATION>
 *            Type of annotations
 * @param object
 *            the object
 * @param annotation
 *            the object's annotation.
 * @return the new {@link AnnotatedObject}
 */
public static <OBJECT, ANNOTATION> AnnotatedObject<OBJECT, ANNOTATION> create(OBJECT object, ANNOTATION annotation) {
    return new AnnotatedObject<OBJECT, ANNOTATION>(object, annotation);
}
@Override
public void train(GroupedDataset<ANNOTATION, ? extends ListDataset<OBJECT>, OBJECT> dataset) {
    train(AnnotatedObject.createList(dataset));
}
/**
 * Convert parallel arrays of objects and annotations to a list of
 * {@link AnnotatedObject}.
 *
 * @param objs
 *            the objects
 * @param anns
 *            the annotations for each object.
 * @return the list
 */
public static <OBJECT, ANNOTATION> List<AnnotatedObject<OBJECT, ANNOTATION>> createList(OBJECT[] objs,
        ANNOTATION[][] anns)
{
    final List<AnnotatedObject<OBJECT, ANNOTATION>> list = new ArrayList<AnnotatedObject<OBJECT, ANNOTATION>>();

    for (int i = 0; i < objs.length; i++) {
        for (final ANNOTATION a : anns[i])
            list.add(create(objs[i], a));
    }

    return list;
}
/**
 * Create an {@link AnnotatedObject} with the given object and its
 * annotations.
 *
 * @param <OBJECT>
 *            Type of object.
 * @param <ANNOTATION>
 *            Type of annotations
 * @param object
 *            the object
 * @param annotations
 *            the object's annotations.
 * @return the new {@link AnnotatedObject}
 */
public static <OBJECT, ANNOTATION> AnnotatedObject<OBJECT, ANNOTATION> create(OBJECT object,
        Collection<ANNOTATION> annotations)
{
    return new AnnotatedObject<OBJECT, ANNOTATION>(object, annotations);
}
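// A hedged sketch of the Collection overload, attaching several annotations
// to one object; the identifier and tags are illustrative only (requires
// java.util.Arrays):
final List<String> tags = Arrays.asList("outdoor", "beach", "summer");
final AnnotatedObject<String, String> multiLabel = AnnotatedObject.create("photo-123", tags);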
/**
 * Train the annotator with the given grouped dataset. Internally, the
 * dataset is converted to a list containing exactly one reference to each
 * object in the dataset with (potentially) multiple annotations.
 *
 * @param dataset
 *            the dataset to train on
 */
public void train(GroupedDataset<ANNOTATION, ? extends ListDataset<OBJECT>, OBJECT> dataset) {
    train(AnnotatedObject.createList(dataset));
}
/**
 * Train for the given face patch without doing any face detection. It is
 * assumed that the given image will be a cropped/aligned image of the face,
 * as is necessary for the given recogniser.
 *
 * @param face
 *            The detected face implementation
 * @param person
 *            The person to whom this face belongs
 * @return The face that the recogniser was trained on
 */
public FACE train(final FACE face, final PERSON person) {
    this.recogniser.train(AnnotatedObject.create(face, person));
    return face;
}
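// A hypothetical sketch of batch-training on pre-cropped/aligned patches;
// "alignedFaces", "engine" and the person name are assumptions:
for (final DetectedFace patch : alignedFaces)
    engine.train(patch, "Alice");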
/**
 * Convert a grouped dataset to a list of annotated objects. The annotations
 * are the groups to which each object belongs. If the same object appears
 * in multiple groups within the dataset then it will have multiple
 * annotations.
 *
 * @param <OBJECT>
 *            Type of object.
 * @param <ANNOTATION>
 *            Type of annotations.
 * @param dataset
 *            the dataset
 * @return the list of annotated instances
 */
public static <OBJECT, ANNOTATION> List<AnnotatedObject<OBJECT, ANNOTATION>> createList(
        GroupedDataset<ANNOTATION, ? extends ListDataset<OBJECT>, OBJECT> dataset)
{
    final Map<OBJECT, AnnotatedObject<OBJECT, ANNOTATION>> annotated =
            new HashMap<OBJECT, AnnotatedObject<OBJECT, ANNOTATION>>(dataset.numInstances());

    for (final ANNOTATION grp : dataset.getGroups()) {
        for (final OBJECT inst : dataset.getInstances(grp)) {
            final AnnotatedObject<OBJECT, ANNOTATION> ao = annotated.get(inst);

            if (ao == null)
                annotated.put(inst, new AnnotatedObject<OBJECT, ANNOTATION>(inst, grp));
            else
                ao.annotations.add(grp);
        }
    }

    return new ArrayList<AnnotatedObject<OBJECT, ANNOTATION>>(annotated.values());
}
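// A sketch of the grouped-dataset conversion, assuming OpenIMAJ's
// MapBackedDataset and ListBackedDataset implementations. "imgA" appears in
// both groups, so it becomes a single entry carrying both annotations:
final MapBackedDataset<String, ListDataset<String>, String> ds =
        new MapBackedDataset<String, ListDataset<String>, String>();
ds.getMap().put("sunny", new ListBackedDataset<String>(Arrays.asList("imgA", "imgB")));
ds.getMap().put("beach", new ListBackedDataset<String>(Arrays.asList("imgA")));

final List<AnnotatedObject<String, String>> flat = AnnotatedObject.createList(ds);
// flat contains two entries: imgA with {sunny, beach} and imgB with {sunny}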
/**
 * Train the annotator with the given grouped dataset. This method assumes
 * that each object can appear in multiple groups of the dataset (i.e. a
 * multi-label problem). Internally, the dataset is converted to a list
 * containing exactly one reference to each object in the dataset with
 * (potentially) multiple annotations.
 * <p>
 * If the dataset is actually multi-class (i.e. each object belongs to only
 * a single group), then calling this method is equivalent to calling
 * {@link #trainMultiClass(GroupedDataset)}, but is less efficient as the
 * dataset has to be converted into a list.
 * <p>
 * Some annotator implementations do not care whether the data is
 * multi-class or multi-label, and might choose to override this method to
 * just call {@link #trainMultiClass(GroupedDataset)} instead.
 *
 * @param dataset
 *            the dataset to train on
 */
public void train(GroupedDataset<ANNOTATION, ? extends ListDataset<OBJECT>, OBJECT> dataset) {
    for (final AnnotatedObject<OBJECT, ANNOTATION> ao : AnnotatedObject.createList(dataset)) {
        train(ao);
    }
}
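// A sketch contrasting the two training paths; "annotator" and "dataset"
// are hypothetical. For strictly multi-class data the results are
// equivalent, but trainMultiClass avoids building the intermediate list:
annotator.train(dataset);           // multi-label path: flattens to a list first
annotator.trainMultiClass(dataset); // multi-class path: uses the grouping directly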
/**
 * Train the recogniser with a single example, returning the detected face.
 * If multiple faces are found, the biggest is chosen.
 * <p>
 * If you need more control, consider calling {@link #getDetector()} to get
 * a detector which you can apply to your image and {@link #getRecogniser()}
 * to get the recogniser which you can train with the detections directly.
 *
 * @param person
 *            the person
 * @param image
 *            the image with the person's face
 * @return the detected face
 */
public FACE train(final PERSON person, final FImage image) {
    final List<FACE> faces = this.detector.detectFaces(image);

    if (faces == null || faces.size() == 0) {
        FaceRecognitionEngine.logger.warn("no face detected");
        return null;
    } else if (faces.size() == 1) {
        this.recogniser.train(AnnotatedObject.create(faces.get(0), person));
        return faces.get(0);
    } else {
        FaceRecognitionEngine.logger.warn("More than one face found. Choosing biggest.");

        final FACE face = DatasetFaceDetector.getBiggest(faces);
        this.recogniser.train(AnnotatedObject.create(face, person));
        return face;
    }
}
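// A hypothetical end-to-end use of the engine; the file name and person are
// illustrative (ImageUtilities.readF loads an image as a greyscale FImage):
final FImage photo = ImageUtilities.readF(new File("alice.jpg"));
final DetectedFace trained = engine.train("Alice", photo);

if (trained == null)
    System.out.println("No face was detected, so nothing was trained.");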
	recogniser.train(new AnnotatedObject<CLMDetectedFace, String>(faces.get(0), person));
} else {
	System.out.println("Wrong number of faces found");
		new SamplesFeatureProvider());

ann.train(AnnotatedObject.createList(vd.getTrainingDataset()));
/**
 * Train the annotator on the given streams. The streams are annotated with
 * the appropriate annotation, and sample chunks (and therefore buffers) are
 * gathered from the streams into batches to train the annotator.
 *
 * @param streams
 *            The annotated streams
 */
public void train(final List<IndependentPair<AudioStream, ANNOTATION>> streams) {
    // Convert all the incoming streams into AnnotatedObject instances,
    // pairing each sample buffer with its stream's annotation
    final List<Annotated<SampleBuffer, ANNOTATION>> list = new ArrayList<Annotated<SampleBuffer, ANNOTATION>>();

    for (final IndependentPair<AudioStream, ANNOTATION> stream : streams) {
        SampleChunk sc = null;
        while ((sc = stream.firstObject().nextSampleChunk()) != null) {
            final SampleBuffer sb = sc.getSampleBuffer();
            final AnnotatedObject<SampleBuffer, ANNOTATION> a = AnnotatedObject.create(sb, stream.secondObject());
            list.add(a);
        }
    }

    // Train the annotator on the collected buffers
    this.annotator.train(list);
}
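// A hedged sketch of assembling the annotated streams; XuggleAudio is one
// AudioStream implementation in OpenIMAJ, and the file names, labels and
// "trainer" instance are assumptions:
final List<IndependentPair<AudioStream, String>> streams =
        new ArrayList<IndependentPair<AudioStream, String>>();
streams.add(new IndependentPair<AudioStream, String>(new XuggleAudio(new File("speech.wav")), "speech"));
streams.add(new IndependentPair<AudioStream, String>(new XuggleAudio(new File("music.wav")), "music"));

trainer.train(streams);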
		SolverType.L2R_L2LOSS_SVC_DUAL, C, 0.1, 1 /* bias */, true);

ann.train(AnnotatedObject.createList(trfeaturesz, trclasses));