private List<D> getDetectedFaces(String faceId, I faceImage) { List<D> toRet = null; if(!this.cache){ toRet = this.detector.detectFaces(faceImage); } else{ toRet = this.detectedFaceCache.get(faceId); if(toRet == null){ // System.out.println("Redetected face: " + faceId); toRet = this.detector.detectFaces(faceImage);; this.detectedFaceCache.put(faceId, toRet); } } return toRet; }
@Override public void readBinary(DataInput in) throws IOException { faceDetector = IOUtils.newInstance(in.readUTF()); faceDetector.readBinary(in); // facialKeypointExtractor; this.patchScale = in.readFloat(); }
@Override public void writeBinary(DataOutput out) throws IOException { out.writeUTF(faceDetector.getClass().getName()); faceDetector.writeBinary(out); // facialKeypointExtractor; out.writeFloat(patchScale); }
/**
 * Detect and recognise the faces in the given image, returning a list of
 * potential people for each face.
 *
 * @param image
 *            the image
 * @return a list of faces and recognitions
 */
public List<IndependentPair<FACE, List<ScoredAnnotation<PERSON>>>> recognise(final FImage image) {
	final List<IndependentPair<FACE, List<ScoredAnnotation<PERSON>>>> annotated =
			new ArrayList<IndependentPair<FACE, List<ScoredAnnotation<PERSON>>>>();

	// Pair every detection with the full list of candidate people.
	for (final FACE face : this.detector.detectFaces(image)) {
		final List<ScoredAnnotation<PERSON>> people = this.recogniser.annotate(face);
		annotated.add(new IndependentPair<FACE, List<ScoredAnnotation<PERSON>>>(face, people));
	}

	return annotated;
}
@Override
public void writeBinary(final DataOutput out) throws IOException {
	// Each component is written as (class name, component state) so that
	// readBinary can re-instantiate it reflectively.
	final String detectorClassName = this.detector.getClass().getName();
	out.writeUTF(detectorClassName);
	this.detector.writeBinary(out);

	final String recogniserClassName = this.recogniser.getClass().getName();
	out.writeUTF(recogniserClassName);
	this.recogniser.writeBinary(out);
}
}
@Override
public void readBinary(final DataInput in) throws IOException {
	// Read order must mirror writeBinary: detector first, then recogniser.
	this.detector = IOUtils.newInstance(in.readUTF());
	this.detector.readBinary(in);

	this.recogniser = IOUtils.newInstance(in.readUTF());
	this.recogniser.readBinary(in);
}
/**
 * Detect and recognise the faces in the given image, returning the most
 * likely person for each face.
 *
 * @param image
 *            the image
 * @return a list of faces with the most likely person
 */
public List<IndependentPair<FACE, ScoredAnnotation<PERSON>>> recogniseBest(final FImage image) {
	final List<IndependentPair<FACE, ScoredAnnotation<PERSON>>> annotated =
			new ArrayList<IndependentPair<FACE, ScoredAnnotation<PERSON>>>();

	// Pair every detection with its single best-scoring person.
	for (final FACE face : this.detector.detectFaces(image)) {
		final ScoredAnnotation<PERSON> best = this.recogniser.annotateBest(face);
		annotated.add(new IndependentPair<FACE, ScoredAnnotation<PERSON>>(face, best));
	}

	return annotated;
}
/**
 * Detect and recognise the faces in the given image, returning a list of
 * potential people for each face. The recognised people will be restricted
 * to the given set.
 *
 * @param image
 *            the image
 * @param restrict
 *            set of people to restrict to
 * @return a list of faces and recognitions
 */
public List<IndependentPair<FACE, List<ScoredAnnotation<PERSON>>>> recognise(final FImage image,
		final Set<PERSON> restrict)
{
	final List<IndependentPair<FACE, List<ScoredAnnotation<PERSON>>>> annotated =
			new ArrayList<IndependentPair<FACE, List<ScoredAnnotation<PERSON>>>>();

	// Pair every detection with the candidate people drawn from the
	// restricted set only.
	for (final FACE face : this.detector.detectFaces(image)) {
		final List<ScoredAnnotation<PERSON>> people = this.recogniser.annotate(face, restrict);
		annotated.add(new IndependentPair<FACE, List<ScoredAnnotation<PERSON>>>(face, people));
	}

	return annotated;
}
/**
 * Detect and recognise the faces in the given image, returning the most
 * likely person for each face. The recognised people will be restricted to
 * the given set.
 *
 * @param image
 *            the image
 * @param restrict
 *            set of people to restrict to
 * @return a list of faces with the most likely person
 */
public List<IndependentPair<FACE, ScoredAnnotation<PERSON>>> recogniseBest(final FImage image,
		final Set<PERSON> restrict)
{
	final List<IndependentPair<FACE, ScoredAnnotation<PERSON>>> annotated =
			new ArrayList<IndependentPair<FACE, ScoredAnnotation<PERSON>>>();

	// Pair every detection with the best-scoring person from the
	// restricted set only.
	for (final FACE face : this.detector.detectFaces(image)) {
		final ScoredAnnotation<PERSON> best = this.recogniser.annotateBest(face, restrict);
		annotated.add(new IndependentPair<FACE, ScoredAnnotation<PERSON>>(face, best));
	}

	return annotated;
}
private void useHaarCascadeDetector( MBFImage image, Metadata metadata ) { FImage fim = Transforms.calculateIntensity( image ); // Detect human faces: List<DetectedFace> faces = fd.detectFaces( fim ); for( DetectedFace face : faces ) { this.addFaceRectangle(face.getBounds(), metadata, "human"); } // Detect cat faces: faces = catfd.detectFaces(fim); for (DetectedFace face : faces) { this.addFaceRectangle(face.getBounds(), metadata, "cat"); } }
private void useHaarCascadeDetector( MBFImage image, Metadata metadata ) { FImage fim = Transforms.calculateIntensity( image ); // Detect human faces: List<DetectedFace> faces = fd.detectFaces( fim ); for( DetectedFace face : faces ) { this.addFaceRectangle(face.getBounds(), metadata, "human"); } // Detect cat faces: faces = catfd.detectFaces(fim); for (DetectedFace face : faces) { this.addFaceRectangle(face.getBounds(), metadata, "cat"); } }
final List<FACE> faces = detector.detectFaces(img); // run face detection on the image (enclosing method not visible here)
/**
 * Train the recogniser with a single example, returning the detected face.
 * If multiple faces are found, the biggest is chosen.
 * <p>
 * If you need more control, consider calling {@link #getDetector()} to get
 * a detector which you can apply to your image and {@link #getRecogniser()}
 * to get the recogniser which you can train with the detections directly.
 *
 * @param person
 *            the person
 * @param image
 *            the image with the persons face
 * @return the detected face, or {@code null} if no face was found
 */
public FACE train(final PERSON person, final FImage image) {
	final List<FACE> faces = this.detector.detectFaces(image);

	// Guard against detectors that return null as well as empty lists.
	if (faces == null || faces.isEmpty()) {
		FaceRecognitionEngine.logger.warn("no face detected");
		return null;
	}

	final FACE face;
	if (faces.size() == 1) {
		face = faces.get(0);
	} else {
		// Ambiguous detection: assume the largest face is the subject.
		FaceRecognitionEngine.logger.warn("More than one face found. Choosing biggest.");
		face = DatasetFaceDetector.getBiggest(faces);
	}

	this.recogniser.train(AnnotatedObject.create(face, person));
	return face;
}
detectedFaces = this.faceDetector.detectFaces(img); // (re)run face detection on the current image (enclosing method not visible here)
detectedFaces = this.faceDetector.detectFaces(img); // (re)run face detection on the current image (enclosing method not visible here)
/**
 * Detects faces in the image with a freshly-constructed Haar-cascade
 * detector (minimum size 20) and records each bounding box in the metadata.
 *
 * @param image
 *            the image to analyse
 * @param metadata
 *            metadata to which the face rectangles are added
 */
private void useHaarCascadeDetector(MBFImage image, Metadata metadata) {
	final FaceDetector<DetectedFace, FImage> detector = new HaarCascadeDetector(20);

	// The cascade operates on a single-band intensity image.
	final FImage grey = Transforms.calculateIntensity(image);

	for (final DetectedFace face : detector.detectFaces(grey)) {
		this.addFaceRectangle(face.getBounds(), metadata);
	}
}
/**
 * Draws a red rectangle around every face detected in the frame before it
 * is displayed.
 *
 * @param frame
 *            the video frame to annotate in place
 */
@Override
public void beforeUpdate(MBFImage frame) {
	// NOTE(review): a new detector is constructed for every frame; consider
	// hoisting it to a field if this proves slow.
	final FaceDetector<DetectedFace, FImage> detector = new HaarCascadeDetector(40);

	final FImage grey = Transforms.calculateIntensity(frame);
	for (final DetectedFace face : detector.detectFaces(grey)) {
		frame.drawShape(face.getBounds(), RGBColour.RED);
	}
}
private void useFKEFaceDetector( MBFImage image, Metadata metadata ) { FaceDetector<KEDetectedFace,FImage> fd = new FKEFaceDetector(20); FImage fim = Transforms.calculateIntensity( image ); List<KEDetectedFace> faces = fd.detectFaces( fim ); for( KEDetectedFace face : faces ) { //for( FacialKeypoint kp : face.getKeypoints() ) { // kp.position.translate(face.getBounds().getTopLeft()); //image.drawPoint(kp.position, RGBColour.GRAY, 3); //} this.addFaceRectangle(face.getBounds(), metadata, "human"); //image.drawShape(b, RGBColour.RED); //image.drawShape(b, ArrayUtils.toObject(dc.getColorComponents(null)) ); // Output in standard form: http://www.w3.org/2008/WebVideo/Fragments/WD-media-fragments-spec/#naming-space } //DisplayUtilities.display(image); }
private void useFKEFaceDetector( MBFImage image, Metadata metadata ) { FaceDetector<KEDetectedFace,FImage> fd = new FKEFaceDetector(20); FImage fim = Transforms.calculateIntensity( image ); List<KEDetectedFace> faces = fd.detectFaces( fim ); for( KEDetectedFace face : faces ) { //for( FacialKeypoint kp : face.getKeypoints() ) { // kp.position.translate(face.getBounds().getTopLeft()); //image.drawPoint(kp.position, RGBColour.GRAY, 3); //} this.addFaceRectangle(face.getBounds(), metadata); //image.drawShape(b, RGBColour.RED); //image.drawShape(b, ArrayUtils.toObject(dc.getColorComponents(null)) ); // Output in standard form: http://www.w3.org/2008/WebVideo/Fragments/WD-media-fragments-spec/#naming-space } //DisplayUtilities.display(image); }
private void useFKEFaceDetector( MBFImage image, Metadata metadata ) { FaceDetector<KEDetectedFace,FImage> fd = new FKEFaceDetector(20); FImage fim = Transforms.calculateIntensity( image ); List<KEDetectedFace> faces = fd.detectFaces( fim ); for( KEDetectedFace face : faces ) { //for( FacialKeypoint kp : face.getKeypoints() ) { // kp.position.translate(face.getBounds().getTopLeft()); //image.drawPoint(kp.position, RGBColour.GRAY, 3); //} this.addFaceRectangle(face.getBounds(), metadata, "human"); //image.drawShape(b, RGBColour.RED); //image.drawShape(b, ArrayUtils.toObject(dc.getColorComponents(null)) ); // Output in standard form: http://www.w3.org/2008/WebVideo/Fragments/WD-media-fragments-spec/#naming-space } //DisplayUtilities.display(image); }