
swift – Face detection is not correct while integrating OpenCV with native iOS


I’m attempting to integrate OpenCV with a native iOS app. The issue I’m facing is that my code is not able to detect faces correctly and draw a bounding box around them. Here is the complete source code; please drag and drop the OpenCV iOS framework into the project if you decide to clone it, as it was too large for GitHub and I had to remove it.

So here is my Objective-C++ code:

#import <opencv2/opencv.hpp>
#import <opencv2/imgcodecs/ios.h>
#import "OpenCVWrapper.h"

/*
 * Add a convertToMat method to the UIImage class
 */
@interface UIImage (OpenCVWrapper)
- (void)convertToMat: (cv::Mat *)pMat: (bool)alphaExists;
@end

@implementation UIImage (OpenCVWrapper)

- (void)convertToMat: (cv::Mat *)pMat: (bool)alphaExists {
    UIImageOrientation orientation = self.imageOrientation;
    cv::Mat mat;
    UIImageToMat(self, mat, alphaExists);
    
    // Rotate the Mat so it matches the UIImage's displayed orientation
    switch (orientation) {
        case UIImageOrientationRight:
            cv::rotate(mat, *pMat, cv::ROTATE_90_CLOCKWISE);
            break;
        case UIImageOrientationLeft:
            cv::rotate(mat, *pMat, cv::ROTATE_90_COUNTERCLOCKWISE);
            break;
        case UIImageOrientationDown:
            cv::rotate(mat, *pMat, cv::ROTATE_180);
            break;
        case UIImageOrientationUp:
        default:
            *pMat = mat;
            break;
    }
}
@end

@implementation OpenCVWrapper

+ (NSArray<NSValue *> *)detectFaceRectsInUIImage:(UIImage *)image {
    // Convert UIImage to cv::Mat
    cv::Mat mat;
    [image convertToMat:&mat :false];

    // Load the face detection model (guard against a missing resource,
    // since passing nil to UTF8String would crash the load call)
    NSString *faceCascadePath = [[NSBundle mainBundle] pathForResource:@"haarcascade_frontalface_default" ofType:@"xml"];
    cv::CascadeClassifier faceCascade;
    if (faceCascadePath == nil || !faceCascade.load([faceCascadePath UTF8String])) {
        NSLog(@"Error loading face detection model");
        return @[];
    }

    // Convert the image to grayscale. UIImageToMat produces an RGBA Mat,
    // so COLOR_RGBA2GRAY is the matching conversion code here.
    cv::Mat gray;
    cv::cvtColor(mat, gray, cv::COLOR_RGBA2GRAY);
    cv::equalizeHist(gray, gray);

    // Detect faces
    std::vector<cv::Rect> faces;
    faceCascade.detectMultiScale(gray, faces, 1.1, 2, cv::CASCADE_SCALE_IMAGE, cv::Size(30, 30));

    // Convert each cv::Rect to a CGRect and wrap it in an NSValue
    NSMutableArray<NSValue *> *faceRects = [NSMutableArray arrayWithCapacity:faces.size()];
    for (const auto &face : faces) {
        CGRect faceRect = CGRectMake(face.x, face.y, face.width, face.height);
        [faceRects addObject:[NSValue valueWithCGRect:faceRect]];
    }

    return [faceRects copy];
}

@end
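
To isolate whether the problem is in this wrapper or in the camera pipeline, one thing I can do is run the detector on a bundled still photo first. A minimal Swift sketch; the asset name "test_face" is hypothetical:

// Sanity check on a still image; "test_face" is a hypothetical bundled asset.
// If the printed rects look right here, the cascade and wrapper are fine and
// the problem is with the camera frames (most likely their orientation).
if let testImage = UIImage(named: "test_face"),
   let rects = OpenCVWrapper.detectFaceRects(in: testImage) {
    print("Detected \(rects.count) face(s): \(rects.map { $0.cgRectValue })")
}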

Following is my Swift code:

import UIKit
import AVFoundation
import VideoToolbox

class ViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
    
    var previewView: UIView!
    var boxView: UIView!
    
    // Camera capture required properties
    var videoDataOutput: AVCaptureVideoDataOutput!
    var videoDataOutputQueue: DispatchQueue!
    var previewLayer: AVCaptureVideoPreviewLayer!
    var captureDevice: AVCaptureDevice!
    let session = AVCaptureSession()
    private var faceOverlayView: FaceOverlayView!
    
    override func viewDidLoad() {
        super.viewDidLoad()
        previewView = UIView(frame: CGRect(x: 0,
                                           y: 0,
                                           width: UIScreen.main.bounds.size.width,
                                           height: UIScreen.main.bounds.size.height))
        previewView.contentMode = UIView.ContentMode.scaleAspectFit
        view.addSubview(previewView)
        
        boxView = UIView(frame: self.view.frame)
        view.addSubview(boxView)
        
        // Initialize face overlay view
        faceOverlayView = FaceOverlayView(frame: view.bounds)
        view.addSubview(faceOverlayView)
        
        setupAVCapture()
    }
    
    override var shouldAutorotate: Bool {
        if (UIDevice.current.orientation == UIDeviceOrientation.landscapeLeft ||
            UIDevice.current.orientation == UIDeviceOrientation.landscapeRight ||
            UIDevice.current.orientation == UIDeviceOrientation.unknown) {
            return false
        }
        else {
            return true
        }
    }
    
    func setupAVCapture() {
        session.sessionPreset = AVCaptureSession.Preset.vga640x480
        guard let device = AVCaptureDevice
            .default(AVCaptureDevice.DeviceType.builtInWideAngleCamera,
                     for: .video,
                     position: AVCaptureDevice.Position.back) else {
            return
        }
        captureDevice = device
        beginSession()
    }
    
    func beginSession() {
        var deviceInput: AVCaptureDeviceInput!
        
        do {
            deviceInput = try AVCaptureDeviceInput(device: captureDevice)
            guard deviceInput != nil else {
                print("error: can't get deviceInput")
                return
            }
            
            if self.session.canAddInput(deviceInput) {
                self.session.addInput(deviceInput)
            }
            
            videoDataOutput = AVCaptureVideoDataOutput()
            videoDataOutput.alwaysDiscardsLateVideoFrames = true
            videoDataOutputQueue = DispatchQueue(label: "VideoDataOutputQueue")
            videoDataOutput.setSampleBufferDelegate(self, queue: self.videoDataOutputQueue)
            
            if session.canAddOutput(self.videoDataOutput) {
                session.addOutput(self.videoDataOutput)
            }
            
            videoDataOutput.connection(with: .video)?.isEnabled = true
            
            previewLayer = AVCaptureVideoPreviewLayer(session: self.session)
            previewLayer.videoGravity = AVLayerVideoGravity.resizeAspect
            
            let rootLayer: CALayer = self.previewView.layer
            rootLayer.masksToBounds = true
            previewLayer.frame = rootLayer.bounds
            rootLayer.addSublayer(self.previewLayer)
            DispatchQueue.global(qos: .userInitiated).async {
                self.session.startRunning()
            }
        } catch let error as NSError {
            deviceInput = nil
            print("error: \(error.localizedDescription)")
        }
    }
    
    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        guard let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
        
        guard let image = UIImage(pixelBuffer: imageBuffer) else {
            return
        }
        detectFaces(in: image)
        //stopCamera()
    }
    
    func stopCamera() {
        session.stopRunning()
    }
    
    
    private func detectFaces(in image: UIImage) {
        guard let faceRects = OpenCVWrapper.detectFaceRects(in: image) else { return }
        
        DispatchQueue.main.async {
            let viewWidth = self.faceOverlayView.bounds.width
            let viewHeight = self.faceOverlayView.bounds.height
            let imageWidth = image.size.width
            let imageHeight = image.size.height
            
            let scaleX = viewWidth / imageWidth
            let scaleY = viewHeight / imageHeight
            
            // .resizeAspect letterboxes the video, so scale by the smaller factor
            let scaleFactor = min(scaleX, scaleY)
            
            let offsetX = (viewWidth - imageWidth * scaleFactor) / 2
            let offsetY = (viewHeight - imageHeight * scaleFactor) / 2
            
            let transformedRects = faceRects.map { $0.cgRectValue }.map { face in
                return CGRect(
                    x: face.origin.x * scaleFactor + offsetX,
                    y: face.origin.y * scaleFactor + offsetY,
                    width: face.size.width * scaleFactor,
                    height: face.size.height * scaleFactor
                )
            }
            
            self.faceOverlayView.setFaces(transformedRects)
        }
    }
}


extension UIImage {
    public convenience init?(pixelBuffer: CVPixelBuffer) {
        var cgImage: CGImage?
        VTCreateCGImageFromCVPixelBuffer(pixelBuffer, options: nil, imageOut: &cgImage)
        
        guard let cgImage = cgImage else {
            return nil
        }
        
        self.init(cgImage: cgImage)
    }
}

I’m able to build the project properly; it’s just that the face detection part is a bit off, and I’m not sure what else needs to be added.
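
Update: one thing I suspect, though I haven’t verified it, is frame orientation. VTCreateCGImageFromCVPixelBuffer returns the raw sensor image, and UIImage(cgImage:) always reports an orientation of .up, so the rotation switch in convertToMat never fires; in portrait the cascade would then run on a sideways frame, which a frontal-face Haar cascade will mostly miss. A sketch of one possible fix, placed in beginSession() after the output is added:

// Sketch (untested): deliver portrait-oriented buffers to the delegate so
// they match what the preview layer shows.
if let connection = videoDataOutput.connection(with: .video),
   connection.isVideoOrientationSupported {
    connection.videoOrientation = .portrait
}

If that is the cause, the image width and height used for scaling in detectFaces(in:) should then also line up with the preview.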
