모바일/iOS

Swift 4 — Using the Machine Learning Vision Framework (macOS / Cocoa)

늘근이 2018. 6. 1. 01:04



The simplest solution for reading a face image file and converting it to a face-landmark description.


import Cocoa
import Vision
/// Scans a directory of face images and, for each image, runs Vision's
/// face-landmark detection, printing the normalized landmark points and
/// appending them to the `desc` text field.
class ViewController: NSViewController {

    /// Accumulates the landmark output for each processed image.
    @IBOutlet weak var desc: NSTextField!
    /// Holds the directory path to scan for face images.
    @IBOutlet weak var path: NSTextField!

    /// Directory scanned when the user does not edit the path field.
    let DEFAULT_PATH = "/Users/kohry/Documents/face/"

    override func viewDidLoad() {
        super.viewDidLoad()
        path.stringValue = DEFAULT_PATH
        // Do any additional setup after loading the view.
    }

    override var representedObject: Any? {
        didSet {
            // Update the view, if already loaded.
        }
    }

    /// Enumerates every file in the chosen directory and runs face-landmark
    /// analysis on each one that can be decoded as an image.
    ///
    /// Replaces the original `try!` / force-unwrap flow, which crashed when the
    /// directory was missing or when the folder contained non-image files
    /// (e.g. `.DS_Store`).
    @IBAction func readImageFiles(_ sender: Any) {
        let fileManager = FileManager.default
        let directoryURL = URL(fileURLWithPath: path.stringValue, isDirectory: true)

        // Was `try!`: report the failure instead of crashing the app.
        let contents: [String]
        do {
            contents = try fileManager.contentsOfDirectory(atPath: path.stringValue)
        } catch {
            desc.stringValue = "Could not read directory: \(error.localizedDescription)"
            return
        }

        // Create one CIContext for all conversions; constructing a context is
        // expensive and the original built one per file inside the loop.
        let context = CIContext(options: nil)

        for fileName in contents {
            // Build the URL properly instead of concatenating path strings.
            let fileURL = directoryURL.appendingPathComponent(fileName)
            // Skip entries that cannot be decoded as images rather than
            // force-unwrapping and crashing.
            guard let ciImage = CIImage(contentsOf: fileURL),
                  let cgImage = context.createCGImage(ciImage, from: ciImage.extent) else {
                continue
            }
            analyze(cgImage, fileName: fileName)
        }
    }

    /// Runs `VNDetectFaceLandmarksRequest` on `image` and appends the
    /// normalized landmark points of every detected face to `desc`.
    ///
    /// - Parameters:
    ///   - image: The bitmap to analyze.
    ///   - fileName: File name echoed alongside the results so the output can
    ///     be matched back to its source image.
    func analyze(_ image: CGImage, fileName: String) {
        let faceRequest = VNDetectFaceLandmarksRequest { [weak self] (req, error) in
            guard let self = self else { return }
            // The original ignored `error` entirely; surface it instead.
            if let error = error {
                print("Face detection failed for \(fileName): \(error)")
                return
            }
            guard let results = req.results as? [VNFaceObservation] else { return }
            for observation in results {
                print(fileName)
                // Unwrap before formatting so the UI no longer shows the
                // "Optional(...)" debug wrapper around the points.
                guard let points = observation.landmarks?.allPoints?.normalizedPoints else {
                    continue
                }
                print(points)
                self.desc.stringValue += String(describing: points) + "\n"
            }
        }

        let imageRequestHandler = VNImageRequestHandler(cgImage: image, options: [:])
        do {
            // Was `try?`, which silently swallowed Vision failures.
            try imageRequestHandler.perform([faceRequest])
        } catch {
            print("Vision request failed for \(fileName): \(error)")
        }
    }
}
view raw face.swift hosted with ❤ by GitHub