diff --git a/README.md b/README.md index 99e3b30..002102a 100644 --- a/README.md +++ b/README.md @@ -9,6 +9,13 @@ - [@nativescript/mlkit-selfie-segmentation](packages/mlkit-selfie-segmentation/README.md) - [@nativescript/mlkit-text-recognition](packages/mlkit-text-recognition/README.md) +# How to run it with the iOS Simulator? + +Since Xcode 26, older iOS simulators (up to 17.5) also crash when the MLKit libs are included in the binary. You can now +therefore control inclusion of the libs via a command-line switch. + +NSSkipMLKitFromIOSSimulator=1 ns prepare ios + # How to use? This workspace manages the suite of plugins listed above. diff --git a/migrations.json b/migrations.json deleted file mode 100644 index b1b6cc3..0000000 --- a/migrations.json +++ /dev/null @@ -1 +0,0 @@ -{ "migrations": [{ "cli": "nx", "version": "5.5.3", "description": "Migrate tools to 5.5.3", "implementation": "./src/migrations/update-5-5-3/update-5-5-3", "package": "@nativescript/plugin-tools", "name": "update-to-5.5.3" }] } diff --git a/packages/mlkit-barcode-scanning/package.json b/packages/mlkit-barcode-scanning/package.json index e7d6340..226da7e 100644 --- a/packages/mlkit-barcode-scanning/package.json +++ b/packages/mlkit-barcode-scanning/package.json @@ -1,6 +1,6 @@ { "name": "@nativescript/mlkit-barcode-scanning", - "version": "3.0.1", + "version": "3.0.2", "description": "NativeScript MLKit Barcode Scanner module", "main": "index", "typings": "index.d.ts", diff --git a/packages/mlkit-barcode-scanning/platforms/ios/Podfile b/packages/mlkit-barcode-scanning/platforms/ios/Podfile index d75f447..44f5253 100644 --- a/packages/mlkit-barcode-scanning/platforms/ios/Podfile +++ b/packages/mlkit-barcode-scanning/platforms/ios/Podfile @@ -1,2 +1,7 @@ platform :ios, '15.5.0' -pod 'GoogleMLKit/BarcodeScanning', '8.0.0' \ No newline at end of file + +if ENV.key?('NSSkipMLKitFromIOSSimulator') + puts '⏭️ Skipping GoogleMLKit/BarcodeScanning (simulator build)' +else + pod 'GoogleMLKit/BarcodeScanning' 
+end diff --git a/packages/mlkit-core/index.ios.ts b/packages/mlkit-core/index.ios.ts index 9fe0286..974619c 100644 --- a/packages/mlkit-core/index.ios.ts +++ b/packages/mlkit-core/index.ios.ts @@ -841,7 +841,7 @@ export function detectWithStillImage(image: any, options?: StillImageDetectionOp } catch (e) {} } resolve(result); - } + }, ); }); } diff --git a/packages/mlkit-core/package.json b/packages/mlkit-core/package.json index 4bd595c..da778ba 100644 --- a/packages/mlkit-core/package.json +++ b/packages/mlkit-core/package.json @@ -1,6 +1,6 @@ { "name": "@nativescript/mlkit-core", - "version": "3.0.2", + "version": "3.0.3", "description": "NativeScript MLKit Core", "main": "index", "typings": "index.d.ts", diff --git a/packages/mlkit-core/platforms/ios/src/TNSML.swift b/packages/mlkit-core/platforms/ios/src/TNSML.swift index 746d2c3..67dccac 100644 --- a/packages/mlkit-core/platforms/ios/src/TNSML.swift +++ b/packages/mlkit-core/platforms/ios/src/TNSML.swift @@ -1,41 +1,42 @@ import Foundation import UIKit -import MLKitVision + +#if canImport(MLKitVision) + import MLKitVision +#endif #if canImport(MLKitBarcodeScanning) -import MLKitBarcodeScanning + import MLKitBarcodeScanning #endif #if canImport(MLKitFaceDetection) -import MLKitFaceDetection + import MLKitFaceDetection #endif #if canImport(MLKitTextRecognition) -import MLKitTextRecognition + import MLKitTextRecognition #endif #if canImport(MLKitDigitalInkRecognition) -import MLKitDigitalInkRecognition + import MLKitDigitalInkRecognition #endif #if canImport(MLKitImageLabeling) -import MLKitImageLabeling + import MLKitImageLabeling #endif #if canImport(MLKitObjectDetection) -import MLKitObjectDetection + import MLKitObjectDetection #endif #if canImport(MLKitPoseDetection) -import MLKitPoseDetection + import MLKitPoseDetection #endif - #if canImport(MLKitSegmentationSelfie) -import MLKitSegmentationSelfie + import MLKitSegmentationSelfie #endif - @objcMembers @objc(TNSMLResult) public class TNSMLResult: NSObject 
{ @@ -47,330 +48,333 @@ public class TNSMLResult: NSObject { } } -@objcMembers -@objc(TNSML) -public class TNSML: NSObject { - private static let encoder = JSONEncoder() - private static let queue = DispatchQueue(label: "TNSML", attributes: .concurrent) - - - public static func processImage(_ image: UIImage, _ json: NSDictionary, _ callback: @escaping ((Any) -> Void)){ - if(isMLSupported()){ - let inputImage = VisionImage(image: image) - process(inputImage, json, callback) +#if canImport(MLKitVision) + @objcMembers + @objc(TNSML) + public class TNSML: NSObject { + private static let encoder = JSONEncoder() + private static let queue = DispatchQueue(label: "TNSML", attributes: .concurrent) + + public static func processImage( + _ image: UIImage, _ json: NSDictionary, _ callback: @escaping ((Any) -> Void) + ) { + if isMLSupported() { + let inputImage = VisionImage(image: image) + process(inputImage, json, callback) + } } - } - - - private static func process(_ inputImage: VisionImage, _ json: NSDictionary, _ callback: @escaping ((Any) -> Void)){ - queue.async { - - let detectorType = TNSMLKitDetectionType.init(rawValue: json["detectorType"] as? UInt32 ?? TNSMLKitDetectionType.None.rawValue) - - - var results: [TNSMLResult] = [] - - -#if canImport(MLKitBarcodeScanning) - if(detectorType == .Barcode || detectorType == .All){ - var formatRaw = 0 - let barcodeScanning = json["barcodeScanning"] as? NSDictionary - if(barcodeScanning != nil){ - let formatArray = barcodeScanning!["format"] as? [String] - - if(formatArray != nil){ - for item in formatArray! { - let format = BarcodeFormats(rawValue: item) - if(format != nil){ - formatRaw |= format!.format.rawValue + + private static func process( + _ inputImage: VisionImage, _ json: NSDictionary, _ callback: @escaping ((Any) -> Void) + ) { + queue.async { + + let detectorType = TNSMLKitDetectionType.init( + rawValue: json["detectorType"] as? UInt32 ?? 
TNSMLKitDetectionType.None.rawValue + ) + + var results: [TNSMLResult] = [] + + #if canImport(MLKitBarcodeScanning) + if detectorType == .Barcode || detectorType == .All { + var formatRaw = 0 + let barcodeScanning = json["barcodeScanning"] as? NSDictionary + if barcodeScanning != nil { + let formatArray = barcodeScanning!["format"] as? [String] + + if formatArray != nil { + for item in formatArray! { + let format = BarcodeFormats(rawValue: item) + if format != nil { + formatRaw |= format!.format.rawValue + } + } } + + if formatRaw == 0 { + formatRaw = BarcodeFormat.all.rawValue + } + } else { + formatRaw = BarcodeFormat.all.rawValue } + + let options = BarcodeScannerOptions( + formats: BarcodeFormat(rawValue: formatRaw)) + do { + let scanner = BarcodeScanner.barcodeScanner(options: options) + let result = try scanner.results(in: inputImage) + let barCodes = handleBarcodeScanner(result) + if !barCodes.isEmpty { + let response = toJSON(barCodes) + if response != nil { + results.append( + TNSMLResult( + response!, + TNSMLKitDetectionType.Barcode.string() + )) + } + } + } catch {} + } - - if(formatRaw == 0){ - formatRaw = BarcodeFormat.all.rawValue - } - }else { - formatRaw = BarcodeFormat.all.rawValue - } - - - - - let options = BarcodeScannerOptions(formats: BarcodeFormat(rawValue: formatRaw)) - do { - let scanner = BarcodeScanner.barcodeScanner(options: options) - let result = try scanner.results(in: inputImage) - let barCodes = handleBarcodeScanner(result) - if(!barCodes.isEmpty) { - let response = toJSON(barCodes) - if(response != nil){ - results.append(TNSMLResult( - response!, - TNSMLKitDetectionType.Barcode.string() - )) + #endif + + #if canImport(MLKitFaceDetection) + if detectorType == .Face || detectorType == .All { + let faceDetection = json["faceDetection"] as? NSDictionary + let options = FaceDetectorOptions() + if faceDetection != nil { + options.isTrackingEnabled = + faceDetection!["faceTracking"] as? Bool ?? 
false + options.minFaceSize = CGFloat( + faceDetection!["minimumFaceSize"] as? Float ?? 0.1) + options.performanceMode = + (faceDetection!["detectionMode"] as? String ?? "fast") == "accurate" + ? .accurate : .fast + + options.landmarkMode = + (faceDetection!["landmarkMode"] as? String ?? "all") == "none" + ? .none : .all + + options.contourMode = + (faceDetection!["contourMode"] as? String ?? "all") == "none" + ? .none : .all + + options.classificationMode = + (faceDetection!["classificationMode"] as? String ?? "all") == "none" + ? .none : .all + + } else { + options.isTrackingEnabled = false + options.minFaceSize = CGFloat(0.1) + options.performanceMode = .fast + options.classificationMode = .all + options.landmarkMode = .all + options.contourMode = .all } + + let faceDetector = FaceDetector.faceDetector(options: options) + do { + let result = try faceDetector.results(in: inputImage) + let faces = handleFaceDetection(result) + + if !faces.isEmpty { + let response = toJSON(faces) + if response != nil { + results.append( + TNSMLResult( + response!, + TNSMLKitDetectionType.Face.string() + )) + } + } + } catch {} } - } catch {} - - } -#endif - - - -#if canImport(MLKitFaceDetection) - if(detectorType == .Face || detectorType == .All){ - let faceDetection = json["faceDetection"] as? NSDictionary - let options = FaceDetectorOptions() - if(faceDetection != nil){ - options.isTrackingEnabled = faceDetection!["faceTracking"] as? Bool ?? false - options.minFaceSize = CGFloat(faceDetection!["minimumFaceSize"] as? Float ?? 0.1) - options.performanceMode = (faceDetection!["detectionMode"] as? String ?? "fast") == "accurate" ? .accurate : .fast - - - options.landmarkMode = (faceDetection!["landmarkMode"] as? String ?? "all") == "none" ? .none : .all - - options.contourMode = (faceDetection!["contourMode"] as? String ?? "all") == "none" ? .none : .all - - options.classificationMode = (faceDetection!["classificationMode"] as? String ?? "all") == "none" ? 
.none : .all - - }else { - options.isTrackingEnabled = false - options.minFaceSize = CGFloat(0.1) - options.performanceMode = .fast - options.classificationMode = .all - options.landmarkMode = .all - options.contourMode = .all - } - - - - - let faceDetector = FaceDetector.faceDetector(options: options) - do { - let result = try faceDetector.results(in: inputImage) - let faces = handleFaceDetection(result) - - if(!faces.isEmpty) { - let response = toJSON(faces) - if response != nil { - results.append(TNSMLResult( - response!, - TNSMLKitDetectionType.Face.string() - )) - } + #endif + + #if canImport(MLKitPoseDetection) + if detectorType == .Pose || detectorType == .All { + let options = PoseDetectorOptions() + options.detectorMode = .singleImage + let poseDetector = PoseDetector.poseDetector(options: options) + do { + let result = try poseDetector.results(in: inputImage) + let poses = handlePoseDetection(result) + + if !poses.isEmpty { + let response = toJSON(poses) + if response != nil { + results.append( + TNSMLResult( + response!, + TNSMLKitDetectionType.Pose.string() + )) + } + } + } catch {} } - } catch {} - } -#endif - - - -#if canImport(MLKitPoseDetection) - if(detectorType == .Pose || detectorType == .All){ - let options = PoseDetectorOptions() - options.detectorMode = .singleImage - let poseDetector = PoseDetector.poseDetector(options: options) - do { - let result = try poseDetector.results(in: inputImage) - let poses = handlePoseDetection(result) - - if(!poses.isEmpty) { - let response = toJSON(poses) - if response != nil { - results.append(TNSMLResult( - response!, - TNSMLKitDetectionType.Pose.string() - )) + #endif + + #if canImport(MLKitImageLabeling) + if detectorType == .Image || detectorType == .All { + let options = ImageLabelerOptions() + + let imageLabeling = json["imageLabeling"] as? NSDictionary + + if imageLabeling != nil { + options.confidenceThreshold = + (imageLabeling!["confidenceThreshold"] as? Float ?? 
0.5) as NSNumber + } else { + options.confidenceThreshold = 0.5 } + + let imageLabeler = ImageLabeler.imageLabeler(options: options) + do { + let result = try imageLabeler.results(in: inputImage) + let labels = handleImageLabeling(result) + + if !labels.isEmpty { + let response = toJSON(labels) + if response != nil { + results.append( + TNSMLResult( + response!, + TNSMLKitDetectionType.Image.string() + )) + } + } + } catch {} } - } catch {} - } -#endif - - - -#if canImport(MLKitImageLabeling) - if(detectorType == .Image || detectorType == .All){ - let options = ImageLabelerOptions() - - let imageLabeling = json["imageLabeling"] as? NSDictionary - - if(imageLabeling != nil){ - options.confidenceThreshold = (imageLabeling!["confidenceThreshold"] as? Float ?? 0.5) as NSNumber - }else { - options.confidenceThreshold = 0.5 - } - - let imageLabeler = ImageLabeler.imageLabeler(options: options) - do { - let result = try imageLabeler.results(in: inputImage) - let labels = handleImageLabeling(result) - - if(!labels.isEmpty) { - let response = toJSON(labels) - if response != nil { - results.append(TNSMLResult( - response!, - TNSMLKitDetectionType.Image.string() - )) + #endif + + #if canImport(MLKitObjectDetection) + if detectorType == .Object || detectorType == .All { + let options = ObjectDetectorOptions() + + let objectDetection = json["objectDetection"] as? NSDictionary + + if objectDetection != nil { + options.shouldEnableMultipleObjects = + objectDetection!["multiple"] as? Bool ?? false + options.shouldEnableClassification = + objectDetection!["classification"] as? Bool ?? 
false + + } else { + options.shouldEnableMultipleObjects = false + options.shouldEnableClassification = false } + + let objectDetector = ObjectDetector.objectDetector(options: options) + do { + let result = try objectDetector.results(in: inputImage) + let objects = handleObjectDetection(result) + + if !objects.isEmpty { + let response = toJSON(objects) + results.append( + TNSMLResult( + response!, + TNSMLKitDetectionType.Object.string() + )) + } + } catch {} } - } catch {} - } -#endif - - - -#if canImport(MLKitObjectDetection) - if(detectorType == .Object || detectorType == .All){ - let options = ObjectDetectorOptions() - - - let objectDetection = json["objectDetection"] as? NSDictionary - - if(objectDetection != nil){ - options.shouldEnableMultipleObjects = objectDetection!["multiple"] as? Bool ?? false - options.shouldEnableClassification = objectDetection!["classification"] as? Bool ?? false - - }else { - options.shouldEnableMultipleObjects = false - options.shouldEnableClassification = false - } - - - let objectDetector = ObjectDetector.objectDetector(options: options) - do { - let result = try objectDetector.results(in: inputImage) - let objects = handleObjectDetection(result) - - if(!objects.isEmpty) { - let response = toJSON(objects) - results.append(TNSMLResult( - response!, - TNSMLKitDetectionType.Object.string() - )) + #endif + + #if canImport(MLKitTextRecognition) + if detectorType == .Text || detectorType == .All { + let textRecognizer = TextRecognizer.textRecognizer( + options: TextRecognizerOptions()) + do { + let result = try textRecognizer.results(in: inputImage) + let texts = handleTextRecognition(result) + + if !texts.isEmpty { + let response = toJSON(texts) + if response != nil { + results.append( + TNSMLResult( + response!, + TNSMLKitDetectionType.Text.string() + )) + } + } + } catch {} } - } catch {} - } -#endif - - - - -#if canImport(MLKitTextRecognition) - if(detectorType == .Text || detectorType == .All){ - let textRecognizer = 
TextRecognizer.textRecognizer(options: TextRecognizerOptions()) - do { - let result = try textRecognizer.results(in: inputImage) - let texts = handleTextRecognition(result) - - if(!texts.isEmpty) { - let response = toJSON(texts) - if response != nil { - results.append(TNSMLResult( - response!, - TNSMLKitDetectionType.Text.string() - )) + #endif + + #if canImport(MLKitSegmentationSelfie) + if detectorType == .Selfie || detectorType == .All { + let options = SelfieSegmenterOptions() + options.segmenterMode = .singleImage + let selfieSegmentation = json["selfieSegmentation"] as? NSDictionary + + if selfieSegmentation != nil { + options.shouldEnableRawSizeMask = + selfieSegmentation!["enableRawSizeMask"] as? Bool ?? false + + } else { + options.shouldEnableRawSizeMask = false } + + let selfieSegmentor = Segmenter.segmenter(options: options) + do { + let mask = try selfieSegmentor.results(in: inputImage) + let maskWidth = CVPixelBufferGetWidth(mask.buffer) + let maskHeight = CVPixelBufferGetHeight(mask.buffer) + + CVPixelBufferLockBaseAddress( + mask.buffer, CVPixelBufferLockFlags.readOnly) + let maskBytesPerRow = CVPixelBufferGetBytesPerRow(mask.buffer) + let maskAddress = + CVPixelBufferGetBaseAddress(mask.buffer)!.bindMemory( + to: Float32.self, capacity: maskBytesPerRow * maskHeight) + let data = Data(bytes: maskAddress, count: maskBytesPerRow * maskHeight) + var ret: [String: Any] = [:] + ret["width"] = maskWidth + ret["height"] = maskHeight + ret["data"] = data + results.append( + TNSMLResult( + ret, + TNSMLKitDetectionType.Selfie.string() + )) + + } catch {} } - } catch {} - } -#endif - - -#if canImport(MLKitSegmentationSelfie) - if(detectorType == .Selfie || detectorType == .All){ - let options = SelfieSegmenterOptions() - options.segmenterMode = .singleImage - let selfieSegmentation = json["selfieSegmentation"] as? NSDictionary - - if(selfieSegmentation != nil){ - options.shouldEnableRawSizeMask = selfieSegmentation!["enableRawSizeMask"] as? Bool ?? 
false - - }else { - options.shouldEnableRawSizeMask = false + #endif + + DispatchQueue.main.async { + callback(results) } - - let selfieSegmentor = Segmenter.segmenter(options: options) - do { - let mask = try selfieSegmentor.results(in: inputImage) - let maskWidth = CVPixelBufferGetWidth(mask.buffer) - let maskHeight = CVPixelBufferGetHeight(mask.buffer) - - CVPixelBufferLockBaseAddress(mask.buffer, CVPixelBufferLockFlags.readOnly) - let maskBytesPerRow = CVPixelBufferGetBytesPerRow(mask.buffer) - let maskAddress = - CVPixelBufferGetBaseAddress(mask.buffer)!.bindMemory( - to: Float32.self, capacity: maskBytesPerRow * maskHeight) - let data = Data(bytes: maskAddress, count: maskBytesPerRow * maskHeight) - var ret: [String: Any] = [:] - ret["width"] = maskWidth - ret["height"] = maskHeight - ret["data"] = data - results.append(TNSMLResult( - ret, - TNSMLKitDetectionType.Selfie.string() - )) - - }catch {} + } -#endif - - - DispatchQueue.main.async { - callback(results) + } + + private static func toJSON(_ value: T) -> String? { + do { + let json = try encoder.encode(value) + return String(data: json, encoding: .utf8) ?? nil + } catch { + return nil } - } - } - - - - private static func toJSON(_ value: T)-> String?{ - do{ - let json = try encoder.encode(value) - return String(data: json, encoding: .utf8) ?? 
nil - }catch{ - return nil + + private static func isMLSupported() -> Bool { + var supported = false + #if canImport(MLKitBarcodeScanning) + supported = true + #endif + + #if canImport(MLKitFaceDetection) + supported = true + #endif + + #if canImport(MLKitTextRecognition) + supported = true + #endif + + #if canImport(MLKitDigitalInkRecognition) + supported = true + #endif + + #if canImport(MLKitImageLabeling) + supported = true + #endif + + #if canImport(MLKitObjectDetection) + supported = true + #endif + + #if canImport(MLKitPoseDetection) + supported = true + #endif + + #if canImport(MLKitSegmentationSelfie) + supported = true + #endif + + return supported } } - - - private static func isMLSupported() -> Bool{ - var supported = false -#if canImport(MLKitBarcodeScanning) - supported = true #endif - -#if canImport(MLKitFaceDetection) - supported = true -#endif - -#if canImport(MLKitTextRecognition) - supported = true -#endif - -#if canImport(MLKitDigitalInkRecognition) - supported = true -#endif - -#if canImport(MLKitImageLabeling) - supported = true -#endif - -#if canImport(MLKitObjectDetection) - supported = true -#endif - -#if canImport(MLKitPoseDetection) - supported = true -#endif - -#if canImport(MLKitSegmentationSelfie) - supported = true -#endif - - return supported - } -} diff --git a/packages/mlkit-core/platforms/ios/src/TNSMLKitHelper.swift b/packages/mlkit-core/platforms/ios/src/TNSMLKitHelper.swift index b8ed087..8d7171f 100644 --- a/packages/mlkit-core/platforms/ios/src/TNSMLKitHelper.swift +++ b/packages/mlkit-core/platforms/ios/src/TNSMLKitHelper.swift @@ -1,51 +1,53 @@ -import Foundation import AVFoundation -import MLKitVision +import Foundation +import UIKit + +#if canImport(MLKitVision) + import MLKitVision +#endif #if canImport(MLKitBarcodeScanning) -import MLKitBarcodeScanning + import MLKitBarcodeScanning #endif #if canImport(MLKitFaceDetection) -import MLKitFaceDetection + import MLKitFaceDetection #endif #if canImport(MLKitTextRecognition) 
-import MLKitTextRecognition + import MLKitTextRecognition #endif #if canImport(MLKitDigitalInkRecognition) -import MLKitDigitalInkRecognition + import MLKitDigitalInkRecognition #endif #if canImport(MLKitImageLabeling) -import MLKitImageLabeling + import MLKitImageLabeling #endif #if canImport(MLKitObjectDetection) -import MLKitObjectDetection + import MLKitObjectDetection #endif #if canImport(MLKitObjectDetectionCustom) -import MLKitObjectDetectionCustom + import MLKitObjectDetectionCustom #endif #if canImport(MLKitPoseDetection) -import MLKitPoseDetection + import MLKitPoseDetection #endif #if canImport(MLKitSegmentationSelfie) -import MLKitSegmentationSelfie + import MLKitSegmentationSelfie #endif - - @objc(TNSMLKitHelperCameraPosition) public enum TNSMLKitHelperCameraPosition: Int, RawRepresentable { case Front case Back public typealias RawValue = UInt32 - + public var rawValue: RawValue { switch self { case .Back: @@ -54,8 +56,7 @@ public enum TNSMLKitHelperCameraPosition: Int, RawRepresentable { return 1 } } - - + public init?(rawValue: RawValue) { switch rawValue { case 0: @@ -66,8 +67,7 @@ public enum TNSMLKitHelperCameraPosition: Int, RawRepresentable { return nil } } - - + public init?(string: String) { switch string { case "back": @@ -78,9 +78,8 @@ public enum TNSMLKitHelperCameraPosition: Int, RawRepresentable { return nil } } - -} +} struct TNSBounds: Codable { var x: Double @@ -89,25 +88,24 @@ struct TNSBounds: Codable { var height: Double } - func createBounds(_ frame: CGRect) -> TNSBounds { - return TNSBounds(x: Double(frame.origin.x), y: Double(frame.origin.y), width: Double(frame.size.width), height: Double(frame.size.height)) + return TNSBounds( + x: Double(frame.origin.x), y: Double(frame.origin.y), width: Double(frame.size.width), + height: Double(frame.size.height)) } - struct TNSPoint: Codable { - var x:Double + var x: Double var y: Double } func createPoint(_ point: NSValue?) -> TNSPoint? 
{ - guard point != nil else {return nil} - return TNSPoint(x: Double(point!.cgPointValue.x), y: Double(point!.cgPointValue.y)) + guard point != nil else { return nil } + return TNSPoint(x: Double(point!.cgPointValue.x), y: Double(point!.cgPointValue.y)) } - func createPoints(_ points: [NSValue]?) -> [TNSPoint]? { - guard points != nil else {return nil} + guard points != nil else { return nil } var results: [TNSPoint] = [] for point in points! { results.append(createPoint(point)!) @@ -115,9 +113,6 @@ func createPoints(_ points: [NSValue]?) -> [TNSPoint]? { return results } - - - @objc(TNSMLKitDetectionType) enum TNSMLKitDetectionType: Int, RawRepresentable { case Barcode @@ -132,7 +127,7 @@ enum TNSMLKitDetectionType: Int, RawRepresentable { case Selfie case None public typealias RawValue = UInt32 - + public var rawValue: RawValue { switch self { case .Barcode: @@ -159,8 +154,7 @@ enum TNSMLKitDetectionType: Int, RawRepresentable { return 10 } } - - + public init?(rawValue: RawValue) { switch rawValue { case 0: @@ -189,8 +183,7 @@ enum TNSMLKitDetectionType: Int, RawRepresentable { return nil } } - - + public init?(string: String) { switch string { case "barcode": @@ -207,7 +200,7 @@ enum TNSMLKitDetectionType: Int, RawRepresentable { self = .CustomObject case "pose": self = .Pose - case "text": + case "text": self = .Text case "all": self = .All @@ -219,9 +212,9 @@ enum TNSMLKitDetectionType: Int, RawRepresentable { return nil } } - + func string() -> String { - switch(self){ + switch self { case .Barcode: return "barcode" case .DigitalInk: @@ -248,17 +241,13 @@ enum TNSMLKitDetectionType: Int, RawRepresentable { } } - - - - @objc(TNSMLKitTorchMode) public enum TNSMLKitTorchMode: Int, RawRepresentable { case Off case On case Auto public typealias RawValue = UInt32 - + public var rawValue: RawValue { switch self { case .Off: @@ -269,8 +258,7 @@ public enum TNSMLKitTorchMode: Int, RawRepresentable { return 2 } } - - + public init?(rawValue: RawValue) { switch 
rawValue { case 0: @@ -283,8 +271,7 @@ public enum TNSMLKitTorchMode: Int, RawRepresentable { return nil } } - - + public init?(string: String) { switch string { case "off": @@ -299,7 +286,6 @@ public enum TNSMLKitTorchMode: Int, RawRepresentable { } } - @objc(TNSMLKitHelper) @objcMembers public class TNSMLKitHelper: NSObject, AVCaptureVideoDataOutputSampleBufferDelegate { @@ -311,17 +297,18 @@ public class TNSMLKitHelper: NSObject, AVCaptureVideoDataOutputSampleBufferDeleg private var isSessionSetup = false private var videoInput: AVCaptureDeviceInput? private var _latestImage: UIImage? = nil - + public func getCaptureInfo() -> String? { guard let videoInput = self.videoInput else { return nil } let formatDescription = videoInput.device.activeFormat.formatDescription - let orientation = getOrientation(deviceOrientation: UIDevice.current.orientation, - cameraPosition: videoInput.device.position) + let orientation = getOrientation( + deviceOrientation: UIDevice.current.orientation, + cameraPosition: videoInput.device.position) let dimensions = CMVideoFormatDescriptionGetDimensions(formatDescription) let imageInfo = [ "width": Int(dimensions.width), "height": Int(dimensions.height), - "orientation": orientation.rawValue + "orientation": orientation.rawValue, ] let encoder = JSONEncoder() if let jsonData = try? encoder.encode(imageInfo) { @@ -332,30 +319,26 @@ public class TNSMLKitHelper: NSObject, AVCaptureVideoDataOutputSampleBufferDeleg var retrieveLatestImage = false { didSet { - if(_latestImage != nil){ + if _latestImage != nil { _latestImage = nil } } } var latestImage: UIImage? 
{ - get { - return _latestImage - } + return _latestImage } - + var cameraPosition = TNSMLKitHelperCameraPosition.Back { didSet { - if(!isSessionSetup){ + if !isSessionSetup { return } setCamera() } } var output: AVCaptureVideoDataOutput { - get { - return _output - } + return _output } var autoFocus = true let session = AVCaptureSession() @@ -363,86 +346,84 @@ public class TNSMLKitHelper: NSObject, AVCaptureVideoDataOutputSampleBufferDeleg var detectorType = TNSMLKitDetectionType.All var processEveryNthFrame = 0 private var currentFrame = 0 - - private func updateAutoFocus(_ videoInput: AVCaptureDeviceInput?){ - if(!session.isRunning){ + + private func updateAutoFocus(_ videoInput: AVCaptureDeviceInput?) { + if !session.isRunning { return } do { guard let videoInput = videoInput else { return } - + try videoInput.device.lockForConfiguration() - + defer { videoInput.device.unlockForConfiguration() } - + if videoInput.device.isFocusModeSupported(.continuousAutoFocus) { videoInput.device.focusMode = .continuousAutoFocus if videoInput.device.isSmoothAutoFocusSupported { videoInput.device.isSmoothAutoFocusEnabled = true } } - - }catch {} + + } catch {} } - - - private func updateTorchMode(_ videoInput: AVCaptureDeviceInput?){ - if(!session.isRunning){ + + private func updateTorchMode(_ videoInput: AVCaptureDeviceInput?) 
{ + if !session.isRunning { return } do { guard videoInput?.device != nil else { return } - + try videoInput!.device.lockForConfiguration() - + defer { videoInput!.device.unlockForConfiguration() } - - switch(torchMode){ + + switch torchMode { case .Off: - if(videoInput!.device.isTorchModeSupported(.off)){ + if videoInput!.device.isTorchModeSupported(.off) { videoInput!.device.torchMode = .off } break case .On: - if(videoInput!.device.isTorchModeSupported(.on)){ + if videoInput!.device.isTorchModeSupported(.on) { videoInput!.device.torchMode = .on } break case .Auto: - if(videoInput!.device.isTorchModeSupported(.auto)){ + if videoInput!.device.isTorchModeSupported(.auto) { videoInput!.device.torchMode = .auto } break } - - }catch {} + + } catch {} } - + public var torchMode: TNSMLKitTorchMode = .Off { didSet { updateTorchMode(self.videoInput) } } - - + public var pause: Bool = false { didSet { sessionQueue.async { - if(self.isSessionSetup){ - if(self.pause && self.session.isRunning){ + if self.isSessionSetup { + if self.pause && self.session.isRunning { self.session.stopRunning() self.resetCurrentFrame() } - - if(!self.pause && !self.session.isRunning){ + + if !self.pause && !self.session.isRunning { self.session.startRunning() self.updateAutoFocus(self.videoInput) self.updateTorchMode(self.videoInput) @@ -451,50 +432,43 @@ public class TNSMLKitHelper: NSObject, AVCaptureVideoDataOutputSampleBufferDeleg } } } - - -#if canImport(MLKitBarcodeScanning) - var barcodeScanner: BarcodeScanner? -#endif - - -#if canImport(MLKitFaceDetection) - var faceDetector: FaceDetector? -#endif - - -#if canImport(MLKitTextRecognition) - var textRecognizer: TextRecognizer? -#endif - - -#if canImport(MLKitDigitalInkRecognition) - var digitalInkRecognizer: DigitalInkRecognizer? -#endif - - -#if canImport(MLKitImageLabeling) - var imageLabeler: ImageLabeler? -#endif - - -#if canImport(MLKitObjectDetection) - var objectDetector: ObjectDetector? 
-#endif -#if canImport(MLKitObjectDetectionCustom) - var customObjectDetector: ObjectDetector? -#endif - -#if canImport(MLKitPoseDetection) - var poseDetector: PoseDetector? -#endif - -#if canImport(MLKitSegmentationSelfie) - var selfieSegmentor: Segmenter? -#endif - - + #if canImport(MLKitBarcodeScanning) + var barcodeScanner: BarcodeScanner? + #endif + + #if canImport(MLKitFaceDetection) + var faceDetector: FaceDetector? + #endif + + #if canImport(MLKitTextRecognition) + var textRecognizer: TextRecognizer? + #endif + + #if canImport(MLKitDigitalInkRecognition) + var digitalInkRecognizer: DigitalInkRecognizer? + #endif + + #if canImport(MLKitImageLabeling) + var imageLabeler: ImageLabeler? + #endif + + #if canImport(MLKitObjectDetection) + var objectDetector: ObjectDetector? + #endif + + #if canImport(MLKitObjectDetectionCustom) + var customObjectDetector: ObjectDetector? + #endif + + #if canImport(MLKitPoseDetection) + var poseDetector: PoseDetector? + #endif + + #if canImport(MLKitSegmentationSelfie) + var selfieSegmentor: Segmenter? 
+ #endif + public override init() { super.init() _output.alwaysDiscardsLateVideoFrames = true @@ -503,47 +477,47 @@ public class TNSMLKitHelper: NSObject, AVCaptureVideoDataOutputSampleBufferDeleg ] _output.setSampleBufferDelegate(self, queue: queue) } - - public func startPreview(){ + + public func startPreview() { sessionQueue.async { - if(self.isSessionSetup && !self.session.isRunning && !self.pause){ + if self.isSessionSetup && !self.session.isRunning && !self.pause { self.session.startRunning() self.updateAutoFocus(self.videoInput) self.updateTorchMode(self.videoInput) } } - + } - - public func stopPreview(){ + + public func stopPreview() { sessionQueue.async { - if(self.isSessionSetup && self.session.isRunning){ + if self.isSessionSetup && self.session.isRunning { self.session.stopRunning() self.resetCurrentFrame() } } } - - public func toggleCamera(){ + + public func toggleCamera() { if cameraPosition == .Front { cameraPosition = .Back - }else { + } else { cameraPosition = .Front } setCamera() } - private func setCamera(){ + private func setCamera() { sessionQueue.async { [self] in - if(self.isSessionSetup){ + if self.isSessionSetup { let wasRunning = self.session.isRunning - + let videoDevice = self.getVideoDevice() guard videoDevice != nil else { let error = NSError(domain: "Failed to toggleCamera", code: 1, userInfo: nil) self.onError?(error) return } - + let videoInput: AVCaptureDeviceInput? do { videoInput = try AVCaptureDeviceInput(device: videoDevice!) @@ -551,24 +525,22 @@ public class TNSMLKitHelper: NSObject, AVCaptureVideoDataOutputSampleBufferDeleg self.onError?(error as NSError) return } - - - if(wasRunning){ + + if wasRunning { self.session.stopRunning() self.resetCurrentFrame() } - - - if(self.videoInput != nil){ + + if self.videoInput != nil { self.session.removeInput(self.videoInput!) } - + self.videoInput = videoInput - - if(self.session.canAddInput(videoInput!)){ + + if self.session.canAddInput(videoInput!) 
{ self.session.addInput(videoInput!) } - if(wasRunning && !self.pause){ + if wasRunning && !self.pause { self.session.startRunning() updateAutoFocus(videoInput) updateTorchMode(videoInput) @@ -583,14 +555,15 @@ public class TNSMLKitHelper: NSObject, AVCaptureVideoDataOutputSampleBufferDeleg if self.cameraPosition == .Front { position = .front } - if #available(iOS 10.0, *){ + if #available(iOS 10.0, *) { // close range autofocus requires a virtual camera as the minimum focus distance of WideAngleCamera - // is to far away to allow for good quality barcode scanning - captureDevice = AVCaptureDevice.default(.builtInTripleCamera, for: .video, position: position) ?? - AVCaptureDevice.default(.builtInDualWideCamera, for: .video, position: position) ?? - AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: position) - - }else { + // is to far away to allow for good quality barcode scanning + captureDevice = + AVCaptureDevice.default(.builtInTripleCamera, for: .video, position: position) + ?? AVCaptureDevice.default(.builtInDualWideCamera, for: .video, position: position) + ?? 
AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: position) + + } else { let devices = AVCaptureDevice.devices(for: .video) for device in devices { if device.position == position { @@ -601,12 +574,11 @@ public class TNSMLKitHelper: NSObject, AVCaptureVideoDataOutputSampleBufferDeleg } return captureDevice } - + public func hasCameraPermission() -> Bool { return AVCaptureDevice.authorizationStatus(for: .video) == .authorized } - - + public func requestCameraPermission(_ callback: @escaping ((Bool) -> Void)) { AVCaptureDevice.requestAccess(for: .video) { result in DispatchQueue.main.async { @@ -614,73 +586,70 @@ public class TNSMLKitHelper: NSObject, AVCaptureVideoDataOutputSampleBufferDeleg } } } - - - public func openCamera(){ + + public func openCamera() { sessionQueue.async { - if(self.isSessionSetup){ + if self.isSessionSetup { return } - - if(!self.hasCameraPermission()){ + + if !self.hasCameraPermission() { return } - + let captureDevice = self.getVideoDevice() - guard captureDevice != nil else {return} + guard captureDevice != nil else { return } do { self.videoInput = try AVCaptureDeviceInput(device: captureDevice!) - }catch { + } catch { self.onError?(error as NSError) } - - guard self.videoInput != nil else {return} - + + guard self.videoInput != nil else { return } + self.updateAutoFocus(self.videoInput) self.updateTorchMode(self.videoInput) - + self.session.beginConfiguration() self.session.addInput(self.videoInput!) - - if self.session.canAddOutput(self.output){ + + if self.session.canAddOutput(self.output) { self.session.addOutput(self.output) } self.session.commitConfiguration() self.isSessionSetup = true - + } } - + private func handleNil(_ value: AnyObject?) -> AnyHashable { - guard value != nil else {return NSNull()} + guard value != nil else { return NSNull() } return value as! AnyHashable } - - + private func handleNilArray(_ value: [Any]?) 
-> Any { - guard value != nil else {return NSNull()} + guard value != nil else { return NSNull() } return value! } - - - func toJSON(_ value: T)-> String?{ - do{ + + func toJSON(_ value: T) -> String? { + do { let json = try encoder.encode(value) return String(data: json, encoding: .utf8) ?? nil - }catch{ + } catch { return nil } } - - - - func getOrientation(deviceOrientation: UIDeviceOrientation, cameraPosition: AVCaptureDevice.Position) -> UIImage.Orientation { - switch (deviceOrientation) { + + func getOrientation( + deviceOrientation: UIDeviceOrientation, cameraPosition: AVCaptureDevice.Position + ) -> UIImage.Orientation { + switch deviceOrientation { case .portrait: if cameraPosition == .front { return .leftMirrored } - return .right + return .right case .landscapeLeft: if cameraPosition == .front { return .downMirrored @@ -697,238 +666,245 @@ public class TNSMLKitHelper: NSObject, AVCaptureVideoDataOutputSampleBufferDeleg } return .down case .unknown, .faceUp, .faceDown: - return .up; + return .up } } - - + private func resetCurrentFrame() { - if (isProcessingEveryNthFrame()) { + if isProcessingEveryNthFrame() { self.currentFrame = 0 } } - + private func isProcessingEveryNthFrame() -> Bool { return self.processEveryNthFrame > 0 } - + private func incrementCurrentFrame() { - if (isProcessingEveryNthFrame()) { + if isProcessingEveryNthFrame() { self.currentFrame += 1 } } - - + let context = CIContext() - - public func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) { - if(onScanCallback == nil){return} - autoreleasepool { - let buffer = CMSampleBufferGetImageBuffer(sampleBuffer) - guard buffer != nil else {return} - - if(self.currentFrame != self.processEveryNthFrame){ - self.incrementCurrentFrame() - return - } - let image = VisionImage(buffer: sampleBuffer) - - let orientation = getOrientation(deviceOrientation: UIDevice.current.orientation, cameraPosition: videoInput!.device.position) - 
- image.orientation = orientation - -#if canImport(MLKitBarcodeScanning) - if(detectorType == .Barcode || detectorType == .All){ - do { - let result = try self.barcodeScanner?.results(in: image) - if(result != nil){ - let barCodes = handleBarcodeScanner(result!) - if(!barCodes.isEmpty) { - let response = toJSON(barCodes) - if(response != nil){ - DispatchQueue.main.async { - self.onScanCallback!(response!, TNSMLKitDetectionType.Barcode.string()) + public func captureOutput( + _ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, + from connection: AVCaptureConnection + ) { + #if canImport(MLKitVision) + if onScanCallback == nil { return } + autoreleasepool { + let buffer = CMSampleBufferGetImageBuffer(sampleBuffer) + guard buffer != nil else { return } + + if self.currentFrame != self.processEveryNthFrame { + self.incrementCurrentFrame() + return + } + + let image = VisionImage(buffer: sampleBuffer) + + let orientation = getOrientation( + deviceOrientation: UIDevice.current.orientation, + cameraPosition: videoInput!.device.position) + + image.orientation = orientation + + #if canImport(MLKitBarcodeScanning) + if detectorType == .Barcode || detectorType == .All { + do { + let result = try self.barcodeScanner?.results(in: image) + if result != nil { + let barCodes = handleBarcodeScanner(result!) + if !barCodes.isEmpty { + let response = toJSON(barCodes) + if response != nil { + DispatchQueue.main.async { + self.onScanCallback!( + response!, TNSMLKitDetectionType.Barcode.string()) + } + } } } - } + } catch {} } - } catch {} - } -#endif - - -#if canImport(MLKitFaceDetection) - if(detectorType == .Face || detectorType == .All){ - do { - let result = try self.faceDetector?.results(in: image) - if(result != nil){ - let faces = handleFaceDetection(result!) 
- - if(!faces.isEmpty) { - let response = toJSON(faces) - if response != nil { - DispatchQueue.main.async { - self.onScanCallback!(response!, TNSMLKitDetectionType.Face.string()) + #endif + + #if canImport(MLKitFaceDetection) + if detectorType == .Face || detectorType == .All { + do { + let result = try self.faceDetector?.results(in: image) + if result != nil { + let faces = handleFaceDetection(result!) + + if !faces.isEmpty { + let response = toJSON(faces) + if response != nil { + DispatchQueue.main.async { + self.onScanCallback!( + response!, TNSMLKitDetectionType.Face.string()) + } + } } } - } + } catch {} } - } catch {} - } -#endif - - - -#if canImport(MLKitPoseDetection) - if(detectorType == .Pose || detectorType == .All){ - do { - let result = try self.poseDetector?.results(in: image) - if(result != nil){ - let poses = handlePoseDetection(result!) - - if(!poses.isEmpty) { - let response = toJSON(poses) - if response != nil { - DispatchQueue.main.async { - self.onScanCallback!(response!, TNSMLKitDetectionType.Pose.string()) + #endif + + #if canImport(MLKitPoseDetection) + if detectorType == .Pose || detectorType == .All { + do { + let result = try self.poseDetector?.results(in: image) + if result != nil { + let poses = handlePoseDetection(result!) + + if !poses.isEmpty { + let response = toJSON(poses) + if response != nil { + DispatchQueue.main.async { + self.onScanCallback!( + response!, TNSMLKitDetectionType.Pose.string()) + } + } } } - } + } catch {} } - } catch {} - } -#endif - - - -#if canImport(MLKitImageLabeling) - if(detectorType == .Image || detectorType == .All){ - do { - let result = try self.imageLabeler?.results(in: image) - if(result != nil){ - let labels = handleImageLabeling(result!) 
- - if(!labels.isEmpty) { - let response = toJSON(labels) - if response != nil { - DispatchQueue.main.async { - self.onScanCallback!(response!, TNSMLKitDetectionType.Image.string()) + #endif + + #if canImport(MLKitImageLabeling) + if detectorType == .Image || detectorType == .All { + do { + let result = try self.imageLabeler?.results(in: image) + if result != nil { + let labels = handleImageLabeling(result!) + + if !labels.isEmpty { + let response = toJSON(labels) + if response != nil { + DispatchQueue.main.async { + self.onScanCallback!( + response!, TNSMLKitDetectionType.Image.string()) + } + } } } - } + } catch {} } - } catch {} - } -#endif - - - -#if canImport(MLKitObjectDetection) - if(detectorType == .Object || detectorType == .All){ - do { - let result = try self.objectDetector?.results(in: image) - if(result != nil){ - let objects = handleObjectDetection(result!) - - if(!objects.isEmpty) { - let response = toJSON(objects) - if response != nil { - DispatchQueue.main.async { - self.onScanCallback!(response!, TNSMLKitDetectionType.Object.string()) + #endif + + #if canImport(MLKitObjectDetection) + if detectorType == .Object || detectorType == .All { + do { + let result = try self.objectDetector?.results(in: image) + if result != nil { + let objects = handleObjectDetection(result!) + + if !objects.isEmpty { + let response = toJSON(objects) + if response != nil { + DispatchQueue.main.async { + self.onScanCallback!( + response!, TNSMLKitDetectionType.Object.string()) + } + } } } - } + } catch {} } - } catch {} - } -#endif - - #if canImport(MLKitObjectDetectionCustom) - if(detectorType == .CustomObject || detectorType == .All){ - do { - let result = try self.customObjectDetector?.results(in: image) - if(result != nil){ - let objects = handleObjectDetection(result!) 
- if(!objects.isEmpty) { - let response = toJSON(objects) - if response != nil { - DispatchQueue.main.async { - self.onScanCallback!(response!, TNSMLKitDetectionType.CustomObject.string()) + #endif + + #if canImport(MLKitObjectDetectionCustom) + if detectorType == .CustomObject || detectorType == .All { + do { + let result = try self.customObjectDetector?.results(in: image) + if result != nil { + let objects = handleObjectDetection(result!) + if !objects.isEmpty { + let response = toJSON(objects) + if response != nil { + DispatchQueue.main.async { + self.onScanCallback!( + response!, + TNSMLKitDetectionType.CustomObject.string()) + } + } } } + } catch let error { + print( + "Failed to detect object with error \(error.localizedDescription).") + return } } - } catch let error { - print("Failed to detect object with error \(error.localizedDescription).") - return - } - } -#endif - - -#if canImport(MLKitTextRecognition) - if(detectorType == .Text || detectorType == .All){ - do { - let result = try self.textRecognizer?.results(in: image) - if(result != nil){ - let texts = handleTextRecognition(result!) - - if(!texts.isEmpty) { - let response = toJSON(texts) - if response != nil { - DispatchQueue.main.async { - self.onScanCallback!(response!, TNSMLKitDetectionType.Text.string()) + #endif + + #if canImport(MLKitTextRecognition) + if detectorType == .Text || detectorType == .All { + do { + let result = try self.textRecognizer?.results(in: image) + if result != nil { + let texts = handleTextRecognition(result!) + + if !texts.isEmpty { + let response = toJSON(texts) + if response != nil { + DispatchQueue.main.async { + self.onScanCallback!( + response!, TNSMLKitDetectionType.Text.string()) + } + } } } - } + } catch {} } - } catch {} - } -#endif - - - -#if canImport(MLKitSegmentationSelfie) - if(detectorType == .Selfie || detectorType == .All){ - do { - let result = try self.selfieSegmentor?.results(in: image) - if(result != nil){ - let mask = result! 
- let maskWidth = CVPixelBufferGetWidth(mask.buffer) - let maskHeight = CVPixelBufferGetHeight(mask.buffer) - - CVPixelBufferLockBaseAddress(mask.buffer, CVPixelBufferLockFlags.readOnly) - let maskBytesPerRow = CVPixelBufferGetBytesPerRow(mask.buffer) - let maskAddress = - CVPixelBufferGetBaseAddress(mask.buffer)!.bindMemory( - to: Float32.self, capacity: maskBytesPerRow * maskHeight) - let data = Data(bytes: maskAddress, count: maskBytesPerRow * maskHeight) - var ret: [String: Any] = [:] - ret["width"] = maskWidth - ret["height"] = maskHeight - ret["data"] = data - - DispatchQueue.main.async { - self.onScanCallback!(ret, TNSMLKitDetectionType.Selfie.string()) - } - + #endif + + #if canImport(MLKitSegmentationSelfie) + if detectorType == .Selfie || detectorType == .All { + do { + let result = try self.selfieSegmentor?.results(in: image) + if result != nil { + let mask = result! + let maskWidth = CVPixelBufferGetWidth(mask.buffer) + let maskHeight = CVPixelBufferGetHeight(mask.buffer) + + CVPixelBufferLockBaseAddress( + mask.buffer, CVPixelBufferLockFlags.readOnly) + let maskBytesPerRow = CVPixelBufferGetBytesPerRow(mask.buffer) + let maskAddress = + CVPixelBufferGetBaseAddress(mask.buffer)!.bindMemory( + to: Float32.self, capacity: maskBytesPerRow * maskHeight) + let data = Data( + bytes: maskAddress, count: maskBytesPerRow * maskHeight) + var ret: [String: Any] = [:] + ret["width"] = maskWidth + ret["height"] = maskHeight + ret["data"] = data + + DispatchQueue.main.async { + self.onScanCallback!(ret, TNSMLKitDetectionType.Selfie.string()) + } + + } + } catch {} } - }catch {} - } -#endif - // latest image should be updated only after last plugin finished processing, this ensures that - // image will be available till next scan result - if retrieveLatestImage { - let ciimage = CIImage(cvImageBuffer: buffer!) 
- self._latestImage = UIImage(ciImage: ciimage, scale: 1.0, orientation: orientation) - } else { - self._latestImage = nil + #endif + // latest image should be updated only after last plugin finished processing, this ensures that + // image will be available till next scan result + if retrieveLatestImage { + let ciimage = CIImage(cvImageBuffer: buffer!) + self._latestImage = UIImage( + ciImage: ciimage, scale: 1.0, orientation: orientation) + } else { + self._latestImage = nil + } + + self.resetCurrentFrame() } - - self.resetCurrentFrame() - } - + #endif + } - + } diff --git a/packages/mlkit-custom-object-detection/package.json b/packages/mlkit-custom-object-detection/package.json index b0c085a..064e625 100644 --- a/packages/mlkit-custom-object-detection/package.json +++ b/packages/mlkit-custom-object-detection/package.json @@ -1,6 +1,6 @@ { "name": "@nativescript/mlkit-custom-object-detection", - "version": "3.0.1", + "version": "3.0.2", "description": "NativeScript MLKit Custom Object Detection module", "main": "index", "typings": "index.d.ts", diff --git a/packages/mlkit-custom-object-detection/platforms/ios/Podfile b/packages/mlkit-custom-object-detection/platforms/ios/Podfile index 1d40227..d4192c8 100644 --- a/packages/mlkit-custom-object-detection/platforms/ios/Podfile +++ b/packages/mlkit-custom-object-detection/platforms/ios/Podfile @@ -1,2 +1,7 @@ platform :ios, '15.5' -pod 'GoogleMLKit/ObjectDetectionCustom', '8.0.0' \ No newline at end of file + +if ENV.key?('NSSkipMLKitFromIOSSimulator') + puts '⏭️ Skipping GoogleMLKit/ObjectDetectionCustom (simulator build)' +else + pod 'GoogleMLKit/ObjectDetectionCustom' +end diff --git a/packages/mlkit-digital-ink-recognition/package.json b/packages/mlkit-digital-ink-recognition/package.json index 2c1acf7..be8fd2b 100644 --- a/packages/mlkit-digital-ink-recognition/package.json +++ b/packages/mlkit-digital-ink-recognition/package.json @@ -1,6 +1,6 @@ { "name": "@nativescript/mlkit-digital-ink-recognition", - 
"version": "3.0.1", + "version": "3.0.2", "description": "NativeScript MLKit Digital Ink Recognition module", "main": "index", "typings": "index.d.ts", diff --git a/packages/mlkit-digital-ink-recognition/platforms/ios/Podfile b/packages/mlkit-digital-ink-recognition/platforms/ios/Podfile index c53a823..76adb71 100644 --- a/packages/mlkit-digital-ink-recognition/platforms/ios/Podfile +++ b/packages/mlkit-digital-ink-recognition/platforms/ios/Podfile @@ -1,2 +1,6 @@ platform :ios, '15.5.0' -pod 'GoogleMLKit/DigitalInkRecognition', '8.0.0' \ No newline at end of file +if ENV.key?('NSSkipMLKitFromIOSSimulator') + puts '⏭️ Skipping GoogleMLKit/DigitalInkRecognition (simulator build)' +else + pod 'GoogleMLKit/DigitalInkRecognition' +end diff --git a/packages/mlkit-face-detection/package.json b/packages/mlkit-face-detection/package.json index 40cc356..f6dcb9a 100644 --- a/packages/mlkit-face-detection/package.json +++ b/packages/mlkit-face-detection/package.json @@ -1,6 +1,6 @@ { "name": "@nativescript/mlkit-face-detection", - "version": "3.0.1", + "version": "3.0.2", "description": "NativeScript MLKit Face Detection module", "main": "index", "typings": "index.d.ts", diff --git a/packages/mlkit-face-detection/platforms/ios/Podfile b/packages/mlkit-face-detection/platforms/ios/Podfile index 20f0722..212d2d8 100644 --- a/packages/mlkit-face-detection/platforms/ios/Podfile +++ b/packages/mlkit-face-detection/platforms/ios/Podfile @@ -1,2 +1,7 @@ platform :ios, '15.5.0' -pod 'GoogleMLKit/FaceDetection', '8.0.0' \ No newline at end of file + +if ENV.key?('NSSkipMLKitFromIOSSimulator') + puts '⏭️ Skipping GoogleMLKit/FaceDetection (simulator build)' +else + pod 'GoogleMLKit/FaceDetection' +end diff --git a/packages/mlkit-image-labeling/package.json b/packages/mlkit-image-labeling/package.json index 7d43f1e..9de724f 100644 --- a/packages/mlkit-image-labeling/package.json +++ b/packages/mlkit-image-labeling/package.json @@ -1,6 +1,6 @@ { "name": 
"@nativescript/mlkit-image-labeling", - "version": "3.0.1", + "version": "3.0.2", "description": "NativeScript MLKit Image Labeling module", "main": "index", "typings": "index.d.ts", diff --git a/packages/mlkit-image-labeling/platforms/ios/Podfile b/packages/mlkit-image-labeling/platforms/ios/Podfile index 2b065da..5c9ac12 100644 --- a/packages/mlkit-image-labeling/platforms/ios/Podfile +++ b/packages/mlkit-image-labeling/platforms/ios/Podfile @@ -1,2 +1,7 @@ platform :ios, '15.5.0' -pod 'GoogleMLKit/ImageLabeling', '8.0.0' \ No newline at end of file + +if ENV.key?('NSSkipMLKitFromIOSSimulator') + puts '⏭️ Skipping GoogleMLKit/ImageLabeling (simulator build)' +else + pod 'GoogleMLKit/ImageLabeling' +end diff --git a/packages/mlkit-object-detection/package.json b/packages/mlkit-object-detection/package.json index 08f7210..0fb4d0a 100644 --- a/packages/mlkit-object-detection/package.json +++ b/packages/mlkit-object-detection/package.json @@ -1,6 +1,6 @@ { "name": "@nativescript/mlkit-object-detection", - "version": "3.0.1", + "version": "3.0.2", "description": "NativeScript MLKit Object Detection module", "main": "index", "typings": "index.d.ts", diff --git a/packages/mlkit-object-detection/platforms/ios/Podfile b/packages/mlkit-object-detection/platforms/ios/Podfile index 3e2e004..5c9ac12 100644 --- a/packages/mlkit-object-detection/platforms/ios/Podfile +++ b/packages/mlkit-object-detection/platforms/ios/Podfile @@ -1,2 +1,7 @@ platform :ios, '15.5.0' -pod 'GoogleMLKit/ObjectDetection', '8.0.0' \ No newline at end of file + +if ENV.key?('NSSkipMLKitFromIOSSimulator') + puts '⏭️ Skipping GoogleMLKit/ObjectDetection (simulator build)' +else + pod 'GoogleMLKit/ObjectDetection' +end diff --git a/packages/mlkit-pose-detection/package.json b/packages/mlkit-pose-detection/package.json index b4511e2..8d906bd 100644 --- a/packages/mlkit-pose-detection/package.json +++ b/packages/mlkit-pose-detection/package.json @@ -1,6 +1,6 @@ { "name": 
"@nativescript/mlkit-pose-detection", - "version": "3.0.1", + "version": "3.0.2", "description": "NativeScript MLKit Pose Detection module", "main": "index", "typings": "index.d.ts", diff --git a/packages/mlkit-pose-detection/platforms/ios/Podfile b/packages/mlkit-pose-detection/platforms/ios/Podfile index de0716e..20ca55d 100644 --- a/packages/mlkit-pose-detection/platforms/ios/Podfile +++ b/packages/mlkit-pose-detection/platforms/ios/Podfile @@ -1,7 +1,11 @@ platform :ios, '15.5.0' - if defined?($NSMLKitPoseDetectionAccurate) - pod 'GoogleMLKit/PoseDetectionAccurate', '8.0.0' +if ENV.key?('NSSkipMLKitFromIOSSimulator') + puts '⏭️ Skipping GoogleMLKit/PoseDetection (simulator build)' +else + if defined?($NSMLKitPoseDetectionAccurate) + pod 'GoogleMLKit/PoseDetectionAccurate' else - pod 'GoogleMLKit/PoseDetection', '8.0.0' + pod 'GoogleMLKit/PoseDetection' end +end diff --git a/packages/mlkit-selfie-segmentation/package.json b/packages/mlkit-selfie-segmentation/package.json index baa59bb..5bf74cb 100644 --- a/packages/mlkit-selfie-segmentation/package.json +++ b/packages/mlkit-selfie-segmentation/package.json @@ -1,6 +1,6 @@ { "name": "@nativescript/mlkit-selfie-segmentation", - "version": "3.0.1", + "version": "3.0.2", "description": "NativeScript MLKit Self Segmentation module", "main": "index", "typings": "index.d.ts", diff --git a/packages/mlkit-selfie-segmentation/platforms/ios/Podfile b/packages/mlkit-selfie-segmentation/platforms/ios/Podfile index cf324c5..cfd3414 100644 --- a/packages/mlkit-selfie-segmentation/platforms/ios/Podfile +++ b/packages/mlkit-selfie-segmentation/platforms/ios/Podfile @@ -1,2 +1,6 @@ platform :ios, '15.5.0' -pod 'GoogleMLKit/SegmentationSelfie', '8.0.0' \ No newline at end of file +if ENV.key?('NSSkipMLKitFromIOSSimulator') + puts '⏭️ Skipping GoogleMLKit/SegmentationSelfie (simulator build)' +else + pod 'GoogleMLKit/SegmentationSelfie' +end diff --git a/packages/mlkit-text-recognition/package.json 
b/packages/mlkit-text-recognition/package.json index 43cfcb0..b9bd2a2 100644 --- a/packages/mlkit-text-recognition/package.json +++ b/packages/mlkit-text-recognition/package.json @@ -1,6 +1,6 @@ { "name": "@nativescript/mlkit-text-recognition", - "version": "3.0.1", + "version": "3.0.2", "description": "NativeScript MLKit Text Recognition module", "main": "index", "typings": "index.d.ts", diff --git a/packages/mlkit-text-recognition/platforms/ios/Podfile b/packages/mlkit-text-recognition/platforms/ios/Podfile index 9ea9a30..4811b29 100644 --- a/packages/mlkit-text-recognition/platforms/ios/Podfile +++ b/packages/mlkit-text-recognition/platforms/ios/Podfile @@ -1,2 +1,6 @@ platform :ios, '15.5.0' -pod 'GoogleMLKit/TextRecognition','8.0.0' \ No newline at end of file +if ENV.key?('NSSkipMLKitFromIOSSimulator') + puts '⏭️ Skipping GoogleMLKit/TextRecognition (simulator build)' +else + pod 'GoogleMLKit/TextRecognition' +end diff --git a/tools/assets/App_Resources/iOS/Podfile b/tools/assets/App_Resources/iOS/Podfile index 8c35228..b735e3c 100644 --- a/tools/assets/App_Resources/iOS/Podfile +++ b/tools/assets/App_Resources/iOS/Podfile @@ -1,6 +1,8 @@ platform :ios, '15.5.0' +$NSSkipMLKitFromIOSSimulator = true + post_install do |installer| installer.pods_project.targets.each do |target| target.build_configurations.each do |config| @@ -8,4 +10,5 @@ post_install do |installer| config.build_settings['CODE_SIGNING_ALLOWED'] = 'NO' end end -end \ No newline at end of file +end +