A cross-platform Subject Segmentation app built with Compose Multiplatform (CMP), designed to deliver a unified UI and shared business logic for both Android and iOS. This open-source project demonstrates how to perform subject/background segmentation using platform-specific ML frameworks while maintaining a shared UI layer.
The app requests camera permissions and allows users to open the device camera directly from the CMP UI.
Before capturing, the app performs an alignment check (e.g., tilt/level indicators) to ensure the device is held correctly. The capture button is only enabled when the device is stable and aligned.
Once aligned, the user captures an image which is then passed to the segmentation model.
Segmentation is done using the best native ML tools on each platform:
- Android: Google ML Kit (Subject Segmentation API)
- iOS: Apple Vision Framework (VNGenerateForegroundInstanceMaskRequest)
Each platform handles conversion and processing natively, while the results are displayed through CMP.
UI is fully written in Compose Multiplatform, allowing the same design and interactions for Android and iOS.
android-trim.mp4
ScreenRecording_11-13-2025.12-19-25_1.mp4
Screen.Recording.2025-11-13.at.12.mp4
// Shared (commonMain) declaration — each platform supplies an `actual` implementation.
expect fun subjectSegmentation(imageBitmap: ImageBitmap, onResult: (ImageBitmap) -> Unit)

/**
 * Android `actual` implementation backed by ML Kit's Subject Segmentation API.
 *
 * Converts the Compose [ImageBitmap] into an ML Kit [InputImage], runs the
 * segmenter, and invokes [onResult] once for every detected subject whose
 * cut-out bitmap is available. Failures are logged only; [onResult] is never
 * invoked on error.
 *
 * @param imageBitmap the captured, upright image to segment.
 * @param onResult callback receiving one cut-out [ImageBitmap] per subject.
 */
actual fun subjectSegmentation(imageBitmap: ImageBitmap, onResult: (ImageBitmap) -> Unit) {
    try {
        // rotationDegrees = 0: the capture flow hands us an already-upright bitmap.
        val image = InputImage.fromBitmap(imageBitmap.asAndroidBitmap(), 0)

        // Request both the per-subject cut-out bitmap and its confidence mask.
        val subjectResultOptions = SubjectSegmenterOptions.SubjectResultOptions.Builder()
            .enableSubjectBitmap()
            .enableConfidenceMask()
            .build()
        val options = SubjectSegmenterOptions.Builder()
            .enableMultipleSubjects(subjectResultOptions)
            .build()

        val segmenter = SubjectSegmentation.getClient(options)
        segmenter.process(image)
            .addOnSuccessListener { result ->
                Log.d("SegmentationInfo", "Detected ${result.subjects.size} subjects")
                result.subjects.forEachIndexed { index, subject ->
                    // bitmap can be null for low-confidence subjects; skip those.
                    subject.bitmap?.let { bitmap ->
                        Log.d("SegmentationInfo", "Subject $index bitmap available")
                        onResult(bitmap.asImageBitmap())
                    }
                }
            }
            .addOnFailureListener { e ->
                Log.i("subjectSegmentation", "CameraXAndroidScreen: ${e.message}")
            }
    } catch (e: IOException) {
        e.printStackTrace()
        Log.i("subjectSegmentation", "subjectSegmentation: ${e.message}")
    }
}

@objc public class IOSSubjectSegmenter: NSObject {
// Reused Core Image rendering context — creating a CIContext per call is expensive.
private let context = CIContext()
/// Runs Vision saliency on `image` and calls `completion` (on the main queue)
/// with the masked subject image, or `nil` on any failure.
///
/// NOTE(review): the project docs mention `VNGenerateForegroundInstanceMaskRequest`,
/// but this implementation uses the objectness-based *saliency* request —
/// confirm which API is intended.
@objc public func extractSubject(from image: UIImage, completion: @escaping (UIImage?) -> Void) {
    guard let cgImage = image.cgImage else {
        // No backing CGImage (e.g. a CIImage-based UIImage) — nothing to segment.
        completion(nil)
        return
    }

    let saliencyRequest = VNGenerateObjectnessBasedSaliencyImageRequest()
    let requestHandler = VNImageRequestHandler(cgImage: cgImage, options: [:])

    // Vision work happens off the main thread; the completion is always
    // delivered back on the main queue.
    DispatchQueue.global().async {
        do {
            try requestHandler.perform([saliencyRequest])

            guard let saliency = saliencyRequest.results?.first as? VNSaliencyImageObservation else {
                DispatchQueue.main.async { completion(nil) }
                return
            }

            // applyMask returns nil on failure, which we forward as-is.
            let maskedImage = self.applyMask(on: image, mask: saliency.pixelBuffer)
            DispatchQueue.main.async { completion(maskedImage) }
        } catch {
            print("Vision error:", error.localizedDescription)
            DispatchQueue.main.async { completion(nil) }
        }
    }
}
/// Composites `image` against a transparent background using `mask`
/// (white = keep, black = transparent).
///
/// - Parameters:
///   - image: the source image; must have a backing `CGImage`.
///   - mask: the Vision-produced saliency/segmentation mask.
/// - Returns: the masked image, or `nil` if the image has no backing
///   `CGImage` or Core Image fails to render.
private func applyMask(on image: UIImage, mask: CVPixelBuffer) -> UIImage? {
    // Fix: avoid force-unwrapping cgImage — a CIImage-backed UIImage would crash here.
    guard let cgImage = image.cgImage else { return nil }

    let ciImage = CIImage(cgImage: cgImage)
    let maskImage = CIImage(cvPixelBuffer: mask)

    // Vision's mask is usually smaller than the source; scale it to match.
    let scaledMask = maskImage.transformed(by: CGAffineTransform(
        scaleX: ciImage.extent.width / maskImage.extent.width,
        y: ciImage.extent.height / maskImage.extent.height
    ))

    // Foreground = original pixels, background = transparent.
    let masked = ciImage.applyingFilter("CIBlendWithMask", parameters: [
        kCIInputMaskImageKey: scaledMask,
        kCIInputBackgroundImageKey: CIImage(color: .clear).cropped(to: ciImage.extent)
    ])

    guard let cgResult = context.createCGImage(masked, from: masked.extent) else { return nil }
    return UIImage(cgImage: cgResult)
}
}
// Bridges IOSSubjectSegmenter to the shared SubjectSegmenterProvider interface
// consumed from the Kotlin/CMP side.
class IOSSubjectSegmenterWrapper: SubjectSegmenterProvider {
private let segmenter = IOSSubjectSegmenter()
func extractSubject(image: UIImage, completion: @escaping (UIImage?) -> Void) {
// Forward directly; completion threading is handled inside IOSSubjectSegmenter.
segmenter.extractSubject(from: image, completion: completion)
}
}- Compose Multiplatform (CMP)
- Google ML Kit (Android)
- Apple Vision / VisionKit (iOS, Swift native)
- Native camera integrations (CameraX on Android, AVFoundation on iOS)
Contributions are welcome! Please follow these steps:
- Fork the repository.
- Create a new branch (git checkout -b feature-branch).
- Commit your changes (git commit -m 'Add some feature').
- Push to the branch (git push origin feature-branch).
- Open a pull request.
| Muhammad Waqas |