So I'm working on an app that has a QR scanner as a main feature. Previously I was using camerax-alpha06 with Firebase ML Vision 24.0.3, and they worked fine for months with no customer complaints about scanning issues.
Then about two weeks ago I had to switch from Firebase ML Vision to ML Kit barcode scanning (related to the Crashlytics migration, but that's off topic), and now some users who could scan with the previous version no longer can. Sample affected devices are the Samsung Tab A7 (Android 5.1.1) and the Vivo 1919 (Android 10).
This is the build.gradle section that involves this feature:
def camerax_version = "1.0.0-beta11"
implementation "androidx.camera:camera-core:${camerax_version}"
implementation "androidx.camera:camera-camera2:${camerax_version}"
implementation "androidx.camera:camera-lifecycle:${camerax_version}"
implementation "androidx.camera:camera-view:1.0.0-alpha18"
implementation "androidx.camera:camera-extensions:1.0.0-alpha18"
implementation 'com.google.android.gms:play-services-mlkit-barcode-scanning:16.1.2'
This is my camera handler file:
class ScanQRCameraViewHandler(
    private val fragment: ScanQRDialogFragment,
    private val previewView: PreviewView
) {
    private val displayLayout get() = previewView

    companion object {
        private const val RATIO_4_3_VALUE = 4.0 / 3.0
        private const val RATIO_16_9_VALUE = 16.0 / 9.0
    }

    private val analyzer = GMSMLKitAnalyzer(onFoundQR = { extractedString ->
        fragment.verifyExtractedString(extractedString)
    }, onNotFoundQR = {
        resetStateToAllowNewImageStream()
    })

    private var cameraProviderFuture: ListenableFuture<ProcessCameraProvider>? = null
    private var camera: Camera? = null
    private var isAnalyzing = false

    internal fun resetStateToAllowNewImageStream() {
        isAnalyzing = false
    }

    internal fun setTorchEnabled(isEnabled: Boolean) {
        camera?.cameraControl?.enableTorch(isEnabled)
    }

    internal fun initCameraProviderIfHasNot() {
        if (cameraProviderFuture == null) {
            fragment.context?.let {
                cameraProviderFuture = ProcessCameraProvider.getInstance(it)
                val executor = ContextCompat.getMainExecutor(it)
                cameraProviderFuture?.addListener({
                    bindPreview(cameraProviderFuture?.get(), executor)
                }, executor)
            }
        }
    }

    private fun bindPreview(cameraProvider: ProcessCameraProvider?, executor: Executor) {
        val metrics = DisplayMetrics().also { displayLayout.display.getRealMetrics(it) }
        val screenAspectRatio = aspectRatio(metrics.widthPixels, metrics.heightPixels)
        val preview = initPreview(screenAspectRatio)
        val imageAnalyzer = createImageAnalyzer()
        val imageAnalysis = createImageAnalysis(executor, imageAnalyzer, screenAspectRatio)
        val cameraSelector = createCameraSelector()

        cameraProvider?.unbindAll()
        camera = cameraProvider?.bindToLifecycle(
            fragment as LifecycleOwner,
            cameraSelector, imageAnalysis, preview
        )
    }

    private fun createCameraSelector(): CameraSelector {
        return CameraSelector.Builder()
            .requireLensFacing(CameraSelector.LENS_FACING_BACK)
            .build()
    }

    private fun createImageAnalysis(
        executor: Executor, imageAnalyzer: ImageAnalysis.Analyzer, screenAspectRatio: Int
    ): ImageAnalysis {
        val rotation = displayLayout.rotation
        val imageAnalysis = ImageAnalysis.Builder()
            // .setTargetRotation(rotation.toInt())
            // .setTargetAspectRatio(screenAspectRatio)
            .setBackpressureStrategy(ImageAnalysis.STRATEGY_KEEP_ONLY_LATEST)
            .build()
        imageAnalysis.setAnalyzer(executor, imageAnalyzer)
        return imageAnalysis
    }

    private fun createImageAnalyzer(): ImageAnalysis.Analyzer {
        return ImageAnalysis.Analyzer {
            isAnalyzing = true
            analyzer.analyze(it)
        }
    }

    private fun initPreview(screenAspectRatio: Int): Preview {
        val preview: Preview = Preview.Builder()
            // .setTargetResolution(Size(840, 840))
            // .setTargetAspectRatio(screenAspectRatio)
            // .setTargetRotation(displayLayout.rotation.toInt())
            .build()
        preview.setSurfaceProvider(previewView.surfaceProvider)
        return preview
    }

    fun unbindAll() {
        cameraProviderFuture?.get()?.unbindAll()
    }

    private fun aspectRatio(width: Int, height: Int): Int {
        val previewRatio = width.coerceAtLeast(height).toDouble() / width.coerceAtMost(height)
        if (kotlin.math.abs(previewRatio - RATIO_4_3_VALUE) <= kotlin.math.abs(previewRatio - RATIO_16_9_VALUE)) {
            return AspectRatio.RATIO_4_3
        }
        return AspectRatio.RATIO_16_9
    }
}
And my analyzer:
internal class GMSMLKitAnalyzer(
    private val onFoundQR: (String) -> Unit,
    private val onNotFoundQR: () -> Unit
) : ImageAnalysis.Analyzer {

    private val options = BarcodeScannerOptions.Builder()
        .setBarcodeFormats(Barcode.FORMAT_QR_CODE).build()

    @SuppressLint("UnsafeExperimentalUsageError")
    override fun analyze(imageProxy: ImageProxy) {
        imageProxy.image?.let { mediaImage ->
            val image = InputImage.fromMediaImage(mediaImage, imageProxy.imageInfo.rotationDegrees)
            val scanner = BarcodeScanning.getClient(options)
            CoroutineScope(Dispatchers.Main).launch {
                // await() (kotlinx-coroutines-play-services) returns the List<Barcode> directly
                val barcodes = scanner.process(image).await()
                barcodes.find { it.rawValue != null }?.rawValue?.let {
                    onFoundQR(it)
                } ?: run { onNotFoundQR() }
                imageProxy.close()
            }
        } ?: imageProxy.close()
    }
}
The commented-out lines are things I tried adding that didn't help; some even caused issues on other previously working devices.
I'm not sure whether I've misconfigured something, so I'd appreciate any suggestions that could help me find the solution.
Thank you.
P.S. This is my first post, so if I've done anything wrong or missed something, please advise.
BarcodeScanning does not work on some devices running camera-camera2 version 1.0.0-beta08 or later. You can use an earlier version of camera-camera2 to bypass this issue. For example:
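A minimal sketch of the downgrade, assuming 1.0.0-beta07 (the last release before the broken beta08) as the version to pin:

def camerax_version = "1.0.0-beta07" // assumed: last version before the regression
implementation "androidx.camera:camera-core:${camerax_version}"
implementation "androidx.camera:camera-camera2:${camerax_version}"
implementation "androidx.camera:camera-lifecycle:${camerax_version}"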
See: https://developers.google.com/ml-kit/known-issues
We are working on a fix internally in ML Kit for the next SDK release.
Update your ML Kit barcode scanning dependency to 16.1.1 or above.
This issue was fixed in 'com.google.mlkit:barcode-scanning:16.1.1'.
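A sketch of what that dependency change could look like in build.gradle, assuming you move from the unbundled Play Services artifact the question uses to the bundled ML Kit artifact where the fix landed:

// Unbundled artifact from the question, affected by the issue:
// implementation 'com.google.android.gms:play-services-mlkit-barcode-scanning:16.1.2'
// Bundled ML Kit scanner containing the fix (16.1.1 or above):
implementation 'com.google.mlkit:barcode-scanning:16.1.1'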
Related
I am trying to integrate a TensorFlow Lite model in my Android app. I have a simple model to differentiate between cats and dogs. I downloaded the required dataset from Kaggle and used the Teachable Machine website to train the model. Then I downloaded the model as TensorFlow Lite with the quantized option selected. Below is my Android code to run detection with the model.
class ObjectDetectorHelper(
    var threshold: Float = 0.5f,
    var numThreads: Int = 2,
    var maxResults: Int = 1,
    var currentDelegate: Int = 0,
    var currentModel: Int = 0,
    val context: Context,
    val objectDetectorListener: DetectorListener
) {
    private val TAG = "ObjectDetectionHelper"

    // For this example this needs to be a var so it can be reset on changes. If the ObjectDetector
    // will not change, a lazy val would be preferable.
    private var objectDetector: ObjectDetector? = null
    private var gpuSupported = false

    init {
        TfLiteGpu.isGpuDelegateAvailable(context).onSuccessTask { gpuAvailable: Boolean ->
            val optionsBuilder = TfLiteInitializationOptions.builder()
            if (gpuAvailable) {
                optionsBuilder.setEnableGpuDelegateSupport(true)
            }
            TfLiteVision.initialize(context, optionsBuilder.build())
        }.addOnSuccessListener {
            objectDetectorListener.onInitialized()
        }.addOnFailureListener {
            objectDetectorListener.onError("TfLiteVision failed to initialize: " + it.message)
        }
    }

    fun clearObjectDetector() {
        objectDetector = null
    }

    // Initialize the object detector using current settings on the
    // thread that is using it. CPU and NNAPI delegates can be used with detectors
    // that are created on the main thread and used on a background thread, but
    // the GPU delegate needs to be used on the thread that initialized the detector
    private fun setupObjectDetector() {
        if (!TfLiteVision.isInitialized()) {
            Log.e(TAG, "setupObjectDetector: TfLiteVision is not initialized yet")
            return
        }

        // Create the base options for the detector using the specified max results and score threshold
        val optionsBuilder =
            ObjectDetector.ObjectDetectorOptions.builder()
                .setScoreThreshold(threshold)
                .setMaxResults(maxResults)

        // Set general detection options, including the number of threads to use
        val baseOptionsBuilder = BaseOptions.builder().setNumThreads(numThreads)

        // Use the specified hardware for running the model. Default to CPU
        when (currentDelegate) {
            DELEGATE_CPU -> {
                // Default
            }
            DELEGATE_GPU -> {
                if (gpuSupported) {
                    baseOptionsBuilder.useGpu()
                } else {
                    objectDetectorListener.onError("GPU is not supported on this device")
                }
            }
            DELEGATE_NNAPI -> {
                baseOptionsBuilder.useNnapi()
            }
        }

        optionsBuilder.setBaseOptions(baseOptionsBuilder.build())

        val modelName =
            when (currentModel) {
                MODEL_MOBILENETV1 -> "model.tflite"
                MODEL_EFFICIENTDETV0 -> "model.tflite"
                MODEL_EFFICIENTDETV1 -> "model.tflite"
                MODEL_EFFICIENTDETV2 -> "model.tflite"
                else -> "model.tflite"
            }

        try {
            objectDetector =
                ObjectDetector.createFromFileAndOptions(context, modelName, optionsBuilder.build())
        } catch (e: Exception) {
            objectDetectorListener.onError(
                "Object detector failed to initialize. See error logs for details"
            )
            Log.e(TAG, "TFLite failed to load model with error: " + e.message)
        }
    }

    fun detect(image: Bitmap, imageRotation: Int) {
        Log.i("resultssss", "9")
        if (!TfLiteVision.isInitialized()) {
            Log.e(TAG, "detect: TfLiteVision is not initialized yet")
            return
        }
        Log.i("resultssss", "10")
        if (objectDetector == null) {
            setupObjectDetector()
        }
        Log.i("resultssss", "11")

        // Inference time is the difference between the system time at the start and finish of the
        // process
        var inferenceTime = SystemClock.uptimeMillis()
        Log.i("resultssss", "12")

        // Create preprocessor for the image.
        // See https://www.tensorflow.org/lite/inference_with_metadata/
        // lite_support#imageprocessor_architecture
        val imageProcessor = ImageProcessor.Builder().add(Rot90Op(-imageRotation / 90)).build()
        Log.i("resultssss", "13")

        // Preprocess the image and convert it into a TensorImage for detection.
        val tensorImage = imageProcessor.process(TensorImage.fromBitmap(image))
        Log.i("resultssss", "14")
        val results = objectDetector?.detect(tensorImage)
        Log.i("resultssss", "15")
        inferenceTime = SystemClock.uptimeMillis() - inferenceTime
        Log.i("resultssss", "16")
        objectDetectorListener.onResults(
            results,
            inferenceTime,
            tensorImage.height,
            tensorImage.width
        )
    }

    interface DetectorListener {
        fun onInitialized()
        fun onError(error: String)
        fun onResults(
            results: MutableList<Detection>?,
            inferenceTime: Long,
            imageHeight: Int,
            imageWidth: Int
        )
    }

    companion object {
        const val DELEGATE_CPU = 0
        const val DELEGATE_GPU = 1
        const val DELEGATE_NNAPI = 2
        const val MODEL_MOBILENETV1 = 0
        const val MODEL_EFFICIENTDETV0 = 1
        const val MODEL_EFFICIENTDETV1 = 2
        const val MODEL_EFFICIENTDETV2 = 3
    }
}
class MainActivity : AppCompatActivity(), ObjectDetectorHelper.DetectorListener {

    private lateinit var cameraExecutor: ExecutorService
    private var mCameraProvider: ProcessCameraProvider? = null
    private lateinit var viewFinder: PreviewView
    private lateinit var objectDetectorHelper: ObjectDetectorHelper
    private lateinit var bitmapBuffer: Bitmap

    override fun onCreate(savedInstanceState: Bundle?) {
        super.onCreate(savedInstanceState)
        setContentView(R.layout.activity_main)
        viewFinder = findViewById(R.id.viewFinder)
        objectDetectorHelper = ObjectDetectorHelper(
            context = this,
            objectDetectorListener = this
        )
    }

    private fun setUpCamera() {
        if (allPermissionsGranted()) {
            startCamera()
        }
    }

    private fun detectObjects(image: ImageProxy) {
        Log.i("resultssss", "5")
        // Copy out RGB bits to the shared bitmap buffer
        image.use { bitmapBuffer.copyPixelsFromBuffer(image.planes[0].buffer) }
        Log.i("resultssss", "6")
        val imageRotation = image.imageInfo.rotationDegrees
        Log.i("resultssss", "7")
        // Pass Bitmap and rotation to the object detector helper for processing and detection
        objectDetectorHelper.detect(bitmapBuffer, imageRotation)
        Log.i("resultssss", "8")
    }

    private fun startCamera() {
        val cameraProviderFuture = ProcessCameraProvider.getInstance(this)
        cameraProviderFuture.addListener({
            // Used to bind the lifecycle of cameras to the lifecycle owner
            val cameraProvider: ProcessCameraProvider = cameraProviderFuture.get()
            mCameraProvider = cameraProvider

            // Preview
            val surfacePreview = Preview.Builder()
                .setTargetAspectRatio(AspectRatio.RATIO_4_3)
                .setTargetRotation(viewFinder.display.rotation)
                .build()
                .also {
                    it.setSurfaceProvider(viewFinder.surfaceProvider)
                }

            val imageAnalyzer =
                ImageAnalysis.Builder()
                    .setTargetAspectRatio(AspectRatio.RATIO_4_3)
                    .setTargetRotation(viewFinder.display.rotation)
                    .setBackpressureStrategy(ImageAnalysis.STRATEGY_KEEP_ONLY_LATEST)
                    .setOutputImageFormat(OUTPUT_IMAGE_FORMAT_RGBA_8888)
                    .build()
                    // The analyzer can then be assigned to the instance
                    .also {
                        Log.i("resultssss", "1")
                        it.setAnalyzer(cameraExecutor) { image ->
                            Log.i("resultssss", "2")
                            if (!::bitmapBuffer.isInitialized) {
                                Log.i("resultssss", "3")
                                // The image rotation and RGB image buffer are initialized only once
                                // the analyzer has started running
                                bitmapBuffer = Bitmap.createBitmap(
                                    image.width,
                                    image.height,
                                    Bitmap.Config.ARGB_8888
                                )
                            }
                            Log.i("resultssss", "4")
                            detectObjects(image)
                        }
                    }

            // Select back camera as a default
            val cameraSelector = CameraSelector.DEFAULT_BACK_CAMERA

            try {
                // Unbind use cases before rebinding
                cameraProvider.unbindAll()
                // Bind use cases to camera
                cameraProvider.bindToLifecycle(
                    this, cameraSelector, surfacePreview, imageAnalyzer
                )
            } catch (exc: Exception) {
                Toast.makeText(this, exc.message, Toast.LENGTH_LONG).show()
            }
        }, ContextCompat.getMainExecutor(this))
    }

    private fun allPermissionsGranted() = REQUIRED_PERMISSIONS.all {
        ContextCompat.checkSelfPermission(
            baseContext, it
        ) == PackageManager.PERMISSION_GRANTED
    }

    override fun onDestroy() {
        super.onDestroy()
        objectDetectorHelper.clearObjectDetector()
        cameraExecutor.shutdown()
    }

    companion object {
        private const val REQUEST_CODE_PERMISSIONS = 10
        private val REQUIRED_PERMISSIONS =
            mutableListOf(
                android.Manifest.permission.CAMERA
            ).toTypedArray()
    }

    override fun onRequestPermissionsResult(
        requestCode: Int, permissions: Array<String>, grantResults: IntArray
    ) {
        super.onRequestPermissionsResult(requestCode, permissions, grantResults)
        if (requestCode == REQUEST_CODE_PERMISSIONS) {
            if (allPermissionsGranted()) {
                setUpCamera()
            } else {
                Toast.makeText(
                    this,
                    "Permissions not granted by the user.",
                    Toast.LENGTH_SHORT
                ).show()
                finish()
            }
        }
    }

    override fun onInitialized() {
        if (allPermissionsGranted()) {
            setUpCamera()
        } else {
            ActivityCompat.requestPermissions(
                this, REQUIRED_PERMISSIONS, REQUEST_CODE_PERMISSIONS
            )
        }
        cameraExecutor = Executors.newSingleThreadExecutor()
    }

    override fun onError(error: String) {
        runOnUiThread { Toast.makeText(this, error, Toast.LENGTH_SHORT).show() }
    }

    override fun onResults(
        results: MutableList<Detection>?,
        inferenceTime: Long,
        imageHeight: Int,
        imageWidth: Int
    ) {
        runOnUiThread {
            Log.i(
                "resultssss",
                "${results?.get(0)?.categories.toString()} ${results?.get(0)?.boundingBox.toString()}"
            )
        }
    }
}
The complete error log is as follows:
Error getting native address of native library: task_vision_jni_gms
java.lang.IllegalArgumentException: Error occurred when initializing ObjectDetector: Mobile SSD models are expected to have exactly 4 outputs, found 1
    at org.tensorflow.lite.task.gms.vision.detector.ObjectDetector.initJniWithModelFdAndOptions(Native Method)
    at org.tensorflow.lite.task.gms.vision.detector.ObjectDetector.zzb(Unknown Source:0)
    at org.tensorflow.lite.task.gms.vision.detector.zzb.createHandle(org.tensorflow:tensorflow-lite-task-vision-play-services##0.4.2:4)
    at org.tensorflow.lite.task.core.TaskJniUtils$1.createHandle(TaskJniUtils.java:70)
    at org.tensorflow.lite.task.core.TaskJniUtils.createHandleFromLibrary(TaskJniUtils.java:91)
    at org.tensorflow.lite.task.core.TaskJniUtils.createHandleFromFdAndOptions(TaskJniUtils.java:66)
    at org.tensorflow.lite.task.gms.vision.detector.ObjectDetector.createFromFileAndOptions(org.tensorflow:tensorflow-lite-task-vision-play-services##0.4.2:2)
    at com.affinidi.tfdemoone.ObjectDetectorHelper.setupObjectDetector(ObjectDetectorHelper.kt:104)
    at com.affinidi.tfdemoone.ObjectDetectorHelper.detect(ObjectDetectorHelper.kt:121)
    at com.affinidi.tfdemoone.MainActivity.detectObjects(MainActivity.kt:89)
    at com.affinidi.tfdemoone.MainActivity.startCamera$lambda$4$lambda$3$lambda$2(MainActivity.kt:132)
    at com.affinidi.tfdemoone.MainActivity.$r8$lambda$cwS3iJ069sufgGf-nT7H81EEGtQ(Unknown Source:0)
    at com.affinidi.tfdemoone.MainActivity$$ExternalSyntheticLambda3.analyze(Unknown Source:2)
    at androidx.camera.core.ImageAnalysis.lambda$setAnalyzer$2(ImageAnalysis.java:481)
    at androidx.camera.core.ImageAnalysis$$ExternalSyntheticLambda2.analyze(Unknown Source:2)
    at androidx.camera.core.ImageAnalysisAbstractAnalyzer.lambda$analyzeImage$0$androidx-camera-core-ImageAnalysisAbstractAnalyzer(ImageAnalysisAbstractAnalyzer.java:286)
    at androidx.camera.core.ImageAnalysisAbstractAnalyzer$$ExternalSyntheticLambda1.run(Unknown Source:14)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1167)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:641)
    at java.lang.Thread.run(Thread.java:920)
Checking model.tflite revealed that the model you trained is a classification model, but you are using the ObjectDetector APIs.
Debugging:
import numpy as np
import tensorflow as tf
from PIL import Image

# Load the TFLite model and allocate tensors.
interpreter = tf.lite.Interpreter(TFLITE_FILE_PATH)
interpreter.allocate_tensors()

# Get input and output tensors.
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

# Test the model on a cat image, resized to the model's input shape.
image = Image.open("/home/vijay/Downloads/cat.jpg")
input_shape = input_details[0]['shape']  # e.g. [1, height, width, 3]
img = np.array(image.resize((input_shape[2], input_shape[1])), dtype=np.uint8)
interpreter.set_tensor(input_details[0]['index'], img[None, ...])
interpreter.invoke()

# Get the output.
output_data = interpreter.get_tensor(output_details[0]['index'])
# output_data [255, 0] --> idx 0 --> cat
# Checking the above for a dog photo:
# output_data [0, 255] --> idx 1 --> dog
As the error points out, you get only one output from a classification model. So check the Android example for how to handle a classification problem: https://github.com/tensorflow/examples/tree/master/lite/examples/image_classification/android
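To make the suggested switch concrete, here is a minimal sketch using the Task Library's ImageClassifier in place of ObjectDetector; the option values mirror the question's settings, and tensorImage stands in for the preprocessed TensorImage from the question's detect() method:

// Sketch: load the same model.tflite with the image classification Task API.
// Assumes the TFLite Task Vision library (org.tensorflow.lite.task.vision.classifier).
val classifierOptions = ImageClassifier.ImageClassifierOptions.builder()
    .setScoreThreshold(0.5f)
    .setMaxResults(1)
    .build()
val imageClassifier = ImageClassifier.createFromFileAndOptions(
    context, "model.tflite", classifierOptions
)

// A classifier returns one list of category scores rather than the four
// detection outputs (locations, classes, scores, count) ObjectDetector expects.
val results = imageClassifier.classify(tensorImage)
val top = results.firstOrNull()?.categories?.firstOrNull()
Log.i("classifier", "label=${top?.label} score=${top?.score}")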
I am creating a simple QR scanner using CameraX and Google ML Kit. I open an intent after the string value is extracted from the QR code. The problem I'm facing is that the intent opens multiple times. How do I resolve this?
The following is the setup for image analysis. The DisplayQR intent should open after the string value is received from the QR code.
val imageAnalysis = ImageAnalysis.Builder()
    .setTargetResolution(Size(640, 480))
    .setBackpressureStrategy(ImageAnalysis.STRATEGY_KEEP_ONLY_LATEST)
    .build()

imageAnalysis.setAnalyzer(
    ContextCompat.getMainExecutor(this),
    CodeAnalyzer(this, object : CallBackInterface {
        override fun onSuccess(qrString: String?) {
            imageAnalysis.clearAnalyzer()
            Toast.makeText(this@ActivityQR, qrString, Toast.LENGTH_SHORT).show()
            Log.d("rty", qrString.toString())
            // the following intent is opening multiple times
            val visitordetails =
                Intent(this@ActivityQR, DisplayQR::class.java)
            visitordetails.putExtra("VISITOR_QR", qrString)
            startActivity(visitordetails)
        }

        override fun onFailed() {
        }
    })
)

cameraProvider.bindToLifecycle(this, selectedCamera, imageAnalysis, cameraPreview)
Code for analyzing the image
class CodeAnalyzer(context: Context, callBackInterface: CallBackInterface) : ImageAnalysis.Analyzer {
    private val context: Context = context
    private val callback: CallBackInterface = callBackInterface

    @SuppressLint("UnsafeOptInUsageError")
    override fun analyze(image: ImageProxy) {
        val scanner: BarcodeScanner = BarcodeScanning.getClient()
        val scannedImage = image.image
        if (scannedImage != null) {
            val scannedInputImage = InputImage.fromMediaImage(
                scannedImage,
                image.imageInfo.rotationDegrees
            )
            scanner.process(scannedInputImage).addOnSuccessListener { barCodes ->
                for (qrCode in barCodes) {
                    when (qrCode.valueType) {
                        Barcode.TYPE_TEXT -> {
                            val qrString: String? = qrCode.rawValue
                            if (qrString != null) {
                                callback.onSuccess(qrString) // Here I am calling the callback
                            }
                        }
                    }
                }
            }.addOnFailureListener {
            }.addOnCompleteListener {
                image.close()
            }
        }
    }
}
Edit: Corrected activity name
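For what it's worth, clearAnalyzer() appears not to stop results that are already in flight, so onSuccess can still fire several more times after the first hit. A minimal sketch of a one-shot guard (the AtomicBoolean is my addition, not part of the original code):

// java.util.concurrent.atomic.AtomicBoolean; reset it if you need to scan again
private val handled = AtomicBoolean(false)

override fun onSuccess(qrString: String?) {
    // compareAndSet returns true only for the first caller; later frames bail out
    if (!handled.compareAndSet(false, true)) return
    imageAnalysis.clearAnalyzer()
    val visitordetails = Intent(this@ActivityQR, DisplayQR::class.java)
    visitordetails.putExtra("VISITOR_QR", qrString)
    startActivity(visitordetails)
}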
I am working with the ML Kit face detector: https://developers.google.com/ml-kit/vision/face-detection/android#kotlin_2
I take an image from Camera2 and process it, but I constantly get the error "FaceDetector was not released with FaceDetector.release()".
More details:
W/FaceDetector: FaceDetector was not released with FaceDetector.release()
E/BufferQueueProducer: [ImageReader-960x1280f23m2-7166-0](id:1bfe00000000,api:4,p:386,c:7166) dequeueBuffer: BufferQueue has been abandoned
I followed this tutorial to get Camera2 working: https://medium.com/@tylerwalker/integrating-camera2-api-on-android-feat-kotlin-4a4e65dc593f
Here is the code I tried for ML Kit:
val realTimeOps = FaceDetectorOptions.Builder()
    .setContourMode(FaceDetectorOptions.CONTOUR_MODE_ALL)
    .build()

val detector = FaceDetection.getClient(realTimeOps)

imageReader.setOnImageAvailableListener({
    imageReader.acquireLatestImage()?.let { image ->
        val mlImage = InputImage.fromMediaImage(image, 0) // TODO change image for calculation
        val result = detector.process(mlImage)
            .addOnSuccessListener { faces ->
                Log.d("photo", "Face found!")
            }
            .addOnFailureListener { e ->
                Log.d("photo", "Error: $e")
            }
        image.close()
    }
}, Handler { true })
Also, my "detector" val has no release() function :'(
I hope that someone can help me with this :)
I finally figured out how this error happened!
There must be only one instance of the detector. The error was in another part of my code (I am new to Android, and this is my first time working with Camera2):
cameraCharacteristics[CameraCharacteristics.SCALER_STREAM_CONFIGURATION_MAP]?.let { streamConfigurationMap ->
    streamConfigurationMap.getOutputSizes(ImageFormat.YUV_420_888)
        ?.let { yuvSizes ->
            val previewSize = yuvSizes.last()
            val displayRotation = windowManager.defaultDisplay.rotation
            val swappedDimensions = areDimensionsSwapped(displayRotation, cameraCharacteristics)
            val rotatedPreviewWidth = if (swappedDimensions) previewSize.height else previewSize.width
            val rotatedPreviewHeight = if (swappedDimensions) previewSize.width else previewSize.height
            surfaceView.holder.setFixedSize(rotatedPreviewWidth, rotatedPreviewHeight)

            val imageReader = ImageReader.newInstance(rotatedPreviewWidth, rotatedPreviewHeight,
                ImageFormat.YUV_420_888, 2)

            val realTimeOps = FaceDetectorOptions.Builder()
                .setContourMode(FaceDetectorOptions.CONTOUR_MODE_ALL)
                .build()

            val detector: FaceDetector = FaceDetection.getClient(realTimeOps)

            imageReader.setOnImageAvailableListener({
                imageReader.acquireLatestImage()?.let { image ->
                    val mlImage = InputImage.fromMediaImage(image, getRotationCompensation(cameraDevice.id, getInstance(), true))
                    val result = detector.process(mlImage)
                        .addOnSuccessListener { faces ->
                            if (faces.size > 0)
                                Log.d("photo", "Face found!")
                            else
                                Log.d("photo", "No face has been found")
                        }
                        .addOnFailureListener { e ->
                            Log.d("photo", "Error: $e")
                        }
                        .addOnCompleteListener {
                            image.close()
                        }
                }
            }, Handler { true })

            val previewSurface = surfaceView.holder.surface
            val recordingSurface = imageReader.surface

            val captureCallback = object : CameraCaptureSession.StateCallback() {
                override fun onConfigureFailed(session: CameraCaptureSession) {
                }

                override fun onConfigured(session: CameraCaptureSession) {
                    val previewRequestBuilder = cameraDevice.createCaptureRequest(
                        TEMPLATE_PREVIEW
                    ).apply {
                        addTarget(previewSurface)
                        addTarget(recordingSurface)
                    }
                    session.setRepeatingRequest(
                        previewRequestBuilder.build(),
                        object : CameraCaptureSession.CaptureCallback() {},
                        Handler { true }
                    )
                }
            }

            cameraDevice.createCaptureSession(mutableListOf(previewSurface, recordingSurface), captureCallback, Handler { true })
        }
}
I had put the creation of the FaceDetector inside the cameraCharacteristics block, but that code ran every time an image was captured. I needed to move the creation of the FaceDetector outside of it (since only one instance should exist, Google was rightly yelling at me).
Now I create it at the beginning of the onOpened listener of CameraDevice.StateCallback().
I think the best approach would be to keep the detector as a nullable private attribute of the class, initialize it in the onOpened() listener, and close it in the onDisconnected() listener.
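To make that concrete, a minimal sketch of the lifecycle just described (the nullable property and callback wiring are my own naming; FaceDetection, FaceDetector, and close() are the ML Kit pieces, and realTimeOps is the options object built above):

// One detector instance for the whole camera session.
private var detector: FaceDetector? = null

private val stateCallback = object : CameraDevice.StateCallback() {
    override fun onOpened(camera: CameraDevice) {
        // Create the single detector instance when the camera opens.
        detector = FaceDetection.getClient(realTimeOps)
        // ... proceed with preview/session setup ...
    }

    override fun onDisconnected(camera: CameraDevice) {
        // Release the detector so "FaceDetector was not released" goes away.
        detector?.close()
        detector = null
        camera.close()
    }

    override fun onError(camera: CameraDevice, error: Int) {
        camera.close()
    }
}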
I am trying out the new CameraX API and ran into this problem: when I capture an image, it is stored with the wrong rotation. For example, I capture in portrait orientation but the resulting image is in landscape orientation.
Here is my code:
private fun startCamera() {
    val previewConfig = PreviewConfig.Builder().apply {
        setTargetResolution(Size(textureView.width, textureView.height))
        setTargetRotation(textureView.display.rotation)
    }.build()

    val imageCaptureConfig = ImageCaptureConfig.Builder().apply {
        setCaptureMode(CaptureMode.MIN_LATENCY)
        setTargetAspectRatio(RATIO_4_3)
        setTargetRotation(textureView.display.rotation)
    }.build()
    imageCapture = ImageCapture(imageCaptureConfig)

    val preview = Preview(previewConfig)
    preview.setOnPreviewOutputUpdateListener { previewOutput ->
        removeView(textureView)
        addViewMatchParent(textureView, position = 0)
        textureView.surfaceTexture = previewOutput.surfaceTexture
        textureView.updateTransformForCameraFinderView()
    }

    (context as? LifecycleOwner)?.let { lifecycleOwner ->
        CameraX.bindToLifecycle(lifecycleOwner, preview, imageCapture)
    }
}

private fun capturePhoto() {
    tempImageFile = generateTmpFile(false)
    val executor = Executor { it.run() }
    imageCapture.takePicture(tempImageFile!!, executor, object : OnImageSavedListener {
        override fun onError(error: ImageCaptureError, message: String, exc: Throwable?) {
            exc?.printStackTrace()
        }

        override fun onImageSaved(photoFile: File) {
            post {
                // load image into ImageView by Glide
                showCapturedPhotoPreview(photoFile)
            }
        }
    })
}
Please give me advice on how I can fix it.
P.S. I already searched for a solution, so please don't just copy-paste the first thing that looks similar.
Update: I tried to set up my CameraView like in this sample; it works in their case, but not in mine.
Try this:

val imageCaptureConfig = ImageCaptureConfig.Builder().apply {
    setCaptureMode(CaptureMode.MIN_LATENCY)
    setTargetAspectRatio(RATIO_4_3)
    // play with this line!
    setTargetRotation(Surface.ROTATION_0)
    // setTargetRotation(textureView.display.rotation)
}.build()

To highlight the line that fixed my problem again:

setTargetRotation(Surface.ROTATION_0)

(setTargetRotation controls the rotation of the saved capture, which is why pinning it changes how the photo comes out.)
I am trying to implement face detection using Firebase ML Kit and the CameraX ImageAnalysis use case. It works fine with the back camera, but when I try the front camera, it detects nothing:
val config = PreviewConfig.Builder()
    .setLensFacing(CameraX.LensFacing.FRONT)
    .build()
val previewUseCase = Preview(config)

previewUseCase.setOnPreviewOutputUpdateListener { previewOutput ->
    viewFinder.post {
        removeView(viewFinder)
        addView(viewFinder, 0)
        viewFinder.surfaceTexture = previewOutput.surfaceTexture
        updateTransform(previewOutput)
    }
}

val highAccuracyOpts = FirebaseVisionFaceDetectorOptions.Builder()
    .setPerformanceMode(FirebaseVisionFaceDetectorOptions.ACCURATE)
    .build()
val detector = FirebaseVision.getInstance().getVisionFaceDetector(highAccuracyOpts)

val imageAnalysisConfig = ImageAnalysisConfig.Builder()
    .setImageReaderMode(ImageAnalysis.ImageReaderMode.ACQUIRE_LATEST_IMAGE)
    .build()
val imageAnalysis = ImageAnalysis(imageAnalysisConfig).apply {
    setAnalyzer(
        Executors.newSingleThreadExecutor(),
        ImageAnalysis.Analyzer { image, rotationDegrees ->
            if (image.image != null && isBusy.compareAndSet(false, true)) {
                val visionImage = FirebaseVisionImage.fromMediaImage(image.image!!, degreesToFirebaseRotation(rotationDegrees))
                detector.detectInImage(visionImage)
                    .addOnSuccessListener { faces ->
                        // faces.size always zero when using front camera
                        Timber.d("${faces.size}")
                        isBusy.set(false)
                    }
                    .addOnFailureListener { error ->
                        Timber.d("$error")
                    }
            }
        })
}

CameraX.bindToLifecycle(lifecycleOwner, previewUseCase, imageAnalysis)
I tested on a Nokia 8.1 with Android 10. I also tried https://github.com/firebase/quickstart-android/tree/master/mlkit, which does not use CameraX, and it works fine with the front camera.
Solved it by setting lensFacing for ImageAnalysis to CameraX.LensFacing.FRONT as well:
val imageAnalysisConfig = ImageAnalysisConfig.Builder()
    .setLensFacing(CameraX.LensFacing.FRONT)
    .setImageReaderMode(ImageAnalysis.ImageReaderMode.ACQUIRE_LATEST_IMAGE)
    .build()