Error getting native address of native library: task_vision_jni_gms - android

I am trying to integrate a TensorFlow Lite model in my Android app. I have a simple model to differentiate between cats and dogs. I downloaded the required dataset from Kaggle and used the Teachable Machine website to train the model. Then I downloaded the model as TensorFlow Lite with the quantized option selected. Below is my Android code to run detection.
class ObjectDetectorHelper(
var threshold: Float = 0.5f,
var numThreads: Int = 2,
var maxResults: Int = 1,
var currentDelegate: Int = 0,
var currentModel: Int = 0,
val context: Context,
val objectDetectorListener: DetectorListener
) {
private val TAG = "ObjectDetectionHelper"
// For this example this needs to be a var so it can be reset on changes. If the ObjectDetector
// will not change, a lazy val would be preferable.
private var objectDetector: ObjectDetector? = null
private var gpuSupported = false
init {
TfLiteGpu.isGpuDelegateAvailable(context).onSuccessTask { gpuAvailable: Boolean ->
val optionsBuilder =
TfLiteInitializationOptions.builder()
if (gpuAvailable) {
optionsBuilder.setEnableGpuDelegateSupport(true)
}
TfLiteVision.initialize(context, optionsBuilder.build())
}.addOnSuccessListener {
objectDetectorListener.onInitialized()
}.addOnFailureListener{
objectDetectorListener.onError("TfLiteVision failed to initialize: "
+ it.message)
}
}
fun clearObjectDetector() {
objectDetector = null
}
// Initialize the object detector using current settings on the
// thread that is using it. CPU and NNAPI delegates can be used with detectors
// that are created on the main thread and used on a background thread, but
// the GPU delegate needs to be used on the thread that initialized the detector
private fun setupObjectDetector() {
if (!TfLiteVision.isInitialized()) {
Log.e(TAG, "setupObjectDetector: TfLiteVision is not initialized yet")
return
}
// Create the base options for the detector using the specified max results and score threshold
val optionsBuilder =
ObjectDetector.ObjectDetectorOptions.builder()
.setScoreThreshold(threshold)
.setMaxResults(maxResults)
// Set general detection options, including number of used threads
val baseOptionsBuilder = BaseOptions.builder().setNumThreads(numThreads)
// Use the specified hardware for running the model. Default to CPU
when (currentDelegate) {
DELEGATE_CPU -> {
// Default
}
DELEGATE_GPU -> {
if (gpuSupported) {
baseOptionsBuilder.useGpu()
} else {
objectDetectorListener.onError("GPU is not supported on this device")
}
}
DELEGATE_NNAPI -> {
baseOptionsBuilder.useNnapi()
}
}
optionsBuilder.setBaseOptions(baseOptionsBuilder.build())
val modelName =
when (currentModel) {
MODEL_MOBILENETV1 -> "model.tflite"
MODEL_EFFICIENTDETV0 -> "model.tflite"
MODEL_EFFICIENTDETV1 -> "model.tflite"
MODEL_EFFICIENTDETV2 -> "model.tflite"
else -> "model.tflite"
}
try {
objectDetector =
ObjectDetector.createFromFileAndOptions(context, modelName, optionsBuilder.build())
} catch (e: Exception) {
objectDetectorListener.onError(
"Object detector failed to initialize. See error logs for details"
)
Log.e(TAG, "TFLite failed to load model with error: " + e.message)
}
}
fun detect(image: Bitmap, imageRotation: Int) {
Log.i("resultssss","9")
if (!TfLiteVision.isInitialized()) {
Log.e(TAG, "detect: TfLiteVision is not initialized yet")
return
}
Log.i("resultssss","10")
if (objectDetector == null) {
setupObjectDetector()
}
Log.i("resultssss","11")
// Inference time is the difference between the system time at the start and finish of the
// process
var inferenceTime = SystemClock.uptimeMillis()
Log.i("resultssss","12")
// Create preprocessor for the image.
// See https://www.tensorflow.org/lite/inference_with_metadata/
// lite_support#imageprocessor_architecture
val imageProcessor = ImageProcessor.Builder().add(Rot90Op(-imageRotation / 90)).build()
Log.i("resultssss","13")
// Preprocess the image and convert it into a TensorImage for detection.
val tensorImage = imageProcessor.process(TensorImage.fromBitmap(image))
Log.i("resultssss","14")
val results = objectDetector?.detect(tensorImage)
Log.i("resultssss","15")
inferenceTime = SystemClock.uptimeMillis() - inferenceTime
Log.i("resultssss","16")
objectDetectorListener.onResults(
results,
inferenceTime,
tensorImage.height,
tensorImage.width)
}
interface DetectorListener {
fun onInitialized()
fun onError(error: String)
fun onResults(
results: MutableList<Detection>?,
inferenceTime: Long,
imageHeight: Int,
imageWidth: Int
)
}
companion object {
const val DELEGATE_CPU = 0
const val DELEGATE_GPU = 1
const val DELEGATE_NNAPI = 2
const val MODEL_MOBILENETV1 = 0
const val MODEL_EFFICIENTDETV0 = 1
const val MODEL_EFFICIENTDETV1 = 2
const val MODEL_EFFICIENTDETV2 = 3
}
}
class MainActivity : AppCompatActivity(), ObjectDetectorHelper.DetectorListener {
private lateinit var cameraExecutor: ExecutorService
private var mCameraProvider: ProcessCameraProvider? = null
private lateinit var viewFinder: PreviewView
private lateinit var objectDetectorHelper: ObjectDetectorHelper
private lateinit var bitmapBuffer: Bitmap
override fun onCreate(savedInstanceState: Bundle?) {
super.onCreate(savedInstanceState)
setContentView(R.layout.activity_main)
viewFinder = findViewById(R.id.viewFinder)
objectDetectorHelper = ObjectDetectorHelper(
context = this,
objectDetectorListener = this
)
}
private fun setUpCamera() {
if (allPermissionsGranted()) {
startCamera()
}
}
private fun detectObjects(image: ImageProxy) {
Log.i("resultssss", "5")
// Copy out RGB bits to the shared bitmap buffer
image.use { bitmapBuffer.copyPixelsFromBuffer(image.planes[0].buffer) }
Log.i("resultssss", "6")
val imageRotation = image.imageInfo.rotationDegrees
Log.i("resultssss", "7")
// Pass Bitmap and rotation to the object detector helper for processing and detection
objectDetectorHelper.detect(bitmapBuffer, imageRotation)
Log.i("resultssss", "8")
}
private fun startCamera() {
val cameraProviderFuture = ProcessCameraProvider.getInstance(this)
cameraProviderFuture.addListener({
// Used to bind the lifecycle of cameras to the lifecycle owner
val cameraProvider: ProcessCameraProvider = cameraProviderFuture.get()
mCameraProvider = cameraProvider
// Preview
val surfacePreview = Preview.Builder()
.setTargetAspectRatio(AspectRatio.RATIO_4_3)
.setTargetRotation(viewFinder.display.rotation)
.build()
.also {
it.setSurfaceProvider(viewFinder.surfaceProvider)
}
val imageAnalyzer =
ImageAnalysis.Builder()
.setTargetAspectRatio(AspectRatio.RATIO_4_3)
.setTargetRotation(viewFinder.display.rotation)
.setBackpressureStrategy(ImageAnalysis.STRATEGY_KEEP_ONLY_LATEST)
.setOutputImageFormat(OUTPUT_IMAGE_FORMAT_RGBA_8888)
.build()
// The analyzer can then be assigned to the instance
.also {
Log.i("resultssss", "1")
it.setAnalyzer(cameraExecutor) { image ->
Log.i("resultssss", "2")
if (!::bitmapBuffer.isInitialized) {
Log.i("resultssss", "3")
// The image rotation and RGB image buffer are initialized only once
// the analyzer has started running
bitmapBuffer = Bitmap.createBitmap(
image.width,
image.height,
Bitmap.Config.ARGB_8888
)
}
Log.i("resultssss", "4")
detectObjects(image)
}
}
// Select back camera as a default
val cameraSelector = CameraSelector.DEFAULT_BACK_CAMERA
try {
// Unbind use cases before rebinding
cameraProvider.unbindAll()
// Bind use cases to camera
cameraProvider.bindToLifecycle(
this, cameraSelector, surfacePreview, imageAnalyzer
)
} catch (exc: Exception) {
Toast.makeText(this, exc.message, Toast.LENGTH_LONG).show()
}
}, ContextCompat.getMainExecutor(this))
}
private fun allPermissionsGranted() = REQUIRED_PERMISSIONS.all {
ContextCompat.checkSelfPermission(
baseContext, it
) == PackageManager.PERMISSION_GRANTED
}
override fun onDestroy() {
super.onDestroy()
objectDetectorHelper.clearObjectDetector()
cameraExecutor.shutdown()
}
companion object {
private const val REQUEST_CODE_PERMISSIONS = 10
private val REQUIRED_PERMISSIONS =
mutableListOf(
android.Manifest.permission.CAMERA
).toTypedArray()
}
override fun onRequestPermissionsResult(
requestCode: Int, permissions: Array<String>, grantResults: IntArray
) {
super.onRequestPermissionsResult(requestCode, permissions, grantResults)
if (requestCode == REQUEST_CODE_PERMISSIONS) {
if (allPermissionsGranted()) {
setUpCamera()
} else {
Toast.makeText(
this,
"Permissions not granted by the user.",
Toast.LENGTH_SHORT
).show()
finish()
}
}
}
override fun onInitialized() {
if (allPermissionsGranted()) {
setUpCamera()
} else {
ActivityCompat.requestPermissions(
this, REQUIRED_PERMISSIONS, REQUEST_CODE_PERMISSIONS
)
}
cameraExecutor = Executors.newSingleThreadExecutor()
}
override fun onError(error: String) {
runOnUiThread { Toast.makeText(this, error, Toast.LENGTH_SHORT).show() }
}
override fun onResults(
results: MutableList<Detection>?,
inferenceTime: Long,
imageHeight: Int,
imageWidth: Int
) {
runOnUiThread {
Log.i(
"resultssss",
"${results?.get(0)?.categories.toString()} ${results?.get(0)?.boundingBox.toString()}"
)
}
}
}
The complete error log is as follows:
Error getting native address of native library: task_vision_jni_gms
java.lang.IllegalArgumentException: Error occurred when initializing ObjectDetector: Mobile SSD models are expected to have exactly 4 outputs, found 1
at org.tensorflow.lite.task.gms.vision.detector.ObjectDetector.initJniWithModelFdAndOptions(Native Method)
at org.tensorflow.lite.task.gms.vision.detector.ObjectDetector.zzb(Unknown Source:0)
at org.tensorflow.lite.task.gms.vision.detector.zzb.createHandle(org.tensorflow:tensorflow-lite-task-vision-play-services##0.4.2:4)
at org.tensorflow.lite.task.core.TaskJniUtils$1.createHandle(TaskJniUtils.java:70)
at org.tensorflow.lite.task.core.TaskJniUtils.createHandleFromLibrary(TaskJniUtils.java:91)
at org.tensorflow.lite.task.core.TaskJniUtils.createHandleFromFdAndOptions(TaskJniUtils.java:66)
at org.tensorflow.lite.task.gms.vision.detector.ObjectDetector.createFromFileAndOptions(org.tensorflow:tensorflow-lite-task-vision-play-services##0.4.2:2)
at com.affinidi.tfdemoone.ObjectDetectorHelper.setupObjectDetector(ObjectDetectorHelper.kt:104)
at com.affinidi.tfdemoone.ObjectDetectorHelper.detect(ObjectDetectorHelper.kt:121)
at com.affinidi.tfdemoone.MainActivity.detectObjects(MainActivity.kt:89)
at com.affinidi.tfdemoone.MainActivity.startCamera$lambda$4$lambda$3$lambda$2(MainActivity.kt:132)
at com.affinidi.tfdemoone.MainActivity.$r8$lambda$cwS3iJ069sufgGf-nT7H81EEGtQ(Unknown Source:0)
at com.affinidi.tfdemoone.MainActivity$$ExternalSyntheticLambda3.analyze(Unknown Source:2)
at androidx.camera.core.ImageAnalysis.lambda$setAnalyzer$2(ImageAnalysis.java:481)
at androidx.camera.core.ImageAnalysis$$ExternalSyntheticLambda2.analyze(Unknown Source:2)
at androidx.camera.core.ImageAnalysisAbstractAnalyzer.lambda$analyzeImage$0$androidx-camera-core-ImageAnalysisAbstractAnalyzer(ImageAnalysisAbstractAnalyzer.java:286)
at androidx.camera.core.ImageAnalysisAbstractAnalyzer$$ExternalSyntheticLambda1.run(Unknown Source:14)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1167)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:641)
at java.lang.Thread.run(Thread.java:920)

Checking model.tflite revealed that the model you trained is a classification model, but you are using the ObjectDetector APIs.
Debugging the model in Python:
import numpy as np
import tensorflow as tf
from PIL import Image
# Load the TFLite model and allocate tensors.
interpreter = tf.lite.Interpreter(model_path=TFLITE_FILE_PATH)
interpreter.allocate_tensors()
# Get input and output tensor details.
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
# Test the model on a cat image, resized to the model's input shape.
input_shape = input_details[0]['shape']  # e.g. [1, height, width, 3]
image = Image.open("/home/vijay/Downloads/cat.jpg").resize((input_shape[2], input_shape[1]))
img = np.array(image, dtype=np.uint8)  # the quantized model expects uint8 input
interpreter.set_tensor(input_details[0]['index'], img[None, ...])
interpreter.invoke()
# Get the output: a single classification tensor, not the 4 tensors of an SSD detector.
output_data = interpreter.get_tensor(output_details[0]['index'])
# output_data [255, 0] --> idx 0 --> cat
# Checking the above for a dog photo:
# output_data [0, 255] --> idx 1 --> dog
As the error points out, a classification model produces only one output. So check the Android example for how to handle a classification problem: https://github.com/tensorflow/examples/tree/master/lite/examples/image_classification/android
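Since the Teachable Machine export is a classifier, the ImageClassifier Task API is the right fit instead of ObjectDetector. A minimal sketch, assuming the same model.tflite asset and a Context and Bitmap in scope (thresholds mirror the detector code above):
val options = ImageClassifier.ImageClassifierOptions.builder()
    .setScoreThreshold(0.5f)
    .setMaxResults(1)
    .setBaseOptions(BaseOptions.builder().setNumThreads(2).build())
    .build()
// Loads the Teachable Machine model from assets as a classifier, not a detector
val classifier = ImageClassifier.createFromFileAndOptions(context, "model.tflite", options)
// Each Classifications entry holds the scored categories (cat vs. dog)
val results: List<Classifications> = classifier.classify(TensorImage.fromBitmap(bitmap))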

Related

Display name is empty for tensor flow lite model generated using teachable machine

I am using Teachable Machine to generate a TensorFlow Lite model and using that model in my Android app. The result from the TensorFlow Lite model shows the display name as empty. Below is a screenshot of my Teachable Machine process. I added Cats and Dogs as names, and other values like index, label, and score are displayed properly, but the display name is empty. Here is a sample of the output after detection:
<Category "1" (displayName= score=0.99609375 index=1)>
Here is my code snippet
class ImageClassifierHelper(
var threshold: Float = 0.5f,
var numThreads: Int = 2,
var maxResults: Int = 2,
var currentDelegate: Int = 0,
var currentModel: Int = 0,
val context: Context,
val imageClassifierListener: ClassifierListener?
) {
private var imageClassifier: ImageClassifier? = null
init {
setupImageClassifier()
}
fun clearImageClassifier() {
imageClassifier = null
}
private fun setupImageClassifier() {
val optionsBuilder = ImageClassifier.ImageClassifierOptions.builder()
.setScoreThreshold(threshold)
.setMaxResults(maxResults)
val baseOptionsBuilder = BaseOptions.builder().setNumThreads(numThreads)
when (currentDelegate) {
DELEGATE_CPU -> {
// Default
}
DELEGATE_GPU -> {
if (CompatibilityList().isDelegateSupportedOnThisDevice) {
baseOptionsBuilder.useGpu()
} else {
imageClassifierListener?.onError("GPU is not supported on this device")
}
}
DELEGATE_NNAPI -> {
baseOptionsBuilder.useNnapi()
}
}
optionsBuilder.setBaseOptions(baseOptionsBuilder.build())
val modelName =
when (currentModel) {
MODEL_MOBILENETV1 -> "model.tflite"
MODEL_EFFICIENTNETV0 -> "model.tflite"
MODEL_EFFICIENTNETV1 -> "model.tflite"
MODEL_EFFICIENTNETV2 -> "model.tflite"
else -> "model.tflite"
}
try {
imageClassifier =
ImageClassifier.createFromFileAndOptions(context, modelName, optionsBuilder.build())
} catch (e: IllegalStateException) {
imageClassifierListener?.onError(
"Image classifier failed to initialize. See error logs for details"
)
Log.e(TAG, "TFLite failed to load model with error: " + e.message)
}
}
fun classify(image: Bitmap, rotation: Int) {
if (imageClassifier == null) {
setupImageClassifier()
}
// Inference time is the difference between the system time at the start and finish of the
// process
var inferenceTime = SystemClock.uptimeMillis()
// Create preprocessor for the image.
// See https://www.tensorflow.org/lite/inference_with_metadata/
// lite_support#imageprocessor_architecture
val imageProcessor =
ImageProcessor.Builder()
.build()
// Preprocess the image and convert it into a TensorImage for classification.
val tensorImage = imageProcessor.process(TensorImage.fromBitmap(image))
val imageProcessingOptions = ImageProcessingOptions.builder()
.setOrientation(getOrientationFromRotation(rotation))
.build()
val results = imageClassifier?.classify(tensorImage, imageProcessingOptions)
inferenceTime = SystemClock.uptimeMillis() - inferenceTime
imageClassifierListener?.onResults(
results,
inferenceTime
)
}
// Receive the device rotation (Surface.x values range from 0->3) and return EXIF orientation
// http://jpegclub.org/exif_orientation.html
private fun getOrientationFromRotation(rotation: Int) : ImageProcessingOptions.Orientation {
return when (rotation) {
Surface.ROTATION_270 ->
ImageProcessingOptions.Orientation.BOTTOM_RIGHT
Surface.ROTATION_180 ->
ImageProcessingOptions.Orientation.RIGHT_BOTTOM
Surface.ROTATION_90 ->
ImageProcessingOptions.Orientation.TOP_LEFT
else ->
ImageProcessingOptions.Orientation.RIGHT_TOP
}
}
interface ClassifierListener {
fun onError(error: String)
fun onResults(
results: List<Classifications>?,
inferenceTime: Long
)
}
companion object {
const val DELEGATE_CPU = 0
const val DELEGATE_GPU = 1
const val DELEGATE_NNAPI = 2
const val MODEL_MOBILENETV1 = 0
const val MODEL_EFFICIENTNETV0 = 1
const val MODEL_EFFICIENTNETV1 = 2
const val MODEL_EFFICIENTNETV2 = 3
private const val TAG = "ImageClassifierHelper"
}
}
I don't think the issue is with my Android app code but with the model generated from Teachable Machine.
The model only outputs numbers, each unique number representing a category.
In the above example, you need to create a map that assigns each category index to a display name: {'0': 'Cat', '1': 'Dog'}.
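A minimal sketch of such a map on the Android side; the labels here are assumptions based on the training classes above:
// Hypothetical index-to-name map for the Teachable Machine classes
val displayNames = mapOf(0 to "Cat", 1 to "Dog")
val topCategory = results?.firstOrNull()?.categories?.firstOrNull()
val name = displayNames[topCategory?.index] ?: "Unknown"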

How to stop the intent from opening multiple times after scanning a QR code with CameraX and the image analysis use case?

I am creating a simple QR scanner using CameraX and Google ML Kit. I open an intent after the string value is extracted from the QR code. The problem I'm facing is that the intent opens multiple times. How do I resolve this?
The following is the setup for image analysis. The DisplayQR intent opens after receiving the string value inside the QR code.
val imageAnalysis = ImageAnalysis.Builder()
.setTargetResolution(Size(640, 480))
.setBackpressureStrategy(ImageAnalysis.STRATEGY_KEEP_ONLY_LATEST)
.build()
imageAnalysis.setAnalyzer(
ContextCompat.getMainExecutor(this),
CodeAnalyzer(this, object : CallBackInterface {
override fun onSuccess(qrString: String?) {
imageAnalysis.clearAnalyzer()
Toast.makeText(this@ActivityQR, qrString, Toast.LENGTH_SHORT).show()
Log.d("rty",qrString.toString())
//the following intent is opening multiple times
val visitordetails =
Intent(this@ActivityQR, DisplayQR::class.java)
visitordetails.putExtra("VISITOR_QR", qrString)
startActivity(visitordetails)
}
override fun onFailed() {
}
})
)
cameraProvider.bindToLifecycle(this, selectedCamera, imageAnalysis, cameraPreview)
Code for analyzing the image
class CodeAnalyzer(context: Context, callBackInterface: CallBackInterface) : ImageAnalysis.Analyzer {
private val context: Context = context
private val callback: CallBackInterface = callBackInterface
@SuppressLint("UnsafeOptInUsageError")
override fun analyze(image: ImageProxy) {
val scanner: BarcodeScanner = BarcodeScanning.getClient()
val scannedImage = image.image
if (scannedImage != null) {
val scannedInputImage = InputImage.fromMediaImage(
scannedImage,
image.imageInfo.rotationDegrees
)
scanner.process(scannedInputImage).addOnSuccessListener { barCodes ->
for (qrCode in barCodes) {
when (qrCode.valueType) {
Barcode.TYPE_TEXT -> {
val qrString: String? = qrCode.rawValue
if (qrString != null) {
callback.onSuccess(qrString) //Here I am calling the callback
}
}
}
}
}.addOnFailureListener {
}.addOnCompleteListener {
image.close()
}
}
}
}
Edit: Corrected activity name
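One likely reason for the repeats: clearAnalyzer() stops new frames from being delivered, but the success listeners for frames already handed to BarcodeScanning can still fire afterwards, so onSuccess runs more than once. A minimal sketch of a one-shot guard, assuming the setup above otherwise stays the same (AtomicBoolean is from java.util.concurrent.atomic):
val handled = AtomicBoolean(false) // hypothetical guard shared by all in-flight callbacks
imageAnalysis.setAnalyzer(
    ContextCompat.getMainExecutor(this),
    CodeAnalyzer(this, object : CallBackInterface {
        override fun onSuccess(qrString: String?) {
            // Only the first successful scan may launch the intent
            if (!handled.compareAndSet(false, true)) return
            imageAnalysis.clearAnalyzer()
            val visitorDetails = Intent(this@ActivityQR, DisplayQR::class.java)
            visitorDetails.putExtra("VISITOR_QR", qrString)
            startActivity(visitorDetails)
        }
        override fun onFailed() {}
    })
)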

ImageReader's onImageAvailable method isn't called, and the preview shows only 8 frames in slow motion and freezes (Camera2)

I noticed strange behavior on a Xiaomi Redmi Note 9 Pro. I have tested the application on hundreds of phones, but this problem appears only on this device, and only when ImageReader is used with the YUV_420_888 format and a 176x144 preview resolution (for example, with 320x240, with JPEG, or without ImageReader as a capture surface, everything works well). The onImageAvailable method isn't called, the preview shows only 8 frames in slow motion and then freezes, and the app slows down. onCaptureCompleted() in CameraCurrentParamsReceiver is also called only 8 times.
I get the smallest resolution by using getMinPreviewSize (176x144 for this Xiaomi phone).
const val PREVIEW_IMAGE_FORMAT = ImageFormat.YUV_420_888
const val IMAGE_READER_MAX_SIMULTANEOUS_IMAGES = 4
val previewCaptureCallback = CameraCurrentParamsReceiver(this)
private fun startPreview(cameraDevice: CameraDevice, cameraProperties: CameraProperties)
{
val imageReader = ImageReader.newInstance(cameraProperties.previewSize.width,
cameraProperties.previewSize.height,
PREVIEW_IMAGE_FORMAT,
IMAGE_READER_MAX_SIMULTANEOUS_IMAGES)
this.imageReader = imageReader
bufferedImageConverter = BufferedImageConverter(cameraProperties.previewSize.width, cameraProperties.previewSize.height)
val previewSurface = previewSurface
val previewSurfaceForCamera =
if (previewSurface != null)
{
if (previewSurface.isValid)
{
previewSurface
}
else
{
Log.w(TAG, "Invalid preview surface - camera preview display is not available")
null
}
}
else
{
null
}
val captureSurfaces = listOfNotNull(imageReader.surface, previewSurfaceForCamera)
cameraDevice.createCaptureSession(
captureSurfaces,
object : CameraCaptureSession.StateCallback()
{
override fun onConfigureFailed(cameraCaptureSession: CameraCaptureSession)
{
Log.e(TAG, "onConfigureFailed() cannot configure camera")
if (isCameraOpened(cameraDevice))
{
shutDown("onConfigureFailed")
}
}
override fun onConfigured(cameraCaptureSession: CameraCaptureSession)
{
Log.d(TAG, "onConfigured()")
if (!isCameraOpened(cameraDevice))
{
cameraCaptureSession.close()
shutDown("onConfigured.isCameraOpened")
return
}
captureSession = cameraCaptureSession
try
{
val request = cameraDevice.createCaptureRequest(CameraDevice.TEMPLATE_PREVIEW)
captureSurfaces.forEach { request.addTarget(it) }
CameraPreviewRequestInitializer.initializePreviewRequest(request, cameraProperties, controlParams, isControlParamsStrict)
captureRequestBuilder = request
val previewCallback = PreviewFrameHandler(this#Camera2)
this#Camera2.previewFrameHandler = previewCallback
imageReader.setOnImageAvailableListener(previewCallback, previewCallback.backgroundHandler)
cameraCaptureSession.setRepeatingRequest(request.build(), previewCaptureCallback, null)
}
catch (ex: CameraAccessException)
{
Log.e(TAG, "onConfigured() failed with exception", ex)
shutDown("onConfigured.CameraAccessException")
}
}
},
null)
}
private fun chooseCamera(manager: CameraManager): CameraProperties?
{
val cameraIdList = manager.cameraIdList
if (cameraIdList.isEmpty())
{
return null
}
for (cameraId in cameraIdList)
{
val characteristics = manager.getCameraCharacteristics(cameraId)
val facing = characteristics.get(CameraCharacteristics.LENS_FACING)
if (facing != null && facing == CameraCharacteristics.LENS_FACING_BACK)
{
val minPreviewSize = getMinPreviewSize(characteristics)
if (minPreviewSize == null)
{
Log.e(TAG, "chooseCamera() Cannot determine the preview size")
return null
}
Log.d(TAG, "chooseCamera() chosen camera id: $cameraId, preview size: $minPreviewSize")
return CameraProperties(cameraId,
minPreviewSize,
characteristics)
}
}
return null
}
private fun getMinPreviewSize(characteristics: CameraCharacteristics): Size?
{
val map = characteristics.get(CameraCharacteristics.SCALER_STREAM_CONFIGURATION_MAP)
if (map == null)
{
Log.e(TAG, "getMinPreviewSize() Map is empty")
return null
}
return map.getOutputSizes(Constants.Camera.PREVIEW_IMAGE_FORMAT)?.minBy { it.width * it.height }
}
PreviewFrameHandler and CameraCurrentParamsReceiver (previewCaptureCallback variable)
private class PreviewFrameHandler(private val parent: Camera2) : ImageReader.OnImageAvailableListener, Handler.Callback
{
val backgroundHandler: Handler
private val backgroundHandlerThread: HandlerThread = HandlerThread("Camera2.PreviewFrame.HandlerThread")
private val mainHandler: Handler = Handler(Looper.getMainLooper(), this)
/**
* Main thread.
*/
init
{
backgroundHandlerThread.start()
backgroundHandler = Handler(backgroundHandlerThread.looper)
}
fun shutDown()
{
backgroundHandlerThread.quit()
mainHandler.removeMessages(0)
}
override fun handleMessage(msg: Message?): Boolean
{
msg ?: return false
parent.cameraFrameListener.onFrame(msg.obj as RGBImage)
return true
}
/**
* Background thread.
*/
private val relativeTimestamp = RelativeTimestamp()
override fun onImageAvailable(reader: ImageReader)
{
var image: Image? = null
try
{
image = reader.acquireNextImage()
image ?: return
val rgbImage = parent.bufferedImageConverter?.convertYUV420spToRGB(image, relativeTimestamp.updateAndGetSeconds(image.timestamp))
rgbImage ?: return
mainHandler.sendMessage(mainHandler.obtainMessage(0, rgbImage))
}
catch (ex: Exception)
{
Log.e(TAG, "onImageAvailable()", ex)
}
finally
{
image?.close()
}
}
private class RelativeTimestamp
{
private var initialNanos = 0L
fun updateAndGetSeconds(currentNanos: Long): Double
{
if (initialNanos == 0L)
{
initialNanos = currentNanos
}
return nanosToSeconds(currentNanos - initialNanos)
}
}
}
/**
* Class used to read current camera params.
*/
private class CameraCurrentParamsReceiver(private val parent: Camera2) : CameraCaptureSession.CaptureCallback()
{
private var isExposureTimeExceptionLogged = false
private var isIsoExceptionLogged = false
override fun onCaptureSequenceAborted(session: CameraCaptureSession, sequenceId: Int)
{
}
override fun onCaptureCompleted(session: CameraCaptureSession, request: CaptureRequest, result: TotalCaptureResult)
{
try
{
val exposureTimeNanos = result.get(CaptureResult.SENSOR_EXPOSURE_TIME)
if (exposureTimeNanos != null)
{
parent.currentExposureTimeNanos = exposureTimeNanos
}
}
catch (ex: IllegalArgumentException)
{
if (!isExposureTimeExceptionLogged)
{
isExposureTimeExceptionLogged = true
}
}
try
{
val iso = result.get(CaptureResult.SENSOR_SENSITIVITY)
if (iso != null)
{
parent.currentIso = iso
}
}
catch (ex: IllegalArgumentException)
{
if (!isIsoExceptionLogged)
{
Log.i(TAG, "Cannot get current SENSOR_SENSITIVITY, exception: " + ex.message)
isIsoExceptionLogged = true
}
}
}
override fun onCaptureFailed(session: CameraCaptureSession, request: CaptureRequest, failure: CaptureFailure)
{
}
override fun onCaptureSequenceCompleted(session: CameraCaptureSession, sequenceId: Int, frameNumber: Long)
{
}
override fun onCaptureStarted(session: CameraCaptureSession, request: CaptureRequest, timestamp: Long, frameNumber: Long)
{
}
override fun onCaptureProgressed(session: CameraCaptureSession, request: CaptureRequest, partialResult: CaptureResult)
{
}
override fun onCaptureBufferLost(session: CameraCaptureSession, request: CaptureRequest, target: Surface, frameNumber: Long)
{
}
}
As I understand it, something is wrong with the preview size, but I cannot find the correct way to get this value, and the strangest thing is that this problem appears only on this Xiaomi device. Any thoughts?
176x144 is sometimes a problematic resolution for devices. It's really only listed by camera devices because it's sometimes required for recording videos for MMS (multimedia text message) messages. These videos, frankly, look awful, but it's still frequently a requirement by cellular carriers that they work.
But on modern devices with 12-50 MP cameras, the camera hardware actually struggles to scale images down to 176x144 from the sensor's full resolution (more than a 20x downscale!), so certain combinations of sizes can cause problems.
I'd generally recommend not using preview resolutions below 320x240 to minimize issues, and definitely not mixing a 176x144 preview with a high-resolution still capture.
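Following that advice, the getMinPreviewSize helper above could skip the MMS-era modes. A sketch, assuming a 320x240 floor (the name getMinSafePreviewSize is mine):
// Smallest YUV output size that is still at least 320x240 (avoids modes like 176x144)
private fun getMinSafePreviewSize(characteristics: CameraCharacteristics): Size? {
    val map = characteristics.get(CameraCharacteristics.SCALER_STREAM_CONFIGURATION_MAP) ?: return null
    return map.getOutputSizes(ImageFormat.YUV_420_888)
        ?.filter { it.width >= 320 && it.height >= 240 }
        ?.minByOrNull { it.width * it.height }
}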

Issues with the latest CameraX and Barcode Scanning on some devices

So I'm working on an app that requires a QR scanner as a main feature. Previously I was using camerax-alpha06 with Firebase ML Vision 24.0.3, and they worked fine for months with no customer complaints about scanning issues.
Then about two weeks ago I had to migrate from Firebase ML Vision to ML Kit barcode scanning (related to the Crashlytics migration - out of topic), and now some users who could scan with the previous version cannot. Sample devices are the Samsung Tab A7 (Android 5.1.1) and the Vivo 1919 (Android 10).
This is the section of my build.gradle that involves this feature:
def camerax_version = "1.0.0-beta11"
implementation "androidx.camera:camera-core:${camerax_version}"
implementation "androidx.camera:camera-camera2:${camerax_version}"
implementation "androidx.camera:camera-lifecycle:${camerax_version}"
implementation "androidx.camera:camera-view:1.0.0-alpha18"
implementation "androidx.camera:camera-extensions:1.0.0-alpha18"
implementation 'com.google.android.gms:play-services-mlkit-barcode-scanning:16.1.2'
This is my camera handler file
class ScanQRCameraViewHandler(
private val fragment: ScanQRDialogFragment,
private val previewView: PreviewView
) {
private val displayLayout get() = previewView
companion object {
private const val RATIO_4_3_VALUE = 4.0 / 3.0
private const val RATIO_16_9_VALUE = 16.0 / 9.0
}
private val analyzer = GMSMLKitAnalyzer(onFoundQR = { extractedString ->
fragment.verifyExtractedString(extractedString)
}, onNotFoundQR = {
resetStateToAllowNewImageStream()
})
private var cameraProviderFuture: ListenableFuture<ProcessCameraProvider>? = null
private var camera: Camera? = null
private var isAnalyzing = false
internal fun resetStateToAllowNewImageStream() {
isAnalyzing = false
}
internal fun setTorchEnabled(isEnabled: Boolean) {
camera?.cameraControl?.enableTorch(isEnabled)
}
internal fun initCameraProviderIfHasNot() {
if (cameraProviderFuture == null) {
fragment.context?.let {
cameraProviderFuture = ProcessCameraProvider.getInstance(it)
val executor = ContextCompat.getMainExecutor(it)
cameraProviderFuture?.addListener({
bindPreview(cameraProviderFuture?.get(), executor)
}, executor)
}
}
}
private fun bindPreview(cameraProvider: ProcessCameraProvider?, executor: Executor) {
val metrics = DisplayMetrics().also { displayLayout.display.getRealMetrics(it) }
val screenAspectRatio = aspectRatio(metrics.widthPixels, metrics.heightPixels)
val preview = initPreview(screenAspectRatio)
val imageAnalyzer = createImageAnalyzer()
val imageAnalysis = createImageAnalysis(executor, imageAnalyzer, screenAspectRatio)
val cameraSelector = createCameraSelector()
cameraProvider?.unbindAll()
camera = cameraProvider?.bindToLifecycle(
fragment as LifecycleOwner,
cameraSelector, imageAnalysis, preview
)
}
private fun createCameraSelector(): CameraSelector {
return CameraSelector.Builder()
.requireLensFacing(CameraSelector.LENS_FACING_BACK)
.build()
}
private fun createImageAnalysis(
executor: Executor, imageAnalyzer: ImageAnalysis.Analyzer, screenAspectRatio: Int
): ImageAnalysis {
val rotation = displayLayout.rotation
val imageAnalysis = ImageAnalysis.Builder()
// .setTargetRotation(rotation.toInt())
// .setTargetAspectRatio(screenAspectRatio)
.setBackpressureStrategy(ImageAnalysis.STRATEGY_KEEP_ONLY_LATEST)
.build()
imageAnalysis.setAnalyzer(executor, imageAnalyzer)
return imageAnalysis
}
private fun createImageAnalyzer(): ImageAnalysis.Analyzer {
return ImageAnalysis.Analyzer {
isAnalyzing = true
analyzer.analyze(it)
}
}
private fun initPreview(screenAspectRatio: Int): Preview {
val preview: Preview = Preview.Builder()
//.setTargetResolution(Size(840, 840))
// .setTargetAspectRatio(screenAspectRatio)
// .setTargetRotation(displayLayout.rotation.toInt())
.build()
preview.setSurfaceProvider(previewView.surfaceProvider)
return preview
}
fun unbindAll() {
cameraProviderFuture?.get()?.unbindAll()
}
private fun aspectRatio(width: Int, height: Int): Int {
val previewRatio = width.coerceAtLeast(height).toDouble() / width.coerceAtMost(height)
if (kotlin.math.abs(previewRatio - RATIO_4_3_VALUE) <= kotlin.math.abs(previewRatio - RATIO_16_9_VALUE)) {
return AspectRatio.RATIO_4_3
}
return AspectRatio.RATIO_16_9
}
}
And my analyzer
internal class GMSMLKitAnalyzer(
private val onFoundQR: (String) -> Unit,
private val onNotFoundQR: () -> Unit
) :
ImageAnalysis.Analyzer {
private val options = BarcodeScannerOptions.Builder()
.setBarcodeFormats(Barcode.FORMAT_QR_CODE).build()
@SuppressLint("UnsafeExperimentalUsageError")
override fun analyze(imageProxy: ImageProxy) {
imageProxy.image?.let { mediaImage ->
val image = InputImage.fromMediaImage(mediaImage, imageProxy.imageInfo.rotationDegrees)
val scanner = BarcodeScanning.getClient(options)
CoroutineScope(Dispatchers.Main).launch {
val result = scanner.process(image).await()
result.result?.let { barcodes ->
barcodes.find { it.rawValue != null }?.rawValue?.let {
onFoundQR(it)
} ?: run { onNotFoundQR() }
}
imageProxy.close()
}
} ?: imageProxy.close()
}
}
The commented-out lines are things I tried to add; they didn't help, and some even caused issues on other previously working devices.
I am unsure whether I have misconfigured anything, so I would welcome any suggestions that would help me find the solution.
Thank you.
P.S. This is my first post, so if I've done anything wrong or missed something, please advise.
BarcodeScanning does not work on some devices running camera-camera2 version 1.0.0-beta08 or later. You can use an earlier version of camera-camera2 to bypass this issue. For example:
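(A sketch of the downgrade in build.gradle; pinning to 1.0.0-beta07 is my assumption for the last unaffected release - verify against the known-issues page linked below.)
// Assumed last version before the affected range (beta08 and later)
def camerax_version = "1.0.0-beta07"
implementation "androidx.camera:camera-core:${camerax_version}"
implementation "androidx.camera:camera-camera2:${camerax_version}"
implementation "androidx.camera:camera-lifecycle:${camerax_version}"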
See: https://developers.google.com/ml-kit/known-issues
We are working on a fix internally in ML Kit for the next SDK release.
Update your ML Kit barcode scanning dependency to 16.1.1 or above; this issue was fixed in 'com.google.mlkit:barcode-scanning:16.1.1'.

FaceDetector was not released with FaceDetector.release()

I am working with the ML Kit face detector: https://developers.google.com/ml-kit/vision/face-detection/android#kotlin_2
I take an image from Camera2 and process it, but I constantly get the error "FaceDetector was not released with FaceDetector.release()".
More details:
W/FaceDetector: FaceDetector was not released with FaceDetector.release()
E/BufferQueueProducer: [ImageReader-960x1280f23m2-7166-0](id:1bfe00000000,api:4,p:386,c:7166) dequeueBuffer: BufferQueue has been abandoned
I followed this tutorial to set up Camera2: https://medium.com/@tylerwalker/integrating-camera2-api-on-android-feat-kotlin-4a4e65dc593f
Here is the code I wrote for ML Kit:
val realTimeOps = FaceDetectorOptions.Builder()
.setContourMode(FaceDetectorOptions.CONTOUR_MODE_ALL)
.build()
val detector = FaceDetection.getClient(realTimeOps)
imageReader.setOnImageAvailableListener({
imageReader.acquireLatestImage()?.let { image ->
val mlImage = InputImage.fromMediaImage(image, 0) // TODO change image for calculation
val result = detector.process(mlImage)
.addOnSuccessListener {faces ->
Log.d("photo", "Face found!")
}
.addOnFailureListener { e ->
Log.d("photo", "Error: $e")
}
image.close()
}
}, Handler { true })
Also, my "detector" val has no release() function :'(
I hope that someone can help me with that :)
I finally figured out how this error happened!
There must be only one instance of the detector, and the error was in another part of my code (I am new to Android and working with Camera2 for the first time):
cameraCharacteristics[CameraCharacteristics.SCALER_STREAM_CONFIGURATION_MAP]?.let { streamConfigurationMap ->
streamConfigurationMap.getOutputSizes(ImageFormat.YUV_420_888)
?.let { yuvSizes ->
val previewSize = yuvSizes.last()
val displayRotation = windowManager.defaultDisplay.rotation
val swappedDimensions = areDimensionsSwapped(displayRotation, cameraCharacteristics)
val rotatedPreviewWidth = if (swappedDimensions) previewSize.height else previewSize.width
val rotatedPreviewHeight = if (swappedDimensions) previewSize.width else previewSize.height
surfaceView.holder.setFixedSize(rotatedPreviewWidth, rotatedPreviewHeight)
val imageReader = ImageReader.newInstance(rotatedPreviewWidth, rotatedPreviewHeight,
ImageFormat.YUV_420_888, 2)
val realTimeOps = FaceDetectorOptions.Builder()
.setContourMode(FaceDetectorOptions.CONTOUR_MODE_ALL)
.build()
val detector: FaceDetector = FaceDetection.getClient(realTimeOps)
imageReader.setOnImageAvailableListener({
imageReader.acquireLatestImage()?.let { image ->
val mlImage = InputImage.fromMediaImage(image, getRotationCompensation(cameraDevice.id, getInstance(), true))
val result = detector.process(mlImage)
.addOnSuccessListener {faces ->
if (faces.size > 0)
Log.d("photo", "Face found!")
else
Log.d("photo", "No face have been found")
}
.addOnFailureListener { e ->
Log.d("photo", "Error: $e")
}
.addOnCompleteListener {
image.close()
}
}
}, Handler { true })
val previewSurface = surfaceView.holder.surface
val recordingSurface = imageReader.surface
val captureCallback = object : CameraCaptureSession.StateCallback() {
override fun onConfigureFailed(session: CameraCaptureSession) {
}
override fun onConfigured(session: CameraCaptureSession) {
val previewRequestBuilder = cameraDevice.createCaptureRequest(
TEMPLATE_PREVIEW
).apply {
addTarget(previewSurface)
addTarget(recordingSurface)
}
session.setRepeatingRequest(
previewRequestBuilder.build(),
object: CameraCaptureSession.CaptureCallback() {},
Handler { true }
)
}
}
cameraDevice.createCaptureSession(mutableListOf(previewSurface, recordingSurface), captureCallback, Handler {true})
}
I had put the creation of the FaceDetector inside the cameraCharacteristics block, but that code ran every time an image was captured. I needed to move the creation of the FaceDetector outside of it (since only one instance should exist, Google was understandably yelling at me).
Now I create it at the beginning of the onOpened listener of CameraDevice.StateCallback().
I think the best approach would be to keep the detector as a nullable private attribute of the class, initialize it in the onOpened() listener, and close it in the onDisconnected() listener.
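A sketch of that shape, assuming the detector options from above; note that the ML Kit detector is released with close() (FaceDetector implements Closeable), which is the release the warning refers to:
private var detector: FaceDetector? = null

private val stateCallback = object : CameraDevice.StateCallback() {
    override fun onOpened(camera: CameraDevice) {
        // Create the single detector instance once per camera session
        detector = FaceDetection.getClient(
            FaceDetectorOptions.Builder()
                .setContourMode(FaceDetectorOptions.CONTOUR_MODE_ALL)
                .build()
        )
        // ... configure the capture session here ...
    }

    override fun onDisconnected(camera: CameraDevice) {
        // close() releases the detector's native resources
        detector?.close()
        detector = null
    }

    override fun onError(camera: CameraDevice, error: Int) {
        detector?.close()
        detector = null
    }
}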
