MainActivity.kt
package com.memex.eu
import android.Manifest
import android.app.Activity
import android.app.ActivityManager
import android.content.pm.PackageManager
import android.graphics.Bitmap
import android.net.Uri
import android.os.Build
import android.os.Bundle
import android.util.Log
import android.util.Size
import android.view.Gravity
import android.view.MotionEvent
import android.widget.Toast
import androidx.appcompat.app.AppCompatActivity
import androidx.camera.core.CameraSelector
import androidx.camera.core.ImageAnalysis
import androidx.camera.core.Preview
import androidx.camera.lifecycle.ProcessCameraProvider
import androidx.core.app.ActivityCompat
import androidx.core.content.ContextCompat
import com.google.ar.core.HitResult
import com.google.ar.core.Plane
import com.google.ar.sceneform.AnchorNode
import com.google.ar.sceneform.rendering.ModelRenderable
import com.google.ar.sceneform.ux.ArFragment
import com.google.ar.sceneform.ux.TransformableNode
import com.google.mlkit.vision.common.InputImage
import com.google.mlkit.vision.objects.ObjectDetection
import com.google.mlkit.vision.objects.ObjectDetector
import com.google.mlkit.vision.objects.defaults.ObjectDetectorOptions
import kotlinx.android.synthetic.main.activity_main.*
import java.util.concurrent.ExecutorService
import java.util.concurrent.Executors
import java.util.function.Consumer
import java.util.function.Function
class MainActivity : AppCompatActivity() {
private lateinit var cameraExecutor: ExecutorService
private val TAG: String = MainActivity::class.java.getSimpleName()
private var arFragment: ArFragment? = null
private var andyRenderable: ModelRenderable? = null
private lateinit var options: ObjectDetectorOptions
private lateinit var objectDetector: ObjectDetector
private lateinit var analyzer: MyImageAnalyzer
override fun onCreate(savedInstanceState: Bundle?) {
super.onCreate(savedInstanceState)
setContentView(R.layout.activity_main)
// Request camera permissions
if (allPermissionsGranted()) {
startCamera()
} else {
ActivityCompat.requestPermissions(
this, REQUIRED_PERMISSIONS, REQUEST_CODE_PERMISSIONS
)
}
cameraExecutor = Executors.newSingleThreadExecutor()
if (!checkIsSupportedDeviceOrFinish(this)) {
return
}
arFragment = supportFragmentManager.findFragmentById(R.id.sceneform_fragment) as ArFragment?
// When you build a Renderable, Sceneform loads its resources in the background while returning
// a CompletableFuture. Call thenAccept(), handle(), or check isDone() before calling get().
ModelRenderable.builder()
.setSource(this, R.raw.andy)
.build()
.thenAccept(Consumer { renderable: ModelRenderable? ->
andyRenderable = renderable
})
.exceptionally(
Function<Throwable, Void?> { throwable: Throwable? ->
val toast =
Toast.makeText(this, "Unable to load andy renderable", Toast.LENGTH_LONG)
toast.setGravity(Gravity.CENTER, 0, 0)
toast.show()
null
})
arFragment!!.setOnTapArPlaneListener { hitResult: HitResult, plane: Plane?, motionEvent: MotionEvent? ->
if (andyRenderable == null) {
return#setOnTapArPlaneListener
}
// Create the Anchor.
val anchor = hitResult.createAnchor()
val anchorNode =
AnchorNode(anchor)
anchorNode.setParent(arFragment!!.arSceneView.scene)
// Create the transformable andy and add it to the anchor.
val andy =
TransformableNode(arFragment!!.transformationSystem)
andy.setParent(anchorNode)
andy.renderable = andyRenderable
andy.select()
}
// Live detection and tracking
options = ObjectDetectorOptions.Builder()
.setDetectorMode(ObjectDetectorOptions.STREAM_MODE)
.enableClassification() // Optional
.build()
objectDetector = ObjectDetection.getClient(options)
#androidx.camera.core.ExperimentalGetImage
analyzer = MyImageAnalyzer()
}
override fun onRequestPermissionsResult(
requestCode: Int, permissions: Array<String>, grantResults:
IntArray
) {
if (requestCode == REQUEST_CODE_PERMISSIONS) {
if (allPermissionsGranted()) {
startCamera()
} else {
Toast.makeText(
this,
"Permissions not granted by the user.",
Toast.LENGTH_SHORT
).show()
finish()
}
}
}
override fun onDestroy() {
super.onDestroy()
cameraExecutor.shutdown()
}
private fun startCamera() {
val cameraProviderFuture = ProcessCameraProvider.getInstance(this)
cameraProviderFuture.addListener(Runnable {
// Used to bind the lifecycle of cameras to the lifecycle owner
val cameraProvider: ProcessCameraProvider = cameraProviderFuture.get()
// Preview
val preview = Preview.Builder()
.build()
.also {
it.setSurfaceProvider(viewFinder.createSurfaceProvider())
}
val imageAnalyzer = ImageAnalysis.Builder()
.setTargetResolution(Size(1280, 720))
.setBackpressureStrategy(ImageAnalysis.STRATEGY_KEEP_ONLY_LATEST)
.build()
.also {
it.setAnalyzer(cameraExecutor, analyzer)
}
// Select back camera as a default
val cameraSelector = CameraSelector.DEFAULT_BACK_CAMERA
try {
// Unbind use cases before rebinding
cameraProvider.unbindAll()
// Bind use cases to camera
cameraProvider.bindToLifecycle(
this, cameraSelector, preview, imageAnalyzer)
} catch(exc: Exception) {
Log.e(TAG, "Use case binding failed", exc)
}
}, ContextCompat.getMainExecutor(this))
}
private fun allPermissionsGranted() = REQUIRED_PERMISSIONS.all {
ContextCompat.checkSelfPermission(
baseContext, it
) == PackageManager.PERMISSION_GRANTED
}
companion object {
private const val TAG = "MainActivity"
private const val REQUEST_CODE_PERMISSIONS = 10
private val REQUIRED_PERMISSIONS = arrayOf(Manifest.permission.CAMERA)
private const val MIN_OPENGL_VERSION = 3.0
/**
* Returns false and displays an error message if Sceneform can not run, true if Sceneform can run
* on this device.
*
*
* Sceneform requires Android N on the device as well as OpenGL 3.0 capabilities.
*
*
* Finishes the activity if Sceneform can not run
*/
fun checkIsSupportedDeviceOrFinish(activity: Activity): Boolean {
val openGlVersionString =
(activity.getSystemService(ACTIVITY_SERVICE) as ActivityManager)
.deviceConfigurationInfo
.glEsVersion
if (openGlVersionString.toDouble() < MIN_OPENGL_VERSION) {
Log.e(TAG, "Sceneform requires OpenGL ES 3.0 later")
Toast.makeText(
activity,
"Sceneform requires OpenGL ES 3.0 or later",
Toast.LENGTH_LONG
)
.show()
activity.finish()
return false
}
return true
}
}
}
MyImageAnalyzer.kt
package com.memex.eu
import android.util.Log
import androidx.camera.core.ImageAnalysis
import androidx.camera.core.ImageProxy
import androidx.fragment.app.FragmentManager
import com.google.mlkit.vision.common.InputImage
import com.google.mlkit.vision.objects.DetectedObject
import com.google.mlkit.vision.objects.ObjectDetection
import com.google.mlkit.vision.objects.defaults.ObjectDetectorOptions
import com.google.mlkit.vision.objects.defaults.PredefinedCategory
#androidx.camera.core.ExperimentalGetImage
class MyImageAnalyzer : ImageAnalysis.Analyzer{
// Live detection and tracking
val options = ObjectDetectorOptions.Builder()
.setDetectorMode(ObjectDetectorOptions.STREAM_MODE)
.enableClassification() // Optional
.build()
val objectDetector = ObjectDetection.getClient(options)
private val TAG: String = MyImageAnalyzer::class.java.getSimpleName()
override fun analyze(imageProxy: ImageProxy) {
val mediaImage = imageProxy.image
Log.e(TAG, "DETECTIONS")
if (mediaImage != null) {
val image = InputImage.fromMediaImage(mediaImage, imageProxy.imageInfo.rotationDegrees)
objectDetector.process(image)
.addOnCompleteListener {
imageProxy.close()
if (it.isSuccessful) {
displayDets(it.result as List<DetectedObject>)
} else {
it.exception?.printStackTrace()
}
}
}
imageProxy.close()
}
private fun displayDets(results: List<DetectedObject>) {
for (detectedObject in results) {
val boundingBox = detectedObject.boundingBox
val trackingId = detectedObject.trackingId
for (label in detectedObject.labels) {
val text = label.text
if (PredefinedCategory.FOOD == text) {
Log.e(TAG, text)
}
val index = label.index
if (PredefinedCategory.FOOD_INDEX == index) {
Log.e(TAG, text)
}
val confidence = label.confidence
Log.e(TAG, confidence.toString())
Log.e(TAG, trackingId.toString())
Log.e(TAG, boundingBox.toString())
}
}
}
}
But in my console, I see none of the Log statements from ImageAnalysis.Analyzer. What's going on? Why is ImageAnalysis.Analyzer not being called? I am printing out the log statements first before I eventually draw the bounding boxes. However, the analyze function of ImageAnalysis.Analyzer is not being called at all. How do I tell camerax to call that function?
Related
I followed a tutorial on Camerax from the Android Studio website, and I wanted the camera button when pressed to take a picture from the front and the back camera, the problem is only the second function is called.
Ctrl + F THE PROBLEM IS HERE to find the two functions.
MainActivity.kt file:
package com.android.example.cameraxapp
import androidx.appcompat.app.AppCompatActivity
import android.os.Bundle
import android.Manifest
import android.content.pm.PackageManager
import android.util.Log
import android.widget.Toast
import androidx.core.app.ActivityCompat
import androidx.core.content.ContextCompat
import java.util.concurrent.Executors
import androidx.camera.core.*
import androidx.camera.lifecycle.ProcessCameraProvider
import androidx.camera.video.*
import androidx.camera.video.VideoCapture
import androidx.core.content.PermissionChecker
import java.nio.ByteBuffer
import java.text.SimpleDateFormat
import java.util.*
import java.util.concurrent.ExecutorService
import android.provider.MediaStore
import android.content.ContentValues
import android.os.Build
import androidx.camera.view.PreviewView
import com.nasro.camerax.R
import com.nasro.camerax.databinding.ActivityMainBinding
typealias LumaListener = (luma: Double) -> Unit
class MainActivity : AppCompatActivity() {
lateinit var viewBinding: ActivityMainBinding
lateinit var cameraSelector: CameraSelector
var imageCapture: ImageCapture? = null
var videoCapture: VideoCapture<Recorder>? = null
var recording: Recording? = null
lateinit var cameraExecutor: ExecutorService
override fun onCreate(savedInstanceState: Bundle?) {
super.onCreate(savedInstanceState)
viewBinding = ActivityMainBinding.inflate(layoutInflater)
setContentView(viewBinding.root)
// Request camera permissions
if (allPermissionsGranted()) {
Toast.makeText(baseContext, "Ready", Toast.LENGTH_SHORT).show()
} else {
ActivityCompat.requestPermissions(
this, REQUIRED_PERMISSIONS, REQUEST_CODE_PERMISSIONS)
}
// Set up the listeners for take photo and video capture buttons
viewBinding.imageCaptureButton.setOnClickListener {start()}
viewBinding.videoCaptureButton.setOnClickListener {
}
cameraExecutor = Executors.newSingleThreadExecutor()
}
fun captureVideo() {}
fun start() {
//THE PROBLEM IS HERE, THE PROBLEM IS HERE, THE PROBLEM IS HERE.
startCamera(CameraSelector.DEFAULT_FRONT_CAMERA)
startCamera(CameraSelector.DEFAULT_BACK_CAMERA)
}
private fun startCamera(pizza:CameraSelector) {
val cameraProviderFuture = ProcessCameraProvider.getInstance(this)
cameraProviderFuture.addListener({
// Used to bind the lifecycle of cameras to the lifecycle owner
val cameraProvider: ProcessCameraProvider = cameraProviderFuture.get()
// Preview
val preview = Preview.Builder()
.build()
.also {
it.setSurfaceProvider(findViewById<PreviewView>(R.id.viewFinder).surfaceProvider)
}
imageCapture = ImageCapture.Builder()
.build()
try {
// Unbind use cases before rebinding
cameraProvider.unbindAll()
// Bind use cases to camera
cameraProvider.bindToLifecycle(
this, pizza, preview, imageCapture)
} catch(exc: Exception) {
Log.e(TAG, "Use case binding failed", exc)
}
takePhoto()
}, ContextCompat.getMainExecutor(this))
}
fun takePhoto() {
// Get a stable reference of the modifiable image capture use case
var imageCapture = imageCapture ?: return
// Create time stamped name and MediaStore entry.
val name = SimpleDateFormat(FILENAME_FORMAT, Locale.US)
.format(System.currentTimeMillis())
val contentValues = ContentValues().apply {
put(MediaStore.MediaColumns.DISPLAY_NAME, name)
put(MediaStore.MediaColumns.MIME_TYPE, "image/jpeg")
if(Build.VERSION.SDK_INT > Build.VERSION_CODES.P) {
put(MediaStore.Images.Media.RELATIVE_PATH, "Pictures/CameraX-Image")
}
}
// Create output options object which contains file + metadata
val outputOptions = ImageCapture.OutputFileOptions
.Builder(contentResolver,
MediaStore.Images.Media.EXTERNAL_CONTENT_URI,
contentValues)
.build()
// Set up image capture listener, which is triggered after photo has
// been taken
imageCapture.takePicture(
outputOptions,
ContextCompat.getMainExecutor(this),
object : ImageCapture.OnImageSavedCallback {
override fun onError(exc: ImageCaptureException) {
Log.e(TAG, "Photo capture failed: ${exc.message}", exc)
}
override fun
onImageSaved(output: ImageCapture.OutputFileResults){
val msg = "Photo capture succeeded: ${output.savedUri}"
Toast.makeText(baseContext, msg, Toast.LENGTH_SHORT).show()
Log.d(TAG, msg)
}
}
)
}
fun allPermissionsGranted() = REQUIRED_PERMISSIONS.all {
ContextCompat.checkSelfPermission(
baseContext, it) == PackageManager.PERMISSION_GRANTED
}
override fun onDestroy() {
super.onDestroy()
cameraExecutor.shutdown()
}
val name = SimpleDateFormat(FILENAME_FORMAT, Locale.US)
.format(System.currentTimeMillis())
val contentValues = ContentValues().apply {
put(MediaStore.MediaColumns.DISPLAY_NAME, name)
put(MediaStore.MediaColumns.MIME_TYPE, "image/jpeg")
if(Build.VERSION.SDK_INT > Build.VERSION_CODES.P) {
put(MediaStore.Images.Media.RELATIVE_PATH, "Pictures/CameraX-Image")
}
}
companion object {
const val TAG = "CameraXApp"
const val FILENAME_FORMAT = "yyyy-MM-dd-HH-mm-ss-SSS"
const val REQUEST_CODE_PERMISSIONS = 10
val REQUIRED_PERMISSIONS =
mutableListOf (
Manifest.permission.CAMERA,
Manifest.permission.RECORD_AUDIO,
Manifest.permission.WRITE_EXTERNAL_STORAGE
).apply {
}.toTypedArray()
}
override fun onRequestPermissionsResult(
requestCode: Int, permissions: Array<String>, grantResults:
IntArray) {
super.onRequestPermissionsResult(requestCode, permissions, grantResults)
if (requestCode == REQUEST_CODE_PERMISSIONS) {
if (allPermissionsGranted()) {
Toast.makeText(baseContext, "Ready", Toast.LENGTH_SHORT).show()
} else {
Toast.makeText(this,
"Permissions not granted by the user.",
Toast.LENGTH_SHORT).show()
finish()
}
}
}
}
What if instead of calling it twice on the button click you called the front camera only, and then call startCamera() again in the onImageSaved function conditionally. You'd just have to pass the value of the param from startCamera() to takePicture() or set a global variable.
override fun
onImageSaved(output: ImageCapture.OutputFileResults){
val msg = "Photo capture succeeded: ${output.savedUri}"
Toast.makeText(baseContext, msg, Toast.LENGTH_SHORT).show()
Log.d(TAG, msg)
if (pizza == CameraSelector.DEFAULT_FRONT_CAMERA) {
startCamera(CameraSelector.DEFAULT_BACK_CAMERA)
}
}
I am using CameraX and ML-Kit's Android Text Recognition API to make a Text-Recognition app. The camera works fine until I point at a word and it recognizes it. Then the screen goes black, and I see the word written there(which is kind of what I have in mind, but I'd like the word to be there while the camera Preview is still on). I originally thought that maybe the word had some background color on it that caused this, so I set the background color to transparent, but that didn't change anything. I have tried searching Stack Overflow, and haven't found anything here. And the MLKit and Camera X documents haven't said anything about this either.
Here is my activity_main.xml file, which might be the cause.
<FrameLayout
android:layout_width="match_parent"
android:layout_height="match_parent"
tools:context=".MainActivity">
<androidx.camera.view.PreviewView
android:id="#+id/cameraPreviewView"
android:layout_width="match_parent"
android:layout_height="match_parent"
app:layout_constraintTop_toTopOf="parent">
</androidx.camera.view.PreviewView>
<TextView
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:fontFamily="sans-serif-black"
android:textColor="#color/white"
android:textSize="20sp"
android:textStyle="bold"
app:layout_constraintBottom_toTopOf="#+id/cameraPreviewView"
app:layout_constraintTop_toTopOf="parent"
android:id="#+id/Result"
android:gravity="bottom"/>
and this is my only Kotlin file.
import android.Manifest
import android.annotation.SuppressLint
import android.content.pm.PackageManager
import android.media.Image
import androidx.appcompat.app.AppCompatActivity
import android.os.Bundle
import android.util.Log
import android.widget.TextView
import android.widget.Toast
import androidx.camera.core.*
import androidx.camera.lifecycle.ProcessCameraProvider
import androidx.core.app.ActivityCompat
import androidx.core.content.ContextCompat
import com.example.myapplication3.databinding.ActivityMainBinding
import com.google.mlkit.vision.common.InputImage
import com.google.mlkit.vision.text.Text
import com.google.mlkit.vision.text.TextRecognition
import java.io.IOException
import java.util.concurrent.ExecutorService
import java.util.concurrent.Executors
class MainActivity : AppCompatActivity() {
override fun onCreate(savedInstanceState: Bundle?) {
super.onCreate(savedInstanceState)
val binding = ActivityMainBinding.inflate(layoutInflater)
setContentView(binding.root)
if (allPermissionsGranted()) {
startCamera()
} else {
requestPermissions()
}
}
private val cameraExecutor: ExecutorService by lazy { Executors.newSingleThreadExecutor() }
override fun onDestroy() {
super.onDestroy()
cameraExecutor.shutdown()
}
private companion object {
val TAG: String = MainActivity::class.java.simpleName
const val REQUEST_CODE_PERMISSIONS = 10
val REQUIRED_PERMISSIONS = arrayOf(Manifest.permission.CAMERA)
}
private fun allPermissionsGranted() = REQUIRED_PERMISSIONS.all {
ContextCompat.checkSelfPermission(baseContext, it) == PackageManager.PERMISSION_GRANTED
}
private fun requestPermissions() {
ActivityCompat.requestPermissions(
this,
REQUIRED_PERMISSIONS,
REQUEST_CODE_PERMISSIONS
)
}
override fun onRequestPermissionsResult(
requestCode: Int,
permissions: Array<String>,
grantResults: IntArray
) {
super.onRequestPermissionsResult(requestCode, permissions, grantResults)
if (requestCode == REQUEST_CODE_PERMISSIONS) {
if (allPermissionsGranted()) {
startCamera()
} else {
Toast.makeText(
this,
"Permissions not granted.",
Toast.LENGTH_SHORT
).show()
finish()
}
}
}
class TextReaderAnalyzer(
private val textFoundListener: (String) -> Unit
) : ImageAnalysis.Analyzer {
#SuppressLint("UnsafeOptInUsageError")
override fun analyze(imageProxy: ImageProxy) {
imageProxy.image?.let {process(it, imageProxy) }
}
private fun process(image: Image, imageProxy: ImageProxy) {
try {
readTextFromImage(InputImage.fromMediaImage(image, 90), imageProxy)
} catch (e: IOException) {
Log.d(TAG, "Failed to load the image")
e.printStackTrace()
}
}
#SuppressLint("UnsafeOptInUsageError")
private fun readTextFromImage(image: InputImage, imageProxy: ImageProxy) {
TextRecognition.getClient()
.process(image)
.addOnSuccessListener { visionText ->
processTextFromImage(visionText)
}
.addOnFailureListener { error ->
Log.d(TAG, "Failed to process the image")
error.printStackTrace()
imageProxy.close()
}
}
private fun processTextFromImage(visionText: Text) {
for (block in visionText.textBlocks) {
// You can access whole block of text using block.text
for (line in block.lines) {
// You can access whole line of text using line.text
for (element in line.elements) {
textFoundListener(element.text)
}
}
}
}
}
private val imageAnalyzer by lazy {
ImageAnalysis.Builder()
.setTargetAspectRatio(AspectRatio.RATIO_16_9)
.build()
.also {
it.setAnalyzer(
cameraExecutor,
TextReaderAnalyzer(::onTextFound)
)
}
}
private fun onTextFound(foundText: String) {
val binding = ActivityMainBinding.inflate(layoutInflater)
setContentView(binding.root)
val textView: TextView = binding.Result
textView.text = foundText
}
private fun startCamera() {
val binding = ActivityMainBinding.inflate(layoutInflater)
setContentView(binding.root)
val cameraProviderFuture = ProcessCameraProvider.getInstance(this)
cameraProviderFuture.addListener(
{
val preview = Preview.Builder()
.build()
.also { it.setSurfaceProvider(binding.cameraPreviewView.surfaceProvider) }
cameraProviderFuture.get().bind(preview, imageAnalyzer)
},
ContextCompat.getMainExecutor(this)
)
}
private fun ProcessCameraProvider.bind(
preview: Preview,
imageAnalyzer: ImageAnalysis
) = try {
unbindAll()
bindToLifecycle(
this#MainActivity,
CameraSelector.DEFAULT_BACK_CAMERA,
preview,
imageAnalyzer
)
} catch (ise: IllegalStateException) {
// Thrown if binding is not done from the main thread
Log.e(TAG, "Binding failed", ise)
}
}
You should change your .addOnSuccessListener block a little bit.
TextRecognition.getClient()
.process(image)
.addOnSuccessListener { visionText ->
processTextFromImage(visionText)
//Add below line here
imageProxy.close()
}
.addOnFailureListener { error ->
Log.d(TAG, "Failed to process the image")
error.printStackTrace()
imageProxy.close()
}
Hello I have an app that used now deprecated Camera module in android for displaying the camera view and drawing filters onto it using mlkit face detection. Recently we've decided to upgrade it to CameraX, I did all the necessary steps to make it work as a separate android app and it works. When I want to incorporate it to our existing Custom React Native module no matter what I do, it doesn't show anything on the screen. I've checked the view sizes and they seem to be ok. Below is the code I use to start the camera module.
package com.myProject.speech.components
import android.annotation.SuppressLint
import android.content.Context
import android.content.pm.PackageManager
import android.graphics.Bitmap
import android.graphics.Canvas
import android.hardware.camera2.CameraManager
import android.os.Bundle
import android.util.AttributeSet
import android.util.Log
import android.view.LayoutInflater
import android.view.View
import android.view.ViewGroup
import android.widget.RelativeLayout
import android.widget.Toast
import androidx.activity.ComponentActivity
import androidx.appcompat.app.AppCompatActivity
import androidx.camera.core.*
import androidx.camera.lifecycle.ProcessCameraProvider
import androidx.camera.view.PreviewView
import androidx.core.app.ActivityCompat
import androidx.core.content.ContextCompat
import androidx.core.view.size
import androidx.lifecycle.ViewModelProvider
import androidx.lifecycle.ViewModelStoreOwner
import com.facebook.react.uimanager.ThemedReactContext
import com.google.mlkit.common.MlKitException
import com.google.mlkit.vision.face.FaceDetectorOptions
import com.myProject.grpc.speech.StickerDatabase
import com.myProject.speech.R
import com.myProject.speech.utils.PreferenceUtils
import java.util.*
import androidx.databinding.DataBindingUtil
import com.myProject.camera_test.mlkit.facedetection.FaceContourDetectionProcessor
import com.myProject.speech.mlkit.GraphicOverlay
import java.util.concurrent.ExecutorService
class myProjectCameraView : RelativeLayout {
private var previewView: PreviewView? = null
private var graphicOverlay: GraphicOverlay? = null
private var context: ThemedReactContext? = null
private lateinit var cameraExecutor: ExecutorService
var faceRecognitionEnabled = false
var cameraSelector: CameraSelector? = null
private var cameraProvider: ProcessCameraProvider? = null
var cameraLifecycleObserver: CameraLifecycleObserver? = null
private var lensFacing = CameraSelector.LENS_FACING_FRONT
private lateinit var cameraManager: CameraManager
private var imageCapture: ImageCapture? = null
val TAG = "myProjectCameraView"
constructor(context: ThemedReactContext,
cameraLifecycleObserver: CameraLifecycleObserver?) : super(context) {
this.context = context
this.cameraLifecycleObserver = cameraLifecycleObserver;
Log.d(TAG, "constructor Called")
val inflater = context.getSystemService(
Context.LAYOUT_INFLATER_SERVICE) as LayoutInflater
inflater.inflate(R.layout.live_preview, this, true)
previewView = findViewById(R.id.firePreview)
graphicOverlay = findViewById(R.id.fireFaceOverlay)
if (allPermissionsGranted()) {
startCamera()
if (cameraLifecycleObserver != null) {
cameraLifecycleObserver.registerActionHandler(this);
}
} else {
Log.d(TAG, "Not all Permissions Granted")
}
this.cameraLifecycleObserver!!.registerActionHandler(this)
}
private fun startCamera() {
val cameraProviderFuture = ProcessCameraProvider.getInstance(this.context as Context)
cameraProviderFuture.addListener(Runnable {
// Used to bind the lifecycle of cameras to the lifecycle owner
val cameraProvider: ProcessCameraProvider = cameraProviderFuture.get()
// Preview
val preview = Preview.Builder()
.build()
.also {
it.setSurfaceProvider(previewView!!.createSurfaceProvider())
}
imageCapture = ImageCapture.Builder()
.build()
val imageAnalyzer = ImageAnalysis.Builder().setBackpressureStrategy(ImageAnalysis.STRATEGY_KEEP_ONLY_LATEST)
.build()
.also {
it.setAnalyzer(cameraExecutor, FaceContourDetectionProcessor(graphicOverlay!!))
}
// Select back camera as a default
val cameraSelector = CameraSelector.Builder()
.requireLensFacing(CameraSelector.LENS_FACING_FRONT)
.build()
try {
// Unbind use cases before rebinding
cameraProvider.unbindAll()
// Bind use cases to camera
cameraProvider.bindToLifecycle(
this.context!!.currentActivity!! as AppCompatActivity, cameraSelector, preview, imageCapture, imageAnalyzer)
} catch (exc: Exception) {
Log.e(TAG, "Use case binding failed", exc)
}
}, ContextCompat.getMainExecutor(this.context as Context))
}
fun enableFaceRecognition(enabled: Boolean) {
Log.d(TAG, "enableFaceRecognition Called")
if (enabled) {
Log.d(TAG, "With FaceRecognition")
Log.d(TAG, "Parent size: ${this!!.measuredWidth}x${this!!.measuredHeight}")
Log.d(TAG, "previewView sizes: ${previewView!!.measuredWidth}x${previewView!!.measuredHeight}")
Log.d(TAG, "graphicOverlay sizes: ${graphicOverlay!!.measuredWidth}x${graphicOverlay!!.measuredHeight}")
} else {
Log.d(TAG, "Without FaceRecognition")
}
}
fun onActivityStopped() {
Log.d(TAG, "onActivityStopped")
}
fun onActivityResumed() {
Log.d(TAG, "onActivityResumed")
}
fun onActivityPaused() {
Log.d(TAG, "onActivityPaused")
}
fun onActivityDestroyed() {
Log.d(TAG, "onActivityDestroyed")
}
private fun allPermissionsGranted(): Boolean {
for (permission in requiredPermissions) {
if (!isPermissionGranted(getContext(), permission)) {
return false
}
}
return true
}
private val requiredPermissions: Array<String?>
private get() = try {
val info = getContext().packageManager.getPackageInfo(
getContext().packageName, PackageManager.GET_PERMISSIONS)
val ps = info.requestedPermissions
if (ps != null && ps.size > 0) {
ps
} else {
arrayOfNulls(0)
}
} catch (e: Exception) {
arrayOfNulls(0)
}
constructor(context: Context?, attrs: AttributeSet?) : super(context, attrs) {}
constructor(context: Context?, attrs: AttributeSet?,
defStyleAttr: Int) : super(context, attrs, defStyleAttr) {
}
val viewBitmap: Bitmap
get() {
val v = rootView
val b = Bitmap.createBitmap(v.layoutParams.width,
v.layoutParams.height,
Bitmap.Config.ARGB_8888)
val c = Canvas(b)
v.layout(v.left, v.top, v.right, v.bottom)
v.draw(c)
return b
}
private fun isPermissionGranted(context: Context,
permission: String?): Boolean {
if (ContextCompat.checkSelfPermission(context, permission!!) ==
PackageManager.PERMISSION_GRANTED) {
Log.i(this.TAG, "Permission granted: $permission")
return true
}
Log.i(TAG, "Permission NOT granted: $permission")
return false
}
companion object {
private const val FACE_DETECTION = "Face Detection"
private const val CLASSIFICATION = "Classification"
private const val PERMISSION_REQUESTS = 1
private const val lastDBPath = ""
private var context: ThemedReactContext? = null
val lastDB: StickerDatabase? = null
}
}
Any insights would be very appreciated.
Thanks
The issue happens because the relative layout is not resized after being added to the scene. The workaround is not mine, but I could not find where it was from right now so I'll just leave it here in case someone else is having the similar issue. I solved the issue by calling the layoutHack in the constructor of my RelativeLayout
fun setupLayoutHack() {
Choreographer.getInstance().postFrameCallback(object : Choreographer.FrameCallback {
override fun doFrame(frameTimeNanos: Long) {
manuallyLayoutChildren()
viewTreeObserver.dispatchOnGlobalLayout()
Choreographer.getInstance().postFrameCallback(this)
}
})
}
fun manuallyLayoutChildren() {
for (i in 0 until childCount) {
val child = getChildAt(i)
child.measure(MeasureSpec.makeMeasureSpec(measuredWidth, MeasureSpec.EXACTLY),
MeasureSpec.makeMeasureSpec(measuredHeight, MeasureSpec.EXACTLY))
child.layout(0, 0, child.measuredWidth, child.measuredHeight)
}
}
I am trying to recognize qr code in my app when camera is turned on. And then transfer to according activity depend on qr code text.
I am doing it with firebase ML kit and CameraX library with help of google's documentation but I have errors with ImageAnalyzer's analyze method.
ImageAnalyzer offers me to implement analyze method even though it's already implemented. But it has two parameters: (imageProxy: ImageProxy?, degrees: Int) and says that it overrides nothing. if I delete second parameter (degrees: Int), then it recognizes, but "degrees" is not recognized.
I tried to follow some tutorials but they use lots of third party libraries. I'd like to use cameraX and ML Kit.
How can I fix this?
Here is my code:
package ge.softservice.nfcwithactivties
import android.Manifest
import android.annotation.SuppressLint
import android.content.pm.PackageManager
import android.os.Bundle
import android.util.Log
import android.widget.Toast
import androidx.appcompat.app.AppCompatActivity
import androidx.camera.core.*
import androidx.camera.lifecycle.ProcessCameraProvider
import androidx.core.app.ActivityCompat
import androidx.core.content.ContextCompat
import com.google.firebase.ml.vision.FirebaseVision
import com.google.firebase.ml.vision.barcode.FirebaseVisionBarcode
import com.google.firebase.ml.vision.barcode.FirebaseVisionBarcodeDetectorOptions
import com.google.firebase.ml.vision.common.FirebaseVisionImage
import com.google.firebase.ml.vision.common.FirebaseVisionImageMetadata
import kotlinx.android.synthetic.main.activity_qr.*
import java.io.File
import java.util.concurrent.ExecutorService
import java.util.concurrent.Executors
class QrActivity : AppCompatActivity() {
private var preview: Preview? = null
private var imageAnalyzer: ImageAnalysis? = null
private var camera: Camera? = null
internal var isDetected = false
private lateinit var outputDirectory: File
private lateinit var cameraExecutor: ExecutorService
override fun onRequestPermissionsResult(
requestCode: Int, permissions: Array<String>, grantResults:
IntArray
) {
if (requestCode == REQUEST_CODE_PERMISSIONS) {
if (allPermissionsGranted()) {
startCamera()
} else {
Toast.makeText(
this,
"Permissions not granted by the user.",
Toast.LENGTH_SHORT
).show()
finish()
}
}
}
override fun onCreate(savedInstanceState: Bundle?) {
super.onCreate(savedInstanceState)
setContentView(R.layout.activity_qr)
// Request camera permissions
if (allPermissionsGranted()) {
startCamera()
} else {
ActivityCompat.requestPermissions(
this, REQUIRED_PERMISSIONS, REQUEST_CODE_PERMISSIONS
)
}
// outputDirectory = getOutputDirectory()
cameraExecutor = Executors.newSingleThreadExecutor()
}
private fun startCamera() {
val options = FirebaseVisionBarcodeDetectorOptions.Builder()
.setBarcodeFormats(
FirebaseVisionBarcode.FORMAT_QR_CODE,
FirebaseVisionBarcode.FORMAT_AZTEC
)
.build()
val detector = FirebaseVision.getInstance().getVisionBarcodeDetector(options)
val result = detector.detectInImage(image)
.addOnSuccessListener { barcodes ->
// Task completed successfully
// ...
}
.addOnFailureListener {
// Task failed with an exception
// ...
}
val cameraProviderFuture = ProcessCameraProvider.getInstance(this)
cameraProviderFuture.addListener(Runnable {
// Used to bind the lifecycle of cameras to the lifecycle owner
val cameraProvider: ProcessCameraProvider = cameraProviderFuture.get()
// Preview
preview = Preview.Builder()
.build()
// Select back camera
val cameraSelector =
CameraSelector.Builder().requireLensFacing(CameraSelector.LENS_FACING_BACK).build()
try {
// Unbind use cases before rebinding
cameraProvider.unbindAll()
// Bind use cases to camera
camera = cameraProvider.bindToLifecycle(
this, cameraSelector, preview
)
preview?.setSurfaceProvider(viewFinder.createSurfaceProvider(/*camera?.cameraInfo*/))
} catch (exc: Exception) {
Log.e(TAG, "Use case binding failed", exc)
}
}, ContextCompat.getMainExecutor(this))
}
private fun takePhoto() {
// TODO
}
private fun allPermissionsGranted() = REQUIRED_PERMISSIONS.all {
ContextCompat.checkSelfPermission(
baseContext, it
) == PackageManager.PERMISSION_GRANTED
}
/* fun getOutputDirectory(): File {
val mediaDir = externalMediaDirs.firstOrNull()?.let {
File(it, resources.getString(R.string.app_name)).apply { mkdirs() } }
return if (mediaDir != null && mediaDir.exists())
mediaDir else filesDir
}*/
companion object {
private const val TAG = "CameraXBasic"
private const val FILENAME_FORMAT = "yyyy-MM-dd-HH-mm-ss-SSS"
private const val REQUEST_CODE_PERMISSIONS = 10
private val REQUIRED_PERMISSIONS = arrayOf(Manifest.permission.CAMERA)
}
}
private class MyImageAnalyzer : ImageAnalysis.Analyzer {
private fun degreesToFirebaseRotation(degrees: Int): Int = when(degrees) {
0 -> FirebaseVisionImageMetadata.ROTATION_0
90 -> FirebaseVisionImageMetadata.ROTATION_90
180 -> FirebaseVisionImageMetadata.ROTATION_180
270 -> FirebaseVisionImageMetadata.ROTATION_270
else -> throw Exception("Rotation must be 0, 90, 180, or 270.")
}
#SuppressLint("UnsafeExperimentalUsageError")
override fun analyze(imageProxy: ImageProxy?, degrees: Int) {
val mediaImage = imageProxy?.image
val imageRotation = degreesToFirebaseRotation(degrees)
if (mediaImage != null) {
val image = FirebaseVisionImage.fromMediaImage(mediaImage, imageRotation)
// Pass image to an ML Kit Vision API
// ...
}
}
}
Looking at your code snippet, it seems you're using camera-camera2 version beta04 with camerax-view version alpha11.
The documentation may be out of date, the Analyzer now only receives an ImageProxy inside its analyze callback. The degrees information that was previously also passed in can now be accessed via ImageProxy.getImageInfo().getRotationDegrees().
So your Analyzer should look like this
private class MyImageAnalyzer : ImageAnalysis.Analyzer {
private fun degreesToFirebaseRotation(degrees: Int): Int {
// ...
}
override fun analyze(imageProxy: ImageProxy) {
val mediaImage = imageProxy.image
val imageRotation = degreesToFirebaseRotation(imageProxy.imageInfo.rotationDegrees)
val image = FirebaseVisionImage.fromMediaImage(mediaImage, imageRotation)
// ...
}
}
I have two classes: QrActivity, where camera is turned on to scan qr codes, and BarcodeAnalyzer, where image analyze process is going. I am doing it with cameraX and ML Kit.
That's my QrActivity:
package ge.softservice.nfcwithactivties
import android.Manifest
import android.content.pm.PackageManager
import android.os.Bundle
import android.util.Log
import android.widget.Toast
import androidx.appcompat.app.AppCompatActivity
import androidx.camera.core.*
import androidx.camera.lifecycle.ProcessCameraProvider
import androidx.core.app.ActivityCompat
import androidx.core.content.ContextCompat
import kotlinx.android.synthetic.main.activity_qr.*
import java.io.File
import java.util.concurrent.ExecutorService
import java.util.concurrent.Executors
class QrActivity : AppCompatActivity() {
private var preview: Preview? = null
private var imageAnalyzer: ImageAnalysis? = null
private var camera: Camera? = null
private lateinit var outputDirectory: File
private lateinit var cameraExecutor: ExecutorService
override fun onRequestPermissionsResult(
requestCode: Int, permissions: Array<String>, grantResults:
IntArray
) {
if (requestCode == REQUEST_CODE_PERMISSIONS) {
if (allPermissionsGranted()) {
startCamera()
} else {
Toast.makeText(
this,
"Permissions not granted by the user.",
Toast.LENGTH_SHORT
).show()
finish()
}
}
}
override fun onCreate(savedInstanceState: Bundle?) {
super.onCreate(savedInstanceState)
setContentView(R.layout.activity_qr)
// Request camera permissions
if (allPermissionsGranted()) {
startCamera()
} else {
ActivityCompat.requestPermissions(
this, REQUIRED_PERMISSIONS, REQUEST_CODE_PERMISSIONS
)
}
// outputDirectory = getOutputDirectory()
cameraExecutor = Executors.newSingleThreadExecutor()
}
private fun startCamera() {
val cameraProviderFuture = ProcessCameraProvider.getInstance(this)
cameraProviderFuture.addListener(Runnable {
// Used to bind the lifecycle of cameras to the lifecycle owner
val cameraProvider: ProcessCameraProvider = cameraProviderFuture.get()
// Preview
preview = Preview.Builder()
.build()
// Select back camera
val cameraSelector =
CameraSelector.Builder().requireLensFacing(CameraSelector.LENS_FACING_BACK).build()
try {
// Unbind use cases before rebinding
cameraProvider.unbindAll()
// Bind use cases to camera
camera = cameraProvider.bindToLifecycle(
this, cameraSelector, preview
)
preview?.setSurfaceProvider(viewFinder.createSurfaceProvider(/*camera?.cameraInfo*/))
} catch (exc: Exception) {
Log.e(TAG, "Use case binding failed", exc)
}
}, ContextCompat.getMainExecutor(this))
}
private fun takePhoto() {
// TODO
}
private fun allPermissionsGranted() = REQUIRED_PERMISSIONS.all {
ContextCompat.checkSelfPermission(
baseContext, it
) == PackageManager.PERMISSION_GRANTED
}
/* fun getOutputDirectory(): File {
val mediaDir = externalMediaDirs.firstOrNull()?.let {
File(it, resources.getString(R.string.app_name)).apply { mkdirs() } }
return if (mediaDir != null && mediaDir.exists())
mediaDir else filesDir
}*/
companion object {
private const val TAG = "CameraXBasic"
private const val FILENAME_FORMAT = "yyyy-MM-dd-HH-mm-ss-SSS"
private const val REQUEST_CODE_PERMISSIONS = 10
private val REQUIRED_PERMISSIONS = arrayOf(Manifest.permission.CAMERA)
}
}
Thats' my Barcode analyzer:
package ge.softservice.nfcwithactivties
import android.annotation.SuppressLint
import android.content.Context
import android.widget.Toast
import androidx.camera.core.ImageAnalysis
import androidx.camera.core.ImageProxy
import com.google.firebase.ml.vision.FirebaseVision
import com.google.firebase.ml.vision.barcode.FirebaseVisionBarcode
import com.google.firebase.ml.vision.barcode.FirebaseVisionBarcodeDetectorOptions
import com.google.firebase.ml.vision.common.FirebaseVisionImage
import com.google.firebase.ml.vision.common.FirebaseVisionImageMetadata
class BarcodeAnalyzer : ImageAnalysis.Analyzer {
lateinit var context: Context;
private fun degreesToFirebaseRotation(degrees: Int): Int = when(degrees) {
0 -> FirebaseVisionImageMetadata.ROTATION_0
90 -> FirebaseVisionImageMetadata.ROTATION_90
180 -> FirebaseVisionImageMetadata.ROTATION_180
270 -> FirebaseVisionImageMetadata.ROTATION_270
else -> throw Exception("Rotation must be 0, 90, 180, or 270.")
}
#SuppressLint("UnsafeExperimentalUsageError")
override fun analyze(imageProxy: ImageProxy) {
// val degrees by Delegates.notNull<Int>()
val mediaImage = imageProxy.image
val imageRotation = degreesToFirebaseRotation(imageProxy.imageInfo.rotationDegrees)
if (mediaImage != null) {
val image = FirebaseVisionImage.fromMediaImage(mediaImage, imageRotation)
// Pass image to an ML Kit Vision API
val options = FirebaseVisionBarcodeDetectorOptions.Builder()
.setBarcodeFormats(
FirebaseVisionBarcode.FORMAT_QR_CODE
)
.build()
val detector = FirebaseVision.getInstance().getVisionBarcodeDetector(options)
val result = detector.detectInImage(image)
.addOnSuccessListener { barcodes ->
// Task completed successfully
Toast.makeText(context, "it works", Toast.LENGTH_SHORT).show()
}
.addOnFailureListener {
// Task failed with an exception
Toast.makeText(context, "something went wrong", Toast.LENGTH_SHORT).show()
}
}
}
}
I also tried to put this code instead of BarcodeAnalyzer into QrActivity class, but then image value is not recognized.
val options = FirebaseVisionBarcodeDetectorOptions.Builder()
.setBarcodeFormats(
FirebaseVisionBarcode.FORMAT_QR_CODE
)
.build()
val detector = FirebaseVision.getInstance().getVisionBarcodeDetector(options)
val result = detector.detectInImage(image)
.addOnSuccessListener { barcodes ->
// Task completed successfully
Toast.makeText(context, "it works", Toast.LENGTH_SHORT).show()
}
.addOnFailureListener {
// Task failed with an exception
Toast.makeText(context, "something went wrong", Toast.LENGTH_SHORT).show()
}
if I put it in BarcodeAnalyzer, where it is now, there is no error but result value is gray color and not used.
I found this project and tried to do similar but in my project it shows me errors: https://github.com/Lavanyagaur22/Visitor-Card
I also tried this and other tutorials but lots of things are unclear for me: https://www.bignerdranch.com/blog/using-firebasemlkit-with-camerax/
I tried to do instance in QrActivty class of BarcodeAnalyzer but is shows me errors.
I am following google's firebase ML Kit tutorial but still doesn't work for me: https://firebase.google.com/docs/ml-kit/android/read-barcodes#kotlin+ktx_1
So, how can I connect BarcodeAnalyzer class to QrActivity class or how to make an instance to recognize qr code?
To connect your image analyzer to your camera, you need to add a use case where you've already added one; the preview use case here: cameraProvider.bindToLifecycle(this, cameraSelector, preview)
You can create an instance of the analyzer like this:
val analyzer = ImageAnalysis.Builder()
.setTargetAspectRatio(AspectRatio.RATIO_16_9)
.setTargetRotation(previewView.display.rotation)
.build().also {
it.setAnalyzer(Executors.newSingleThreadExecutor(), BarcodeAnalyzer())
}
And then bind this use case:
cameraProvider.bindToLifecycle(this, cameraSelector, preview, analyzer)