Only the second function is called, Android Studio Kotlin - Android

I followed a CameraX tutorial from the Android developers website, and I want the camera button, when pressed, to take a picture with the front and the back camera. The problem is that only the second function is called.
Ctrl + F "THE PROBLEM IS HERE" to find the two functions.
MainActivity.kt file:
package com.android.example.cameraxapp
import androidx.appcompat.app.AppCompatActivity
import android.os.Bundle
import android.Manifest
import android.content.pm.PackageManager
import android.util.Log
import android.widget.Toast
import androidx.core.app.ActivityCompat
import androidx.core.content.ContextCompat
import java.util.concurrent.Executors
import androidx.camera.core.*
import androidx.camera.lifecycle.ProcessCameraProvider
import androidx.camera.video.*
import androidx.camera.video.VideoCapture
import androidx.core.content.PermissionChecker
import java.nio.ByteBuffer
import java.text.SimpleDateFormat
import java.util.*
import java.util.concurrent.ExecutorService
import android.provider.MediaStore
import android.content.ContentValues
import android.os.Build
import androidx.camera.view.PreviewView
import com.nasro.camerax.R
import com.nasro.camerax.databinding.ActivityMainBinding
typealias LumaListener = (luma: Double) -> Unit
class MainActivity : AppCompatActivity() {
lateinit var viewBinding: ActivityMainBinding
lateinit var cameraSelector: CameraSelector
var imageCapture: ImageCapture? = null
var videoCapture: VideoCapture<Recorder>? = null
var recording: Recording? = null
lateinit var cameraExecutor: ExecutorService
override fun onCreate(savedInstanceState: Bundle?) {
super.onCreate(savedInstanceState)
viewBinding = ActivityMainBinding.inflate(layoutInflater)
setContentView(viewBinding.root)
// Request camera permissions
if (allPermissionsGranted()) {
Toast.makeText(baseContext, "Ready", Toast.LENGTH_SHORT).show()
} else {
ActivityCompat.requestPermissions(
this, REQUIRED_PERMISSIONS, REQUEST_CODE_PERMISSIONS)
}
// Set up the listeners for take photo and video capture buttons
viewBinding.imageCaptureButton.setOnClickListener {start()}
viewBinding.videoCaptureButton.setOnClickListener {
}
cameraExecutor = Executors.newSingleThreadExecutor()
}
fun captureVideo() {}
fun start() {
//THE PROBLEM IS HERE, THE PROBLEM IS HERE, THE PROBLEM IS HERE.
startCamera(CameraSelector.DEFAULT_FRONT_CAMERA)
startCamera(CameraSelector.DEFAULT_BACK_CAMERA)
}
private fun startCamera(pizza:CameraSelector) {
val cameraProviderFuture = ProcessCameraProvider.getInstance(this)
cameraProviderFuture.addListener({
// Used to bind the lifecycle of cameras to the lifecycle owner
val cameraProvider: ProcessCameraProvider = cameraProviderFuture.get()
// Preview
val preview = Preview.Builder()
.build()
.also {
it.setSurfaceProvider(findViewById<PreviewView>(R.id.viewFinder).surfaceProvider)
}
imageCapture = ImageCapture.Builder()
.build()
try {
// Unbind use cases before rebinding
cameraProvider.unbindAll()
// Bind use cases to camera
cameraProvider.bindToLifecycle(
this, pizza, preview, imageCapture)
} catch(exc: Exception) {
Log.e(TAG, "Use case binding failed", exc)
}
takePhoto()
}, ContextCompat.getMainExecutor(this))
}
fun takePhoto() {
// Get a stable reference of the modifiable image capture use case
var imageCapture = imageCapture ?: return
// Create time stamped name and MediaStore entry.
val name = SimpleDateFormat(FILENAME_FORMAT, Locale.US)
.format(System.currentTimeMillis())
val contentValues = ContentValues().apply {
put(MediaStore.MediaColumns.DISPLAY_NAME, name)
put(MediaStore.MediaColumns.MIME_TYPE, "image/jpeg")
if(Build.VERSION.SDK_INT > Build.VERSION_CODES.P) {
put(MediaStore.Images.Media.RELATIVE_PATH, "Pictures/CameraX-Image")
}
}
// Create output options object which contains file + metadata
val outputOptions = ImageCapture.OutputFileOptions
.Builder(contentResolver,
MediaStore.Images.Media.EXTERNAL_CONTENT_URI,
contentValues)
.build()
// Set up image capture listener, which is triggered after photo has
// been taken
imageCapture.takePicture(
outputOptions,
ContextCompat.getMainExecutor(this),
object : ImageCapture.OnImageSavedCallback {
override fun onError(exc: ImageCaptureException) {
Log.e(TAG, "Photo capture failed: ${exc.message}", exc)
}
override fun onImageSaved(output: ImageCapture.OutputFileResults) {
val msg = "Photo capture succeeded: ${output.savedUri}"
Toast.makeText(baseContext, msg, Toast.LENGTH_SHORT).show()
Log.d(TAG, msg)
}
}
)
}
fun allPermissionsGranted() = REQUIRED_PERMISSIONS.all {
ContextCompat.checkSelfPermission(
baseContext, it) == PackageManager.PERMISSION_GRANTED
}
override fun onDestroy() {
super.onDestroy()
cameraExecutor.shutdown()
}
val name = SimpleDateFormat(FILENAME_FORMAT, Locale.US)
.format(System.currentTimeMillis())
val contentValues = ContentValues().apply {
put(MediaStore.MediaColumns.DISPLAY_NAME, name)
put(MediaStore.MediaColumns.MIME_TYPE, "image/jpeg")
if(Build.VERSION.SDK_INT > Build.VERSION_CODES.P) {
put(MediaStore.Images.Media.RELATIVE_PATH, "Pictures/CameraX-Image")
}
}
companion object {
const val TAG = "CameraXApp"
const val FILENAME_FORMAT = "yyyy-MM-dd-HH-mm-ss-SSS"
const val REQUEST_CODE_PERMISSIONS = 10
val REQUIRED_PERMISSIONS =
mutableListOf (
Manifest.permission.CAMERA,
Manifest.permission.RECORD_AUDIO,
Manifest.permission.WRITE_EXTERNAL_STORAGE
).apply {
}.toTypedArray()
}
override fun onRequestPermissionsResult(
requestCode: Int, permissions: Array<String>, grantResults:
IntArray) {
super.onRequestPermissionsResult(requestCode, permissions, grantResults)
if (requestCode == REQUEST_CODE_PERMISSIONS) {
if (allPermissionsGranted()) {
Toast.makeText(baseContext, "Ready", Toast.LENGTH_SHORT).show()
} else {
Toast.makeText(this,
"Permissions not granted by the user.",
Toast.LENGTH_SHORT).show()
finish()
}
}
}
}

What if, instead of calling it twice on the button click, you called startCamera() for the front camera only, and then called startCamera() again conditionally in the onImageSaved callback? You would just have to pass the value of the parameter from startCamera() down to takePhoto(), or set a member variable.
override fun onImageSaved(output: ImageCapture.OutputFileResults) {
val msg = "Photo capture succeeded: ${output.savedUri}"
Toast.makeText(baseContext, msg, Toast.LENGTH_SHORT).show()
Log.d(TAG, msg)
if (pizza == CameraSelector.DEFAULT_FRONT_CAMERA) {
startCamera(CameraSelector.DEFAULT_BACK_CAMERA)
}
}
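
A minimal sketch of that approach, assuming the selector is threaded from startCamera() into takePhoto() (names and output options mirror the question's code; only the button listener changes, so that it starts with the front camera, and the second capture is triggered after the first photo is saved):

// In onCreate:
// viewBinding.imageCaptureButton.setOnClickListener { startCamera(CameraSelector.DEFAULT_FRONT_CAMERA) }

private fun startCamera(selector: CameraSelector) {
    val cameraProviderFuture = ProcessCameraProvider.getInstance(this)
    cameraProviderFuture.addListener({
        val cameraProvider = cameraProviderFuture.get()
        val preview = Preview.Builder().build().also {
            it.setSurfaceProvider(findViewById<PreviewView>(R.id.viewFinder).surfaceProvider)
        }
        imageCapture = ImageCapture.Builder().build()
        try {
            cameraProvider.unbindAll()
            cameraProvider.bindToLifecycle(this, selector, preview, imageCapture)
        } catch (exc: Exception) {
            Log.e(TAG, "Use case binding failed", exc)
        }
        // Capture only once this selector is actually bound.
        takePhoto(selector)
    }, ContextCompat.getMainExecutor(this))
}

private fun takePhoto(selector: CameraSelector) {
    val imageCapture = imageCapture ?: return
    val name = SimpleDateFormat(FILENAME_FORMAT, Locale.US).format(System.currentTimeMillis())
    val contentValues = ContentValues().apply {
        put(MediaStore.MediaColumns.DISPLAY_NAME, name)
        put(MediaStore.MediaColumns.MIME_TYPE, "image/jpeg")
    }
    val outputOptions = ImageCapture.OutputFileOptions
        .Builder(contentResolver, MediaStore.Images.Media.EXTERNAL_CONTENT_URI, contentValues)
        .build()
    imageCapture.takePicture(
        outputOptions,
        ContextCompat.getMainExecutor(this),
        object : ImageCapture.OnImageSavedCallback {
            override fun onError(exc: ImageCaptureException) {
                Log.e(TAG, "Photo capture failed: ${exc.message}", exc)
            }
            override fun onImageSaved(output: ImageCapture.OutputFileResults) {
                Log.d(TAG, "Photo capture succeeded: ${output.savedUri}")
                // After the front photo is saved, rebind and capture with the back camera.
                if (selector == CameraSelector.DEFAULT_FRONT_CAMERA) {
                    startCamera(CameraSelector.DEFAULT_BACK_CAMERA)
                }
            }
        }
    )
}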

Related

Why does the camera turn off and the screen go black after my app scans the first text it sees with ML-Kit?

I am using CameraX and ML Kit's Android Text Recognition API to make a text-recognition app. The camera works fine until I point it at a word and it recognizes it. Then the screen goes black, and I see the word written there (which is kind of what I have in mind, but I'd like the word to stay there while the camera preview is still on). I originally thought the word might have a background color that caused this, so I set the background color to transparent, but that didn't change anything. I have searched Stack Overflow and haven't found anything, and the ML Kit and CameraX docs don't say anything about this either.
Here is my activity_main.xml file, which might be the cause.
<FrameLayout
android:layout_width="match_parent"
android:layout_height="match_parent"
tools:context=".MainActivity">
<androidx.camera.view.PreviewView
android:id="#+id/cameraPreviewView"
android:layout_width="match_parent"
android:layout_height="match_parent"
app:layout_constraintTop_toTopOf="parent">
</androidx.camera.view.PreviewView>
<TextView
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:fontFamily="sans-serif-black"
android:textColor="@color/white"
android:textSize="20sp"
android:textStyle="bold"
app:layout_constraintBottom_toTopOf="@+id/cameraPreviewView"
app:layout_constraintTop_toTopOf="parent"
android:id="#+id/Result"
android:gravity="bottom"/>
and this is my only Kotlin file.
import android.Manifest
import android.annotation.SuppressLint
import android.content.pm.PackageManager
import android.media.Image
import androidx.appcompat.app.AppCompatActivity
import android.os.Bundle
import android.util.Log
import android.widget.TextView
import android.widget.Toast
import androidx.camera.core.*
import androidx.camera.lifecycle.ProcessCameraProvider
import androidx.core.app.ActivityCompat
import androidx.core.content.ContextCompat
import com.example.myapplication3.databinding.ActivityMainBinding
import com.google.mlkit.vision.common.InputImage
import com.google.mlkit.vision.text.Text
import com.google.mlkit.vision.text.TextRecognition
import java.io.IOException
import java.util.concurrent.ExecutorService
import java.util.concurrent.Executors
class MainActivity : AppCompatActivity() {
override fun onCreate(savedInstanceState: Bundle?) {
super.onCreate(savedInstanceState)
val binding = ActivityMainBinding.inflate(layoutInflater)
setContentView(binding.root)
if (allPermissionsGranted()) {
startCamera()
} else {
requestPermissions()
}
}
private val cameraExecutor: ExecutorService by lazy { Executors.newSingleThreadExecutor() }
override fun onDestroy() {
super.onDestroy()
cameraExecutor.shutdown()
}
private companion object {
val TAG: String = MainActivity::class.java.simpleName
const val REQUEST_CODE_PERMISSIONS = 10
val REQUIRED_PERMISSIONS = arrayOf(Manifest.permission.CAMERA)
}
private fun allPermissionsGranted() = REQUIRED_PERMISSIONS.all {
ContextCompat.checkSelfPermission(baseContext, it) == PackageManager.PERMISSION_GRANTED
}
private fun requestPermissions() {
ActivityCompat.requestPermissions(
this,
REQUIRED_PERMISSIONS,
REQUEST_CODE_PERMISSIONS
)
}
override fun onRequestPermissionsResult(
requestCode: Int,
permissions: Array<String>,
grantResults: IntArray
) {
super.onRequestPermissionsResult(requestCode, permissions, grantResults)
if (requestCode == REQUEST_CODE_PERMISSIONS) {
if (allPermissionsGranted()) {
startCamera()
} else {
Toast.makeText(
this,
"Permissions not granted.",
Toast.LENGTH_SHORT
).show()
finish()
}
}
}
class TextReaderAnalyzer(
private val textFoundListener: (String) -> Unit
) : ImageAnalysis.Analyzer {
@SuppressLint("UnsafeOptInUsageError")
override fun analyze(imageProxy: ImageProxy) {
imageProxy.image?.let {process(it, imageProxy) }
}
private fun process(image: Image, imageProxy: ImageProxy) {
try {
readTextFromImage(InputImage.fromMediaImage(image, 90), imageProxy)
} catch (e: IOException) {
Log.d(TAG, "Failed to load the image")
e.printStackTrace()
}
}
@SuppressLint("UnsafeOptInUsageError")
private fun readTextFromImage(image: InputImage, imageProxy: ImageProxy) {
TextRecognition.getClient()
.process(image)
.addOnSuccessListener { visionText ->
processTextFromImage(visionText)
}
.addOnFailureListener { error ->
Log.d(TAG, "Failed to process the image")
error.printStackTrace()
imageProxy.close()
}
}
private fun processTextFromImage(visionText: Text) {
for (block in visionText.textBlocks) {
// You can access whole block of text using block.text
for (line in block.lines) {
// You can access whole line of text using line.text
for (element in line.elements) {
textFoundListener(element.text)
}
}
}
}
}
private val imageAnalyzer by lazy {
ImageAnalysis.Builder()
.setTargetAspectRatio(AspectRatio.RATIO_16_9)
.build()
.also {
it.setAnalyzer(
cameraExecutor,
TextReaderAnalyzer(::onTextFound)
)
}
}
private fun onTextFound(foundText: String) {
val binding = ActivityMainBinding.inflate(layoutInflater)
setContentView(binding.root)
val textView: TextView = binding.Result
textView.text = foundText
}
private fun startCamera() {
val binding = ActivityMainBinding.inflate(layoutInflater)
setContentView(binding.root)
val cameraProviderFuture = ProcessCameraProvider.getInstance(this)
cameraProviderFuture.addListener(
{
val preview = Preview.Builder()
.build()
.also { it.setSurfaceProvider(binding.cameraPreviewView.surfaceProvider) }
cameraProviderFuture.get().bind(preview, imageAnalyzer)
},
ContextCompat.getMainExecutor(this)
)
}
private fun ProcessCameraProvider.bind(
preview: Preview,
imageAnalyzer: ImageAnalysis
) = try {
unbindAll()
bindToLifecycle(
this@MainActivity,
CameraSelector.DEFAULT_BACK_CAMERA,
preview,
imageAnalyzer
)
} catch (ise: IllegalStateException) {
// Thrown if binding is not done from the main thread
Log.e(TAG, "Binding failed", ise)
}
}
You should change your .addOnSuccessListener block a little bit.
TextRecognition.getClient()
.process(image)
.addOnSuccessListener { visionText ->
processTextFromImage(visionText)
//Add below line here
imageProxy.close()
}
.addOnFailureListener { error ->
Log.d(TAG, "Failed to process the image")
error.printStackTrace()
imageProxy.close()
}

ImageAnalysis.Analyzer does nothing when it is put on cameraProvider.bindToLifecycle

MainActivity.kt
package com.memex.eu
import android.Manifest
import android.app.Activity
import android.app.ActivityManager
import android.content.pm.PackageManager
import android.graphics.Bitmap
import android.net.Uri
import android.os.Build
import android.os.Bundle
import android.util.Log
import android.util.Size
import android.view.Gravity
import android.view.MotionEvent
import android.widget.Toast
import androidx.appcompat.app.AppCompatActivity
import androidx.camera.core.CameraSelector
import androidx.camera.core.ImageAnalysis
import androidx.camera.core.Preview
import androidx.camera.lifecycle.ProcessCameraProvider
import androidx.core.app.ActivityCompat
import androidx.core.content.ContextCompat
import com.google.ar.core.HitResult
import com.google.ar.core.Plane
import com.google.ar.sceneform.AnchorNode
import com.google.ar.sceneform.rendering.ModelRenderable
import com.google.ar.sceneform.ux.ArFragment
import com.google.ar.sceneform.ux.TransformableNode
import com.google.mlkit.vision.common.InputImage
import com.google.mlkit.vision.objects.ObjectDetection
import com.google.mlkit.vision.objects.ObjectDetector
import com.google.mlkit.vision.objects.defaults.ObjectDetectorOptions
import kotlinx.android.synthetic.main.activity_main.*
import java.util.concurrent.ExecutorService
import java.util.concurrent.Executors
import java.util.function.Consumer
import java.util.function.Function
class MainActivity : AppCompatActivity() {
private lateinit var cameraExecutor: ExecutorService
private val TAG: String = MainActivity::class.java.getSimpleName()
private var arFragment: ArFragment? = null
private var andyRenderable: ModelRenderable? = null
private lateinit var options: ObjectDetectorOptions
private lateinit var objectDetector: ObjectDetector
private lateinit var analyzer: MyImageAnalyzer
override fun onCreate(savedInstanceState: Bundle?) {
super.onCreate(savedInstanceState)
setContentView(R.layout.activity_main)
// Request camera permissions
if (allPermissionsGranted()) {
startCamera()
} else {
ActivityCompat.requestPermissions(
this, REQUIRED_PERMISSIONS, REQUEST_CODE_PERMISSIONS
)
}
cameraExecutor = Executors.newSingleThreadExecutor()
if (!checkIsSupportedDeviceOrFinish(this)) {
return
}
arFragment = supportFragmentManager.findFragmentById(R.id.sceneform_fragment) as ArFragment?
// When you build a Renderable, Sceneform loads its resources in the background while returning
// a CompletableFuture. Call thenAccept(), handle(), or check isDone() before calling get().
ModelRenderable.builder()
.setSource(this, R.raw.andy)
.build()
.thenAccept(Consumer { renderable: ModelRenderable? ->
andyRenderable = renderable
})
.exceptionally(
Function<Throwable, Void?> { throwable: Throwable? ->
val toast =
Toast.makeText(this, "Unable to load andy renderable", Toast.LENGTH_LONG)
toast.setGravity(Gravity.CENTER, 0, 0)
toast.show()
null
})
arFragment!!.setOnTapArPlaneListener { hitResult: HitResult, plane: Plane?, motionEvent: MotionEvent? ->
if (andyRenderable == null) {
return@setOnTapArPlaneListener
}
// Create the Anchor.
val anchor = hitResult.createAnchor()
val anchorNode =
AnchorNode(anchor)
anchorNode.setParent(arFragment!!.arSceneView.scene)
// Create the transformable andy and add it to the anchor.
val andy =
TransformableNode(arFragment!!.transformationSystem)
andy.setParent(anchorNode)
andy.renderable = andyRenderable
andy.select()
}
// Live detection and tracking
options = ObjectDetectorOptions.Builder()
.setDetectorMode(ObjectDetectorOptions.STREAM_MODE)
.enableClassification() // Optional
.build()
objectDetector = ObjectDetection.getClient(options)
@androidx.camera.core.ExperimentalGetImage
analyzer = MyImageAnalyzer()
}
override fun onRequestPermissionsResult(
requestCode: Int, permissions: Array<String>, grantResults:
IntArray
) {
if (requestCode == REQUEST_CODE_PERMISSIONS) {
if (allPermissionsGranted()) {
startCamera()
} else {
Toast.makeText(
this,
"Permissions not granted by the user.",
Toast.LENGTH_SHORT
).show()
finish()
}
}
}
override fun onDestroy() {
super.onDestroy()
cameraExecutor.shutdown()
}
private fun startCamera() {
val cameraProviderFuture = ProcessCameraProvider.getInstance(this)
cameraProviderFuture.addListener(Runnable {
// Used to bind the lifecycle of cameras to the lifecycle owner
val cameraProvider: ProcessCameraProvider = cameraProviderFuture.get()
// Preview
val preview = Preview.Builder()
.build()
.also {
it.setSurfaceProvider(viewFinder.createSurfaceProvider())
}
val imageAnalyzer = ImageAnalysis.Builder()
.setTargetResolution(Size(1280, 720))
.setBackpressureStrategy(ImageAnalysis.STRATEGY_KEEP_ONLY_LATEST)
.build()
.also {
it.setAnalyzer(cameraExecutor, analyzer)
}
// Select back camera as a default
val cameraSelector = CameraSelector.DEFAULT_BACK_CAMERA
try {
// Unbind use cases before rebinding
cameraProvider.unbindAll()
// Bind use cases to camera
cameraProvider.bindToLifecycle(
this, cameraSelector, preview, imageAnalyzer)
} catch(exc: Exception) {
Log.e(TAG, "Use case binding failed", exc)
}
}, ContextCompat.getMainExecutor(this))
}
private fun allPermissionsGranted() = REQUIRED_PERMISSIONS.all {
ContextCompat.checkSelfPermission(
baseContext, it
) == PackageManager.PERMISSION_GRANTED
}
companion object {
private const val TAG = "MainActivity"
private const val REQUEST_CODE_PERMISSIONS = 10
private val REQUIRED_PERMISSIONS = arrayOf(Manifest.permission.CAMERA)
private const val MIN_OPENGL_VERSION = 3.0
/**
* Returns false and displays an error message if Sceneform can not run, true if Sceneform can run
* on this device.
*
*
* Sceneform requires Android N on the device as well as OpenGL 3.0 capabilities.
*
*
* Finishes the activity if Sceneform can not run
*/
fun checkIsSupportedDeviceOrFinish(activity: Activity): Boolean {
val openGlVersionString =
(activity.getSystemService(ACTIVITY_SERVICE) as ActivityManager)
.deviceConfigurationInfo
.glEsVersion
if (openGlVersionString.toDouble() < MIN_OPENGL_VERSION) {
Log.e(TAG, "Sceneform requires OpenGL ES 3.0 later")
Toast.makeText(
activity,
"Sceneform requires OpenGL ES 3.0 or later",
Toast.LENGTH_LONG
)
.show()
activity.finish()
return false
}
return true
}
}
}
MyImageAnalyzer.kt
package com.memex.eu
import android.util.Log
import androidx.camera.core.ImageAnalysis
import androidx.camera.core.ImageProxy
import androidx.fragment.app.FragmentManager
import com.google.mlkit.vision.common.InputImage
import com.google.mlkit.vision.objects.DetectedObject
import com.google.mlkit.vision.objects.ObjectDetection
import com.google.mlkit.vision.objects.defaults.ObjectDetectorOptions
import com.google.mlkit.vision.objects.defaults.PredefinedCategory
@androidx.camera.core.ExperimentalGetImage
class MyImageAnalyzer : ImageAnalysis.Analyzer{
// Live detection and tracking
val options = ObjectDetectorOptions.Builder()
.setDetectorMode(ObjectDetectorOptions.STREAM_MODE)
.enableClassification() // Optional
.build()
val objectDetector = ObjectDetection.getClient(options)
private val TAG: String = MyImageAnalyzer::class.java.getSimpleName()
override fun analyze(imageProxy: ImageProxy) {
val mediaImage = imageProxy.image
Log.e(TAG, "DETECTIONS")
if (mediaImage != null) {
val image = InputImage.fromMediaImage(mediaImage, imageProxy.imageInfo.rotationDegrees)
objectDetector.process(image)
.addOnCompleteListener {
imageProxy.close()
if (it.isSuccessful) {
displayDets(it.result as List<DetectedObject>)
} else {
it.exception?.printStackTrace()
}
}
}
imageProxy.close()
}
private fun displayDets(results: List<DetectedObject>) {
for (detectedObject in results) {
val boundingBox = detectedObject.boundingBox
val trackingId = detectedObject.trackingId
for (label in detectedObject.labels) {
val text = label.text
if (PredefinedCategory.FOOD == text) {
Log.e(TAG, text)
}
val index = label.index
if (PredefinedCategory.FOOD_INDEX == index) {
Log.e(TAG, text)
}
val confidence = label.confidence
Log.e(TAG, confidence.toString())
Log.e(TAG, trackingId.toString())
Log.e(TAG, boundingBox.toString())
}
}
}
}
But in my console I see none of the Log statements from ImageAnalysis.Analyzer. Why is the analyze function not being called? I am printing the log statements first, before I eventually draw the bounding boxes, but analyze() is never invoked. How do I tell CameraX to call that function?

Kotlin CameraX can't capture image

I want to capture an image and save it to a file using the CameraX library. The image is captured and saved, but the file's size is 0 B. I don't know where I went wrong. The log shows this error:
androidx.camera.core.ImageCaptureException: Not bound to a valid Camera [ImageCapture:androidx.camera.core.ImageCapture-52180692-0099-40c3-8d17-508e08019b84]
Here is my capture code :
fun bindPreview(
lifecycleOwner: LifecycleOwner,
previewView: PreviewView,
cameraProvider: ProcessCameraProvider,
){
val preview = Preview.Builder()
.build().also {
it.setSurfaceProvider(previewView.surfaceProvider)
}
imageCapture = ImageCapture.Builder().build()
val cameraSelector = CameraSelector.DEFAULT_BACK_CAMERA
try {
cameraProvider.unbindAll()
cameraProvider.bindToLifecycle(
lifecycleOwner, cameraSelector, preview, imageCapture)
}catch(exception: Exception) {
Log.e(TAG, "Use case binding failed", exception)
}
}
fun onImageCaptureClicked(context: Context){
outputDirectory = getOutputDirectory(context)
val photoFile = File(outputDirectory, SimpleDateFormat(FILENAME_FORMAT, Locale.US
).format(System.currentTimeMillis()) + ".jpg")
val outputOptions = ImageCapture.OutputFileOptions.Builder(photoFile).build()
imageCapture.takePicture(
outputOptions, ContextCompat.getMainExecutor(context), object :ImageCapture.OnImageSavedCallback{
override fun onError(exception: ImageCaptureException) {
Log.e( TAG, "Photo capture failed: ${exception.message}", exception)
}
override fun onImageSaved(output: ImageCapture.OutputFileResults) {
val savedUri = Uri.fromFile(photoFile)
val msg = "Photo capture succeeded: $savedUri"
Toast.makeText(context, msg, Toast.LENGTH_SHORT).show()
Log.d(TAG, msg)
}
}
)
}
What should I do?
I used a Fragment for CameraX in my project:
package com.example.splashscreenkotlin.fragments
import android.Manifest
import android.content.Context
import android.content.pm.PackageManager
import android.net.Uri
import android.os.Bundle
import android.util.Log
import androidx.fragment.app.Fragment
import android.view.LayoutInflater
import android.view.View
import android.view.ViewGroup
import android.widget.Toast
import androidx.camera.core.CameraSelector
import androidx.camera.core.ImageCapture
import androidx.camera.core.ImageCaptureException
import androidx.camera.core.Preview
import androidx.camera.lifecycle.ProcessCameraProvider
import androidx.core.content.ContextCompat
import com.example.splashscreenkotlin.R
import com.example.splashscreenkotlin.databinding.FragmentCameraBinding
import java.io.File
import java.text.SimpleDateFormat
import java.util.*
import java.util.concurrent.ExecutorService
import java.util.concurrent.Executors
class CameraFragment : Fragment() {
private var binding: FragmentCameraBinding? = null
private val _binding get() = binding!!
private var imageCapture: ImageCapture? = null
private lateinit var mContext: Context
private lateinit var outputDirectory: File
private lateinit var cameraExecutor: ExecutorService
override fun onAttach(context: Context) {
super.onAttach(context)
mContext = context
}
override fun onCreateView(
inflater: LayoutInflater, container: ViewGroup?,
savedInstanceState: Bundle?
): View {
binding = FragmentCameraBinding.inflate(layoutInflater,container,false)
return _binding.root
}
override fun onViewCreated(view: View, savedInstanceState: Bundle?) {
super.onViewCreated(view, savedInstanceState)
outputDirectory = getOutputDirectory()
cameraExecutor = Executors.newSingleThreadExecutor()
when{
allPermissionGuaranted() -> {
startCamera()
}
shouldShowRequestPermissionRationale("permission") -> {
// In an educational UI, explain to the user why your app requires this
// permission for a specific feature to behave as expected. In this UI,
// include a "cancel" or "no thanks" button that allows the user to
// continue using your app without granting the permission.
}
else -> {
// You can directly ask for the permission.
requestPermissions(
REQUIRED_PERMISSIONS,
REQUEST_CODE_PERMISSIONS
)
}
}
_binding.cameraButton.setOnClickListener {
takePhoto()
}
}
private fun allPermissionGuaranted() =
REQUIRED_PERMISSIONS.all {
ContextCompat.checkSelfPermission(
mContext,it
) == PackageManager.PERMISSION_GRANTED
// You can use the API that requires the permission.
}
private fun startCamera() {
val cameraProviderFuture = ProcessCameraProvider.getInstance(mContext)
cameraProviderFuture.addListener({
val preview = Preview.Builder().build().also { mPreview ->
mPreview.setSurfaceProvider(_binding.camera.surfaceProvider)
}
imageCapture = ImageCapture.Builder().build()
val cameraSelector = CameraSelector.DEFAULT_BACK_CAMERA
try {
cameraProviderFuture.get().unbindAll()
cameraProviderFuture.get().bindToLifecycle(
this,cameraSelector, preview, imageCapture
)
}catch (e:Exception){
Log.d(TAG,"Camera Start Fail",e)
}
},ContextCompat.getMainExecutor(mContext))
}
@Deprecated("Deprecated in Java")
override fun onRequestPermissionsResult(
requestCode: Int,
permissions: Array<String>,
grantResults: IntArray) {
when(requestCode == REQUEST_CODE_PERMISSIONS){
allPermissionGuaranted() -> {
startCamera()
}
else -> {
Toast.makeText(mContext,"Permission Not granted by the User",Toast.LENGTH_SHORT).show()
}
}
}
private fun getOutputDirectory(): File{
val mediaDir = activity?.externalMediaDirs?.firstOrNull()?.let { mFile ->
File(mFile, resources.getString(R.string.app_name)).apply {
mkdirs()
}
}
return if(mediaDir !=null && mediaDir.exists())
mediaDir else activity?.filesDir!!
}
private fun takePhoto(){
val imageCapture = imageCapture?: return
val photoFile = File(
outputDirectory,
SimpleDateFormat(File_Name_Format, Locale.getDefault())
.format(System
.currentTimeMillis())+ ".jpg")
val outputOptions = ImageCapture.OutputFileOptions
.Builder(photoFile).build()
imageCapture.takePicture(
outputOptions, ContextCompat.getMainExecutor(mContext),
object : ImageCapture.OnImageSavedCallback{
override fun onImageSaved(outputFileResults: ImageCapture.OutputFileResults) {
val savedUri = Uri.fromFile(photoFile)
val msg = "Photo Saved"
Toast.makeText(mContext,"${msg}, $savedUri",Toast.LENGTH_LONG).show()
}
override fun onError(exception: ImageCaptureException) {
Log.e(TAG,"onError: ${exception.message}",exception)
}
}
)
}
override fun onDestroyView() {
super.onDestroyView()
binding = null
cameraExecutor.shutdown()
}
companion object {
const val TAG = "CameraX"
const val File_Name_Format = "yyyy-MM-dd-HH-mm-ss-SS"
const val REQUEST_CODE_PERMISSIONS = 200
val REQUIRED_PERMISSIONS = arrayOf(Manifest.permission.CAMERA)
}
}
Sorry, I didn't write instructions, but if anything is unclear or you want to know why I wrote something a certain way, just message me; I'm happy to help.
Also, don't forget the camera permission in the manifest and the CameraX dependencies, as in the sketch below.
You can find both on the Android developers page.
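
For reference, a minimal sketch of what that usually looks like; the version number below is a placeholder, so check the current CameraX release notes before copying it:

// AndroidManifest.xml, outside the <application> tag:
//   <uses-permission android:name="android.permission.CAMERA" />

// Module-level build.gradle.kts
dependencies {
    val cameraxVersion = "1.3.0" // placeholder version
    implementation("androidx.camera:camera-core:$cameraxVersion")
    implementation("androidx.camera:camera-camera2:$cameraxVersion")
    implementation("androidx.camera:camera-lifecycle:$cameraxVersion")
    implementation("androidx.camera:camera-view:$cameraxVersion")
}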

How can I recognize barcode with firebase ML Kit?

I am trying to recognize a QR code in my app when the camera is turned on, and then move to the corresponding activity depending on the QR code text.
I am doing it with Firebase ML Kit and the CameraX library, following Google's documentation, but I have errors with ImageAnalyzer's analyze method.
The IDE asks me to implement the analyze method even though it is already implemented: with two parameters, (imageProxy: ImageProxy?, degrees: Int), it says the method overrides nothing. If I delete the second parameter (degrees: Int), the override is accepted, but then "degrees" is unresolved.
I tried to follow some tutorials, but they use lots of third-party libraries. I'd like to use CameraX and ML Kit.
How can I fix this?
Here is my code:
package ge.softservice.nfcwithactivties
import android.Manifest
import android.annotation.SuppressLint
import android.content.pm.PackageManager
import android.os.Bundle
import android.util.Log
import android.widget.Toast
import androidx.appcompat.app.AppCompatActivity
import androidx.camera.core.*
import androidx.camera.lifecycle.ProcessCameraProvider
import androidx.core.app.ActivityCompat
import androidx.core.content.ContextCompat
import com.google.firebase.ml.vision.FirebaseVision
import com.google.firebase.ml.vision.barcode.FirebaseVisionBarcode
import com.google.firebase.ml.vision.barcode.FirebaseVisionBarcodeDetectorOptions
import com.google.firebase.ml.vision.common.FirebaseVisionImage
import com.google.firebase.ml.vision.common.FirebaseVisionImageMetadata
import kotlinx.android.synthetic.main.activity_qr.*
import java.io.File
import java.util.concurrent.ExecutorService
import java.util.concurrent.Executors
class QrActivity : AppCompatActivity() {
private var preview: Preview? = null
private var imageAnalyzer: ImageAnalysis? = null
private var camera: Camera? = null
internal var isDetected = false
private lateinit var outputDirectory: File
private lateinit var cameraExecutor: ExecutorService
override fun onRequestPermissionsResult(
requestCode: Int, permissions: Array<String>, grantResults:
IntArray
) {
if (requestCode == REQUEST_CODE_PERMISSIONS) {
if (allPermissionsGranted()) {
startCamera()
} else {
Toast.makeText(
this,
"Permissions not granted by the user.",
Toast.LENGTH_SHORT
).show()
finish()
}
}
}
override fun onCreate(savedInstanceState: Bundle?) {
super.onCreate(savedInstanceState)
setContentView(R.layout.activity_qr)
// Request camera permissions
if (allPermissionsGranted()) {
startCamera()
} else {
ActivityCompat.requestPermissions(
this, REQUIRED_PERMISSIONS, REQUEST_CODE_PERMISSIONS
)
}
// outputDirectory = getOutputDirectory()
cameraExecutor = Executors.newSingleThreadExecutor()
}
private fun startCamera() {
val options = FirebaseVisionBarcodeDetectorOptions.Builder()
.setBarcodeFormats(
FirebaseVisionBarcode.FORMAT_QR_CODE,
FirebaseVisionBarcode.FORMAT_AZTEC
)
.build()
val detector = FirebaseVision.getInstance().getVisionBarcodeDetector(options)
val result = detector.detectInImage(image)
.addOnSuccessListener { barcodes ->
// Task completed successfully
// ...
}
.addOnFailureListener {
// Task failed with an exception
// ...
}
val cameraProviderFuture = ProcessCameraProvider.getInstance(this)
cameraProviderFuture.addListener(Runnable {
// Used to bind the lifecycle of cameras to the lifecycle owner
val cameraProvider: ProcessCameraProvider = cameraProviderFuture.get()
// Preview
preview = Preview.Builder()
.build()
// Select back camera
val cameraSelector =
CameraSelector.Builder().requireLensFacing(CameraSelector.LENS_FACING_BACK).build()
try {
// Unbind use cases before rebinding
cameraProvider.unbindAll()
// Bind use cases to camera
camera = cameraProvider.bindToLifecycle(
this, cameraSelector, preview
)
preview?.setSurfaceProvider(viewFinder.createSurfaceProvider(/*camera?.cameraInfo*/))
} catch (exc: Exception) {
Log.e(TAG, "Use case binding failed", exc)
}
}, ContextCompat.getMainExecutor(this))
}
private fun takePhoto() {
// TODO
}
private fun allPermissionsGranted() = REQUIRED_PERMISSIONS.all {
ContextCompat.checkSelfPermission(
baseContext, it
) == PackageManager.PERMISSION_GRANTED
}
/* fun getOutputDirectory(): File {
val mediaDir = externalMediaDirs.firstOrNull()?.let {
File(it, resources.getString(R.string.app_name)).apply { mkdirs() } }
return if (mediaDir != null && mediaDir.exists())
mediaDir else filesDir
}*/
companion object {
private const val TAG = "CameraXBasic"
private const val FILENAME_FORMAT = "yyyy-MM-dd-HH-mm-ss-SSS"
private const val REQUEST_CODE_PERMISSIONS = 10
private val REQUIRED_PERMISSIONS = arrayOf(Manifest.permission.CAMERA)
}
}
private class MyImageAnalyzer : ImageAnalysis.Analyzer {
private fun degreesToFirebaseRotation(degrees: Int): Int = when(degrees) {
0 -> FirebaseVisionImageMetadata.ROTATION_0
90 -> FirebaseVisionImageMetadata.ROTATION_90
180 -> FirebaseVisionImageMetadata.ROTATION_180
270 -> FirebaseVisionImageMetadata.ROTATION_270
else -> throw Exception("Rotation must be 0, 90, 180, or 270.")
}
@SuppressLint("UnsafeExperimentalUsageError")
override fun analyze(imageProxy: ImageProxy?, degrees: Int) {
val mediaImage = imageProxy?.image
val imageRotation = degreesToFirebaseRotation(degrees)
if (mediaImage != null) {
val image = FirebaseVisionImage.fromMediaImage(mediaImage, imageRotation)
// Pass image to an ML Kit Vision API
// ...
}
}
}
Looking at your code snippet, it seems you're using camera-camera2 version beta04 with camerax-view version alpha11.
The documentation may be out of date; the Analyzer now receives only an ImageProxy in its analyze callback. The degrees information that was previously passed in can now be accessed via ImageProxy.getImageInfo().getRotationDegrees().
So your Analyzer should look like this:
private class MyImageAnalyzer : ImageAnalysis.Analyzer {
private fun degreesToFirebaseRotation(degrees: Int): Int {
// ...
}
override fun analyze(imageProxy: ImageProxy) {
val mediaImage = imageProxy.image
val imageRotation = degreesToFirebaseRotation(imageProxy.imageInfo.rotationDegrees)
if (mediaImage != null) {
val image = FirebaseVisionImage.fromMediaImage(mediaImage, imageRotation)
// ...
}
}
}

How can I connect ImageAnalyzer class to class where qr code should be recognized?

I have two classes: QrActivity, where the camera is turned on to scan QR codes, and BarcodeAnalyzer, where the image analysis happens. I am doing it with CameraX and ML Kit.
That's my QrActivity:
package ge.softservice.nfcwithactivties
import android.Manifest
import android.content.pm.PackageManager
import android.os.Bundle
import android.util.Log
import android.widget.Toast
import androidx.appcompat.app.AppCompatActivity
import androidx.camera.core.*
import androidx.camera.lifecycle.ProcessCameraProvider
import androidx.core.app.ActivityCompat
import androidx.core.content.ContextCompat
import kotlinx.android.synthetic.main.activity_qr.*
import java.io.File
import java.util.concurrent.ExecutorService
import java.util.concurrent.Executors
class QrActivity : AppCompatActivity() {
private var preview: Preview? = null
private var imageAnalyzer: ImageAnalysis? = null
private var camera: Camera? = null
private lateinit var outputDirectory: File
private lateinit var cameraExecutor: ExecutorService
override fun onRequestPermissionsResult(
requestCode: Int, permissions: Array<String>, grantResults:
IntArray
) {
if (requestCode == REQUEST_CODE_PERMISSIONS) {
if (allPermissionsGranted()) {
startCamera()
} else {
Toast.makeText(
this,
"Permissions not granted by the user.",
Toast.LENGTH_SHORT
).show()
finish()
}
}
}
override fun onCreate(savedInstanceState: Bundle?) {
super.onCreate(savedInstanceState)
setContentView(R.layout.activity_qr)
// Request camera permissions
if (allPermissionsGranted()) {
startCamera()
} else {
ActivityCompat.requestPermissions(
this, REQUIRED_PERMISSIONS, REQUEST_CODE_PERMISSIONS
)
}
// outputDirectory = getOutputDirectory()
cameraExecutor = Executors.newSingleThreadExecutor()
}
private fun startCamera() {
val cameraProviderFuture = ProcessCameraProvider.getInstance(this)
cameraProviderFuture.addListener(Runnable {
// Used to bind the lifecycle of cameras to the lifecycle owner
val cameraProvider: ProcessCameraProvider = cameraProviderFuture.get()
// Preview
preview = Preview.Builder()
.build()
// Select back camera
val cameraSelector =
CameraSelector.Builder().requireLensFacing(CameraSelector.LENS_FACING_BACK).build()
try {
// Unbind use cases before rebinding
cameraProvider.unbindAll()
// Bind use cases to camera
camera = cameraProvider.bindToLifecycle(
this, cameraSelector, preview
)
preview?.setSurfaceProvider(viewFinder.createSurfaceProvider(/*camera?.cameraInfo*/))
} catch (exc: Exception) {
Log.e(TAG, "Use case binding failed", exc)
}
}, ContextCompat.getMainExecutor(this))
}
private fun takePhoto() {
// TODO
}
private fun allPermissionsGranted() = REQUIRED_PERMISSIONS.all {
ContextCompat.checkSelfPermission(
baseContext, it
) == PackageManager.PERMISSION_GRANTED
}
/* fun getOutputDirectory(): File {
val mediaDir = externalMediaDirs.firstOrNull()?.let {
File(it, resources.getString(R.string.app_name)).apply { mkdirs() } }
return if (mediaDir != null && mediaDir.exists())
mediaDir else filesDir
}*/
companion object {
private const val TAG = "CameraXBasic"
private const val FILENAME_FORMAT = "yyyy-MM-dd-HH-mm-ss-SSS"
private const val REQUEST_CODE_PERMISSIONS = 10
private val REQUIRED_PERMISSIONS = arrayOf(Manifest.permission.CAMERA)
}
}
That's my BarcodeAnalyzer:
package ge.softservice.nfcwithactivties
import android.annotation.SuppressLint
import android.content.Context
import android.widget.Toast
import androidx.camera.core.ImageAnalysis
import androidx.camera.core.ImageProxy
import com.google.firebase.ml.vision.FirebaseVision
import com.google.firebase.ml.vision.barcode.FirebaseVisionBarcode
import com.google.firebase.ml.vision.barcode.FirebaseVisionBarcodeDetectorOptions
import com.google.firebase.ml.vision.common.FirebaseVisionImage
import com.google.firebase.ml.vision.common.FirebaseVisionImageMetadata
class BarcodeAnalyzer : ImageAnalysis.Analyzer {
lateinit var context: Context;
private fun degreesToFirebaseRotation(degrees: Int): Int = when(degrees) {
0 -> FirebaseVisionImageMetadata.ROTATION_0
90 -> FirebaseVisionImageMetadata.ROTATION_90
180 -> FirebaseVisionImageMetadata.ROTATION_180
270 -> FirebaseVisionImageMetadata.ROTATION_270
else -> throw Exception("Rotation must be 0, 90, 180, or 270.")
}
@SuppressLint("UnsafeExperimentalUsageError")
override fun analyze(imageProxy: ImageProxy) {
// val degrees by Delegates.notNull<Int>()
val mediaImage = imageProxy.image
val imageRotation = degreesToFirebaseRotation(imageProxy.imageInfo.rotationDegrees)
if (mediaImage != null) {
val image = FirebaseVisionImage.fromMediaImage(mediaImage, imageRotation)
// Pass image to an ML Kit Vision API
val options = FirebaseVisionBarcodeDetectorOptions.Builder()
.setBarcodeFormats(
FirebaseVisionBarcode.FORMAT_QR_CODE
)
.build()
val detector = FirebaseVision.getInstance().getVisionBarcodeDetector(options)
val result = detector.detectInImage(image)
.addOnSuccessListener { barcodes ->
// Task completed successfully
Toast.makeText(context, "it works", Toast.LENGTH_SHORT).show()
}
.addOnFailureListener {
// Task failed with an exception
Toast.makeText(context, "something went wrong", Toast.LENGTH_SHORT).show()
}
}
}
}
I also tried to put this code into the QrActivity class instead of BarcodeAnalyzer, but then the image value is not recognized.
val options = FirebaseVisionBarcodeDetectorOptions.Builder()
.setBarcodeFormats(
FirebaseVisionBarcode.FORMAT_QR_CODE
)
.build()
val detector = FirebaseVision.getInstance().getVisionBarcodeDetector(options)
val result = detector.detectInImage(image)
.addOnSuccessListener { barcodes ->
// Task completed successfully
Toast.makeText(context, "it works", Toast.LENGTH_SHORT).show()
}
.addOnFailureListener {
// Task failed with an exception
Toast.makeText(context, "something went wrong", Toast.LENGTH_SHORT).show()
}
If I put it in BarcodeAnalyzer, where it is now, there is no error, but the result value is grayed out and unused.
I found this project and tried to do something similar, but in my project it shows errors: https://github.com/Lavanyagaur22/Visitor-Card
I also tried this and other tutorials, but lots of things are unclear to me: https://www.bignerdranch.com/blog/using-firebasemlkit-with-camerax/
I tried to create an instance of BarcodeAnalyzer in QrActivity, but it shows me errors.
I am following Google's Firebase ML Kit tutorial, but it still doesn't work for me: https://firebase.google.com/docs/ml-kit/android/read-barcodes#kotlin+ktx_1
So, how can I connect the BarcodeAnalyzer class to the QrActivity class, or how do I make an instance that recognizes the QR code?
To connect your image analyzer to the camera, you need to add it as a use case where you already bind the preview use case, here: cameraProvider.bindToLifecycle(this, cameraSelector, preview)
You can create an instance of the analyzer like this:
val analyzer = ImageAnalysis.Builder()
.setTargetAspectRatio(AspectRatio.RATIO_16_9)
.setTargetRotation(previewView.display.rotation)
.build().also {
it.setAnalyzer(Executors.newSingleThreadExecutor(), BarcodeAnalyzer())
}
And then bind this use case:
cameraProvider.bindToLifecycle(this, cameraSelector, preview, analyzer)
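
To get the decoded text back into QrActivity, one common pattern (a sketch mirroring the listener-based analyzer from the earlier question, not the only way) is to pass a callback into the analyzer's constructor instead of holding a Context:

// Imports as in the question's BarcodeAnalyzer.
class BarcodeAnalyzer(
    private val onQrCodeFound: (String) -> Unit // invoked with the decoded text
) : ImageAnalysis.Analyzer {

    private fun degreesToFirebaseRotation(degrees: Int): Int = when (degrees) {
        0 -> FirebaseVisionImageMetadata.ROTATION_0
        90 -> FirebaseVisionImageMetadata.ROTATION_90
        180 -> FirebaseVisionImageMetadata.ROTATION_180
        270 -> FirebaseVisionImageMetadata.ROTATION_270
        else -> throw IllegalArgumentException("Rotation must be 0, 90, 180, or 270.")
    }

    @SuppressLint("UnsafeExperimentalUsageError")
    override fun analyze(imageProxy: ImageProxy) {
        val mediaImage = imageProxy.image ?: run { imageProxy.close(); return }
        val rotation = degreesToFirebaseRotation(imageProxy.imageInfo.rotationDegrees)
        val image = FirebaseVisionImage.fromMediaImage(mediaImage, rotation)
        val options = FirebaseVisionBarcodeDetectorOptions.Builder()
            .setBarcodeFormats(FirebaseVisionBarcode.FORMAT_QR_CODE)
            .build()
        FirebaseVision.getInstance().getVisionBarcodeDetector(options)
            .detectInImage(image)
            .addOnSuccessListener { barcodes ->
                barcodes.firstOrNull()?.rawValue?.let(onQrCodeFound)
            }
            .addOnCompleteListener { imageProxy.close() } // always release the frame
    }
}

// In QrActivity, when building the ImageAnalysis use case:
//   it.setAnalyzer(Executors.newSingleThreadExecutor(), BarcodeAnalyzer { text ->
//       runOnUiThread { /* e.g. start the activity that matches the QR text */ }
//   })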
