takePicture requires an executor on CameraX (1.0.0-alpha06) - Android

After updating from
androidx.camera:camera-core:1.0.0-alpha03
to
androidx.camera:camera-core:1.0.0-alpha06
the signatures of setTargetAspectRatio (in ImageCaptureConfig.Builder) and takePicture (in ImageCapture) have changed.
The official documentation and info on the web don't show how to use the new methods (how to specify the executor).
Code which broke after the update:
...
val captureConfig = ImageCaptureConfig.Builder()
.setTargetAspectRatioCustom(Rational(1, 1)) //this method changed
.setFlashMode(flashMode)
.setLensFacing(lensFacing)
.build()
val capture = ImageCapture(captureConfig)
binding.takeAPhoto.setOnClickListener {
...
val imageFile = createTempFile(System.currentTimeMillis().toString(), ".jpg")
capture.takePicture(imageFile, object : ImageCapture.OnImageSavedListener { //this method also changed
override fun onImageSaved(file: File) {
...
}
override fun onError(useCaseError: ImageCapture.UseCaseError, message: String, cause: Throwable?) {
...
}
})
}
}
Does anyone have (or know where to find) an example of how to use the new methods?
Thanks in advance

The official Google Codelab, which has obviously been updated recently, uses: Executors.newSingleThreadExecutor()
Reference: https://codelabs.developers.google.com/codelabs/camerax-getting-started/#4
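Applied to the snippet from the question, that means passing an executor as the new argument. A minimal sketch, using the alpha06 listener and error-type names that also appear in the accepted answer below (verify against your exact version):
val executor = Executors.newSingleThreadExecutor()
capture.takePicture(imageFile, executor, object : ImageCapture.OnImageSavedListener {
    override fun onImageSaved(file: File) { /* ... */ }
    override fun onError(error: ImageCapture.ImageCaptureError, message: String, cause: Throwable?) { /* ... */ }
})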
Edit: Since @kos's response also makes sense to me, I've added these two official Android docs references:
https://developer.android.com/reference/java/util/concurrent/Executors.html#newSingleThreadExecutor()
https://developer.android.com/reference/java/util/concurrent/Executors.html#newCachedThreadPool()
This way every reader of this topic can make up their own mind with respect to executors.
FURTHER EDIT: There are crucial API changes since 1.0.0-alpha07, so I studied some of the docs. There's a GitHub sample showing executor retrieval like so: mainExecutor = ContextCompat.getMainExecutor(requireContext()) (Source)
If some of you have already implemented CameraX and it works fine, I'd definitely wait for the beta release, as recommended by Android's release notes.

I faced the same thing you are facing and resolved it on my side.
class MainActivity : AppCompatActivity(), Executor {
private var right: Int = 0
private var bottom: Int = 0
private var left: Int = 0
private var top: Int = 0
private lateinit var preview: Preview
private val REQUEST_CODE_PERMISSIONS = 10
private val REQUIRED_PERMISSIONS = arrayOf(Manifest.permission.CAMERA)
private lateinit var imageCapture: ImageCapture
override fun onCreate(savedInstanceState: Bundle?) {
super.onCreate(savedInstanceState)
setContentView(R.layout.activity_main)
if (allPermissionsGranted()) {
viewFinder.post { startCamera() }
} else {
ActivityCompat.requestPermissions(this, REQUIRED_PERMISSIONS, REQUEST_CODE_PERMISSIONS)
}
viewFinder.addOnLayoutChangeListener { _, _, _, _, _, _, _, _, _ ->
updateTransform()
}
buttonPlus.setOnClickListener {
if (right < 100) {
right += 100
bottom += 100
left += 100
top += 100
val my = Rect(left, top, right, bottom)
preview.zoom(my)
}
}
buttonMinus.setOnClickListener {
if (right > 0) {
right -= 100
bottom -= 100
left -= 100
top -= 100
val my = Rect(left, top, right, bottom)
preview.zoom(my)
}
}
}
@SuppressLint("RestrictedApi")
private fun startCamera() {
val metrics = DisplayMetrics().also { viewFinder.display.getRealMetrics(it) }
val screenAspectRatio = Rational(metrics.widthPixels, metrics.heightPixels)
val previewConfig = PreviewConfig.Builder().apply {
setTargetAspectRatioCustom(screenAspectRatio)
setTargetRotation(viewFinder.display.rotation)
}.build()
preview = Preview(previewConfig)
preview.setOnPreviewOutputUpdateListener {
val parent = viewFinder.parent as ViewGroup
parent.removeView(viewFinder)
parent.addView(viewFinder, 0)
viewFinder.surfaceTexture = it.surfaceTexture
updateTransform()
}
CameraX.bindToLifecycle(this, preview)
captureImage()
}
@SuppressLint("RestrictedApi")
private fun captureImage() {
val imageCaptureConfig = ImageCaptureConfig.Builder()
.apply {
setTargetAspectRatioCustom(Rational(1, 1))
setCaptureMode(ImageCapture.CaptureMode.MIN_LATENCY)
}.build()
imageCapture = ImageCapture(imageCaptureConfig)
CameraX.bindToLifecycle(this, imageCapture)
capture_button.setOnClickListener {
val file = File(this.externalMediaDirs.first(), "${System.currentTimeMillis()}.jpg")
imageCapture.takePicture(file, this, object : ImageCapture.OnImageSavedListener {
override fun onImageSaved(file: File) {
val msg = "Photo capture succeeded: ${file.absolutePath}"
Log.d("CameraXApp", msg)
}
override fun onError(imageCaptureError: ImageCapture.ImageCaptureError, message: String, cause: Throwable?) {
val msg = "Photo capture failed: $message"
Log.e("CameraXApp", msg)
cause?.printStackTrace()
}
})
}
}
// Executor implementation: the takePicture callback above runs on whichever thread invokes this
override fun execute(command: Runnable) {
command.run()
}
private fun updateTransform() {
val matrix = Matrix()
val centerX = viewFinder.width / 2f
val centerY = viewFinder.height / 2f
val rotationDegrees = when (viewFinder.display.rotation) {
Surface.ROTATION_0 -> 0
Surface.ROTATION_90 -> 90
Surface.ROTATION_180 -> 180
Surface.ROTATION_270 -> 270
else -> return
}
matrix.postRotate(-rotationDegrees.toFloat(), centerX, centerY)
viewFinder.setTransform(matrix)
}
override fun onRequestPermissionsResult(requestCode: Int, permissions: Array<String>, grantResults: IntArray) {
if (requestCode == REQUEST_CODE_PERMISSIONS) {
if (allPermissionsGranted()) {
viewFinder.post { startCamera() }
} else {
Toast.makeText(this, "Permissions not granted by the user.", Toast.LENGTH_SHORT).show()
finish()
}
}
}
private fun allPermissionsGranted() = REQUIRED_PERMISSIONS.all {
ContextCompat.checkSelfPermission(baseContext, it) == PackageManager.PERMISSION_GRANTED
}
override fun onDestroy() {
super.onDestroy()
// lateinit guard: .let does not protect against an uninitialized property
if (::imageCapture.isInitialized) {
CameraX.unbind(imageCapture)
}
}
}
And the output is (as I log in the onImageSaved method):
Photo capture succeeded: /storage/emulated/0/Android/media/com.akshay.cameraxzoominoutdemo/1571052301192.jpg
It's working fine for me; try it out.
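A note on this approach: implementing Executor with command.run() executes the callback on whichever thread CameraX invokes it from, so views should not be touched directly in onImageSaved. If the callback belongs on the UI thread, a main-thread executor can be passed in the same position (illustrative line; savedListener is a placeholder for the listener object above):
imageCapture.takePicture(file, ContextCompat.getMainExecutor(this), savedListener)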

You can do it like this.
imageCapture.takePicture(file, { it.run() }, object : ImageCapture.OnImageSavedListener {
override fun onImageSaved(file: File) {}
override fun onError(useCaseError: ImageCapture.ImageCaptureError, message: String, cause: Throwable?) {}
})

Here is the changelog for the changes in alpha06: https://developer.android.com/jetpack/androidx/releases/camera
The setTargetAspectRatio() method now takes an AspectRatio enum with RATIO_4_3 or RATIO_16_9 values.
The takePicture() method takes (file, metadata, executor, imageSavedListener); pick an executor to suit your case, e.g. val executor = Executors.newSingleThreadExecutor()
Instead of useCase.onPreviewOutputUpdateListener = ..., use useCase.setOnPreviewOutputUpdateListener()
FYI: CameraX will be in beta in Dec 2019.
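For illustration, a sketch of the first and third changes (names taken from this changelog and from other answers on this page; treat it as a sketch, not the exact API):
val captureConfig = ImageCaptureConfig.Builder()
    .setTargetAspectRatio(AspectRatio.RATIO_16_9) // enum instead of a custom Rational
    .build()
preview.setOnPreviewOutputUpdateListener { output ->
    viewFinder.surfaceTexture = output.surfaceTexture
}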

Inside your click listener, call this function:
private fun saveImage() {
val file = File(this.externalMediaDirs.first(), "${System.currentTimeMillis()}.jpg")
val outputOptions = ImageCapture.OutputFileOptions.Builder(file).build()
imageCapture.takePicture(outputOptions, ContextCompat.getMainExecutor(this), object : ImageCapture.OnImageSavedCallback {
override fun onImageSaved(outputFileResults: ImageCapture.OutputFileResults) {
val msg = "${outputFileResults.savedUri} - ${file.absolutePath} - ${file.toURI()}"
}
override fun onError(imageCaptureError: ImageCaptureException) {
val msg = "Photo capture failed: $imageCaptureError"
}
})
}
And the msg in onImageSaved will contain something like this:
null - /storage/emulated/0/Android/media/com.mua.camx/1607589430984.jpg - file:/storage/emulated/0/Android/media/com.mua.camx/1607589430984.jpg
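The savedUri is null above because a File-based OutputFileOptions was used. As a hedged sketch (assuming the MediaStore overload of OutputFileOptions.Builder available in CameraX 1.0.x), writing through a ContentResolver yields a non-null savedUri instead:
val values = ContentValues().apply {
    put(MediaStore.MediaColumns.DISPLAY_NAME, System.currentTimeMillis().toString())
    put(MediaStore.MediaColumns.MIME_TYPE, "image/jpeg")
}
val outputOptions = ImageCapture.OutputFileOptions.Builder(
    contentResolver, MediaStore.Images.Media.EXTERNAL_CONTENT_URI, values
).build()
// pass outputOptions to takePicture exactly as in saveImage() above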

You only have to run the command, as below:
@Override
public void execute(Runnable command) {
command.run(); // <-- THIS IS NEEDED
}
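In Kotlin, the same effect can be had without implementing Executor on the Activity: a direct executor is a one-line SAM conversion (sketch; listener stands for whatever OnImageSavedListener you already have):
val directExecutor = Executor { command -> command.run() }
imageCapture.takePicture(file, directExecutor, listener)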

CameraX provides built-in executors, and takePicture can be implemented as below:
imgCaptureButton.setOnClickListener(new View.OnClickListener() {
@Override
@SuppressLint("RestrictedApi")
public void onClick(View v) {
imgCap.takePicture(CameraXExecutors.mainThreadExecutor(), new ImageCapture.OnImageCapturedListener() {
@Override
public void onCaptureSuccess(ImageProxy image, int rotationDegrees) {
super.onCaptureSuccess(image, rotationDegrees);
// Play with the Image here.
}
});
}
});
It does not use a File to save the image; instead it keeps the image as a buffer in memory.
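Note that CameraXExecutors is a restricted internal API, which is why @SuppressLint("RestrictedApi") is needed above. A public alternative, assuming androidx.core is on the classpath, is the main-thread executor used elsewhere on this page; an equivalent Kotlin sketch:
imgCap.takePicture(ContextCompat.getMainExecutor(context), object : ImageCapture.OnImageCapturedListener() {
    override fun onCaptureSuccess(image: ImageProxy, rotationDegrees: Int) {
        super.onCaptureSuccess(image, rotationDegrees)
        // work with the in-memory ImageProxy here
    }
})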

Related

MutableSharedFlow does not trigger the same event twice in Kotlin

I am using MutableSharedFlow in my project. The real project is far too big to include here, so I made a very small sample that reproduces the problem. I know this example is contrived, but the same scenario exists in my main project. I am using MutableSharedFlow as a queue implementation with single-threaded execution, with the help of a Mutex.
ExampleViewModel
class ExampleViewModel : ViewModel() {
val serviceNumber = ServiceNumber()
val serviceNumberEventFlow = serviceNumber.eventFlow
val mutex = Mutex()
var delayCounter = 0
suspend fun addItem(itemOne: Int = 2, itemTwo: Int = 2): Add {
return mutex.queueWithTimeout("add") {
serviceNumberEventFlow.onSubscription {
serviceNumber.add(itemOne, itemTwo)
delayCounter++
if (delayCounter == 1) {
delay(1000)
Log.w("Delay ", "Delay Started")
serviceNumber.add(8, 8)
}
}.firstOrNull {
it is Add
} as Add? ?: Add("No value")
}
}
suspend fun subItem(itemOne: Int = 2, itemTwo: Int = 2): Sub {
return mutex.queueWithTimeout("sub") {
serviceNumberEventFlow.onSubscription {
serviceNumber.sub(itemOne, itemTwo)
}.firstOrNull {
it is Sub
} as Sub? ?: Sub("No value")
}
}
private suspend fun <T> Mutex.queueWithTimeout(
action: String, timeout: Long = 5000L, block: suspend CoroutineScope.() -> T
): T {
return try {
withLock {
return@withLock withTimeout<T>(timeMillis = timeout, block = block)
}
} catch (e: Exception) {
Log.e("Wrong", " $e Timeout on BLE call: $action")
throw e
}
}
}
class ServiceNumber : Number {
val eventFlow = MutableSharedFlow<Event>(extraBufferCapacity = 50)
private val scope: CoroutineScope = CoroutineScope(SupervisorJob() + Dispatchers.IO)
override fun add(itemOne: Int, itemTwo: Int) {
Log.i("ServiceNumber", " Add event trigger with $itemOne -- $itemTwo")
eventFlow.emitEvent(Add("Item added ${itemOne + itemTwo}"))
}
override fun sub(itemOne: Int, itemTwo: Int) {
eventFlow.emitEvent(Sub("Item subtract ${itemOne - itemTwo}"))
}
private fun <T> MutableSharedFlow<T>.emitEvent(event: T) {
scope.launch { emit(event) }
}
}
interface Number {
fun add(itemOne: Int, itemTwo: Int)
fun sub(itemOne: Int, itemTwo: Int)
}
sealed class Event
data class Add(val item: String) : Event()
data class Sub(val item: String) : Event()
MainActivity.kt
class MainActivity : AppCompatActivity() {
private val viewModel: ExampleViewModel by viewModels()
override fun onCreate(savedInstanceState: Bundle?) {
super.onCreate(savedInstanceState)
setContent {
Theme {
Column {
Button(onClick = {
lifecycleScope.launchWhenCreated {
withContext(Dispatchers.IO) {
val result = viewModel.addItem()
Log.e("Result", "$result")
}
}
}) {
Text("Add")
}
Button(onClick = {
lifecycleScope.launchWhenCreated {
withContext(Dispatchers.IO) {
val result = viewModel.subItem()
Log.e("Result", "$result")
}
}
}) {
Text("Sub")
}
}
}
}
}
}
@Composable
fun Theme(content: @Composable () -> Unit) {
MaterialTheme(content = content)
}
Problem
This example is a simple add and subtract of two numbers. When I click the Add button the first time, viewModel.addItem(...) -> ... -> ServiceNumber.add() is triggered and emits the value, and we can see the log in the console. Inside the Add button handler I also added a delay and trigger ServiceNumber.add() again, to see whether onSubscription would be retriggered or not. MutableSharedFlow emits the value, as I can see in the log, but the onSubscription block is not called. I don't understand what the problem is here.
onSubscription is an operator, so it creates a new copy of your shared flow. The lambda will only be run when there are new collectors on this new flow. The only time you collect this new flow is when you call firstOrNull() on it, a terminal operator that collects a single value.
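A minimal, self-contained illustration of that behaviour (a sketch, assuming kotlinx-coroutines; onSubscription's block can emit straight to the new collector, and it runs once per collection of the returned flow, not once per upstream emission):
val shared = MutableSharedFlow<Int>(extraBufferCapacity = 1)
val wrapped = shared.onSubscription { emit(-1) }
runBlocking {
    println(wrapped.first()) // -1: the block ran for this collection
    println(wrapped.first()) // -1 again: a second collection reruns the block
}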

How to get the step count with the Google Fit API / SensorManager

I am trying to get the step count and heart rate (BPM) from the Google Fit API. I am aware that getHistoryClient and getSensorClient are deprecated.
How can I replace these methods with the SensorManager? I do not seem to understand the documentation.
class MainActivity : AppCompatActivity(), SensorEventListener {
private val listOfPermissionNotGranted = mutableListOf<String>()
lateinit var permissionsLauncher: ActivityResultLauncher<Array<String>>
lateinit var signInLauncher: ActivityResultLauncher<Intent>
private val sensorManager: SensorManager by lazy { getSystemService(SENSOR_SERVICE) as SensorManager }
private val accelerometer by lazy { sensorManager.getDefaultSensor(Sensor.TYPE_ACCELEROMETER) }
lateinit var googleApiClient: GoogleSignInClient
@RequiresApi(Build.VERSION_CODES.Q)
@OptIn(ExperimentalTime::class)
override fun onCreate(savedInstanceState: Bundle?) {
super.onCreate(savedInstanceState)
setContentView(R.layout.activity_main)
signInLauncher = registerForActivityResult(StartActivityForResult()) { result ->
if (result.resultCode == RESULT_OK) {
val task = GoogleSignIn.getSignedInAccountFromIntent(result.data)
if (task.isSuccessful) {
Toast.makeText(this, "Signed in", Toast.LENGTH_LONG).show()
Log.d("MainUser", "${task.result}")
val fitnessOptions = FitnessOptions.builder()
.addDataType(DataType.TYPE_STEP_COUNT_DELTA, FitnessOptions.ACCESS_READ)
.addDataType(
DataType.AGGREGATE_STEP_COUNT_DELTA,
FitnessOptions.ACCESS_READ
)
.build()
Fitness.getHistoryClient(this, GoogleSignIn.getAccountForExtension(this, fitnessOptions))
.readDailyTotal(DataType.TYPE_STEP_COUNT_DELTA)
.addOnSuccessListener { result ->
val totalSteps =
result.dataPoints.firstOrNull()?.getValue(Field.FIELD_STEPS)?.asInt() ?: 0
// Do something with totalSteps
Log.i("Main", "total $totalSteps")
}
.addOnFailureListener { e ->
Log.i("Main", "There was a problem getting steps.", e)
}
// val googleSignInAccount =
// GoogleSignIn.getAccountForExtension(this, fitnessOptions)
}
}
}
permissionsLauncher = registerForActivityResult(RequestMultiplePermissions()) { permissionMap ->
val permitted = permissionMap.all { it.value }
if (permitted) {
val gso = GoogleSignInOptions.Builder(GoogleSignInOptions.DEFAULT_SIGN_IN)
.requestScopes(Fitness.SCOPE_ACTIVITY_READ)
.build()
googleApiClient = GoogleSignIn.getClient(this, gso)
val signInIntent = googleApiClient.signInIntent
signInLauncher.launch(signInIntent)
}
}
permissionsLauncher.launch(
arrayOf(
Manifest.permission.BODY_SENSORS,
Manifest.permission.ACCESS_FINE_LOCATION,
Manifest.permission.ACTIVITY_RECOGNITION
)
)
listOfPermissionNotGranted.forEach { permission ->
Toast.makeText(this, "$permission is not granted", Toast.LENGTH_LONG).show()
}
}
override fun onResume() {
super.onResume()
sensorManager.registerListener(this, accelerometer, SensorManager.SENSOR_DELAY_NORMAL)
}
override fun onPause() {
super.onPause()
sensorManager.unregisterListener(this)
}
override fun onSensorChanged(event: SensorEvent?) {
if (event?.sensor?.type == Sensor.TYPE_STEP_DETECTOR) {
Log.d("MainSensorStep detected", "${event.values[0]}")
Toast.makeText(this, "Step is detected ${event.values[0]}", Toast.LENGTH_LONG).show()
} else if (event?.sensor?.type == Sensor.TYPE_STEP_COUNTER) {
Log.d("MainSensorStep counting", "${event.values[0]}")
Toast.makeText(this, "Step is counting ${event.values[0]}", Toast.LENGTH_LONG).show()
}
}
override fun onAccuracyChanged(sensor: Sensor?, p1: Int) {
Log.d("MainSensor", "$sensor")
}
}
You might want to go over this answer in a separate but related SO post to get a better understanding of the different approaches.
If you need to collate processed step information from the user, then Google Fit is the better approach. You can see how to get the same step count as the Google Fit app using the cited example code in the official docs: https://developers.google.com/fit/scenarios/read-daily-step-total#android
I see that you're also using the Android API for Fit; you might want to explore Android Health Connect too, as the Android API for Fit will be deprecated soon.
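If you do go the SensorManager route, a minimal sketch (note: the code in the question registers the accelerometer, yet onSensorChanged checks for step events, so the step sensors themselves need to be registered; also, TYPE_STEP_COUNTER reports steps since the last device boot, so a baseline must be persisted to derive daily counts):
val stepCounter = sensorManager.getDefaultSensor(Sensor.TYPE_STEP_COUNTER)
val stepDetector = sensorManager.getDefaultSensor(Sensor.TYPE_STEP_DETECTOR)
// getDefaultSensor returns null if the device lacks the sensor
stepCounter?.let { sensorManager.registerListener(this, it, SensorManager.SENSOR_DELAY_NORMAL) }
stepDetector?.let { sensorManager.registerListener(this, it, SensorManager.SENSOR_DELAY_NORMAL) }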

Kotlin " surfaceChanged", "surfaceDestroyed",SurfaceCreated", overrides nothing

I am using a SurfaceView inside a fragment; however, the methods of the SurfaceHolder.Callback (surfaceCreated, surfaceChanged, and surfaceDestroyed) seem to override nothing, and I also get the following error: "object is not abstract and does not implement abstract member".
Below is the error:
e: /Users/malorimorow/AndroidStudioProjects/CapstoneProject/app/src/main/java/com/example/capstoneproject/main/ScannerFragment.kt: (158, 32): Object is not abstract and does not implement abstract member public abstract fun surfaceChanged(p0: SurfaceHolder, p1: Int, p2: Int, p3: Int): Unit defined in android.view.SurfaceHolder.Callback
Below is the source code; you can find the error in the overlay.apply block. I am new to Kotlin and would greatly appreciate the help.
class ScannerFragment : Fragment() {
companion object {
fun newInstance() = ScannerFragment()
// We only need to analyze the part of the image that has text, so we set crop percentages
// to avoid analyzing the entire image from the live camera feed.
const val DESIRED_WIDTH_CROP_PERCENT = 8
const val DESIRED_HEIGHT_CROP_PERCENT = 74
// This is an arbitrary number we are using to keep track of the permission
// request. Where an app has multiple contexts for requesting permission,
// this can help differentiate the different contexts
private const val REQUEST_CODE_PERMISSIONS = 10
// This is an array of all the permission specified in the manifest
private val REQUIRED_PERMISSIONS = arrayOf(Manifest.permission.CAMERA)
private const val RATIO_4_3_VALUE = 4.0 / 3.0
private const val RATIO_16_9_VALUE = 16.0 / 9.0
private const val TAG = "ScannerFragment"
}
private var displayId: Int = -1
private val viewModel: MainViewModel by viewModels()
private var cameraProvider: ProcessCameraProvider? = null
private var camera: Camera? = null
private var imageAnalyzer: ImageAnalysis? = null
private lateinit var container: ConstraintLayout
private lateinit var viewFinder: PreviewView
/** Blocking camera operations are performed using this executor */
private lateinit var cameraExecutor: ExecutorService
private lateinit var scopedExecutor: ScopedExecutor
override fun onCreateView(
inflater: LayoutInflater, container: ViewGroup?,
savedInstanceState: Bundle?
): View {
return inflater.inflate(R.layout.fragment_scanner, container, false)
}
override fun onDestroyView() {
super.onDestroyView()
// Shut down our background executor
cameraExecutor.shutdown()
scopedExecutor.shutdown()
}
override fun onViewCreated(view: View, savedInstanceState: Bundle?) {
super.onViewCreated(view, savedInstanceState)
container = view as ConstraintLayout
viewFinder = container.findViewById(R.id.viewfinder)
// Initialize our background executor
cameraExecutor = Executors.newSingleThreadExecutor()
scopedExecutor = ScopedExecutor(cameraExecutor)
// Request camera permissions
if (allPermissionsGranted()) {
// Wait for the views to be properly laid out
viewFinder.post {
// Keep track of the display in which this view is attached
displayId = viewFinder.display.displayId
// Set up the camera and its use cases
setUpCamera()
}
} else {
requestPermissions(
REQUIRED_PERMISSIONS,
REQUEST_CODE_PERMISSIONS
)
}
// Get available language list and set up the target language spinner
// with default selections.
val adapter = ArrayAdapter(
requireContext(),
android.R.layout.simple_spinner_dropdown_item, viewModel.availableLanguages
)
targetLangSelector.adapter = adapter
targetLangSelector.setSelection(adapter.getPosition(Language("en")))
targetLangSelector.onItemSelectedListener = object : AdapterView.OnItemSelectedListener {
override fun onItemSelected(
parent: AdapterView<*>,
view: View?,
position: Int,
id: Long
) {
viewModel.targetLang.value = adapter.getItem(position)
}
override fun onNothingSelected(parent: AdapterView<*>) {}
}
viewModel.sourceLang.observe(viewLifecycleOwner, Observer { srcLang.text = it.displayName })
viewModel.translatedText.observe(viewLifecycleOwner, Observer { resultOrError ->
resultOrError?.let {
if (it.error != null) {
translatedText.error = resultOrError.error?.localizedMessage
} else {
translatedText.text = resultOrError.result
}
}
})
viewModel.modelDownloading.observe(viewLifecycleOwner, Observer { isDownloading ->
progressBar.visibility = if (isDownloading) {
View.VISIBLE
} else {
View.INVISIBLE
}
progressText.visibility = progressBar.visibility
})
overlay.apply {
setZOrderOnTop(true)
holder.setFormat(PixelFormat.TRANSPARENT)
holder.addCallback(object : SurfaceHolder.Callback {
override fun surfaceChanged(
holder: SurfaceHolder?,
format: Int,
width: Int,
height: Int
) {
}
override fun surfaceDestroyed(holder: SurfaceHolder?) {
}
override fun surfaceCreated(holder: SurfaceHolder?) {
holder?.let { drawOverlay(it,
DESIRED_HEIGHT_CROP_PERCENT,
DESIRED_WIDTH_CROP_PERCENT
) }
}
})
}
}
/** Initialize CameraX, and prepare to bind the camera use cases */
private fun setUpCamera() {
val cameraProviderFuture = ProcessCameraProvider.getInstance(requireContext())
cameraProviderFuture.addListener(Runnable {
// CameraProvider
cameraProvider = cameraProviderFuture.get()
// Build and bind the camera use cases
bindCameraUseCases()
}, ContextCompat.getMainExecutor(requireContext()))
}
private fun bindCameraUseCases() {
val cameraProvider = cameraProvider
?: throw IllegalStateException("Camera initialization failed.")
// Get screen metrics used to setup camera for full screen resolution
val metrics = DisplayMetrics().also { viewFinder.display.getRealMetrics(it) }
Log.d(TAG, "Screen metrics: ${metrics.widthPixels} x ${metrics.heightPixels}")
val screenAspectRatio = aspectRatio(metrics.widthPixels, metrics.heightPixels)
Log.d(TAG, "Preview aspect ratio: $screenAspectRatio")
val rotation = viewFinder.display.rotation
val preview = Preview.Builder()
.setTargetAspectRatio(screenAspectRatio)
.setTargetRotation(rotation)
.build()
// Build the image analysis use case and instantiate our analyzer
imageAnalyzer = ImageAnalysis.Builder()
// We request aspect ratio but no resolution
.setTargetAspectRatio(screenAspectRatio)
.setTargetRotation(rotation)
.setBackpressureStrategy(ImageAnalysis.STRATEGY_KEEP_ONLY_LATEST)
.build()
.also {
it.setAnalyzer(
cameraExecutor
, TextAnalyzer(
requireContext(),
lifecycle,
viewModel.sourceText,
viewModel.imageCropPercentages
)
)
}
viewModel.sourceText.observe(viewLifecycleOwner, Observer { srcText.text = it })
viewModel.imageCropPercentages.observe(viewLifecycleOwner,
Observer { drawOverlay(overlay.holder, it.first, it.second) })
// Select back camera since text detection does not work with front camera
val cameraSelector =
CameraSelector.Builder().requireLensFacing(CameraSelector.LENS_FACING_BACK).build()
try {
// Unbind use cases before rebinding
cameraProvider.unbindAll()
// Bind use cases to camera
camera = cameraProvider.bindToLifecycle(
this, cameraSelector, preview, imageAnalyzer
)
preview.setSurfaceProvider(viewFinder.createSurfaceProvider())
} catch (exc: IllegalStateException) {
Log.e(TAG, "Use case binding failed. This must be running on main thread.", exc)
}
}
private fun drawOverlay(
holder: SurfaceHolder,
heightCropPercent: Int,
widthCropPercent: Int
) {
val canvas = holder.lockCanvas()
val bgPaint = Paint().apply {
alpha = 140
}
canvas.drawPaint(bgPaint)
val rectPaint = Paint()
rectPaint.xfermode = PorterDuffXfermode(PorterDuff.Mode.CLEAR)
rectPaint.style = Paint.Style.FILL
rectPaint.color = Color.WHITE
val outlinePaint = Paint()
outlinePaint.style = Paint.Style.STROKE
outlinePaint.color = Color.WHITE
outlinePaint.strokeWidth = 4f
val surfaceWidth = holder.surfaceFrame.width()
val surfaceHeight = holder.surfaceFrame.height()
val cornerRadius = 25f
// Set rect centered in frame
val rectTop = surfaceHeight * heightCropPercent / 2 / 100f
val rectLeft = surfaceWidth * widthCropPercent / 2 / 100f
val rectRight = surfaceWidth * (1 - widthCropPercent / 2 / 100f)
val rectBottom = surfaceHeight * (1 - heightCropPercent / 2 / 100f)
val rect = RectF(rectLeft, rectTop, rectRight, rectBottom)
canvas.drawRoundRect(
rect, cornerRadius, cornerRadius, rectPaint
)
canvas.drawRoundRect(
rect, cornerRadius, cornerRadius, outlinePaint
)
val textPaint = Paint()
textPaint.color = Color.WHITE
textPaint.textSize = 50F
val overlayText = getString(R.string.overlay_help)
val textBounds = Rect()
textPaint.getTextBounds(overlayText, 0, overlayText.length, textBounds)
val textX = (surfaceWidth - textBounds.width()) / 2f
val textY = rectBottom + textBounds.height() + 15f // put text below rect and 15f padding
canvas.drawText(getString(R.string.overlay_help), textX, textY, textPaint)
holder.unlockCanvasAndPost(canvas)
}
/**
* [androidx.camera.core.ImageAnalysisConfig] requires enum value of
* [androidx.camera.core.AspectRatio]. Currently it has values of 4:3 & 16:9.
*
* Detecting the most suitable ratio for dimensions provided in @params by comparing absolute
* of preview ratio to one of the provided values.
*
* @param width - preview width
* @param height - preview height
* @return suitable aspect ratio
*/
private fun aspectRatio(width: Int, height: Int): Int {
val previewRatio = ln(max(width, height).toDouble() / min(width, height))
if (abs(previewRatio - ln(RATIO_4_3_VALUE))
<= abs(previewRatio - ln(RATIO_16_9_VALUE))
) {
return AspectRatio.RATIO_4_3
}
return AspectRatio.RATIO_16_9
}
/**
* Process result from permission request dialog box, has the request
* been granted? If yes, start Camera. Otherwise display a toast
*/
override fun onRequestPermissionsResult(
requestCode: Int, permissions: Array<String>, grantResults: IntArray
) {
if (requestCode == REQUEST_CODE_PERMISSIONS) {
if (allPermissionsGranted()) {
viewFinder.post {
// Keep track of the display in which this view is attached
displayId = viewFinder.display.displayId
// Set up the camera and its use cases
setUpCamera()
}
} else {
Toast.makeText(
context,
"Permissions not granted by the user.",
Toast.LENGTH_SHORT
).show()
}
}
}
/**
* Check if all permission specified in the manifest have been granted
*/
private fun allPermissionsGranted() = REQUIRED_PERMISSIONS.all {
ContextCompat.checkSelfPermission(
requireContext(), it
) == PackageManager.PERMISSION_GRANTED
}
}
It's probably because of a version conflict. Just remove all 3 methods and re-implement them with ALT+Enter.
As the error says, surfaceChanged is defined as follows:
fun surfaceChanged(p0: SurfaceHolder, p1: Int, p2: Int, p3: Int): Unit
But when you implement it, you are making the SurfaceHolder parameter nullable. Use non-nullable parameters:
holder.addCallback(object : SurfaceHolder.Callback {
override fun surfaceChanged(
holder: SurfaceHolder,
format: Int,
width: Int,
height: Int
) {
}
override fun surfaceDestroyed(holder: SurfaceHolder) {
}
override fun surfaceCreated(holder: SurfaceHolder) {
// holder is non-null here, so no safe call is needed
drawOverlay(holder,
DESIRED_HEIGHT_CROP_PERCENT,
DESIRED_WIDTH_CROP_PERCENT
)
}
})

CameraX Analyzer glitching preview

I am using the CameraX Analyzer use case to detect circles in an image with OpenCV Hough Circles, but even though I believe I am doing the analysis on a separate thread, the camera preview sometimes gets really slow when too many circles are detected.
I get that my implementation of the detector isn't the most efficient, but shouldn't this processing time be reflected only in the analysis rate, and not in the preview?
I got a great part of the code from crysxd's CameraX-Object-Tracking.
This is my MainActivity:
class MainActivity : AppCompatActivity() {
private lateinit var overlayTextureView: DetectionOverlayView
private val camera
get() = supportFragmentManager.findFragmentById(R.id.cameraFragment) as CameraFragment
override fun onCreate(savedInstanceState: Bundle?) {
OpenCVLoader.initDebug()
super.onCreate(savedInstanceState)
setContentView(R.layout.activity_main)
if (Timber.treeCount() == 0) {
Timber.plant(Timber.DebugTree())
}
overlayTextureView = findViewById(R.id.detectionOverlayView)
camera.imageAnalyzer = ViewModelProviders.of(this).get(HoughPupilDetector::class.java)
}
}
This is my CameraFragment initialization:
open class CameraFragment : Fragment() {
var cameraRunning = false
private set
var imageAnalyzer: ThreadedImageAnalyzer? = null
set(value) {
field = value
if (cameraRunning) {
startCamera()
}
}
override fun onCreateView(inflater: LayoutInflater, container: ViewGroup?, savedInstanceState: Bundle?): View =
inflater.inflate(R.layout.fragment_camera, container, false)
override fun onViewCreated(view: View, savedInstanceState: Bundle?) {
super.onViewCreated(view, savedInstanceState)
CameraPermissionHelper().requestCameraPermission(childFragmentManager) {
if (it) {
startCamera()
} else {
activity?.finish()
}
}
}
override fun onDestroyView() {
super.onDestroyView()
if (cameraRunning) {
CameraX.unbindAll()
cameraRunning = false
Timber.i("Stopping camera")
}
}
private fun startCamera() {
preview.post {
try {
val usesCases = mutableListOf<UseCase>()
// Make sure that there are no other use cases bound to CameraX
CameraX.unbindAll()
// Create configuration object for the viewfinder use case
val previewConfig = onCreatePreviewConfigBuilder().build()
usesCases.add(AutoFitPreviewBuilder.build(previewConfig, preview))
// Setup image analysis pipeline that computes average pixel luminance in real time
if (imageAnalyzer != null) {
val analyzerConfig = onCreateAnalyzerConfigBuilder().build()
usesCases.add(ImageAnalysis(analyzerConfig).apply {
analyzer = imageAnalyzer
})
}
// Bind use cases to lifecycle
CameraX.bindToLifecycle(this, *usesCases.toTypedArray())
cameraRunning = true
Timber.i("Started camera with useCases=$usesCases")
} catch (e: Exception) {
Timber.e(e)
AlertDialog.Builder(context)
.setMessage(getString(R.string.camera_error))
.setPositiveButton(android.R.string.ok) { _, _ ->
activity?.finish()
}
.create()
}
}
}
@Suppress("MemberVisibilityCanBePrivate")
protected open fun onCreateAnalyzerConfigBuilder() = ImageAnalysisConfig.Builder().apply {
// Use a worker thread for image analysis to prevent preview glitches
setCallbackHandler(imageAnalyzer!!.getHandler())
// In our analysis, we care more about the latest image than analyzing *every* image
setImageReaderMode(ImageAnalysis.ImageReaderMode.ACQUIRE_LATEST_IMAGE)
setTargetAspectRatio(Rational(1, 1))
setTargetResolution(Size(preview.width, preview.height))
}
@Suppress("MemberVisibilityCanBePrivate")
protected open fun onCreatePreviewConfigBuilder() = PreviewConfig.Builder().apply {
setTargetAspectRatio(Rational(1, 1))
setTargetResolution(Size(preview.width, preview.height))
}
}
This is my analyzer interface initialization:
abstract class PupilDetector(listener: PupilDetectionListener? = null) : ViewModel(), ThreadedImageAnalyzer {
private val listeners = ArrayList<PupilDetectionListener>().apply { listener?.let { add(it) } }
private val isBusy = AtomicBoolean(false)
private val handlerThread = HandlerThread("PupilDetector").apply { start() }
fun addListener(listener: PupilDetectionListener) = listeners.add(listener)
override fun analyze(image: ImageProxy, rotationDegrees: Int) {
if (isBusy.compareAndSet(false, true)) {
Timber.d("Running analysis...")
val pupil = detect(image, rotationDegrees)
Timber.d("Analysis done.")
isBusy.set(false)
// listeners.forEach { it(pupil) }
}
}
override fun getHandler() = Handler(handlerThread.looper)
abstract fun detect(image: ImageProxy, rotationDegrees: Int): Pupil?
}
And this is my Hough Circles analyzer:
class HoughPupilDetector(listener: PupilDetectionListener? = null): PupilDetector(listener) {
val maxCircles = 5
override fun detect(image: ImageProxy, rotationDegrees: Int): Pupil? {
val bitmap = image.toBitmap(rotationDegrees)
val circles = detectCircles(bitmap)
if(circles.isNotEmpty()) {
return Pupil(circles[0].point, circles[0].r)
} else {
return null
}
}
private fun detectCircles(bitmap: Bitmap): List<Circle> {
// Generate Mat object
val img = Mat()
Utils.bitmapToMat(bitmap, img)
// Detect circles
val cannyUpperThreshold = 100.0
val minRadius = 10
val maxRadius = 400
val accumulator = 100.0
val circles = Mat()
Imgproc.cvtColor(img, img, Imgproc.COLOR_RGB2GRAY)
Imgproc.GaussianBlur(img, img, org.opencv.core.Size(3.0, 3.0), 1.0)
Imgproc.HoughCircles(img, circles, Imgproc.CV_HOUGH_GRADIENT,
2.0, 2.0 / 8.0, cannyUpperThreshold, accumulator,
minRadius, maxRadius)
Imgproc.cvtColor(img, img, Imgproc.COLOR_GRAY2BGR)
// Convert Mat to list of circles
val result = toCircles(circles)
// Return detection
return result
}
private fun toCircles(circles: Mat): List<Circle>{
if (circles.cols() > 0){
return (0 until circles.cols().coerceAtMost(maxCircles)).map {
val vCircle = circles.get(0, it)
val pt = Point(vCircle[0].toInt(), vCircle[1].toInt())
val radius = Math.round(vCircle[2]).toInt()
// return circle
Circle(pt, radius)
}
} else {
return emptyList()
}
}
}
I updated the CameraX dependencies from alpha01 to alpha05 and the glitches stopped happening.
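For anyone who still sees slowdowns after updating: heavy per-frame work like HoughCircles can be made much cheaper by downscaling the frame before detection (a sketch using OpenCV's Imgproc.resize; detected circle coordinates and radii must then be scaled back up by the same factor):
private fun downscale(src: Mat, factor: Double = 0.5): Mat {
    val dst = Mat()
    // INTER_AREA is the usual interpolation choice when shrinking
    Imgproc.resize(src, dst, org.opencv.core.Size(), factor, factor, Imgproc.INTER_AREA)
    return dst
}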

Can I record video with CameraX (Android Jetpack)?

Google has released the new CameraX library as part of Jetpack. It looks great for taking pictures, but my use case also requires making videos. I tried googling for that, but couldn't find anything.
So, is it possible to record videos with the CameraX Jetpack library?
Yes, we can record video using CameraX. I implemented it myself with the help of the GitHub demo for CameraX. Please refer to the code below; it may help you.
Config for video in CameraX:
val videoCaptureConfig = VideoCaptureConfig.Builder().apply {
setLensFacing(lensFacing)
setTargetAspectRatio(screenAspectRatio)
setTargetRotation(viewFinder.display.rotation)
}.build()
videoCapture = VideoCapture(videoCaptureConfig)
CameraX.bindToLifecycle(this, preview, imageCapture, videoCapture)
To Start video recording:
videoCapture?.startRecording(videoFile, object : VideoCapture.OnVideoSavedListener {
override fun onVideoSaved(file: File?) {
Log.i(javaClass.simpleName, "Video File : $file")
}
override fun onError(useCaseError: VideoCapture.UseCaseError?, message: String?, cause: Throwable?) {
Log.i(javaClass.simpleName, "Video Error: $message")
}
})
To Stop video recording:
videoCapture?.stopRecording()
Same above I have mentioned in Github issue comment: https://github.com/android/camera/issues/2#issuecomment-490773932
Note: your implementation of video recording with CameraX may differ, because the code above was developed by me with no reference other than the GitHub demo.
Please check the important comment by Oscar Wahltinez on this answer, as of 14 May 2019.
This is my solution
//Versions in Gradle
def camerax_version = "1.0.0-beta06"
def camera_extensions = "1.0.0-alpha13"
private lateinit var videoCapture: VideoCapture
private lateinit var viewFinder: PreviewView
private lateinit var outputDirectory: File
private var lensFacing: Int = CameraSelector.LENS_FACING_FRONT
private val executor = Executors.newSingleThreadExecutor()
private var isRecording = false
private var camera: Camera? = null
private lateinit var cameraProviderFuture: ListenableFuture<ProcessCameraProvider>
//onCreate
viewFinder = preview_video_view
runWithPermissions(*permissions) {
startCamera(view.context)
initClicks()
}
#SuppressLint("RestrictedApi", "UnsafeExperimentalUsageError")
private fun startCamera(context: Context) {
outputDirectory = getOutputDirectory(context)
cameraProviderFuture = ProcessCameraProvider.getInstance(context)
val cameraSelector = CameraSelector.Builder().requireLensFacing(lensFacing).build()
// Create a configuration object for the video use case
val videoCaptureConfig = VideoCaptureConfig.Builder().apply {
setTargetRotation(viewFinder.display.rotation)
setCameraSelector(cameraSelector)
}
//CameraX.initialize(context, this.cameraXConfig)
videoCapture = VideoCapture(videoCaptureConfig.useCaseConfig)
val preview: Preview = Preview.Builder().apply {
setTargetAspectRatio(AspectRatio.RATIO_16_9)
setTargetRotation(viewFinder.display.rotation)
}.build()
preview.setSurfaceProvider(viewFinder.createSurfaceProvider())
cameraProviderFuture.addListener(Runnable {
val cameraProvider = cameraProviderFuture.get()
camera = cameraProvider.bindToLifecycle(
viewLifecycleOwner,
cameraSelector,
preview,
videoCapture
)
}, ContextCompat.getMainExecutor(context))
}
@SuppressLint("RestrictedApi")
private fun startRecording() {
val file = createFile(
outputDirectory,
FILENAME,
VIDEO_EXTENSION
)
videoCapture.startRecording(
file,
executor,
object : VideoCapture.OnVideoSavedCallback {
override fun onVideoSaved(file: File) {
Handler(Looper.getMainLooper()).post {
showMessage(file.name + " is saved")
}
}
override fun onError(videoCaptureError: Int, message: String, cause: Throwable?) {
Handler(Looper.getMainLooper()).post {
showMessage(videoCaptureError.toString() + " " + message)
}
}
}
)
}
@SuppressLint("RestrictedApi")
private fun stopRecording() {
videoCapture.stopRecording()
}
override fun getCameraXConfig(): CameraXConfig {
return Camera2Config.defaultConfig()
}
companion object {
private const val FILENAME = "yyyy_MM_dd_HH_mm_ss"
private const val VIDEO_EXTENSION = ".mp4"
private val permissions = arrayOf(
Manifest.permission.WRITE_EXTERNAL_STORAGE,
Manifest.permission.READ_EXTERNAL_STORAGE,
Manifest.permission.CAMERA,
Manifest.permission.RECORD_AUDIO
)
fun getOutputDirectory(context: Context): File {
val appContext = context.applicationContext
val mediaDir = appContext.externalMediaDirs.firstOrNull()?.let {
File(it, appContext.resources.getString(R.string.app_name)).apply { mkdirs() }
}
return if (mediaDir != null && mediaDir.exists()) mediaDir else appContext.filesDir
}
fun createFile(baseFolder: File, format: String, extension: String) =
File(baseFolder, SimpleDateFormat(format, Locale.US)
.format(System.currentTimeMillis()) + extension)
}
Updating Patel Pinkal's answer: with the beta release we can't use VideoCaptureConfig.Builder() anymore; instead you go with something like this:
videoCapture = VideoCapture.Builder().apply {
// init config here, e.g. setTargetRotation(viewFinder.display.rotation)
}.build()
As of April 2021
val videoCapture = VideoCapture.Builder().build()
val outputDirectory = getOutputDirectory()
fun getOutputDirectory(): File {
val mediaDir = externalMediaDirs.firstOrNull()?.let {
File(it, resources.getString(R.string.app_name)).apply { mkdirs() } }
return if (mediaDir != null && mediaDir.exists())
mediaDir else filesDir
}
@SuppressLint("RestrictedApi")
private fun startRecording() {
val videoFile = File(
outputDirectory,
SimpleDateFormat("yyyy-MM-dd-HH-mm-ss-SSS", Locale.US
).format(System.currentTimeMillis()) + ".mp4")
val outputOptions = VideoCapture.OutputFileOptions.Builder(videoFile).build()
videoCapture?.startRecording(outputOptions, ContextCompat.getMainExecutor(this), object: VideoCapture.OnVideoSavedCallback {
override fun onError(videoCaptureError: Int, message: String, cause: Throwable?) {
Log.e(TAG, "Video capture failed: $message")
}
override fun onVideoSaved(outputFileResults: VideoCapture.OutputFileResults) {
val savedUri = Uri.fromFile(videoFile)
val msg = "Video capture succeeded: $savedUri"
Toast.makeText(baseContext, msg, Toast.LENGTH_SHORT).show()
Log.d(TAG, msg)
}
})
}
@SuppressLint("RestrictedApi")
private fun stopRecording() {
videoCapture?.stopRecording()
}
Please note that this API is still restricted and is still subject to change.
The CameraX VideoCapture use case is implemented in camera-video, released since 1.1.0-alpha30. For details, refer to the following:
the official documentation
the reference doc
sample
YouTube video (current CameraX status)
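For reference, a minimal sketch of the newer camera-video API (hedged: names as of the 1.1.x line, where VideoCapture is parameterized by a Recorder; check the reference doc above for your exact version):
val recorder = Recorder.Builder()
    .setQualitySelector(QualitySelector.from(Quality.HD))
    .build()
val videoCapture = VideoCapture.withOutput(recorder)
// bind videoCapture with bindToLifecycle(...), then:
val recording = recorder.prepareRecording(context, FileOutputOptions.Builder(file).build())
    .start(ContextCompat.getMainExecutor(context)) { event ->
        if (event is VideoRecordEvent.Finalize) { /* recording finished */ }
    }
// later: recording.stop()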
Regarding Sergei's answer: videoCapture.startRecording() receives VideoCapture.OutputFileOptions instead of a file for camerax_version = '1.0.0-rc01', so it should be used as:
videoCapture.startRecording(
VideoCapture.OutputFileOptions.Builder(file).build(),
executor,
object : VideoCapture.OnVideoSavedCallback {
override fun onVideoSaved(outputFileResults: VideoCapture.OutputFileResults) {
TODO("Not yet implemented")
}
override fun onError(videoCaptureError: Int, message: String, cause: Throwable?) {
TODO("Not yet implemented")
}
}
)
