How to zoom using gms.vision.CameraSource - android

I am using CameraSource to decode a barcode for my mobile app, everything is working perfectly but I would like to zoom the video from my camera.
I have been reading the documentation of CameraSource, but I can't find anything about zooming.
I am wondering if anyone knows how to apply zoom using google play api CameraSource, if it's not possible I will do without it.
Here is my activity code :
// Wire the barcode detector to the camera preview (activity onCreate body).
cameraView = findViewById(R.id.surfaceView)

// Restrict to the retail formats we actually scan; fewer formats = faster detection.
val detector = BarcodeDetector.Builder(applicationContext)
    .setBarcodeFormats(Barcode.EAN_13 or Barcode.EAN_8 or Barcode.UPC_A or Barcode.UPC_E)
    .build()

cameraSource = CameraSource.Builder(this, detector)
    .setAutoFocusEnabled(true)
    .build()

cameraView.holder.addCallback(object : SurfaceHolder.Callback {
    override fun surfaceCreated(holder: SurfaceHolder) {
        try {
            // Use the holder handed to the callback rather than reaching
            // back through the view — they are the same holder here, but
            // this avoids depending on the outer `surfaceView` reference.
            cameraSource.start(holder)
        } catch (ie: Exception) {
            // FIX: ie.message is nullable; Log.e(String, String) rejects null.
            Log.e("CAMERA SOURCE", ie.message ?: "Failed to start camera source")
        }
    }

    override fun surfaceChanged(
        holder: SurfaceHolder,
        format: Int,
        width: Int,
        height: Int
    ) {
        // No-op: CameraSource manages its own preview size.
    }

    override fun surfaceDestroyed(holder: SurfaceHolder) {
        cameraSource.stop()
    }
})

detector.setProcessor(object : Detector.Processor<Barcode> {
    override fun release() {}

    override fun receiveDetections(detections: Detections<Barcode>) {
        val barcodes = detections.detectedItems
        if (barcodes.size() != 0) {
            // Release the detector after the first hit so we don't fire
            // multiple intents for the same barcode.
            detector.release()
            // FIX: qualified this is written `this@Label` in Kotlin;
            // `this#BarcodeActivity` does not compile.
            val resultIntent = Intent(this@BarcodeActivity, AddFoodActivity::class.java)
            resultIntent.putExtra("EXTRA_BARCODE", barcodes.valueAt(0).rawValue)
            startActivity(resultIntent)
            finish()
        }
    }
})

Related

Can we do Speech Recognition and Video Capture in Android using camerax simultaneously?

I want to implement an app that can convert text to speech and record videos (with audio) simultaneously.
But when I call both functions, only one of them works (whichever was called most recently). Can anyone suggest some ways to implement these two together?
`
speechRecognizer = SpeechRecognizer.createSpeechRecognizer(activity)

// Free-form recognition in the device locale; also ask the recognizer to
// hand back the captured audio (undocumented GET_AUDIO extras).
val speechRecognizerIntent = Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH)
speechRecognizerIntent.putExtra(
    RecognizerIntent.EXTRA_LANGUAGE_MODEL,
    RecognizerIntent.LANGUAGE_MODEL_FREE_FORM,
)
// Keep listening through up to 10 s of silence before ending the session.
speechRecognizerIntent.putExtra(RecognizerIntent.EXTRA_SPEECH_INPUT_COMPLETE_SILENCE_LENGTH_MILLIS, 10000)
// speechRecognizerIntent.putExtra(RecognizerIntent.EXTRA_SPEECH_INPUT_MINIMUM_LENGTH_MILLIS,30000)
speechRecognizerIntent.putExtra(RecognizerIntent.EXTRA_LANGUAGE, Locale.getDefault())
speechRecognizerIntent.putExtra("android.speech.extra.GET_AUDIO_FORMAT", "audio/MP3")
speechRecognizerIntent.putExtra("android.speech.extra.GET_AUDIO", true)

speechRecognizer.setRecognitionListener(object : RecognitionListener {
    override fun onReadyForSpeech(bundle: Bundle?) {
        speechRecognizer.startListening(speechRecognizerIntent)
    }

    override fun onBeginningOfSpeech() {}

    override fun onRmsChanged(v: Float) {}

    override fun onBufferReceived(bytes: ByteArray?) {}

    override fun onEndOfSpeech() {
        // changing the color of our mic icon to
        // gray to indicate it is not listening
        // #FF6D6A6A
    }

    override fun onError(i: Int) {}

    override fun onResults(bundle: Bundle) {
    }

    override fun onPartialResults(bundle: Bundle) {
        val result = bundle.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION)
        if (result != null) {
            for (i in 0 until result.size) {
                // BUG FIX: the original added result[0] on every iteration,
                // ignoring the loop index; use result[i] instead.
                text.add(result[i])
                Log.d("Record", result[i])
                //binding.tvtext.text = binding.tvtext.text.toString() + result[0]
            }
        }
    }

    override fun onEvent(i: Int, bundle: Bundle?) {}
})
speechRecognizer.startListening(speechRecognizerIntent)
}
`

Playing my hls stream is freezing - why so?

Hello I'm try to play my hls stream http://81.25.234.43:8083/tv/1plus1HD/playlist.m3u8?wmsAuthSign=c2VydmVyX3RpbWU9Mi8xNi8yMDIxIDI6MTE6MjcgUE0maGFzaF92YWx1ZT1GYjhMdlVJdUh1OHVsQ05hSVFTWjBBPT0mdmFsaWRtaW51dGVzPTYwNDgwMCZpZD0zMQ==
via LibVLC android
api "org.videolan.android:libvlc-all:3.3.0-eap17"
but the video keeps freezing during playback, and I see this in the logs:
libvlc video output: picture is too late to be displayed (missing 53 ms)
My codes
/**
 * Thin wrapper around LibVLC's MediaPlayer that owns the library instance,
 * the per-media playback options and the video-output callback registration.
 */
class VlcMediaPlayer(context: Context) : IVLCVout.Callback {

    private val libVLCFactory = FactoryManager.getFactory(ILibVLCFactory.factoryId) as ILibVLCFactory
    private val factory = FactoryManager.getFactory(IMediaFactory.factoryId) as IMediaFactory

    // Options applied to every Media created in [setMedia].
    // NOTE(review): the LibVLC instance below is built from
    // VLCOptions.libOptions, not this list — confirm that split is
    // intentional. Also `--avcodec-hw=d3d11va` is a Windows (Direct3D 11)
    // decoder and presumably has no effect on Android — verify.
    private val options = ArrayList<String>().apply {
        add("-vvv")
        add("--avcodec-dr")
        add("--clock-jitter=1500")
        add("--live-caching=1500")
        add("--network-caching=1500")
        add("--file-caching=3000")
        add("--no-drop-late-frames")
        add("--no-skip-frames")
        add("--no-sout-smem-time-sync")
        add("--sout-mp4-faststart")
        add("--sout-x264-partitions=fast")
        add("--adaptive-logic=nearoptimal")
        add("--adaptive-use-access")
        add("--avcodec-threads=10")
        add("--sout-x264-psy")
        add("--aout=opensles")
        add("--demuxdump-append")
        add("--avcodec-hw=d3d11va")
    }

    private val sLibVLC = libVLCFactory.getFromOptions(context, VLCOptions.libOptions)

    private val mediaPlayer = MediaPlayer(sLibVLC).apply {
        // FIX: qualified this is `this@VlcMediaPlayer` in Kotlin;
        // `this#VlcMediaPlayer` does not compile.
        vlcVout.addCallback(this@VlcMediaPlayer)
    }

    override fun onSurfacesCreated(vlcVout: IVLCVout?) {
    }

    override fun onSurfacesDestroyed(vlcVout: IVLCVout?) {
    }

    // FIX: Kotlin annotations use `@`, not `#` (`#MainThread` in the original).
    @MainThread
    fun setVideoScale(scale: Float) {
        mediaPlayer.scale = scale
    }

    fun setVideoAspectRatio(aspect: String?) {
        mediaPlayer.aspectRatio = aspect
    }

    @MainThread
    fun setSurface(surface: Surface, holder: SurfaceHolder?) {
        // mediaPlayer.stop()
        //mediaPlayer.release()
        mediaPlayer.vlcVout.apply {
            setVideoSurface(surface, holder)
            attachViews(null)
        }
        //release()
    }

    fun setWindowsSize(width: Int, height: Int) {
        mediaPlayer.vlcVout.setWindowSize(width, height)
    }

    /** Builds a Media from [uri], applies [options] to it and hands it to the player. */
    fun setMedia(uri: Uri) {
        val media = factory.getFromUri(sLibVLC, uri)
        options.forEach {
            media.addOption(it)
        }
        mediaPlayer.media = media
    }

    fun play() = mediaPlayer.play()

    fun stop() = mediaPlayer.stop()

    fun release() {
        mediaPlayer.release()
    }
}

Stop mobile vision api after first detection

I'm trying to detect a QR code with the google mobile vision api.
The problem is that after detecting a QR code, the API continuously calls the "receiveDetections" function as long as the QR code is visible to the camera.
I need to stop after the first detection and send the result to my server to validate this code.
How can I stop the process after the first detection?
override fun onCreate(savedInstanceState: Bundle?) {
    super.onCreate(savedInstanceState)
    setContentView(R.layout.qrcode_scanner)

    detector = BarcodeDetector.Builder(this).setBarcodeFormats(Barcode.ALL_FORMATS).build()
    detector.setProcessor(object : Detector.Processor<Barcode> {
        // FIX: the original snippet was missing this closing brace for
        // release(), so it did not compile.
        override fun release() {
        }

        override fun receiveDetections(detections: Detector.Detections<Barcode>?) {
            val barcodes = detections?.detectedItems
            // FIX: null-safe check instead of `barcodes!!`, which would
            // throw if the detector delivers no items.
            if (barcodes != null && barcodes.size() > 0) {
                Log.e("qrcode", barcodes.valueAt(0).displayValue)
                sendQRCodeToServer(url, barcodes.valueAt(0).displayValue)
            }
        }
    })

    cameraSource = CameraSource.Builder(this, detector).setRequestedPreviewSize(1920, 1080).setRequestedFps(25f).setAutoFocusEnabled(true).build()

    svBarcode.holder.addCallback(object : SurfaceHolder.Callback2 {
        override fun surfaceRedrawNeeded(holder: SurfaceHolder?) {
        }

        override fun surfaceChanged(holder: SurfaceHolder?, format: Int, width: Int, height: Int) {
        }

        override fun surfaceDestroyed(holder: SurfaceHolder?) {
            cameraSource.stop()
        }

        override fun surfaceCreated(holder: SurfaceHolder?) {
            // FIX: qualified this is `this@Scanner`; `this#Scanner` does not compile.
            if (ContextCompat.checkSelfPermission(this@Scanner,
                    Manifest.permission.CAMERA) == PackageManager.PERMISSION_GRANTED) {
                cameraSource.start(holder)
                startAnimation()
            } else ActivityCompat.requestPermissions(this@Scanner, arrayOf(Manifest.permission.CAMERA), 123)
        }
    })
}
}
// Tear down detector and camera when the activity goes away; release()
// frees native resources held by the mobile-vision detector.
override fun onDestroy() {
super.onDestroy()
detector.release()
cameraSource.stop()
cameraSource.release()
}
you can create function to stop camera,ex
// Stops the preview; intended to be called after the first successful scan
// so receiveDetections stops firing for the same code.
private fun stopCamera(){
cameraSource.stop()
}
detector = BarcodeDetector.Builder(this).setBarcodeFormats(Barcode.ALL_FORMATS).build()
detector.setProcessor(object : Detector.Processor<Barcode> {
    // FIX: the original snippet was missing this closing brace for
    // release(), so it did not compile.
    override fun release() {
    }

    override fun receiveDetections(detections: Detector.Detections<Barcode>?) {
        val barcodes = detections?.detectedItems
        // FIX: null-safe check instead of `barcodes!!`, which would throw
        // if no detections are delivered.
        if (barcodes != null && barcodes.size() > 0) {
            Log.e("qrcode", barcodes.valueAt(0).displayValue)
            sendQRCodeToServer(url, barcodes.valueAt(0).displayValue)
            //add this to stop camera
            stopCamera()
        }
    }
})
edit:
create variable for flag detection at first like
//to flag first detection
// One-shot guard: flips to false after the first barcode is reported so
// later frames with the same (still visible) code are ignored.
private var firstDetection=true
override fun onCreate(savedInstanceState: Bundle?) {
///.......
detector = BarcodeDetector.Builder(this).setBarcodeFormats(Barcode.ALL_FORMATS).build()
detector.setProcessor(object: Detector.Processor<Barcode> {
override fun release() {
}
override fun receiveDetections(detections: Detector.Detections<Barcode>?) {
val barcodes = detections?.detectedItems
//check firstDetection
// NOTE(review): `barcodes!!` will throw if detectedItems is null; the
// size check runs before the flag check, so the `!!` fires even when
// firstDetection is already false — consider a null-safe check.
if(barcodes!!.size()>0 && firstDetection) {
sendQRCodeToServer(url,barcodes.valueAt(0).displayValue)
//set firstDetection
firstDetection=false
}
}
})
}
}
Hope this helps.

takePicture require executor on CameraX (1.0.0-alpha06)

After update from
androidx.camera:camera-core:1.0.0-alpha03
to
androidx.camera:camera-core:1.0.0-alpha06
signatures of methods setTargetAspectRatio (in ImageCaptureConfig.Builder) and takePicture (in ImageCapture) have been changed.
The official documentation and the information on the web don't show how to use the new methods (how to specify an executor).
Code which broken after update:
...
val captureConfig = ImageCaptureConfig.Builder()
.setTargetAspectRatioCustom(Rational(1, 1)) //this method changed
.setFlashMode(flashMode)
.setLensFacing(lensFacing)
.build()
val capture = ImageCapture(captureConfig)
binding.takeAPhoto.setOnClickListener {
...
val imageFile = createTempFile(System.currentTimeMillis().toString(), ".jpg")
capture.takePicture(imageFile, object : ImageCapture.OnImageSavedListener { //this method also changed
override fun onImageSaved(file: File) {
...
}
override fun onError(useCaseError: ImageCapture.UseCaseError, message: String, cause: Throwable?) {
...
})
}
}
Does anyone have (or know where to find) example of how to use new methods?
Thanks in advance
The official Google Codelabs which obviously have been updated recently use: Executors.newSingleThreadExecutor()
Reference: https://codelabs.developers.google.com/codelabs/camerax-getting-started/#4
Edit: Since #kos's response also makes sense to me, I've added these two official Android docs references:
https://developer.android.com/reference/java/util/concurrent/Executors.html#newSingleThreadExecutor()
https://developer.android.com/reference/java/util/concurrent/Executors.html#newCachedThreadPool()
This way every reader of this topic can make up his/her own mind with respect to executors.
FURTHER EDIT: There are crucial API changes since 1.0.0-alpha07 so I studied some of the docs. There's a GitHub sample showing executor retrieval like so mainExecutor = ContextCompat.getMainExecutor(requireContext())(Source)
If some of you already implemented CameraX and it works fine, I'd definitely wait for the beta release as recommended by Android's release notes
I faced the same issue and resolved it on my side.
/**
 * CameraX demo activity: preview, crop-rect "zoom" buttons, and still
 * capture. Implements [Executor] so it can be passed straight to
 * takePicture(); [execute] runs callbacks on the calling thread.
 */
class MainActivity : AppCompatActivity(), Executor {

    // Crop region used to emulate zoom; adjusted in 100 px steps.
    private var right: Int = 0
    private var bottom: Int = 0
    private var left: Int = 0
    private var top: Int = 0

    private lateinit var preview: Preview
    private val REQUEST_CODE_PERMISSIONS = 10
    private val REQUIRED_PERMISSIONS = arrayOf(Manifest.permission.CAMERA)
    private lateinit var imageCapture: ImageCapture

    override fun onCreate(savedInstanceState: Bundle?) {
        super.onCreate(savedInstanceState)
        setContentView(R.layout.activity_main)

        if (allPermissionsGranted()) {
            viewFinder.post { startCamera() }
        } else {
            ActivityCompat.requestPermissions(this, REQUIRED_PERMISSIONS, REQUEST_CODE_PERMISSIONS)
        }

        viewFinder.addOnLayoutChangeListener { _, _, _, _, _, _, _, _, _ ->
            updateTransform()
        }

        buttonPlus.setOnClickListener {
            // NOTE(review): this guard permits exactly one zoom-in step
            // (right becomes 100 and then `right < 100` is false) —
            // confirm whether that is intended.
            if (right < 100) {
                right += 100
                bottom += 100
                left += 100
                top += 100
                val my = Rect(left, top, right, bottom)
                preview.zoom(my)
            }
        }

        buttonMinus.setOnClickListener {
            if (right > 0) {
                right -= 100
                bottom -= 100
                left -= 100
                top -= 100
                val my = Rect(left, top, right, bottom)
                preview.zoom(my)
            }
        }
    }

    // FIX: annotations use `@`, not `#` (`#SuppressLint` in the original).
    @SuppressLint("RestrictedApi")
    private fun startCamera() {
        val metrics = DisplayMetrics().also { viewFinder.display.getRealMetrics(it) }
        val screenAspectRatio = Rational(metrics.widthPixels, metrics.heightPixels)

        val previewConfig = PreviewConfig.Builder().apply {
            setTargetAspectRatioCustom(screenAspectRatio)
            setTargetRotation(viewFinder.display.rotation)
        }.build()

        preview = Preview(previewConfig)
        preview.setOnPreviewOutputUpdateListener {
            // Re-attach the TextureView so it picks up the new SurfaceTexture.
            val parent = viewFinder.parent as ViewGroup
            parent.removeView(viewFinder)
            parent.addView(viewFinder, 0)
            viewFinder.surfaceTexture = it.surfaceTexture
            updateTransform()
        }

        CameraX.bindToLifecycle(this, preview)
        captureImage()
    }

    @SuppressLint("RestrictedApi")
    private fun captureImage() {
        val imageCaptureConfig = ImageCaptureConfig.Builder()
            .apply {
                setTargetAspectRatioCustom(Rational(1, 1))
                setCaptureMode(ImageCapture.CaptureMode.MIN_LATENCY)
            }.build()
        imageCapture = ImageCapture(imageCaptureConfig)
        CameraX.bindToLifecycle(this, imageCapture)

        capture_button.setOnClickListener {
            val file = File(this.externalMediaDirs.first(), "${System.currentTimeMillis()}.jpg")
            // `this` is the Executor: callbacks run on the invoking thread.
            imageCapture.takePicture(file, this, object : ImageCapture.OnImageSavedListener {
                override fun onImageSaved(file: File) {
                    val msg = "Photo capture succeeded: ${file.absolutePath}"
                    Log.d("CameraXApp", msg)
                }

                override fun onError(imageCaptureError: ImageCapture.ImageCaptureError, message: String, cause: Throwable?) {
                    val msg = "Photo capture failed: $message"
                    Log.e("CameraXApp", msg)
                    cause?.printStackTrace()
                }
            })
        }
    }

    /** Direct executor: runs [command] synchronously on the caller's thread. */
    override fun execute(command: Runnable) {
        command.run()
    }

    // Keeps the TextureView upright regardless of display rotation.
    private fun updateTransform() {
        val matrix = Matrix()
        val centerX = viewFinder.width / 2f
        val centerY = viewFinder.height / 2f
        val rotationDegrees = when (viewFinder.display.rotation) {
            Surface.ROTATION_0 -> 0
            Surface.ROTATION_90 -> 90
            Surface.ROTATION_180 -> 180
            Surface.ROTATION_270 -> 270
            else -> return
        }
        matrix.postRotate(-rotationDegrees.toFloat(), centerX, centerY)
        viewFinder.setTransform(matrix)
    }

    override fun onRequestPermissionsResult(requestCode: Int, permissions: Array<String>, grantResults: IntArray) {
        if (requestCode == REQUEST_CODE_PERMISSIONS) {
            if (allPermissionsGranted()) {
                viewFinder.post { startCamera() }
            } else {
                Toast.makeText(this, "Permissions not granted by the user.", Toast.LENGTH_SHORT).show()
                finish()
            }
        }
    }

    private fun allPermissionsGranted() = REQUIRED_PERMISSIONS.all {
        ContextCompat.checkSelfPermission(baseContext, it) == PackageManager.PERMISSION_GRANTED
    }

    override fun onDestroy() {
        super.onDestroy()
        // FIX: imageCapture is lateinit and never set when the camera
        // permission is denied; the original `imageCapture.let { ... }`
        // would crash here with UninitializedPropertyAccessException.
        if (::imageCapture.isInitialized) {
            CameraX.unbind(imageCapture)
        }
    }
}
And the output is (As I print log in onImageSaved method)
Photo capture succeeded: /storage/emulated/0/Android/media/com.akshay.cameraxzoominoutdemo/1571052301192.jpg
It's working fine for me, try out this.
You can do it like this.
// `{ it.run() }` is a direct Executor: the save callbacks run on whichever
// thread invokes them. Fine for a quick demo; for UI updates prefer
// ContextCompat.getMainExecutor(context).
imageCapture.takePicture(file, { it.run() }, object : ImageCapture.OnImageSavedListener {
override fun onImageSaved(file: File) {}
override fun onError(useCaseError: ImageCapture.ImageCaptureError, message: String, cause: Throwable?) {}
})
Here is a change log for the changes in alpha06 : https://developer.android.com/jetpack/androidx/releases/camera
setTargetAspectRatio() method now takes AspectRatio enum with 4_3 or 16_9 value.
takePicture() method takes (file, metadata, executor, imageSavedListener) // could use executor as per your case/need. example is val executor = Executors.newSingleThreadExecutor()
instead of useCase.onPreviewOutputUpdateListener = use useCase.setOnPreviewOutputUpdateListener()
FYI : CameraX will be in Beta in Dec 2019
Inside your click listener call this function/method :
// Captures a still to a timestamped JPEG in the app's external media dir,
// delivering the result callbacks on the main thread.
private fun saveImage(){
    val photoFile = File(this.externalMediaDirs.first(), "${System.currentTimeMillis()}.jpg")
    val outputOptions = ImageCapture.OutputFileOptions.Builder(photoFile).build()

    imageCapture.takePicture(
        outputOptions,
        ContextCompat.getMainExecutor(this),
        object : ImageCapture.OnImageSavedCallback {
            override fun onImageSaved(fileB: ImageCapture.OutputFileResults) {
                // savedUri may be null when saving straight to a File.
                val msg = "${fileB.savedUri} - ${photoFile.absolutePath} - ${photoFile.toURI()}"
            }

            override fun onError(imageCaptureError: ImageCaptureException) {
                val msg = "Photo capture failed: ${imageCaptureError.toString()}"
            }
        }
    )
}
And the msg in onImageSaved will contain something like this :
null - /storage/emulated/0/Android/media/com.mua.camx/1607589430984.jpg - file:/storage/emulated/0/Android/media/com.mua.camx/1607589430984.jpg
You only have to run the command as shown below.
#Override
public void execute(Runnable command) {
command.run(); // <-- THIS IS NEEDED
}
CameraX provides with built-in executors and take picture can be implemented as below:
imgCaptureButton.setOnClickListener(new View.OnClickListener() {
    // FIX: annotations use `@`, not `#` (`#Override` / `#SuppressLint`).
    @Override
    @SuppressLint("RestrictedApi")
    public void onClick(View v) {
        // mainThreadExecutor() delivers the capture result on the main
        // thread; the image arrives in memory as an ImageProxy (no file).
        imgCap.takePicture(CameraXExecutors.mainThreadExecutor(), new ImageCapture.OnImageCapturedListener() {
            @Override
            public void onCaptureSuccess(ImageProxy image, int rotationDegrees) {
                super.onCaptureSuccess(image, rotationDegrees);
                // Play with the Image here.
            }
        });
    }
});
It does not use File to save the image, instead saves the image as a buffer in the memory.

CameraX Analyzer glitching preview

I am using CameraX Analyzer use-case to detect circles in an image using OpenCV Hough Circles, but even though I believe I am using a separate thread to do this analysis, the camera preview gets really slow sometimes when it gets too many detected circles.
I get that my implementation of the detector isn't the most efficient, but shouldn't this processing time be reflected only in the analysis rate, and not on the preview?
Got great part of the code from crysxd CameraX-Object-Tracking.
This is my MainActivity:
// Hosts the CameraFragment and plugs a Hough-circle pupil detector into its
// analyzer slot; detections are drawn on an overlay view.
class MainActivity : AppCompatActivity() {
private lateinit var overlayTextureView: DetectionOverlayView
// Looks the fragment up on each access; assumes it is declared in
// activity_main with id cameraFragment — TODO confirm layout id.
private val camera
get() = supportFragmentManager.findFragmentById(R.id.cameraFragment) as CameraFragment
override fun onCreate(savedInstanceState: Bundle?) {
// OpenCV native libs are loaded before anything else touches OpenCV
// classes; deliberately placed before super.onCreate().
OpenCVLoader.initDebug()
super.onCreate(savedInstanceState)
setContentView(R.layout.activity_main)
if (Timber.treeCount() == 0) {
Timber.plant(Timber.DebugTree())
}
overlayTextureView = findViewById(R.id.detectionOverlayView)
// The detector is a ViewModel so it survives configuration changes.
camera.imageAnalyzer = ViewModelProviders.of(this).get(HoughPupilDetector::class.java)
}
}
This is my CameraFragment initialization:
/**
 * Fragment that binds CameraX preview and (optionally) an image-analysis
 * use case. Assigning [imageAnalyzer] while the camera runs rebinds the
 * use cases with the new analyzer.
 */
open class CameraFragment : Fragment() {

    /** True while CameraX use cases are bound to this fragment. */
    var cameraRunning = false
        private set

    var imageAnalyzer: ThreadedImageAnalyzer? = null
        set(value) {
            field = value
            if (cameraRunning) {
                startCamera()
            }
        }

    override fun onCreateView(inflater: LayoutInflater, container: ViewGroup?, savedInstanceState: Bundle?): View =
        inflater.inflate(R.layout.fragment_camera, container, false)

    override fun onViewCreated(view: View, savedInstanceState: Bundle?) {
        super.onViewCreated(view, savedInstanceState)
        CameraPermissionHelper().requestCameraPermission(childFragmentManager) {
            if (it) {
                startCamera()
            } else {
                activity?.finish()
            }
        }
    }

    override fun onDestroyView() {
        super.onDestroyView()
        if (cameraRunning) {
            CameraX.unbindAll()
            cameraRunning = false
            Timber.i("Stopping camera")
        }
    }

    private fun startCamera() {
        preview.post {
            try {
                val usesCases = mutableListOf<UseCase>()

                // Make sure that there are no other use cases bound to CameraX
                CameraX.unbindAll()

                // Create configuration object for the viewfinder use case
                val previewConfig = onCreatePreivewConfigBuilder().build()
                usesCases.add(AutoFitPreviewBuilder.build(previewConfig, preview))

                // Setup image analysis pipeline that computes average pixel luminance in real time
                if (imageAnalyzer != null) {
                    val analyzerConfig = onCreateAnalyzerConfigBuilder().build()
                    usesCases.add(ImageAnalysis(analyzerConfig).apply {
                        analyzer = imageAnalyzer
                    })
                }

                // Bind use cases to lifecycle
                CameraX.bindToLifecycle(this, *usesCases.toTypedArray())
                cameraRunning = true
                Timber.i("Started camera with useCases=$usesCases")
            } catch (e: Exception) {
                Timber.e(e)
                AlertDialog.Builder(context)
                    .setMessage(getString(R.string.camera_error))
                    .setPositiveButton(android.R.string.ok) { _, _ ->
                        activity?.finish()
                    }
                    // FIX: the original called .create() and discarded the
                    // dialog, so the error was never displayed; .show()
                    // creates AND shows it.
                    .show()
            }
        }
    }

    // FIX: annotations use `@`, not `#` (`#Suppress` in the original).
    @Suppress("MemberVisibilityCanBePrivate")
    protected open fun onCreateAnalyzerConfigBuilder() = ImageAnalysisConfig.Builder().apply {
        // Use a worker thread for image analysis to prevent preview glitches
        setCallbackHandler(imageAnalyzer!!.getHandler())
        // In our analysis, we care more about the latest image than analyzing *every* image
        setImageReaderMode(ImageAnalysis.ImageReaderMode.ACQUIRE_LATEST_IMAGE)
        setTargetAspectRatio(Rational(1, 1))
        setTargetResolution(Size(preview.width, preview.height))
    }

    // Name kept for compatibility with subclasses ("Preivew" typo included —
    // renaming would break existing overrides).
    @Suppress("MemberVisibilityCanBePrivate")
    protected open fun onCreatePreivewConfigBuilder() = PreviewConfig.Builder().apply {
        setTargetAspectRatio(Rational(1, 1))
        setTargetResolution(Size(preview.width, preview.height))
    }
}
This is my analyzer interface initialization:
/**
 * Base class for pupil detectors running as a CameraX image analyzer.
 * Frames that arrive while a detection is still in flight are dropped.
 */
abstract class PupilDetector(listener: PupilDetectionListener? = null) : ViewModel(), ThreadedImageAnalyzer {

    private val listeners = ArrayList<PupilDetectionListener>().apply { listener?.let { add(it) } }

    // Guards against overlapping detections from rapid frame delivery.
    private val isBusy = AtomicBoolean(false)

    // Dedicated worker thread; its looper backs the handler returned below.
    private val handlerThread = HandlerThread("PupilDetector").apply { start() }

    fun addListener(listener: PupilDetectionListener) = listeners.add(listener)

    override fun analyze(image: ImageProxy, rotationDegrees: Int) {
        // Drop this frame if a previous detection is still running.
        if (!isBusy.compareAndSet(false, true)) return
        Timber.d("Running analysis...")
        val pupil = detect(image, rotationDegrees)
        Timber.d("Analysis done.")
        isBusy.set(false)
        // listeners.forEach { it(pupil) }
    }

    override fun getHandler() = Handler(handlerThread.looper)

    /** Subclasses implement the actual detection; null means no pupil found. */
    abstract fun detect(image: ImageProxy, rotationDegrees: Int): Pupil?
}
And this is my Hough Circles analyzer:
/**
 * Pupil detector based on OpenCV's Hough circle transform; reports the
 * first detected circle (if any) as the pupil.
 */
class HoughPupilDetector(listener: PupilDetectionListener? = null) : PupilDetector(listener) {

    // Upper bound on circles converted from the OpenCV result matrix.
    val maxCircles = 5

    override fun detect(image: ImageProxy, rotationDegrees: Int): Pupil? {
        val bitmap = image.toBitmap(rotationDegrees)
        // First circle wins; null when nothing was detected.
        return detectCircles(bitmap).firstOrNull()?.let { Pupil(it.point, it.r) }
    }

    // Runs grayscale + blur + HoughCircles on the frame.
    private fun detectCircles(bitmap: Bitmap): List<Circle> {
        val img = Mat()
        Utils.bitmapToMat(bitmap, img)

        // Hough parameters.
        val cannyUpperThreshold = 100.0
        val minRadius = 10
        val maxRadius = 400
        val accumulator = 100.0

        val circles = Mat()
        Imgproc.cvtColor(img, img, Imgproc.COLOR_RGB2GRAY)
        Imgproc.GaussianBlur(img, img, org.opencv.core.Size(3.0, 3.0), 1.0)
        Imgproc.HoughCircles(img, circles, Imgproc.CV_HOUGH_GRADIENT,
            2.0, 2.0 / 8.0, cannyUpperThreshold, accumulator,
            minRadius, maxRadius)
        Imgproc.cvtColor(img, img, Imgproc.COLOR_GRAY2BGR)

        return toCircles(circles)
    }

    // Converts the Nx1 result Mat (x, y, radius per column) into Circle values.
    private fun toCircles(circles: Mat): List<Circle> {
        if (circles.cols() <= 0) return emptyList()
        val count = circles.cols().coerceAtMost(maxCircles)
        return List(count) { idx ->
            val vCircle = circles.get(0, idx)
            val center = Point(vCircle[0].toInt(), vCircle[1].toInt())
            Circle(center, Math.round(vCircle[2]).toInt())
        }
    }
}
I updated CameraX dependencies from alpha01 to alpha05 and glitches stopped happening.

Categories

Resources