I am pretty new to Android, so I hope I can get some directions here.
I have a 360° camera running Android 7.0. It includes an SDK for accessing the live stitched images. In this SDK there is a function to set a Surface that the stitched image output is directed to.
This is the function provided by the SDK:
public static void SDK.setSurface(Surface inputSurface)
I want to grab an image from that surface every second.
How do I create the right kind of Surface? And how do I grab images from this Surface?
Any help is highly appreciated!
Since I found the answer, I'll share it here too.
I have created a class which works like a charm. This is what I used:
import android.graphics.PixelFormat
import android.media.ImageReader
import android.os.Handler
import android.os.HandlerThread
import java.nio.ByteBuffer

class Capture(private val width: Int, private val height: Int) : ImageReader.OnImageAvailableListener {

    private var mImageReader: ImageReader? = null
    private var mThreadHandler: HandlerThread? = null
    private var init = false

    fun start() {
        if (!init) {
            // Release any previous reader/thread before creating new ones
            mImageReader?.close()
            mImageReader = null
            mThreadHandler?.quitSafely()
            mThreadHandler = null

            mThreadHandler = HandlerThread("prev").also { it.start() }
            // Up to 5 images may be acquired from the reader at once (maxImages = 5)
            mImageReader = ImageReader.newInstance(width, height, PixelFormat.RGBA_8888, 5)
            mImageReader?.setOnImageAvailableListener(this, Handler(mThreadHandler!!.looper))
            // Hand the reader's Surface to the SDK so stitched frames are rendered into it
            SDK.setSurface(mImageReader?.surface)
            init = true
        }
    }

    override fun onImageAvailable(reader: ImageReader) {
        // acquireLatestImage() can return null if no new frame is ready
        val image = reader.acquireLatestImage() ?: return
        val height = image.height
        // RGBA rows may be padded; rowStride / pixelStride gives the effective width
        val stride = image.planes[0].rowStride / image.planes[0].pixelStride
        captureCallback(stride, height, image.timestamp, image.planes[0].buffer)
        image.close()
    }

    fun stop() {
        if (init) {
            mImageReader?.close()
            mImageReader = null
            mThreadHandler?.quitSafely()
            mThreadHandler = null
            SDK.setSurface(null)
        }
        init = false
    }

    fun captureCallback(width: Int, height: Int, timestamp: Long, data: ByteBuffer) {
        // do something with data
    }
}
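A minimal usage sketch (the 3840x1920 resolution is just an assumption; use whatever size your SDK stitches to). To grab only one image per second, you can keep a timestamp inside captureCallback and skip frames that arrive less than a second after the previous one:

val capture = Capture(width = 3840, height = 1920) // hypothetical stitched size
capture.start() // the SDK now renders stitched frames into the ImageReader's Surface
// ...
capture.stop()  // detach the Surface and release the reader and its thread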
Related
I'm developing an Android app for recognizing augmented images and displaying videos directly over them.
I have implemented everything and it's working well, but the one issue is that the anchorNode model attached to the augmented image anchor isn't very stable and doesn't stay exactly within the image's frame.
I tried to follow some best practices for improving tracking and got better results, but there is one point I'm not understanding: "If your image will never move from its initial position (for example, a poster affixed to a wall), you can attach a global anchor to the image to increase tracking stability." Could someone explain how to do that, please? It would also be great to get more tips for improving tracking further.
Thanks in advance.
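For what it's worth, my understanding of that sentence is that a "global" anchor is one created on the Session at the image's pose (so it stops following the possibly jittery image once placed), instead of an anchor created on the AugmentedImage itself. A hedged sketch, reusing the session and augmentedImage names from the code below (imageIsFixedInWorld is a hypothetical flag):

// Global anchor: tied to world tracking, not to the detected image
val anchor = if (imageIsFixedInWorld) {
    session!!.createAnchor(augmentedImage.centerPose)
} else {
    // Regular image anchor: keeps following the detected image
    augmentedImage.createAnchor(augmentedImage.centerPose)
}
anchorNode = AnchorNode(anchor)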
Here is the code I'm using for tracking and creating video anchors.
override fun onSessionConfiguration(session: Session?, config: Config?) {
Log.d("ttt","onSessionConfiguration")
this.session = session
// Disable plane detection
config!!.planeFindingMode = Config.PlaneFindingMode.DISABLED
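// NB: Thread().run { ... } invokes Kotlin's run() on the current thread, so nothing here actually runs in the background; a real background thread would be Thread { ... }.start()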
Thread().run {
database = AugmentedImageDatabase(session).also {
for(fileName in requireContext().fileList().filter { it.contains("image.jpg") }){
val file = File(requireContext().filesDir,fileName)
if(file.exists()){
it.addImage(fileName.toString().split("-")[0],BitmapFactory.decodeFile(file.absolutePath),0.25f)
}
}
}
config.setAugmentedImageDatabase(database)
}
arFragment?.setOnAugmentedImageUpdateListener { augmentedImage ->
onAugmentedImageTrackingUpdate(augmentedImage)
}
}
private fun onAugmentedImageTrackingUpdate(augmentedImage: AugmentedImage) {
attachVideo(augmentedImage)
}
private fun attachVideo(augmentedImage: AugmentedImage){
if (augmentedImage.trackingState == TrackingState.TRACKING
&& augmentedImage.trackingMethod == AugmentedImage.TrackingMethod.FULL_TRACKING){
if (!matrixDetected) {
matrixDetected = true
lastTracketedImageName = augmentedImage.name
anchorNode = AnchorNode(augmentedImage.createAnchor(augmentedImage.centerPose))
anchorNode!!.isSmoothed = true
texture = ExternalTexture().also {
it.surfaceTexture.setOnFrameAvailableListener {
texture!!.surfaceTexture.setOnFrameAvailableListener(null)
val model = ShapeFactory.makeCube(
Vector3(augmentedImage.extentX,0f,augmentedImage.extentZ),
Vector3(0f,0f,0f),
plainVideoMaterial
)
model.isShadowReceiver = false
model.isShadowCaster = false
val renderable = anchorNode!!.setRenderable(model)
renderable.material!!.setExternalTexture("videoTexture",texture)
fadeInVideo(renderable.material)
}
}
val videoFile = File(requireContext().filesDir,"${augmentedImage.name}-video.mp4")
mediaPlayer = MediaPlayer.create(requireContext(),Uri.fromFile(videoFile)).also {
it.isLooping = true
it.setSurface(texture!!.surface)
it.start()
}
arFragment!!.arSceneView.scene.addChild(anchorNode)
}
}else{
if(matrixDetected){
if(anchorNode != null){
arFragment!!.arSceneView.scene.removeChild(anchorNode!!)
anchorNode = null
}
viewLifecycleOwner.lifecycleScope.launch {
val result = viewModel.endArVideo(deviceID,augmentedImage.name)
if(result.isSuccessful){
Log.d("ttt","stop ar request success")
}else{
Log.d("ttt","stop ar request failed ${result.errorBody()}")
}
}
matrixDetected = false
}
mediaPlayer?.let {
it.setSurface(null)
it.stop()
it.release()
mediaPlayer = null
}
}
}
private fun fadeInVideo(material: Material) {
ValueAnimator.ofFloat(0f, 1f).apply {
duration = 500L
interpolator = LinearInterpolator()
addUpdateListener { v ->
material.setFloat("videoAlpha", v.animatedValue as Float)
}
}.start()
}
I am using MediaProjectionManager to take screenshots from a foreground service. I discovered that capturing from the surface behaves differently on Android 10 and Android 11.
When I take a screenshot
fun captureBitmap(frame: CropFrames, response: (bitmap: Bitmap) -> Unit){
delayed(50) {
this.frames = frame
projection = mgr!!.getMediaProjection(resultCode, resultData!!)
val cb: MediaProjection.Callback = object : MediaProjection.Callback() {
override fun onStop() {
vdisplay!!.release() //?
response.invoke(latestBitmap!!)
}
}
vdisplay = projection?.createVirtualDisplay(
NAME,
width,
height,
App.densityDpi,
FLAGS,
imageReader.surface,
null,
null
)
projection?.registerCallback(cb, null)
}
}
Then onImageAvailable is triggered:
override fun onImageAvailable(reader: ImageReader) {
try {
val image = imageReader.acquireNextImage()
if (image != null) {
val planes = image.planes
val buffer = planes[0].buffer
val pixelStride = planes[0].pixelStride
val rowStride = planes[0].rowStride
val rowPadding = rowStride - pixelStride * width
val bitmapWidth = width + rowPadding / pixelStride
if (latestBitmap == null || latestBitmap!!.width != bitmapWidth || latestBitmap!!.height != height) {
if (latestBitmap != null) {
latestBitmap!!.recycle()
}
latestBitmap = Bitmap.createBitmap(
bitmapWidth,
height, Bitmap.Config.ARGB_8888
)
}
latestBitmap!!.copyPixelsFromBuffer(buffer)
image.close()
handler.parseFrame(frames!!, latestBitmap!!) {
stopCapture()
}
}
} catch (e: Exception) {
e.printStackTrace()
}
}
Then I release it
fun stopCapture() {
if (projection != null) {
projection!!.stop()
vdisplay!!.release()
projection = null
}
}
This flow can be triggered many times per lifecycle, but each call takes longer to complete than the last, and there are short UI twitches while taking a screenshot (there are no main-thread calculations). Probably I don't clear something properly? Any suggestions are appreciated. Thanks!
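One pattern that stands out (not a definitive diagnosis): a new MediaProjection is created and a new callback registered on every capture, but stopCapture() never unregisters the callback, so listeners may accumulate. A cleanup sketch along these lines might be worth trying; it assumes the callback is promoted to a field (cb here is that hypothetical field). It may also help to close or reuse the ImageReader between passes.

fun stopCapture() {
    projection?.let {
        it.unregisterCallback(cb) // cb: the MediaProjection.Callback kept as a field
        it.stop()
    }
    vdisplay?.release()
    vdisplay = null
    projection = null
}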
I am trying to capture photos in my app using the standard camera app intent (I am NOT interested in using Jetpack CameraX or another library to have a viewfinder in my app).
When I had the code in my Fragment like so:
// This is in response to user clicking a button
fun startPhotoTaking() {
val takePictureIntent = Intent(MediaStore.ACTION_IMAGE_CAPTURE)
resultLauncher.launch(takePictureIntent)
}
private var resultLauncher =
registerForActivityResult(ActivityResultContracts.StartActivityForResult()) { result ->
if (result.resultCode == Activity.RESULT_OK) {
val photo = result.data?.extras?.get("data") as? Bitmap
photo?.let {
// ... do whatever
}
}
}
Then the photo Bitmap came back tiny, as apparently Android caps intent extras at 1 MB, but the orientation was correct.
Since I actually need the original large image, I have modified the code like so:
// This is in response to user clicking a button
fun startPhotoTaking() {
lastUri = getTmpFileUri()
if (lastUri != null) {
resultLauncher.launch(lastUri)
}
}
private fun getTmpFileUri(): Uri {
requireContext().cacheDir.listFiles()?.forEach { it.delete() }
val tmpFile = File
.createTempFile("tmp_image_file", ".jpg", requireContext().cacheDir)
.apply {
createNewFile()
}
return FileProvider.getUriForFile(
MyApplication.instance.applicationContext,
"${BuildConfig.APPLICATION_ID}.provider",
tmpFile
)
}
var lastUri: Uri? = null
private var resultLauncher =
registerForActivityResult(ActivityResultContracts.TakePicture()) { result ->
if (result) {
val photoUri = lastUri
if (photoUri != null) {
val stream = MyApplication.instance.applicationContext.contentResolver
.openInputStream(photoUri)
val photo = BitmapFactory.decodeStream(stream)
stream?.close()
// ... do whatever
// If i try ExifInterface(photoUri.path!!)
}
}
}
Now I do receive the actual large photo, but it is always landscape :(
I tried creating an instance of ExifInterface(photoUri.path), but that throws an exception for me (which I don't quite understand, as I am only writing/reading in my own app's cache directory):
java.io.FileNotFoundException: /cache/tmp_image_file333131952730914647.jpg: open failed: EACCES (Permission denied)
How can I get my photo to retain orientation when saved to file and/or get access to read EXIF parameters so I can rotate it myself?
Update
As a workaround, this did the trick, but it's just... ungodly. So I am very keen to find better solutions.
val stream = MyApplication.instance.applicationContext.contentResolver
.openInputStream(photoUri)
if (stream != null) {
val tempFile = File.createTempFile("tmp", ".jpg", requireContext().cacheDir)
.apply { createNewFile() }
val fos = FileOutputStream(tempFile)
val buf = ByteArray(8192)
var length: Int
while (stream.read(buf).also { length = it } > 0) {
fos.write(buf, 0, length)
}
fos.close()
val exif = ExifInterface(tempFile.path)
val orientation =
exif.getAttributeInt(ExifInterface.TAG_ORIENTATION, 1)
val matrix = Matrix()
if (orientation == ExifInterface.ORIENTATION_ROTATE_90) {
matrix.postRotate(90f)
} else if (orientation == ExifInterface.ORIENTATION_ROTATE_180) {
matrix.postRotate(180f)
} else if (orientation == ExifInterface.ORIENTATION_ROTATE_270) {
matrix.postRotate(270f)
}
var photo = BitmapFactory.decodeFile(tempFile.path)
photo = Bitmap.createBitmap(
photo,
0,
0,
photo.width,
photo.height,
matrix,
true
)
}
ExifInterface(photoUri.path)
That is not an existing path, as you have seen.
Better use:
ExifInterface(tmpFile.absolutePath)
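Alternatively, the androidx ExifInterface can read straight from an InputStream, which avoids the temp-file copy in the workaround above (a sketch, assuming the androidx.exifinterface dependency):

import androidx.exifinterface.media.ExifInterface

val orientation = requireContext().contentResolver.openInputStream(photoUri)?.use { input ->
    ExifInterface(input).getAttributeInt(
        ExifInterface.TAG_ORIENTATION,
        ExifInterface.ORIENTATION_NORMAL
    )
} ?: ExifInterface.ORIENTATION_NORMAL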
You can rotate the image to a particular angle like this:
// To rotate image
private fun rotateImage(source: Bitmap, angle: Float): Bitmap? {
val matrix = Matrix()
matrix.postRotate(angle)
return Bitmap.createBitmap(
source, 0, 0, source.width, source.height,
matrix, true
)
}
In your case, set angle to 90f to get the image in portrait orientation.
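A usage sketch mapping the EXIF orientation to an angle (exif and bitmap are assumed to be the ExifInterface and decoded Bitmap from earlier):

val angle = when (exif.getAttributeInt(
    ExifInterface.TAG_ORIENTATION, ExifInterface.ORIENTATION_NORMAL)) {
    ExifInterface.ORIENTATION_ROTATE_90 -> 90f
    ExifInterface.ORIENTATION_ROTATE_180 -> 180f
    ExifInterface.ORIENTATION_ROTATE_270 -> 270f
    else -> 0f
}
val upright = if (angle != 0f) rotateImage(bitmap, angle) else bitmap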
My goal is to have the Camera2 API take a new photo and add EXIF data to it in code, using ExifInterface. I have stumbled on a small issue, though. When I run the app, it captures correctly when I press the capture button and saves the JPEG file that is needed, but when the code tries to add the EXIF data, it comes back null. I added everything I need: setAttribute() to set the new data and override what I need to add, and getAttribute() to display it with Log.e() and see the results. But the EXIF result is still null. I've put the EXIF calculations in a separate class to keep them manageable; some people would do this in the same class, but I made it separate. This is the message in the Logcat:
E/LATITUDE: null
E/LONGITUDE: null
D/Camera2BasicFragment: /storage/emulated/0/Android/data/com.example.camera2apikotlin4/files/pic.jpg
This is the photo and a screenshot of the EXIF data, literally blank and not saved:
[Image: the picture taken with the app]
[Image: Exif Pilot showing the blank EXIF data]
This is where the file variable comes from and where the image is saved:
private lateinit var file: File
override fun onActivityCreated(savedInstanceState: Bundle?) {
super.onActivityCreated(savedInstanceState)
//val PIC_FILE_NAME = SimpleDateFormat("dd.MM.yyyy-HH:mm:ss", Locale.ENGLISH).format(System.currentTimeMillis()) + ".jpg"
val PIC_FILE_NAME = "pic.jpg"
file = File(requireActivity().getExternalFilesDir(null), PIC_FILE_NAME)
}
This is where the capture process starts:
@RequiresApi(Build.VERSION_CODES.Q)
private fun process(result: CaptureResult) {
when (state) {
STATE_PREVIEW -> Unit // Do nothing when the camera preview is working normally.
STATE_WAITING_LOCK -> capturePicture(result)
STATE_WAITING_PRECAPTURE -> {
// CONTROL_AE_STATE can be null on some devices
val aeState = result.get(CaptureResult.CONTROL_AE_STATE)
if (aeState == null ||
aeState == CaptureResult.CONTROL_AE_STATE_PRECAPTURE ||
aeState == CaptureRequest.CONTROL_AE_STATE_FLASH_REQUIRED) {
state = STATE_WAITING_NON_PRECAPTURE
}
}
STATE_WAITING_NON_PRECAPTURE -> {
// CONTROL_AE_STATE can be null on some devices
val aeState = result.get(CaptureResult.CONTROL_AE_STATE)
if (aeState == null || aeState != CaptureResult.CONTROL_AE_STATE_PRECAPTURE) {
state = STATE_PICTURE_TAKEN
captureStillPicture()
}
}
}
}
@RequiresApi(Build.VERSION_CODES.Q)
private fun capturePicture(result: CaptureResult) {
val afState = result.get(CaptureResult.CONTROL_AF_STATE)
if (afState == null) {
captureStillPicture()
} else if (afState == CaptureResult.CONTROL_AF_STATE_FOCUSED_LOCKED
|| afState == CaptureResult.CONTROL_AF_STATE_NOT_FOCUSED_LOCKED) {
// CONTROL_AE_STATE can be null on some devices
val aeState = result.get(CaptureResult.CONTROL_AE_STATE)
if (aeState == null || aeState == CaptureResult.CONTROL_AE_STATE_CONVERGED) {
state = STATE_PICTURE_TAKEN
captureStillPicture()
} else {
runPrecaptureSequence()
}
}
}
@RequiresApi(Build.VERSION_CODES.Q)
override fun onCaptureProgressed(session: CameraCaptureSession,
request: CaptureRequest,
partialResult: CaptureResult) {
process(partialResult)
}
@RequiresApi(Build.VERSION_CODES.Q)
override fun onCaptureCompleted(session: CameraCaptureSession,
request: CaptureRequest,
result: TotalCaptureResult) {
process(result)
}
}
This is the separate geoDegree class:
package com.example.camera2apikotlin4
import androidx.exifinterface.media.ExifInterface
class geoDegree {
private var valid: Boolean = true
var latitudeFloat: Double = 0.0
var longitudeFloat: Double = 0.0
fun geoDegree(exif: ExifInterface) {
val attrLATITUDE = exif.getAttribute(ExifInterface.TAG_GPS_LATITUDE)
val attrLATITUDE_REF = exif.getAttribute(ExifInterface.TAG_GPS_LATITUDE_REF)
val attrLONGITUDE = exif.getAttribute(ExifInterface.TAG_GPS_LONGITUDE)
val attrLONGITUDE_REF = exif.getAttribute(ExifInterface.TAG_GPS_LONGITUDE_REF)
if (
(attrLATITUDE != null) &&
(attrLATITUDE_REF != null) &&
(attrLONGITUDE != null) &&
(attrLONGITUDE_REF != null))
{
//Telling the code that the validation is true
valid = true
//If the latitude reference is the letter N the value stays positive, otherwise it is negated
if(attrLATITUDE_REF == "N") {
latitudeFloat = convertToDegree(attrLATITUDE)
} else {
latitudeFloat = 0 - convertToDegree(attrLATITUDE)
}
//If the longitude reference is the letter E the value stays positive, otherwise it is negated
if(attrLONGITUDE_REF == "E") {
longitudeFloat = convertToDegree(attrLONGITUDE)
} else {
longitudeFloat = 0 - convertToDegree(attrLONGITUDE)
}
}
}
//The method function that converting the degrees, using the list of strings
private fun convertToDegree(stringDMS: String): Double {
val result: Double?
val DMS: List<String> = stringDMS.split(",")
//Values in degrees
val stringD: List<String> = DMS[0].split("/")
val D0 = stringD[0].toDouble()
val D1 = stringD[1].toDouble()
val FloatD: Double = D0 / D1
//Values in minutes
val stringM = DMS[1].split("/")
val M0 = stringM[0].toDouble()
val M1 = stringM[1].toDouble()
val FloatM: Double = M0 / M1
//Values in seconds
val stringS = DMS[2].split("/")
val S0 = stringS[0].toDouble()
val S1 = stringS[1].toDouble()
val FloatS = S0 / S1
/** Combine degrees, minutes (divided by 60) and seconds (divided by 3600)
* into a single decimal degree value
**/
result = (FloatD + (FloatM/60) + (FloatS/3600))
return result
}
fun isValid(): Boolean {
return valid
}
override fun toString(): String {
return ("$latitudeFloat, $longitudeFloat")
}
fun getLatitudeE6(): Int {
return (latitudeFloat.times(1000000)).toInt()
}
fun getLongitudeE6(): Int {
return (longitudeFloat.times(1000000)).toInt()
}
}
This is the main capture code:
/**
* Capture a still picture. This method should be called when we get a response in
* [.captureCallback] from both [.lockFocus].
*/
@RequiresApi(Build.VERSION_CODES.Q)
private fun captureStillPicture() {
try {
if (activity == null || cameraDevice == null) return
val rotation = requireActivity().windowManager.defaultDisplay.rotation
// This is the CaptureRequest.Builder that we use to take a picture.
val captureBuilder = cameraDevice?.createCaptureRequest(
CameraDevice.TEMPLATE_STILL_CAPTURE)?.apply {
imageReader?.surface?.let { addTarget(it) }
// Sensor orientation is 90 for most devices, or 270 for some devices (eg. Nexus 5X)
// We have to take that into account and rotate JPEG properly.
// For devices with orientation of 90, we return our mapping from ORIENTATIONS.
// For devices with orientation of 270, we need to rotate the JPEG 180 degrees.
set(CaptureRequest.JPEG_ORIENTATION,
(ORIENTATIONS.get(rotation) + sensorOrientation + 270) % 360)
// Use the same AE and AF modes as the preview.
set(CaptureRequest.CONTROL_AF_MODE,
CaptureRequest.CONTROL_AF_MODE_CONTINUOUS_PICTURE)
}?.also { setAutoFlash(it) }
val captureCallback = object : CameraCaptureSession.CaptureCallback() {
override fun onCaptureCompleted(session: CameraCaptureSession,
request: CaptureRequest,
result: TotalCaptureResult) {
Toast.makeText(context, "Saved: $file", Toast.LENGTH_SHORT).show()
Log.d(TAG, file.toString())
unlockFocus()
}
}.apply {
val exif = ExifInterface(file)
geoDegree().geoDegree(exif)
exif.setAttribute(ExifInterface.TAG_GPS_LATITUDE, "${geoDegree().getLatitudeE6()}")
exif.setAttribute(ExifInterface.TAG_GPS_LONGITUDE, "${geoDegree().getLongitudeE6()}")
exif.saveAttributes()
geoDegree().isValid()
val exifLatitude = exif.getAttribute(ExifInterface.TAG_GPS_LATITUDE)
val exifLongitude = exif.getAttribute(ExifInterface.TAG_GPS_LONGITUDE)
Log.e("LATITUDE", "$exifLatitude")
Log.e("LONGITUDE", "$exifLongitude")
}
captureSession?.apply {
stopRepeating()
abortCaptures()
captureBuilder?.build()?.let { capture(it, captureCallback, null) }
}
} catch (e: CameraAccessException) {
Log.e(TAG, e.toString())
}
}
Any tip, trick, or advice to help fix this small issue would be appreciated. It may be easy for you, but I am still learning how to do it. Thank you in advance.
If the issue is happening on Android API >= 29, consider that from 29 and above the geo location is redacted, as it is now considered sensitive information. This means that if you wish to read the geo location back, you need to first request the original Uri and use the obtained Uri with ExifInterface or to open a stream; otherwise there is no way to get the geo location back:
https://developer.android.com/reference/android/provider/MediaStore#setRequireOriginal(android.net.Uri)
originalUri = MediaStore.setRequireOriginal(photoUri)
In addition, you will also need to hold/request the ACCESS_MEDIA_LOCATION permission. Without it, your app has no right to read the geo location.
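Putting those two pieces together, reading the unredacted EXIF on API 29+ could look roughly like this (assumes ACCESS_MEDIA_LOCATION has been granted and photoUri is a MediaStore Uri):

val originalUri = MediaStore.setRequireOriginal(photoUri)
requireContext().contentResolver.openInputStream(originalUri)?.use { input ->
    val exif = ExifInterface(input)
    val latLong = exif.latLong // null if no GPS tags were written
}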
Assuming you have code that uses the ImageReader you set as the capture target and saves an image to the File you've set up, the main thing I see missing is that you're not putting any location information into the JPEG image.
The camera API does not have direct access to location information; if you want EXIF location metadata in the final JPEG, you have to give it to the camera API yourself first:
https://developer.android.com/reference/kotlin/android/hardware/camera2/CaptureRequest#jpeg_gps_location
For that, you need to get a Location object (which requires one of the LOCATION permissions), and at that point I'm not sure you really need to parse the EXIF for a location at all, instead of just using the Location object to begin with.
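A sketch of what that could look like inside captureStillPicture(), assuming a location permission has been granted (LocationManager and the GPS provider are just one way to obtain a Location):

val lm = requireContext().getSystemService(Context.LOCATION_SERVICE) as LocationManager
val lastFix: Location? = lm.getLastKnownLocation(LocationManager.GPS_PROVIDER)
// The HAL then writes the GPS EXIF tags into the JPEG for you
lastFix?.let { captureBuilder?.set(CaptureRequest.JPEG_GPS_LOCATION, it) }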
I added CameraX to my app, which is under ongoing development, a while ago. I knew it was in alpha, but I was ready to make the change once a beta or final release became available.
So I started working on it today. I have updated from
implementation 'androidx.camera:camera-core:1.0.0-alpha04'
implementation 'androidx.camera:camera-camera2:1.0.0-alpha04'
to this:
implementation 'androidx.camera:camera-core:1.0.0-beta01'
implementation 'androidx.camera:camera-camera2:1.0.0-beta01'
implementation 'androidx.camera:camera-lifecycle:1.0.0-beta01'
My Previous Working Code (alpha-04):
class ScannerX : AppCompatActivity() {
private lateinit var context: Context
var isOtpAuthCode = true
private val immersiveFlagTimeout = 500L
private val flagsFullscreen = View.SYSTEM_UI_FLAG_LOW_PROFILE or View.SYSTEM_UI_FLAG_FULLSCREEN or View.SYSTEM_UI_FLAG_LAYOUT_STABLE or
View.SYSTEM_UI_FLAG_IMMERSIVE_STICKY or View.SYSTEM_UI_FLAG_LAYOUT_HIDE_NAVIGATION or View.SYSTEM_UI_FLAG_HIDE_NAVIGATION
private var preview: Preview? = null
private var lensFacing = CameraX.LensFacing.BACK
private var imageAnalyzer: ImageAnalysis? = null
private lateinit var analyzerThread: HandlerThread
override fun onCreate(savedInstanceState: Bundle?) {
super.onCreate(savedInstanceState)
setContentView(R.layout.activity_scanner_x)
context = this
btnCancel.setOnClickListener {
finish()
}
analyzerThread = if (GoogleApiAvailability.getInstance().isGooglePlayServicesAvailable(context) == ConnectionResult.SUCCESS) {
HandlerThread("BarcodeFirebaseAnalyzer").apply { start() }
} else {
HandlerThread("BarcodeZxingAnalyzer").apply { start() }
}
Dexter.withActivity(this)
.withPermissions(Manifest.permission.CAMERA)
.withListener(object : MultiplePermissionsListener {
override fun onPermissionsChecked(report: MultiplePermissionsReport?) {
textureView.post {
val metrics = DisplayMetrics().also { textureView.display.getRealMetrics(it) }
val screenAspectRatio = Rational(metrics.widthPixels, metrics.heightPixels)
val previewConfig = PreviewConfig.Builder().apply {
setLensFacing(lensFacing)
// We request aspect ratio but no resolution to let CameraX optimize our use cases
setTargetAspectRatio(screenAspectRatio)
// Set initial target rotation, we will have to call this again if rotation changes
// during the lifecycle of this use case
setTargetRotation(textureView.display.rotation)
}.build()
val analyzerConfig = ImageAnalysisConfig.Builder().apply {
setLensFacing(lensFacing)
// Use a worker thread for image analysis to prevent preview glitches
setCallbackHandler(Handler(analyzerThread.looper))
// In our analysis, we care more about the latest image than analyzing *every* image
setImageReaderMode(ImageAnalysis.ImageReaderMode.ACQUIRE_LATEST_IMAGE)
// Set initial target rotation, we will have to call this again if rotation changes
// during the lifecycle of this use case
setTargetRotation(textureView.display.rotation)
}.build()
preview = AutoFitPreviewBuilder.build(previewConfig, textureView)
imageAnalyzer = ImageAnalysis(analyzerConfig).apply {
analyzer = if (GoogleApiAvailability.getInstance().isGooglePlayServicesAvailable(context) == ConnectionResult.SUCCESS) {
BarcodeFirebaseAnalyzer { qrCode ->
if (isOtpAuthCode) {
if (qrCode.startsWith("otpauth")) {
toAddAuth(qrCode)
}
} else {
toAddAuth(qrCode)
}
}
} else {
BarcodeZxingAnalyzer { qrCode ->
if (isOtpAuthCode) {
if (qrCode.startsWith("otpauth")) {
toAddAuth(qrCode)
}
} else {
toAddAuth(qrCode)
}
}
}
}
// Apply declared configs to CameraX using the same lifecycle owner
CameraX.bindToLifecycle(this@ScannerX, preview, imageAnalyzer)
}
}
override fun onPermissionRationaleShouldBeShown(permissions: MutableList<PermissionRequest>?, token: PermissionToken?) {
//
}
}).check()
}
override fun onStart() {
super.onStart()
// Before setting full screen flags, we must wait a bit to let UI settle; otherwise, we may
// be trying to set app to immersive mode before it's ready and the flags do not stick
textureView.postDelayed({
textureView.systemUiVisibility = flagsFullscreen
}, immersiveFlagTimeout)
}
override fun onDestroy() {
analyzerThread.quit()
super.onDestroy()
}
private fun toAddAuth(scannedCode: String) {
if (CameraX.isBound(imageAnalyzer)) {
CameraX.unbind(imageAnalyzer)
}
val intent = Intent()
intent.putExtra("scanResult", scannedCode)
setResult(RESULT_OK, intent)
finish()
}
companion object {
private const val RESULT_OK = 666
}
}
And the code I have changed is as follows (beta-01):
class ScannerX : AppCompatActivity() {
private lateinit var context: Context
var isOtpAuthCode = true
private val immersiveFlagTimeout = 500L
private val flagsFullscreen = View.SYSTEM_UI_FLAG_LOW_PROFILE or View.SYSTEM_UI_FLAG_FULLSCREEN or View.SYSTEM_UI_FLAG_LAYOUT_STABLE or
View.SYSTEM_UI_FLAG_IMMERSIVE_STICKY or View.SYSTEM_UI_FLAG_LAYOUT_HIDE_NAVIGATION or View.SYSTEM_UI_FLAG_HIDE_NAVIGATION
private var preview: Preview? = null
private var lensFacing = CameraSelector.DEFAULT_BACK_CAMERA
private var imageAnalyzer: ImageAnalysis? = null
private lateinit var analysisExecutor: ExecutorService
private lateinit var processCameraProvider: ListenableFuture<ProcessCameraProvider>
override fun onCreate(savedInstanceState: Bundle?) {
super.onCreate(savedInstanceState)
setContentView(R.layout.activity_scanner_x)
context = this
btnCancel.setOnClickListener {
finish()
}
Dexter.withActivity(this)
.withPermissions(Manifest.permission.CAMERA)
.withListener(object : MultiplePermissionsListener {
override fun onPermissionsChecked(report: MultiplePermissionsReport?) {
textureView.post {
analysisExecutor = Executors.newSingleThreadExecutor()
processCameraProvider = ProcessCameraProvider.getInstance(context)
preview = Preview.Builder()
.setTargetAspectRatio(AspectRatio.RATIO_16_9)
.setTargetRotation(textureView.display.rotation)
.build()
imageAnalyzer = ImageAnalysis.Builder()
.setBackpressureStrategy(ImageAnalysis.STRATEGY_KEEP_ONLY_LATEST)
.setTargetRotation(textureView.display.rotation)
.build()
if (GoogleApiAvailability.getInstance().isGooglePlayServicesAvailable(context) == ConnectionResult.SUCCESS) {
imageAnalyzer?.apply {
setAnalyzer(analysisExecutor, BarcodeFirebaseAnalyzer { qrCode ->
if (isOtpAuthCode) {
if (qrCode.startsWith("otpauth")) {
toAddAuth(qrCode)
}
} else {
toAddAuth(qrCode)
}
})
}
} else {
imageAnalyzer?.apply {
setAnalyzer(analysisExecutor, BarcodeZxingAnalyzer { qrCode ->
if (isOtpAuthCode) {
if (qrCode.startsWith("otpauth")) {
toAddAuth(qrCode)
}
} else {
toAddAuth(qrCode)
}
})
}
}
processCameraProvider.get().bindToLifecycle(this@ScannerX, lensFacing, imageAnalyzer)
}
}
override fun onPermissionRationaleShouldBeShown(permissions: MutableList<PermissionRequest>?, token: PermissionToken?) {
//
}
}).check()
}
override fun onStart() {
super.onStart()
// Before setting full screen flags, we must wait a bit to let UI settle; otherwise, we may
// be trying to set app to immersive mode before it's ready and the flags do not stick
textureView.postDelayed({
textureView.systemUiVisibility = flagsFullscreen
}, immersiveFlagTimeout)
}
override fun onDestroy() {
if (!analysisExecutor.isShutdown) {
analysisExecutor.shutdown()
}
super.onDestroy()
}
private fun toAddAuth(scannedCode: String) {
/*if (CameraX.isBound(imageAnalyzer)) {
CameraX.unbind(imageAnalyzer)
}*/
val intent = Intent()
intent.putExtra("scanResult", scannedCode)
setResult(RESULT_OK, intent)
finish()
}
companion object {
private const val RESULT_OK = 666
}
}
After I upgraded, there were so many changes in the library that I can't make it work anymore.
I also can't use the Google-provided AutoFitPreviewBuilder class that went with the initial alpha releases of this library. It wasn't strictly necessary even with alpha04: the only problem without it was that the camera view was stretched a little, but scanning and analyzing worked properly.
/**
* Builder for [Preview] that takes in a [WeakReference] of the view finder and [PreviewConfig],
* then instantiates a [Preview] which automatically resizes and rotates reacting to config changes.
*/
class AutoFitPreviewBuilder private constructor(config: PreviewConfig, viewFinderRef: WeakReference<TextureView>) {
/** Public instance of preview use-case which can be used by consumers of this adapter */
val useCase: Preview
/** Internal variable used to keep track of the use case's output rotation */
private var bufferRotation: Int = 0
/** Internal variable used to keep track of the view's rotation */
private var viewFinderRotation: Int? = null
/** Internal variable used to keep track of the use-case's output dimension */
private var bufferDimens: Size = Size(0, 0)
/** Internal variable used to keep track of the view's dimension */
private var viewFinderDimens: Size = Size(0, 0)
/** Internal variable used to keep track of the view's display */
private var viewFinderDisplay: Int = -1
/** Internal reference of the [DisplayManager] */
private lateinit var displayManager: DisplayManager
/**
* We need a display listener for orientation changes that do not trigger a configuration
* change, for example if we choose to override config change in manifest or for 180-degree
* orientation changes.
*/
private val displayListener = object : DisplayManager.DisplayListener {
override fun onDisplayAdded(displayId: Int) = Unit
override fun onDisplayRemoved(displayId: Int) = Unit
override fun onDisplayChanged(displayId: Int) {
val viewFinder = viewFinderRef.get() ?: return
if (displayId == viewFinderDisplay) {
val display = displayManager.getDisplay(displayId)
val rotation = getDisplaySurfaceRotation(display)
updateTransform(viewFinder, rotation, bufferDimens, viewFinderDimens)
}
}
}
init {
// Make sure that the view finder reference is valid
val viewFinder = viewFinderRef.get() ?:
throw IllegalArgumentException("Invalid reference to view finder used")
// Initialize the display and rotation from texture view information
viewFinderDisplay = viewFinder.display.displayId
viewFinderRotation = getDisplaySurfaceRotation(viewFinder.display) ?: 0
// Initialize public use-case with the given config
useCase = Preview(config)
// Every time the view finder is updated, recompute layout
useCase.onPreviewOutputUpdateListener = Preview.OnPreviewOutputUpdateListener {
val viewFinderI = viewFinderRef.get() ?: return@OnPreviewOutputUpdateListener
Log.d(TAG, "Preview output changed. " +
"Size: ${it.textureSize}. Rotation: ${it.rotationDegrees}")
// To update the SurfaceTexture, we have to remove it and re-add it
val parent = viewFinderI.parent as ViewGroup
parent.removeView(viewFinderI)
parent.addView(viewFinderI, 0)
// Update internal texture
viewFinderI.surfaceTexture = it.surfaceTexture
// Apply relevant transformations
bufferRotation = it.rotationDegrees
val rotation = getDisplaySurfaceRotation(viewFinderI.display)
updateTransform(viewFinderI, rotation, it.textureSize, viewFinderDimens)
}
// Every time the provided texture view changes, recompute layout
viewFinder.addOnLayoutChangeListener { view, left, top, right, bottom, _, _, _, _ ->
val viewFinderII = view as TextureView
val newViewFinderDimens = Size(right - left, bottom - top)
Log.d(TAG, "View finder layout changed. Size: $newViewFinderDimens")
val rotation = getDisplaySurfaceRotation(viewFinderII.display)
updateTransform(viewFinderII, rotation, bufferDimens, newViewFinderDimens)
}
// Every time the orientation of device changes, recompute layout
// NOTE: This is unnecessary if we listen to display orientation changes in the camera
// fragment and call [Preview.setTargetRotation()] (like we do in this sample), which will
// trigger [Preview.OnPreviewOutputUpdateListener] with a new
// [PreviewOutput.rotationDegrees]. CameraX Preview use case will not rotate the frames for
// us, it will just tell us about the buffer rotation with respect to sensor orientation.
// In this sample, we ignore the buffer rotation and instead look at the view finder's
// rotation every time [updateTransform] is called, which gets triggered by
// [CameraFragment] display listener -- but the approach taken in this sample is not the
// only valid one.
displayManager = viewFinder.context
.getSystemService(Context.DISPLAY_SERVICE) as DisplayManager
displayManager.registerDisplayListener(displayListener, null)
// Remove the display listeners when the view is detached to avoid holding a reference to
// it outside of the Fragment that owns the view.
// NOTE: Even though using a weak reference should take care of this, we still try to avoid
// unnecessary calls to the listener this way.
viewFinder.addOnAttachStateChangeListener(object : View.OnAttachStateChangeListener {
override fun onViewAttachedToWindow(view: View?) =
displayManager.registerDisplayListener(displayListener, null)
override fun onViewDetachedFromWindow(view: View?) =
displayManager.unregisterDisplayListener(displayListener)
})
}
/** Helper function that fits a camera preview into the given [TextureView] */
private fun updateTransform(textureView: TextureView?, rotation: Int?, newBufferDimens: Size, newViewFinderDimens: Size) {
// This should not happen anyway, but now the linter knows
val textureViewI = textureView ?: return
if (rotation == viewFinderRotation &&
Objects.equals(newBufferDimens, bufferDimens) &&
Objects.equals(newViewFinderDimens, viewFinderDimens)) {
// Nothing has changed, no need to transform output again
return
}
if (rotation == null) {
// Invalid rotation - wait for valid inputs before setting matrix
return
} else {
// Update internal field with new inputs
viewFinderRotation = rotation
}
if (newBufferDimens.width == 0 || newBufferDimens.height == 0) {
// Invalid buffer dimens - wait for valid inputs before setting matrix
return
} else {
// Update internal field with new inputs
bufferDimens = newBufferDimens
}
if (newViewFinderDimens.width == 0 || newViewFinderDimens.height == 0) {
// Invalid view finder dimens - wait for valid inputs before setting matrix
return
} else {
// Update internal field with new inputs
viewFinderDimens = newViewFinderDimens
}
val matrix = Matrix()
Log.d(TAG, "Applying output transformation.\n" +
"View finder size: $viewFinderDimens.\n" +
"Preview output size: $bufferDimens\n" +
"View finder rotation: $viewFinderRotation\n" +
"Preview output rotation: $bufferRotation")
// Compute the center of the view finder
val centerX = viewFinderDimens.width / 2f
val centerY = viewFinderDimens.height / 2f
// Correct preview output to account for display rotation
matrix.postRotate(-viewFinderRotation!!.toFloat(), centerX, centerY)
// Buffers are rotated relative to the device's 'natural' orientation: swap width and height
val bufferRatio = bufferDimens.height / bufferDimens.width.toFloat()
val scaledWidth: Int
val scaledHeight: Int
// Match longest sides together -- i.e. apply center-crop transformation
if (viewFinderDimens.width > viewFinderDimens.height) {
scaledHeight = viewFinderDimens.width
scaledWidth = (viewFinderDimens.width * bufferRatio).roundToInt()
} else {
scaledHeight = viewFinderDimens.height
scaledWidth = (viewFinderDimens.height * bufferRatio).roundToInt()
}
// Compute the relative scale value
val xScale = scaledWidth / viewFinderDimens.width.toFloat()
val yScale = scaledHeight / viewFinderDimens.height.toFloat()
// Scale input buffers to fill the view finder
matrix.preScale(xScale, yScale, centerX, centerY)
// Finally, apply transformations to our TextureView
textureViewI.setTransform(matrix)
}
companion object {
private val TAG = AutoFitPreviewBuilder::class.java.simpleName
/** Helper function that gets the rotation of a [Display] in degrees */
fun getDisplaySurfaceRotation(display: Display?) = when(display?.rotation) {
Surface.ROTATION_0 -> 0
Surface.ROTATION_90 -> 90
Surface.ROTATION_180 -> 180
Surface.ROTATION_270 -> 270
else -> null
}
/**
* Main entry point for users of this class: instantiates the adapter and returns an instance
* of [Preview] which automatically adjusts in size and rotation to compensate for
* config changes.
*/
fun build(config: PreviewConfig, viewFinder: TextureView) =
AutoFitPreviewBuilder(config, WeakReference(viewFinder)).useCase
}
}
Please help.
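Not a full answer, but two things changed shape in beta01 that may be relevant: the provider future is normally resolved via a listener on the main executor rather than calling get() on the UI thread, and Preview no longer attaches to a TextureView directly (it is wired through a Preview.SurfaceProvider, typically a PreviewView), which is why AutoFitPreviewBuilder no longer fits. A sketch of binding just the analyzer, under those assumptions:

val providerFuture = ProcessCameraProvider.getInstance(context)
providerFuture.addListener(Runnable {
    val cameraProvider = providerFuture.get() // safe here: the future has completed
    cameraProvider.unbindAll()
    cameraProvider.bindToLifecycle(
        this@ScannerX, CameraSelector.DEFAULT_BACK_CAMERA, imageAnalyzer)
}, ContextCompat.getMainExecutor(context))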