How to convert streaming bitmaps to video files in Android

I am using CameraX, so I am able to get a Bitmap from the CameraX ImageAnalysis use case:
private val analysis = object : ImageAnalysis.Analyzer {
    override fun analyze(image: ImageProxy) {
        showLogD("hello rotation before", "${image.imageInfo.rotationDegrees}")
        val yBuffer = image.planes[0].buffer // Y
        val vuBuffer = image.planes[2].buffer // VU
        val ySize = yBuffer.remaining()
        val vuSize = vuBuffer.remaining()
        val nv21 = ByteArray(ySize + vuSize)
        yBuffer.get(nv21, 0, ySize)
        vuBuffer.get(nv21, ySize, vuSize)
        val yuvImage = YuvImage(nv21, ImageFormat.NV21, image.width, image.height, null)
        val out = ByteArrayOutputStream()
        yuvImage.compressToJpeg(Rect(0, 0, yuvImage.width, yuvImage.height), 100, out)
        val imageBytes = out.toByteArray()
        var myBitmap = BitmapFactory.decodeByteArray(imageBytes, 0, imageBytes.size)
        val matrix = Matrix()
        matrix.postRotate(90f)
        myBitmap = Bitmap.createBitmap(myBitmap, 0, 0, myBitmap.width, myBitmap.height, matrix, true)
        image.close() // close the frame so the analyzer keeps receiving new images
    }
}
I want to do video recording, but I don't know how to convert these bitmaps into a video file.
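One option is to feed each rotated bitmap into a MediaCodec H.264 encoder through its input Surface and write the encoded frames to an MP4 with MediaMuxer. The class below is only a minimal sketch of that idea, not code from the question: it assumes API 23+ (for Surface.lockHardwareCanvas), a fixed frame size that matches the bitmaps, and it should run off the main thread; a production encoder would more commonly render the bitmaps onto the input surface with OpenGL ES.

import android.graphics.Bitmap
import android.media.MediaCodec
import android.media.MediaCodecInfo
import android.media.MediaFormat
import android.media.MediaMuxer

// Minimal sketch: encode a list of Bitmaps into an MP4 file (H.264 + MediaMuxer).
class BitmapToMp4Encoder(
    private val width: Int,
    private val height: Int,
    private val outputPath: String,
    private val frameRate: Int = 30
) {
    fun encode(frames: List<Bitmap>) {
        val format = MediaFormat.createVideoFormat(MediaFormat.MIMETYPE_VIDEO_AVC, width, height).apply {
            setInteger(MediaFormat.KEY_COLOR_FORMAT, MediaCodecInfo.CodecCapabilities.COLOR_FormatSurface)
            setInteger(MediaFormat.KEY_BIT_RATE, 4_000_000) // example bitrate
            setInteger(MediaFormat.KEY_FRAME_RATE, frameRate)
            setInteger(MediaFormat.KEY_I_FRAME_INTERVAL, 1)
        }
        val codec = MediaCodec.createEncoderByType(MediaFormat.MIMETYPE_VIDEO_AVC)
        codec.configure(format, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE)
        val inputSurface = codec.createInputSurface()
        codec.start()

        val muxer = MediaMuxer(outputPath, MediaMuxer.OutputFormat.MUXER_OUTPUT_MPEG_4)
        var trackIndex = -1
        var muxerStarted = false
        val bufferInfo = MediaCodec.BufferInfo()

        // Drain whatever the encoder has produced so far into the muxer.
        fun drain(endOfStream: Boolean) {
            if (endOfStream) codec.signalEndOfInputStream()
            while (true) {
                val outIndex = codec.dequeueOutputBuffer(bufferInfo, 10_000)
                when {
                    outIndex == MediaCodec.INFO_TRY_AGAIN_LATER -> if (!endOfStream) return
                    outIndex == MediaCodec.INFO_OUTPUT_FORMAT_CHANGED -> {
                        trackIndex = muxer.addTrack(codec.outputFormat)
                        muxer.start()
                        muxerStarted = true
                    }
                    outIndex >= 0 -> {
                        val encoded = codec.getOutputBuffer(outIndex)
                        val isConfig = bufferInfo.flags and MediaCodec.BUFFER_FLAG_CODEC_CONFIG != 0
                        if (encoded != null && muxerStarted && bufferInfo.size > 0 && !isConfig) {
                            muxer.writeSampleData(trackIndex, encoded, bufferInfo)
                        }
                        codec.releaseOutputBuffer(outIndex, false)
                        if (bufferInfo.flags and MediaCodec.BUFFER_FLAG_END_OF_STREAM != 0) return
                    }
                }
            }
        }

        frames.forEach { bitmap ->
            // Draw the bitmap straight onto the encoder's input surface (API 23+);
            // the more robust alternative is rendering with OpenGL ES.
            val canvas = inputSurface.lockHardwareCanvas()
            canvas.drawBitmap(bitmap, 0f, 0f, null)
            inputSurface.unlockCanvasAndPost(canvas)
            drain(endOfStream = false)
        }
        drain(endOfStream = true)

        codec.stop()
        codec.release()
        if (muxerStarted) muxer.stop()
        muxer.release()
        inputSurface.release()
    }
}

If the goal is simply to record the live camera feed, CameraX's VideoCapture use case avoids the bitmap round-trip entirely.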

Related

RecyclerView image not displaying in Kotlin PDF

I have managed to export my RecyclerView to a PDF file, but the image used as a header is not showing in the PDF; only the questions and the checkbox answers appear.
I have tried the following code to render the RecyclerView into the PDF.
This is my activity:
fun getScreenshotFromRecyclerView(view: RecyclerView): Bitmap? {
    val adapter = view.adapter
    var bigBitmap: Bitmap? = null
    if (adapter != null) {
        val size = listado2.size
        var height = 0
        val paint = Paint()
        var iHeight = 0
        val maxMemory = (Runtime.getRuntime().maxMemory() / 1024).toInt()
        // Use 1/8th of the available memory for this memory cache.
        val cacheSize = maxMemory / 8
        val bitmaCache = LruCache<String, Bitmap>(cacheSize)
        for (i in 0 until size) {
            val holder = adapter.createViewHolder(view, adapter.getItemViewType(i))
            adapter.onBindViewHolder(holder, i)
            holder.itemView.measure(
                View.MeasureSpec.makeMeasureSpec(view.width, View.MeasureSpec.EXACTLY),
                View.MeasureSpec.makeMeasureSpec(0, View.MeasureSpec.UNSPECIFIED)
            )
            holder.itemView.layout(0, 0, holder.itemView.measuredWidth, holder.itemView.measuredHeight)
            holder.itemView.isDrawingCacheEnabled = true
            holder.itemView.buildDrawingCache()
            val drawingCache = holder.itemView.drawingCache
            if (drawingCache != null) {
                bitmaCache.put(i.toString(), drawingCache)
            }
            // holder.itemView.setDrawingCacheEnabled(false);
            // holder.itemView.destroyDrawingCache();
            height += holder.itemView.measuredHeight
        }
        bigBitmap = Bitmap.createBitmap(view.measuredWidth, height, Bitmap.Config.ARGB_8888)
        val bigCanvas = Canvas(bigBitmap)
        bigCanvas.drawColor(Color.WHITE)
        for (i in 0 until size) {
            val bitmap = bitmaCache[i.toString()]
            bigCanvas.drawBitmap(bitmap!!, 0f, iHeight.toFloat(), paint)
            iHeight += bitmap.height
            bitmap.recycle()
        }
    }
    return bigBitmap
}
This is my CreateFile.kt class
open fun salvarPDF(bitmap: Bitmap, nombreDeArchivo: String): String? {
    archivo = File(pasta, "$nombreDeArchivo.pdf")
    val archivoPDF = PdfDocument()
    val pageInfo: PdfDocument.PageInfo = PageInfo.Builder(bitmap.width, bitmap.height, 1).create()
    val pagina: PdfDocument.Page = archivoPDF.startPage(pageInfo)
    val canvas: Canvas = pagina.canvas
    canvas.drawBitmap(bitmap, null, Rect(0, 0, bitmap.width, bitmap.height), null)
    archivoPDF.finishPage(pagina)
    try {
        archivo!!.createNewFile()
        val streamDeSalidad: OutputStream = FileOutputStream(archivo)
        archivoPDF.writeTo(streamDeSalidad)
        streamDeSalidad.close()
        archivoPDF.close()
    } catch (e: IOException) {
        return "error en crear$e"
    }
    return "creado"
}
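For reference, the two pieces above are typically wired together roughly like this; createFile (an instance of the class holding salvarPDF), binding.recyclerView and the file name are assumptions, not code from the question:

// Sketch only: take the RecyclerView screenshot and hand it to salvarPDF.
// `createFile`, `binding.recyclerView` and the file name are hypothetical.
val screenshot = getScreenshotFromRecyclerView(binding.recyclerView)
if (screenshot != null) {
    val resultado = createFile.salvarPDF(screenshot, "cuestionario")
    Log.d("PDF", resultado ?: "no result")
}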
This is the result in the PDF (screenshot not included here), but it should come out as in the second screenshot. Any idea where in the code I am failing?
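One thing worth checking is the deprecated drawing-cache API used above, which only captures what was actually drawn into the cache. Below is a minimal sketch (not the original code) that renders each bound item view directly onto the combined bitmap with a Canvas; note that if the header image is loaded asynchronously (Glide, Picasso, etc.), it still has to finish loading before the item view is drawn.

import android.graphics.Bitmap
import android.graphics.Canvas
import android.graphics.Color
import android.view.View
import androidx.recyclerview.widget.RecyclerView

// Sketch: render each item view straight into the big bitmap, no drawing cache.
@Suppress("UNCHECKED_CAST")
fun screenshotRecyclerView(view: RecyclerView): Bitmap? {
    val adapter = view.adapter as? RecyclerView.Adapter<RecyclerView.ViewHolder> ?: return null
    val itemViews = mutableListOf<View>()
    var totalHeight = 0
    for (i in 0 until adapter.itemCount) {
        val holder = adapter.createViewHolder(view, adapter.getItemViewType(i))
        adapter.onBindViewHolder(holder, i)
        holder.itemView.measure(
            View.MeasureSpec.makeMeasureSpec(view.width, View.MeasureSpec.EXACTLY),
            View.MeasureSpec.makeMeasureSpec(0, View.MeasureSpec.UNSPECIFIED)
        )
        holder.itemView.layout(0, 0, holder.itemView.measuredWidth, holder.itemView.measuredHeight)
        itemViews += holder.itemView
        totalHeight += holder.itemView.measuredHeight
    }
    val bigBitmap = Bitmap.createBitmap(view.measuredWidth, totalHeight, Bitmap.Config.ARGB_8888)
    val canvas = Canvas(bigBitmap)
    canvas.drawColor(Color.WHITE)
    var top = 0f
    for (itemView in itemViews) {
        canvas.save()
        canvas.translate(0f, top)
        itemView.draw(canvas) // draws the fully bound view, including ImageViews
        canvas.restore()
        top += itemView.measuredHeight
    }
    return bigBitmap
}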

Why does the CameraX library distort pictures or make them blurry?

So I have implemented CameraX in my new application, but the saved pictures look very bad, with distortion: the image is blurry and has lines everywhere. I tried multiple converters from ImageProxy to Bitmap but the result is the same. I tested on two devices: a Samsung A72 and a Samsung S20 FE.
// Camera Provider Future ------------
cameraProviderFuture.addListener({
    imagePreview = Preview.Builder().apply {
        setTargetAspectRatio(AspectRatio.RATIO_16_9)
    }.build()
    imageCapture = ImageCapture.Builder().apply {
        setCaptureMode(ImageCapture.CAPTURE_MODE_MINIMIZE_LATENCY)
        setTargetAspectRatio(AspectRatio.RATIO_16_9)
        setFlashMode(ImageCapture.FLASH_MODE_AUTO)
    }.build()
    val cameraProvider = cameraProviderFuture.get()
    cameraProvider.unbindAll()
    cameraProvider.bindToLifecycle(this, cameraSelector, imagePreview, imageCapture)
    binding.dtcCameraPreview.implementationMode = PreviewView.ImplementationMode.COMPATIBLE
    imagePreview?.setSurfaceProvider(binding.dtcCameraPreview.surfaceProvider)
}, ContextCompat.getMainExecutor(this))

// Function to convert ImageProxy.image to Bitmap -----------
private fun Image.toBitmap(rotationDegrees: Int): Bitmap {
    val buffer = planes[0].buffer
    val bytes = ByteArray(buffer.remaining())
    buffer.get(bytes)
    val bitmap = BitmapFactory.decodeByteArray(bytes, 0, bytes.size, null)
    val matrix = Matrix()
    matrix.postRotate(rotationDegrees.toFloat())
    return Bitmap.createBitmap(bitmap, 0, 0, bitmap.width, bitmap.height, matrix, true)
}

// imageCapture ---------------
imageCapture?.takePicture(ContextCompat.getMainExecutor(this), object :
    ImageCapture.OnImageCapturedCallback() {
    @SuppressLint("UnsafeOptInUsageError")
    override fun onCaptureSuccess(imageProxy: ImageProxy) {
        val stream = ByteArrayOutputStream()
        imageProxy.image?.toBitmap(90)?.compress(
            Bitmap.CompressFormat.JPEG,
            100,
            stream
        )
        IntentHelper.addObject(stream.toByteArray(), "dtc_camera_data")
        val result = Intent().putExtra(
            DTCCameraContract.DTC_CAMERA_ACTION,
            DTCCameraAction(false, isPictureTaken = true)
        )
        setResult(Activity.RESULT_OK, result)
        imageProxy.close()
        finish()
    }
})
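As a side note (this by itself will not explain the distortion), the rotation can be taken from the frame's metadata instead of being hard-coded to 90. A small sketch reusing the callback and helpers above, with the result Intent handling omitted:

// Sketch: use the rotation reported by CameraX rather than a fixed 90 degrees.
@SuppressLint("UnsafeOptInUsageError")
override fun onCaptureSuccess(imageProxy: ImageProxy) {
    val rotationDegrees = imageProxy.imageInfo.rotationDegrees
    val stream = ByteArrayOutputStream()
    imageProxy.image?.toBitmap(rotationDegrees)?.compress(Bitmap.CompressFormat.JPEG, 100, stream)
    IntentHelper.addObject(stream.toByteArray(), "dtc_camera_data")
    imageProxy.close()
    finish()
}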

How to convert a camera image to use it in TFLite

I have an Android app in Kotlin which depends on CameraX to analyze images and pass them to a TensorFlow Lite image detection model (not classification). The problem is that we get YUV or RGBA output images, and we need an ARGB bitmap to pass to the TFLite model to detect objects. We have been struggling for weeks and we have a deadline soon; please help.
ImageAnalysis.Analyzer {
    private val supportedImages = listOf(ImageFormat.FLEX_RGBA_8888, ImageFormat.FLEX_RGB_888)
    private var data = ""

    @SuppressLint("UnsafeOptInUsageError")
    override fun analyze(image: ImageProxy) {
        if (image.format in supportedImages) {
            val options = ObjectDetector.ObjectDetectorOptions.builder()
                .setScoreThreshold(0.2F)
                .setBaseOptions(BaseOptions.builder().build())
                .build()
            val objectDetector = ObjectDetector.createFromFileAndOptions(
                context, Model.createModel(context, "model.tflite").path, options
            )
            val bitmap = toBitmap(image)
            val tfImage = TensorImage.fromBitmap(bitmap)
            val results: List<Detection> = objectDetector.detect(tfImage)
            for (result in results) {
                val categories = result.categories
                for (category in categories) {
                    val label = category.label
                    val boundingBox = result.boundingBox
                    data += "label : $label | bounding : $boundingBox"
                }
            }
            listener(data)
            image.close()
        }
    }

    private fun toBitmap(image: ImageProxy): Bitmap {
        val r = image.planes[0].buffer.int
        val g = image.planes[1].buffer.int
        val b = image.planes[2].buffer.int
        val a = image.planes[3].buffer.int
        var color = Color.argb(a, r, g, b)
        val rect = Rect(0, 0, 1, 1)
        color = ColorUtils.compositeColors(color, Color.WHITE)
        val bitmap = Bitmap.createBitmap(rect.width(), rect.height(), Bitmap.Config.ARGB_8888)
        val canvas = Canvas(bitmap)
        val paint = Paint()
        paint.color = color
        canvas.drawRect(rect, paint)
        savePic(bitmap = bitmap!!, display_name = Random(100000L).toString())
        return bitmap
    }

    private fun savePic(bitmap: Bitmap, display_name: String): Boolean {
        val mediaCollection = latest {
            MediaStore.Images.Media.getContentUri(MediaStore.VOLUME_EXTERNAL_PRIMARY)
        } ?: MediaStore.Images.Media.EXTERNAL_CONTENT_URI
        val contentValues = ContentValues().apply {
            put(MediaStore.Images.Media.DISPLAY_NAME, "${display_name}.jpg")
            put(MediaStore.Images.Media.MIME_TYPE, "image/jpeg")
            put(MediaStore.Images.Media.WIDTH, bitmap.width)
            put(MediaStore.Images.Media.HEIGHT, bitmap.height)
        }
        return try {
            context.contentResolver.insert(mediaCollection, contentValues)?.also { uri ->
                context.contentResolver.openOutputStream(uri).use { outputStream ->
                    if (!bitmap.compress(Bitmap.CompressFormat.JPEG, 100, outputStream)) {
                        throw IOException("could not save bitmap!")
                    }
                }
            } ?: throw IOException("could not create media store entry")
            true
        } catch (e: IOException) {
            e.printStackTrace()
            false
        }
    }
}
Here is a function that converts a YUV_420_888 ImageProxy into an ARGB-format Bitmap. I know this is a late answer, but I'm providing it for others.
fun imageToBitmap(image: Image): Bitmap {
    val planes: Array<Image.Plane> = image.planes
    val yBuffer: ByteBuffer = planes[0].buffer
    val uBuffer: ByteBuffer = planes[1].buffer
    val vBuffer: ByteBuffer = planes[2].buffer
    val ySize = yBuffer.remaining()
    val uSize = uBuffer.remaining()
    val vSize = vBuffer.remaining()
    val nv21 = ByteArray(ySize + uSize + vSize)
    // U and V are swapped
    yBuffer[nv21, 0, ySize]
    vBuffer[nv21, ySize, vSize]
    uBuffer[nv21, ySize + vSize, uSize]
    val yuvImage = YuvImage(nv21, ImageFormat.NV21, image.width, image.height, null)
    val out = ByteArrayOutputStream()
    yuvImage.compressToJpeg(Rect(0, 0, yuvImage.width, yuvImage.height), 75, out)
    val imageBytes: ByteArray = out.toByteArray()
    return BitmapFactory.decodeByteArray(imageBytes, 0, imageBytes.size)
}
Pass imageProxy.image from the CameraX analyzer to this function and it will return the bitmap.
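For completeness, a sketch of calling it from the analyzer in the question; here objectDetector is assumed to be created once as a field instead of on every frame, and listener is the same callback used above:

// Sketch: wiring imageToBitmap into the analyzer; objectDetector is assumed
// to be a field created once, not rebuilt per frame.
@SuppressLint("UnsafeOptInUsageError")
override fun analyze(imageProxy: ImageProxy) {
    val mediaImage = imageProxy.image
    if (mediaImage != null) {
        val bitmap = imageToBitmap(mediaImage)
        val tfImage = TensorImage.fromBitmap(bitmap)
        val results: List<Detection> = objectDetector.detect(tfImage)
        listener(results.joinToString { "label : ${it.categories.firstOrNull()?.label} | bounding : ${it.boundingBox}" })
    }
    imageProxy.close()
}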

How to use a group of RenderScript filters for adjustments with a SeekBar

I want to achieve something like this with RenderScript filters (an example GIF was attached to the original question).
My problem: all of the RenderScript filters in my code run well, but when I change the filter, the bitmap is reset and only the currently selected filter's value is applied to it.
class Filters(private val context: Context) {

    private val TAG = BrightnessFilter::class.java.simpleName
    private var rs: RenderScript = RenderScript.create(context)
    private var brightnessScript: ScriptC_brightness = ScriptC_brightness(rs)
    private var contrastScript: ScriptC_contrast = ScriptC_contrast(rs)
    private var saturationFilter: ScriptC_saturation = ScriptC_saturation(rs)
    private var convolution: ScriptIntrinsicConvolve3x3 =
        ScriptIntrinsicConvolve3x3.create(rs, Element.U8_4(rs))
    private var vignetteFilter: ScriptC_vignette = ScriptC_vignette(rs)

    fun setBrightnessX(values: Float, inputImage: Bitmap): Bitmap {
        val outBitmap = Bitmap.createBitmap(inputImage)
        val tempIn = Allocation.createFromBitmap(rs, inputImage)
        val tempOut = Allocation.createFromBitmap(rs, outBitmap)
        brightnessScript.invoke_setBright(values)
        brightnessScript.forEach_brightness(tempIn, tempOut)
        tempOut.copyTo(outBitmap)
        return outBitmap
    }

    fun setContrastX(contrast: Float, inputImage: Bitmap): Bitmap {
        val outBitmap = Bitmap.createBitmap(inputImage)
        val tempIn = Allocation.createFromBitmap(rs, inputImage)
        val tempOut = Allocation.createFromBitmap(rs, outBitmap)
        contrastScript._contrast = contrast
        contrastScript.forEach_contrastness(tempIn, tempOut)
        tempOut.copyTo(outBitmap)
        tempIn.destroy()
        tempOut.destroy()
        return outBitmap
    }

    fun setSaturationX(saturation: Float, inputImage: Bitmap): Bitmap {
        val outBitmap = Bitmap.createBitmap(inputImage)
        val tempIn = Allocation.createFromBitmap(rs, inputImage)
        val tempOut = Allocation.createFromBitmap(rs, outBitmap)
        saturationFilter._saturationValue = saturation
        saturationFilter.forEach_saturation(tempIn, tempOut)
        tempOut.copyTo(outBitmap)
        tempIn.destroy()
        tempOut.destroy()
        return outBitmap
    }

    fun sharpenX(sharpen: Float, bitmap: Bitmap): Bitmap {
        val matrix = floatArrayOf(
            0f, -sharpen, 0f,
            -sharpen, 1 + 4 * sharpen, -sharpen,
            0f, -sharpen, 0f
        )
        return applySharpenConvolveX(bitmap, matrix)
    }

    private fun applySharpenConvolveX(inputImage: Bitmap, coefficients: FloatArray): Bitmap {
        val outBitmap = Bitmap.createBitmap(inputImage)
        val tempIn = Allocation.createFromBitmap(rs, inputImage)
        val tempOut = Allocation.createFromBitmap(rs, outBitmap)
        convolution.setInput(tempIn)
        convolution.setCoefficients(coefficients)
        convolution.forEach(tempOut)
        tempOut.copyTo(outBitmap)
        return outBitmap
    }

    fun vignetteX(values: Float, inputImage: Bitmap): Bitmap {
        vignetteFilter.setScale(values)
        return vignetteFilter.x(inputImage)
    }
}
When the slider value changes, currentFilter is called and the output bitmap is shown in the ImageView:
private fun currentFilter(values: Float): Bitmap {
    return when (_recentTab.value!!) {
        FilterName.BRIGHTNESS -> _filter.value!!.setBrightnessX(values, mInputImage)
        FilterName.CONTRAST -> _filter.value!!.setContrastX(values, mInputImage)
        FilterName.SATURATION -> _filter.value!!.setSaturationX(values, mInputImage)
        FilterName.SHARPEN -> _filter.value!!.sharpenX(values, mInputImage)
        FilterName.VIGNETTE -> _filter.value!!.vignetteX(values, mInputImage)
    }
}
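One way to keep earlier adjustments when switching tabs is to store the current value of every filter and re-apply the whole chain to the original bitmap on each slider change. A sketch of that idea, reusing the names from the code above; filterValues, applyAllFilters, onSliderChanged and imageView are assumptions, not code from the question:

// Sketch: remember every slider value and re-apply the full chain to the
// original bitmap, so switching filters does not discard previous edits.
// The default values and `imageView` are assumptions.
private val filterValues = mutableMapOf(
    FilterName.BRIGHTNESS to 0f,
    FilterName.CONTRAST to 1f,
    FilterName.SATURATION to 1f,
    FilterName.SHARPEN to 0f,
    FilterName.VIGNETTE to 0f
)

private fun applyAllFilters(): Bitmap {
    val filters = _filter.value!!
    var result = filters.setBrightnessX(filterValues.getValue(FilterName.BRIGHTNESS), mInputImage)
    result = filters.setContrastX(filterValues.getValue(FilterName.CONTRAST), result)
    result = filters.setSaturationX(filterValues.getValue(FilterName.SATURATION), result)
    result = filters.sharpenX(filterValues.getValue(FilterName.SHARPEN), result)
    result = filters.vignetteX(filterValues.getValue(FilterName.VIGNETTE), result)
    return result
}

private fun onSliderChanged(values: Float) {
    filterValues[_recentTab.value!!] = values
    imageView.setImageBitmap(applyAllFilters())
}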

Is there a way to crop Image/ImageProxy (before passing to MLKit's analyzer)?

I'm using CameraX's Analyzer use case with ML Kit's BarcodeScanner. I would like to crop a portion of the image received from the camera before passing it to the scanner.
What I'm doing right now is converting the ImageProxy (that I receive in the Analyzer) to a Bitmap, cropping it, and then passing it to the BarcodeScanner. The downside is that it's not a very fast or efficient process.
I've also noticed the warning I get in the Logcat when running this code:
ML Kit has detected that you seem to pass camera frames to the
detector as a Bitmap object. This is inefficient. Please use
YUV_420_888 format for camera2 API or NV21 format for (legacy) camera
API and directly pass down the byte array to ML Kit.
It would be nice not to do the ImageProxy conversion at all, but how do I crop to the rectangle I want to analyze?
What I've already tried is setting the cropRect field of the Image (imageProxy.image.cropRect), but it doesn't seem to affect the end result.
Yes, it's true that if you use a ViewPort and set it on your use cases (ImageCapture or ImageAnalysis, as described at https://developer.android.com/training/camerax/configuration), you only get information about the crop rectangle. For ImageCapture the on-disk image is cropped before saving, but for ImageAnalysis (and for ImageCapture without saving to disk) the buffer itself is not cropped. Here is how I solved this problem:
First of all, set the view port for your use cases as described here: https://developer.android.com/training/camerax/configuration
Then get a cropped bitmap to analyze:
override fun analyze(imageProxy: ImageProxy) {
    val mediaImage = imageProxy.image
    if (mediaImage != null && mediaImage.format == ImageFormat.YUV_420_888) {
        croppedBitmap(mediaImage, imageProxy.cropRect).let { bitmap ->
            requestDetectInImage(InputImage.fromBitmap(bitmap, rotation))
                .addOnCompleteListener { imageProxy.close() }
        }
    } else {
        imageProxy.close()
    }
}

private fun croppedBitmap(mediaImage: Image, cropRect: Rect): Bitmap {
    val yBuffer = mediaImage.planes[0].buffer // Y
    val vuBuffer = mediaImage.planes[2].buffer // VU
    val ySize = yBuffer.remaining()
    val vuSize = vuBuffer.remaining()
    val nv21 = ByteArray(ySize + vuSize)
    yBuffer.get(nv21, 0, ySize)
    vuBuffer.get(nv21, ySize, vuSize)
    val yuvImage = YuvImage(nv21, ImageFormat.NV21, mediaImage.width, mediaImage.height, null)
    val outputStream = ByteArrayOutputStream()
    yuvImage.compressToJpeg(cropRect, 100, outputStream)
    val imageBytes = outputStream.toByteArray()
    return BitmapFactory.decodeByteArray(imageBytes, 0, imageBytes.size)
}
There is possibly some loss in conversion speed, but on my devices I did not notice a difference. I set the quality to 100 in compressToJpeg; a lower quality might improve speed, but that needs testing.
Update (May 02 '21):
I found another way that avoids converting to JPEG and then to a bitmap; it should be faster.
Set the viewport as before.
Convert YUV_420_888 to NV21, then crop and analyze:
override fun analyze(imageProxy: ImageProxy) {
    val mediaImage = imageProxy.image
    if (mediaImage != null && mediaImage.format == ImageFormat.YUV_420_888) {
        croppedNV21(mediaImage, imageProxy.cropRect).let { byteArray ->
            requestDetectInImage(
                InputImage.fromByteArray(
                    byteArray,
                    imageProxy.cropRect.width(),
                    imageProxy.cropRect.height(),
                    rotation,
                    IMAGE_FORMAT_NV21,
                )
            ).addOnCompleteListener { imageProxy.close() }
        }
    } else {
        imageProxy.close()
    }
}

private fun croppedNV21(mediaImage: Image, cropRect: Rect): ByteArray {
    val yBuffer = mediaImage.planes[0].buffer // Y
    val vuBuffer = mediaImage.planes[2].buffer // VU
    val ySize = yBuffer.remaining()
    val vuSize = vuBuffer.remaining()
    val nv21 = ByteArray(ySize + vuSize)
    yBuffer.get(nv21, 0, ySize)
    vuBuffer.get(nv21, ySize, vuSize)
    return cropByteArray(nv21, mediaImage.width, cropRect)
}

private fun cropByteArray(array: ByteArray, imageWidth: Int, cropRect: Rect): ByteArray {
    // Note: this loop only keeps the luma (Y) bytes that fall inside cropRect; the VU rows
    // at the end of the NV21 buffer map to y >= image height and are therefore dropped.
    val croppedArray = ByteArray(cropRect.width() * cropRect.height())
    var i = 0
    array.forEachIndexed { index, byte ->
        val x = index % imageWidth
        val y = index / imageWidth
        if (cropRect.left <= x && x < cropRect.right && cropRect.top <= y && y < cropRect.bottom) {
            croppedArray[i] = byte
            i++
        }
    }
    return croppedArray
}
I took the first crop function from here: Android: How to crop images using CameraX?
I also found another crop function, which seems to be more involved:
private fun cropByteArray(src: ByteArray, width: Int, height: Int, cropRect: Rect): ByteArray {
    val x = cropRect.left * 2 / 2
    val y = cropRect.top * 2 / 2
    val w = cropRect.width() * 2 / 2
    val h = cropRect.height() * 2 / 2
    val yUnit = w * h
    val uv = yUnit / 2
    val nData = ByteArray(yUnit + uv)
    val uvIndexDst = w * h - y / 2 * w
    val uvIndexSrc = width * height + x
    var srcPos0 = y * width
    var destPos0 = 0
    var uvSrcPos0 = uvIndexSrc
    var uvDestPos0 = uvIndexDst
    for (i in y until y + h) {
        System.arraycopy(src, srcPos0 + x, nData, destPos0, w) // Y memory block copy
        srcPos0 += width
        destPos0 += w
        if (i and 1 == 0) {
            System.arraycopy(src, uvSrcPos0, nData, uvDestPos0, w) // UV memory block copy
            uvSrcPos0 += width
            uvDestPos0 += w
        }
    }
    return nData
}
I took the second crop function from here:
https://www.programmersought.com/article/75461140907/
I would be glad if someone can help improve the code.
I'm still improving the approach, but this works for me for now.
CameraX: crop the image before sending it to analyze.
<androidx.constraintlayout.widget.ConstraintLayout
    android:layout_width="match_parent"
    android:layout_height="wrap_content"
    android:paddingBottom="@dimen/_40sdp">

    <androidx.camera.view.PreviewView
        android:id="@+id/previewView"
        android:layout_width="match_parent"
        android:layout_height="0dp"
        app:layout_constraintDimensionRatio="1:1"
        app:layout_constraintEnd_toEndOf="parent"
        app:layout_constraintStart_toStartOf="parent"
        app:layout_constraintTop_toTopOf="parent" />

</androidx.constraintlayout.widget.ConstraintLayout>
Cropping an image into 1:1 before passing it to analyze
override fun onCaptureSuccess(image: ImageProxy) {
    super.onCaptureSuccess(image)
    var bitmap: Bitmap = imageProxyToBitmap(image)
    val dimension: Int = min(bitmap.width, bitmap.height)
    bitmap = ThumbnailUtils.extractThumbnail(bitmap, dimension, dimension)
    imageView.setImageBitmap(bitmap) // here you can pass the center-cropped image to analyze
    image.close()
}
Function for converting the ImageProxy into a bitmap:
private fun imageProxyToBitmap(image: ImageProxy): Bitmap {
    val buffer: ByteBuffer = image.planes[0].buffer
    val bytes = ByteArray(buffer.remaining())
    buffer.get(bytes)
    return BitmapFactory.decodeByteArray(bytes, 0, bytes.size)
}
You would use ImageProxy.setCropRect(Rect) to set the crop rectangle and ImageProxy.cropRect to read it back. For example, if you had an imageProxy, you would call imageProxy.setCropRect(rect) and then read imageProxy.cropRect.
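A minimal sketch of that; keep in mind that setCropRect only updates the metadata on the proxy, the pixel buffer itself is untouched, so whatever consumes the frame still has to read and honor cropRect (which is why setting imageProxy.image.cropRect alone did not change the result):

// Sketch: setCropRect changes metadata only; the consumer must honor cropRect itself.
override fun analyze(imageProxy: ImageProxy) {
    val topHalf = Rect(0, 0, imageProxy.width, imageProxy.height / 2)
    imageProxy.setCropRect(topHalf)
    Log.d("Analyzer", "crop rect is now ${imageProxy.cropRect}")
    imageProxy.close()
}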
