I want to crop an image without using any library.
I am taking reference from https://stackoverflow.com/a/6909144 and have tried changing the values, but I can't figure out the solution.
Bitmap bitmap = BitmapUtil.getBitmap(path);
Log.d(TAG, "bitmap width : " + bitmap.getWidth() + " height: " + bitmap.getHeight());
if (bitmap.getWidth() >= bitmap.getHeight()) {
    // Landscape (or square): WIDTH is the larger dimension, so crop horizontally
    // to a centered height x height square. The original branch was correct but
    // its Toast/Log/TextView labels said "Height Greater" — inverted.
    Toast.makeText(this, "Width Greater", Toast.LENGTH_SHORT).show();
    Log.d(TAG, "Greater : Width");
    textView.setText("Width Greater");
    bitmap = Bitmap.createBitmap(
            bitmap,
            bitmap.getWidth() / 2 - bitmap.getHeight() / 2, // x: center horizontally
            0,                                              // y: keep full height
            bitmap.getHeight(),                             // crop width  = height
            bitmap.getHeight()                              // crop height = height
    );
} else {
    // Portrait: HEIGHT is the larger dimension, so crop vertically to a
    // centered width x width square.
    Toast.makeText(this, "Height Greater", Toast.LENGTH_SHORT).show();
    Log.d(TAG, "Greater : Height");
    textView.setText("Height Greater");
    bitmap = Bitmap.createBitmap(
            bitmap,
            0,                                              // x: keep full width
            bitmap.getHeight() / 2 - bitmap.getWidth() / 2, // y: center vertically
            bitmap.getWidth(),                              // crop width  = width
            bitmap.getWidth()                               // crop height = width
    );
}
I want to crop the bitmap image to a square within the rectangle.
For efficiently creating bitmaps, try this
import android.graphics.Bitmap
import android.graphics.BitmapFactory
import java.io.ByteArrayOutputStream
/**
*
* ImageScalingUtils responsible for compressing the bitmap efficiently
*/
object ImageScalingUtils {
    /**
     * Utility function for decoding an image resource. The decoded bitmap will
     * be optimized for further scaling to the requested destination dimensions
     * and scaling logic.
     *
     * Note: re-compressing to PNG just to re-decode is memory-heavy; it is done
     * here so that BitmapFactory's inSampleSize down-sampling can be applied to
     * an in-memory bitmap.
     *
     * @param bm Source bitmap to down-sample
     * @param dstWidth Width of destination area
     * @param dstHeight Height of destination area
     * @param scalingLogic Logic to use to avoid image stretching
     * @return Decoded bitmap
     */
    fun decodeBitmap(
        bm: Bitmap, dstWidth: Int, dstHeight: Int,
        scalingLogic: ScalingLogic
    ): Bitmap {
        val stream = ByteArrayOutputStream()
        bm.compress(Bitmap.CompressFormat.PNG, 100, stream)
        val byteArray = stream.toByteArray()
        val options = BitmapFactory.Options()
        // First pass: bounds only, to learn the source dimensions.
        options.inJustDecodeBounds = true
        BitmapFactory.decodeByteArray(byteArray, 0, byteArray.size, options)
        // Second pass: decode for real with the computed sample size.
        options.inJustDecodeBounds = false
        options.inSampleSize = calculateSampleSize(
            options.outWidth, options.outHeight, dstWidth,
            dstHeight, scalingLogic
        )
        return BitmapFactory.decodeByteArray(byteArray, 0, byteArray.size, options)
    }

    /**
     * ScalingLogic defines how scaling should be carried out if source and
     * destination image has different aspect ratio.
     *
     * CROP: Scales the image the minimum amount while making sure that at least
     * one of the two dimensions fit inside the requested destination area.
     * Parts of the source image will be cropped to realize this.
     *
     * FIT: Scales the image the minimum amount while making sure both
     * dimensions fit inside the requested destination area. The resulting
     * destination dimensions might be adjusted to a smaller size than
     * requested.
     */
    enum class ScalingLogic {
        CROP, FIT
    }

    /**
     * Calculate optimal down-sampling factor given the dimensions of a source
     * image, the dimensions of a destination area and a scaling logic.
     *
     * @param srcWidth Width of source image
     * @param srcHeight Height of source image
     * @param dstWidth Width of destination area
     * @param dstHeight Height of destination area
     * @param scalingLogic Logic to use to avoid image stretching
     * @return Optimal down scaling sample size for decoding (always >= 1)
     */
    private fun calculateSampleSize(
        srcWidth: Int, srcHeight: Int, dstWidth: Int, dstHeight: Int,
        scalingLogic: ScalingLogic
    ): Int {
        val srcAspect = srcWidth.toFloat() / srcHeight.toFloat()
        val dstAspect = dstWidth.toFloat() / dstHeight.toFloat()
        val sampleSize = when (scalingLogic) {
            // FIT: sample along the dimension that overflows the destination most.
            ScalingLogic.FIT ->
                if (srcAspect > dstAspect) srcWidth / dstWidth else srcHeight / dstHeight
            // CROP: sample along the dimension that overflows least, so the
            // remainder of the other dimension can be cropped away.
            ScalingLogic.CROP ->
                if (srcAspect > dstAspect) srcHeight / dstHeight else srcWidth / dstWidth
        }
        // Integer division yields 0 when the source is smaller than the
        // destination; BitmapFactory requires a sample size of at least 1.
        return sampleSize.coerceAtLeast(1)
    }
}
Scaled Picture class to scale your picture
import android.content.ContentResolver
import android.graphics.Bitmap
import android.graphics.BitmapFactory
import android.graphics.Matrix
import android.graphics.RectF
import android.net.Uri
import androidx.exifinterface.media.ExifInterface
import java.io.FileNotFoundException
import java.io.IOException
import java.io.InvalidObjectException
/**
*
* ScaledPicture responsible for compressing the bitmap efficiently
*/
class ScaledPicture(private var uri: Uri?, private var resolver: ContentResolver) {

    private var path: String? = null          // file-system path extracted from uri
    private var orientation: Matrix? = null   // EXIF orientation as a transform matrix
    private var storedHeight: Int = 0         // height of the image as stored on disk
    private var storedWidth: Int = 0          // width of the image as stored on disk

    /** Loads orientation metadata for the uri; false when no usable path exists. */
    @Throws(IOException::class)
    private fun getInformation(): Boolean {
        /*if (getInformationFromMediaDatabase())
            return true;*/
        return getInformationFromFileSystem()
    }

    /* Support for file managers and dropbox */
    @Throws(IOException::class)
    private fun getInformationFromFileSystem(): Boolean {
        path = uri?.path ?: return false
        val exif = ExifInterface(path.toString())
        val exifOrientation = exif.getAttributeInt(
            ExifInterface.TAG_ORIENTATION,
            ExifInterface.ORIENTATION_NORMAL
        )
        // Translate the EXIF orientation tag into a matrix transform;
        // ORIENTATION_NORMAL leaves the identity matrix untouched.
        this.orientation = Matrix()
        when (exifOrientation) {
            ExifInterface.ORIENTATION_NORMAL -> {
                /* Identity matrix */
            }
            ExifInterface.ORIENTATION_FLIP_HORIZONTAL -> this.orientation?.setScale(-1f, 1f)
            ExifInterface.ORIENTATION_ROTATE_180 -> this.orientation?.setRotate(180f)
            ExifInterface.ORIENTATION_FLIP_VERTICAL -> this.orientation?.setScale(1f, -1f)
            ExifInterface.ORIENTATION_TRANSPOSE -> {
                this.orientation?.setRotate(90f)
                this.orientation?.postScale(-1f, 1f)
            }
            ExifInterface.ORIENTATION_ROTATE_90 -> this.orientation?.setRotate(90f)
            ExifInterface.ORIENTATION_TRANSVERSE -> {
                this.orientation?.setRotate(-90f)
                this.orientation?.postScale(-1f, 1f)
            }
            ExifInterface.ORIENTATION_ROTATE_270 -> this.orientation?.setRotate(-90f)
        }
        return true
    }

    /** Reads the stored image dimensions without decoding pixel data. */
    @Throws(IOException::class)
    private fun getStoredDimensions(): Boolean {
        val sourceUri = uri ?: return false
        val options = BitmapFactory.Options()
        options.inJustDecodeBounds = true
        /* decodeStream() only calls mark(1024) on the stream, so the stream cannot
           be reliably reset after a bounds-only decode; open a dedicated stream and
           close it when done (the original opened two streams and leaked one). */
        resolver.openInputStream(sourceUri)?.use { stream ->
            BitmapFactory.decodeStream(stream, null, options)
        }
        if (options.outHeight <= 0 || options.outWidth <= 0)
            return false
        storedHeight = options.outHeight
        storedWidth = options.outWidth
        return true
    }

    /**
     * Decodes, orientation-corrects and down-scales the picture.
     *
     * @param reqWidth requested destination width
     * @param reqHeight requested destination height
     * @throws FileNotFoundException when the uri is missing or has no path
     * @throws IOException on stream errors
     */
    @Throws(IOException::class)
    fun getBitmap(reqWidth: Int, reqHeight: Int): Bitmap {
        // Upper bound on either dimension of the intermediate decode.
        val maxDimension = 1000
        if (!getInformation())
            throw FileNotFoundException()
        if (!getStoredDimensions())
            throw InvalidObjectException(null)
        val sourceUri = uri ?: throw FileNotFoundException()
        // Map the stored size through the EXIF transform so width/height refer
        // to the displayed (possibly rotated) image.
        val rect = RectF(0f, 0f, storedWidth.toFloat(), storedHeight.toFloat())
        orientation?.mapRect(rect)
        var width = rect.width().toInt()
        var height = rect.height().toInt()
        // inSampleSize must be a power of two: halve until both dimensions fit.
        var subSample = 1
        while (width > maxDimension || height > maxDimension) {
            width /= 2
            height /= 2
            subSample *= 2
        }
        if (width == 0 || height == 0)
            throw InvalidObjectException(null)
        val options = BitmapFactory.Options()
        options.inSampleSize = subSample
        // decodeStream() may return null for corrupt data; fail loudly instead
        // of crashing later with a NullPointerException.
        val subSampled = resolver.openInputStream(sourceUri)?.use { stream ->
            BitmapFactory.decodeStream(stream, null, options)
        } ?: throw InvalidObjectException(null)
        // Apply the EXIF transform only when it actually changes the image.
        val transform = orientation
        val picture: Bitmap = if (transform != null && !transform.isIdentity) {
            val corrected = Bitmap.createBitmap(
                subSampled, 0, 0, options.outWidth, options.outHeight,
                transform, false
            )
            subSampled.recycle()
            corrected
        } else {
            subSampled
        }
        return ImageScalingUtils.decodeBitmap(picture, reqWidth, reqHeight, ImageScalingUtils.ScalingLogic.CROP)
    }
}
Copy the above classes to your project, and use it like this
var bitmap = ScaledPicture(mSelectedUri, contentResolver).getBitmap(800, 800)
Pass your desired height and width
Related
I did lot of reading tried so many different methods available. CameraX is producing yuv_420_888 format Image object and provides it to the ImageAnalysis.
However, there is no way to convert this to a bytebuffer in order to scale, convert to bitmap and run detection operations. I tried following and numerous other proposed techniques.
Converting ImageProxy to Bitmap
All those created grayscale (even after using all 3 planes) and some overlay color shade image. It also created glitchy outputs in-between frames sometime which I could not figure out a reason.
What’s the proper way to get a simple byte array so that it can be converted to bitmap later?
Also how to get cameraX authors attention?
/**
 * Flattens a YUV_420_888 [ImageProxy] into an I420-ordered byte array
 * (all Y bytes, then all U, then all V), honoring chroma row padding.
 */
fun imageProxyToByteArray(image: ImageProxy): ByteArray {
    val yuvBytes = ByteArray(image.width * (image.height + image.height / 2))
    val yPlane = image.planes[0].buffer
    val uPlane = image.planes[1].buffer
    val vPlane = image.planes[2].buffer
    // The luma plane is copied wholesale.
    yPlane.get(yuvBytes, 0, image.width * image.height)
    val chromaRowStride = image.planes[1].rowStride
    val chromaRowPadding = chromaRowStride - image.width / 2
    var offset = image.width * image.height
    if (chromaRowPadding == 0) {
        // No padding: each chroma plane can be copied in one go.
        uPlane.get(yuvBytes, offset, image.width * image.height / 4)
        offset += image.width * image.height / 4
        vPlane.get(yuvBytes, offset, image.width * image.height / 4)
    } else {
        // Padded rows: copy row by row, skipping the padding after every row
        // except the last.
        for (i in 0 until image.height / 2) {
            uPlane.get(yuvBytes, offset, image.width / 2)
            offset += image.width / 2
            // BUG FIX: the original condition was `i < image.height / 2 - 2`,
            // which failed to skip the padding after the second-to-last row and
            // misaligned the final U row — a likely cause of the glitchy frames.
            if (i < image.height / 2 - 1) {
                uPlane.position(uPlane.position() + chromaRowPadding)
            }
        }
        for (i in 0 until image.height / 2) {
            vPlane.get(yuvBytes, offset, image.width / 2)
            offset += image.width / 2
            if (i < image.height / 2 - 1) {
                vPlane.position(vPlane.position() + chromaRowPadding)
            }
        }
    }
    return yuvBytes
}
You can use this class ripped from Mlkit Pose Detection.
Mlkit pose detection: BitmapUtils.java
object ImageProxyUtils {
    /**
     * Copies a YUV_420_888 [ImageProxy] into a single NV21-ordered byte array,
     * or returns null when the proxy no longer wraps an Image.
     */
    fun getByteArray(image: ImageProxy): ByteArray? {
        image.image?.let {
            val nv21Buffer = yuv420ThreePlanesToNV21(
                it.planes, image.width, image.height
            )
            return ByteArray(nv21Buffer.remaining()).apply {
                nv21Buffer.get(this)
            }
        }
        return null
    }

    /**
     * Packs the three planes of a YUV_420_888 image into one NV21 buffer
     * (all Y bytes first, then interleaved V/U byte pairs).
     */
    private fun yuv420ThreePlanesToNV21(
        yuv420888planes: Array<Plane>,
        width: Int,
        height: Int
    ): ByteBuffer {
        val imageSize = width * height
        // NV21 layout: imageSize luma bytes + imageSize/2 interleaved chroma bytes.
        val out = ByteArray(imageSize + 2 * (imageSize / 4))
        if (areUVPlanesNV21(yuv420888planes, width, height)) {
            // Fast path: the U and V buffers already alias NV21-ordered memory.
            // Copy Y, then the first V byte, then the rest of U — which covers
            // the remaining interleaved V/U bytes because the buffers overlap.
            yuv420888planes[0].buffer[out, 0, imageSize]
            val uBuffer = yuv420888planes[1].buffer
            val vBuffer = yuv420888planes[2].buffer
            vBuffer[out, imageSize, 1]
            uBuffer[out, imageSize + 1, 2 * imageSize / 4 - 1]
        } else {
            // Slow path: copy each plane sample by sample, honoring its
            // row stride and pixel stride.
            unpackPlane(yuv420888planes[0], width, height, out, 0, 1)
            unpackPlane(yuv420888planes[1], width, height, out, imageSize + 1, 2)
            unpackPlane(yuv420888planes[2], width, height, out, imageSize, 2)
        }
        return ByteBuffer.wrap(out)
    }

    /**
     * Returns true when the U and V buffers physically overlap such that the
     * image is already stored in NV21 order (V plane starting one byte before U).
     */
    private fun areUVPlanesNV21(planes: Array<Plane>, width: Int, height: Int): Boolean {
        val imageSize = width * height
        val uBuffer = planes[1].buffer
        val vBuffer = planes[2].buffer
        // Save position/limit so the buffers can be restored afterwards.
        val vBufferPosition = vBuffer.position()
        val uBufferLimit = uBuffer.limit()
        // Advance V by one byte and shrink U by one byte: if the planes are in
        // NV21 order, the two windows now cover exactly the same memory.
        vBuffer.position(vBufferPosition + 1)
        uBuffer.limit(uBufferLimit - 1)
        val areNV21 =
            vBuffer.remaining() == 2 * imageSize / 4 - 2 && vBuffer.compareTo(uBuffer) == 0
        // Restore the buffers to their original state.
        vBuffer.position(vBufferPosition)
        uBuffer.limit(uBufferLimit)
        return areNV21
    }

    /**
     * Copies one plane into [out], writing every sample [pixelStride] bytes
     * apart starting at [offset], while skipping the plane's own padding.
     */
    private fun unpackPlane(
        plane: Plane,
        width: Int,
        height: Int,
        out: ByteArray,
        offset: Int,
        pixelStride: Int
    ) {
        val buffer = plane.buffer
        buffer.rewind()
        // Number of rows actually present (chroma planes have height / 2 rows).
        val numRow = (buffer.limit() + plane.rowStride - 1) / plane.rowStride
        if (numRow == 0) {
            return
        }
        val scaleFactor = height / numRow
        val numCol = width / scaleFactor
        var outputPos = offset
        var rowStart = 0
        for (row in 0 until numRow) {
            var inputPos = rowStart
            for (col in 0 until numCol) {
                out[outputPos] = buffer[inputPos]
                outputPos += pixelStride
                inputPos += plane.pixelStride
            }
            rowStart += plane.rowStride
        }
    }
}
You just need to use imageProxy.image?.toBitmap() to convert the ImageProxy, and then convert the bitmap to a byte array as follows:
Here's an example:
/** Captures one photo and converts it to a PNG byte array. */
private fun takePhoto() {
    camera_capture_button.isEnabled = false
    // Get a stable reference of the modifiable image capture use case
    val imageCapture = imageCapture ?: return
    imageCapture.takePicture(
        ContextCompat.getMainExecutor(this),
        object : ImageCapture.OnImageCapturedCallback() {
            @SuppressLint("UnsafeExperimentalUsageError")
            override fun onCaptureSuccess(imageProxy: ImageProxy) {
                // image/toBitmap() may yield null; guard instead of crashing
                // on a failed capture (original dereferenced a nullable).
                val bitmapImage = imageProxy.image?.toBitmap()
                if (bitmapImage != null) {
                    val stream = ByteArrayOutputStream()
                    bitmapImage.compress(Bitmap.CompressFormat.PNG, 90, stream)
                    val image = stream.toByteArray()
                    // TODO: hand `image` off to whatever consumes the bytes.
                }
                // Release the underlying image buffer so the pipeline can proceed.
                imageProxy.close()
            }

            override fun onError(exception: ImageCaptureException) {
                super.onError(exception)
            }
        })
}
I found that solution (https://itnext.io/converting-pytorch-float-tensor-to-android-rgba-bitmap-with-kotlin-ffd4602a16b6) but when I tried to convert that way I found that the size of inputTensor.dataAsFloatArray is more than bitmap.width*bitmap.height. How works converting tensor to float array or is there any other possible method to convert pytorch tensor to bitmap?
val inputTensor = TensorImageUtils.bitmapToFloat32Tensor(
bitmap,
TensorImageUtils.TORCHVISION_NORM_MEAN_RGB, TensorImageUtils.TORCHVISION_NORM_STD_RGB
)
// Float array size is 196608 when width and height are 256x256 = 65536
val res = floatArrayToGrayscaleBitmap(inputTensor.dataAsFloatArray, bitmap.width, bitmap.height)
/**
 * Renders a float array as a grayscale ARGB_8888 bitmap, linearly mapping
 * [minValue, maxValue] onto 0..255 (or 255..0 when [reverseScale] is true).
 *
 * Only the first width * height values are consumed: a tensor produced from an
 * RGB bitmap holds 3 * width * height floats (CHW), and writing one pixel per
 * float would overflow the pixel buffer (the original crashed on exactly this).
 */
fun floatArrayToGrayscaleBitmap(
    floatArray: FloatArray,
    width: Int,
    height: Int,
    alpha: Byte = (255).toByte(),
    reverseScale: Boolean = false
): Bitmap {
    // Create empty bitmap in RGBA format (the config says ARGB, but
    // copyPixelsFromBuffer() consumes the buffer in RGBA channel order)
    val bmp = Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_8888)
    val byteBuffer = ByteBuffer.allocate(width * height * 4)
    Log.d("App", floatArray.size.toString() + " " + (width * height * 4).toString())
    // mapping smallest value to 0 and largest value to 255
    val maxValue = floatArray.maxOrNull() ?: 1.0f
    val minValue = floatArray.minOrNull() ?: 0.0f
    // Guard against a constant-valued array: delta of 0 would divide by zero.
    val delta = (maxValue - minValue).takeIf { it != 0f } ?: 1.0f
    var tempValue: Byte
    // Define if float min..max will be mapped to 0..255 or 255..0
    val conversion = when (reverseScale) {
        false -> { v: Float -> ((v - minValue) / delta * 255).toByte() }
        true -> { v: Float -> (255 - (v - minValue) / delta * 255).toByte() }
    }
    // BUG FIX: iterate only as many values as the buffer has pixels; the
    // original iterated floatArray.size and wrote past the buffer capacity
    // whenever the array was larger than width * height.
    val pixelCount = minOf(floatArray.size, width * height)
    for (i in 0 until pixelCount) {
        tempValue = conversion(floatArray[i])
        byteBuffer.put(4 * i, tempValue)
        byteBuffer.put(4 * i + 1, tempValue)
        byteBuffer.put(4 * i + 2, tempValue)
        byteBuffer.put(4 * i + 3, alpha)
    }
    bmp.copyPixelsFromBuffer(byteBuffer)
    return bmp
}
None of the answers were able to produce the output I wanted, so this is what I came up with - it is basically only reverse engineered version of what happenes in TensorImageUtils.bitmapToFloat32Tensor().
Please note that this function only works if you are using MemoryFormat.CONTIGUOUS (which is default) in TensorImageUtils.bitmapToFloat32Tensor().
/**
 * De-normalizes a CHW (MemoryFormat.CONTIGUOUS) float tensor back into an
 * ARGB_8888 bitmap, reversing TensorImageUtils.bitmapToFloat32Tensor().
 */
fun tensor2Bitmap(input: FloatArray, width: Int, height: Int, normMeanRGB: FloatArray, normStdRGB: FloatArray): Bitmap? {
    val pixelCount = height * width
    val argbPixels = IntArray(pixelCount)
    val result = Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_8888)
    // Undo normalization, clamp to [0, 1], then scale into a 0..255 channel value.
    fun toChannel(v: Float): Int = (v.coerceIn(0.0f, 1.0f) * 255.0f).roundToInt()
    // Channel planes are laid out back to back: R, then G, then B.
    val greenOffset = pixelCount
    val blueOffset = 2 * pixelCount
    for (i in 0 until pixelCount) {
        val r = toChannel(input[i] * normStdRGB[0] + normMeanRGB[0])
        val g = toChannel(input[i + greenOffset] * normStdRGB[1] + normMeanRGB[1])
        val b = toChannel(input[i + blueOffset] * normStdRGB[2] + normMeanRGB[2])
        // Pack opaque ARGB: alpha 255 in the top byte, then R, G, B.
        argbPixels[i] = 255 shl 24 or (r and 0xff shl 16) or (g and 0xff shl 8) or (b and 0xff)
    }
    result.setPixels(argbPixels, 0, width, 0, 0, width, height)
    return result
}
Example usage then could be as follows:
tensor2Bitmap(outputTensor.dataAsFloatArray, bitmap.width, bitmap.height, TensorImageUtils.TORCHVISION_NORM_MEAN_RGB, TensorImageUtils.TORCHVISION_NORM_STD_RGB)
// I faced the same problem, and I found the function itself
TensorImageUtils.bitmapToFloat32Tensor()
tortures the RGB colorspace. You should try to convert yuv to a bitmap and use
TensorImageUtils.bitmapToFloat32Tensor
instead for NOW.
// I modified the code from phillies (up) to get the coloful bitmap. Note that the format of an output tensor is typically NCHW.
// Here's my function in Kotlin. Hopefully it works in your case:
/**
 * Converts a CHW float tensor (R plane, then G, then B) into a colorful
 * ARGB_8888 bitmap, rescaling values so min maps to 0 and max to 255.
 */
private fun floatArrayToBitmap(floatArray: FloatArray, width: Int, height: Int): Bitmap {
    // Create empty bitmap in ARGB format
    val bmp: Bitmap = Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_8888)
    // One packed color int per pixel; the original allocated width*height*4,
    // four times more than setPixels() ever reads.
    val pixels = IntArray(width * height)
    // mapping smallest value to 0 and largest value to 255
    // (maxOrNull/minOrNull handle an empty array without deprecated calls)
    val maxValue = floatArray.maxOrNull() ?: 1.0f
    val minValue = floatArray.minOrNull() ?: -1.0f
    // Guard against a constant tensor, which would make delta 0 and divide by zero.
    val delta = (maxValue - minValue).takeIf { it != 0f } ?: 1.0f
    // Define if float min..max will be mapped to 0..255 or 255..0
    val conversion = { v: Float -> ((v - minValue) / delta * 255.0f).roundToInt() }
    // copy each value from the CHW float array to RGB channels
    for (i in 0 until width * height) {
        val r = conversion(floatArray[i])
        val g = conversion(floatArray[i + width * height])
        val b = conversion(floatArray[i + 2 * width * height])
        pixels[i] = rgb(r, g, b) // you might need to import for rgb()
    }
    bmp.setPixels(pixels, 0, width, 0, 0, width, height)
    return bmp
}
Hopefully future releases of PyTorch Mobile will fix this bug.
I am using Android's default camera to capture my intent. The images that come out are of really good quality and I cannot seem to find a way to lower the quality of the images.
Is that even possible without implementing a custom Camera ?
Is it possible to set like size limit of maximum 2MB or something like that?
Or just take the image in the lowest quality possible as the images in my application do not need to be of good quality.
public class ImageCaptureIntent {
public interface ImageCaptureResultListener {
void onImageCaptured(File image);
void onImageCaptureError(Exception exception);
}
static final int IMAGE_CAPTURE_REQUEST = 1;
private enum BundleKeys {
IMAGE_FILE
}
private File imageFile;
public void onSaveInstanceState(#NonNull Bundle outState) {
if (imageFile != null) {
outState.putString(BundleKeys.IMAGE_FILE.name(), imageFile.getAbsolutePath());
}
}
public void onRestoreInstanceState(#NonNull Bundle savedInstanceState) {
if (savedInstanceState.containsKey(BundleKeys.IMAGE_FILE.name())) {
imageFile = new File(savedInstanceState.getString(BundleKeys.IMAGE_FILE.name()));
}
}
private static File createTempFile(File directory) throws IOException {
String timestamp = new SimpleDateFormat("yyyyMMdd_HHmmss").format(new Date());
String filePrefix = "IMG_" + timestamp + "_";
File file = File.createTempFile(filePrefix,".jpg", directory);
if (file == null) {
throw new IOException("Could not create a temp file");
}
return file;
}
public boolean initiateImageCapture(ImageCaptureResultListener listener, Activity activity, File directory) {
if (listener == null) {
return false;
}
Intent captureIntent = new Intent(MediaStore.ACTION_IMAGE_CAPTURE);
if (captureIntent.resolveActivity(activity.getPackageManager()) == null) {
listener.onImageCaptureError(new ActivityNotFoundException("No app for ACTION_IMAGE_CAPTURE"));
return false;
}
try {
this.imageFile = createTempFile(directory);
} catch (IOException e) {
listener.onImageCaptureError(e);
return false;
}
Uri imageUri = FileProvider.getUriForFile(activity,activity.getPackageName() + ".fileprovider", this.imageFile);
captureIntent.putExtra(MediaStore.EXTRA_OUTPUT, imageUri);
activity.startActivityForResult(captureIntent, IMAGE_CAPTURE_REQUEST);
return true;
}
public boolean parseActivityResult(ImageCaptureResultListener listener, int requestCode, int resultCode, Intent data) {
if (requestCode != IMAGE_CAPTURE_REQUEST) {
return false;
}
if (listener == null) {
return false;
}
if (resultCode == Activity.RESULT_OK) {
listener.onImageCaptured(imageFile);
} else {
listener.onImageCaptureError(new RuntimeException("Image capturing was cancelled"));
}
return true;
}
}
EDIT
I am not using Bitmaps in my application. I am taking images and then sending them to the backend. In perfect scenario I would like to capture low quality images and then save them to the phone if possible. If that is not possible then I would like to at least send the compressed images to backend.
When you get the path from intent then use it.
CompressBitMap().execute(Uri.fromFile(File(mImagePath)))
/** Background task that re-scales the picture at [mImagePath] to 400x400 and saves it as PNG. */
inner class CompressBitMap : AsyncTask<Uri, Int, File>() {
    override fun doInBackground(vararg p0: Uri?): File? {
        val filename = "${Date().time}profile.png"
        val fileDir = File(Environment.getExternalStorageDirectory(), getString(R.string.app_name))
        if (!fileDir.exists()) {
            fileDir.mkdir()
        }
        val destPath = File(fileDir, filename)
        try {
            // use {} guarantees the stream is closed even when decoding or
            // compressing throws; the original opened the FileOutputStream
            // outside the try block and leaked it on failure.
            FileOutputStream(destPath).use { outputStream ->
                val bitmap: Bitmap = ScaledPicture(p0[0], activity.contentResolver).getBitmap(400, 400)
                bitmap.compress(Bitmap.CompressFormat.PNG, 100, outputStream)
                outputStream.flush()
            }
        } catch (e: Exception) {
            e.printStackTrace()
        }
        return destPath
    }

    override fun onPostExecute(result: File?) {
        super.onPostExecute(result)
        result?.let {
            mImagePath = result.absolutePath
            setProfileImage(mImagePath, image_circle, null)
        }
    }
}
ScaledPicture and ImageScalingUtils are the two classes responsible for reducing the image size.
ScalePicture:=>
import android.content.ContentResolver
import android.graphics.Bitmap
import android.graphics.BitmapFactory
import android.graphics.Matrix
import android.graphics.RectF
import android.media.ExifInterface
import android.net.Uri
import com.silverskysoft.skysalon.imageUtils.ImageScalingUtils
import java.io.FileNotFoundException
import java.io.IOException
import java.io.InvalidObjectException
class ScaledPicture(private var uri: Uri?, private var resolver: ContentResolver) {

    private var path: String? = null          // file-system path extracted from uri
    private var orientation: Matrix? = null   // EXIF orientation as a transform matrix
    private var storedHeight: Int = 0         // height of the image as stored on disk
    private var storedWidth: Int = 0          // width of the image as stored on disk

    /** Loads orientation metadata for the uri; false when no usable path exists. */
    @Throws(IOException::class)
    private fun getInformation(): Boolean {
        /*if (getInformationFromMediaDatabase())
            return true;*/
        return getInformationFromFileSystem()
    }

    /* Support for file managers and dropbox */
    @Throws(IOException::class)
    private fun getInformationFromFileSystem(): Boolean {
        path = uri?.path ?: return false
        val exif = ExifInterface(path.toString())
        val exifOrientation = exif.getAttributeInt(
            ExifInterface.TAG_ORIENTATION,
            ExifInterface.ORIENTATION_NORMAL
        )
        // Translate the EXIF orientation tag into a matrix transform;
        // ORIENTATION_NORMAL leaves the identity matrix untouched.
        this.orientation = Matrix()
        when (exifOrientation) {
            ExifInterface.ORIENTATION_NORMAL -> {
                /* Identity matrix */
            }
            ExifInterface.ORIENTATION_FLIP_HORIZONTAL -> this.orientation?.setScale(-1f, 1f)
            ExifInterface.ORIENTATION_ROTATE_180 -> this.orientation?.setRotate(180f)
            ExifInterface.ORIENTATION_FLIP_VERTICAL -> this.orientation?.setScale(1f, -1f)
            ExifInterface.ORIENTATION_TRANSPOSE -> {
                this.orientation?.setRotate(90f)
                this.orientation?.postScale(-1f, 1f)
            }
            ExifInterface.ORIENTATION_ROTATE_90 -> this.orientation?.setRotate(90f)
            ExifInterface.ORIENTATION_TRANSVERSE -> {
                this.orientation?.setRotate(-90f)
                this.orientation?.postScale(-1f, 1f)
            }
            ExifInterface.ORIENTATION_ROTATE_270 -> this.orientation?.setRotate(-90f)
        }
        return true
    }

    /** Reads the stored image dimensions without decoding pixel data. */
    @Throws(IOException::class)
    private fun getStoredDimensions(): Boolean {
        val sourceUri = uri ?: return false
        val options = BitmapFactory.Options()
        options.inJustDecodeBounds = true
        /* decodeStream() only calls mark(1024) on the stream, so the stream cannot
           be reliably reset after a bounds-only decode; open a dedicated stream and
           close it when done (the original opened two streams and leaked one). */
        resolver.openInputStream(sourceUri)?.use { stream ->
            BitmapFactory.decodeStream(stream, null, options)
        }
        if (options.outHeight <= 0 || options.outWidth <= 0)
            return false
        storedHeight = options.outHeight
        storedWidth = options.outWidth
        return true
    }

    /**
     * Decodes, orientation-corrects and down-scales the picture.
     *
     * @param reqWidth requested destination width
     * @param reqHeight requested destination height
     * @throws FileNotFoundException when the uri is missing or has no path
     * @throws IOException on stream errors
     */
    @Throws(IOException::class)
    fun getBitmap(reqWidth: Int, reqHeight: Int): Bitmap {
        // Upper bound on either dimension of the intermediate decode.
        val maxDimension = 1000
        if (!getInformation())
            throw FileNotFoundException()
        if (!getStoredDimensions())
            throw InvalidObjectException(null)
        val sourceUri = uri ?: throw FileNotFoundException()
        // Map the stored size through the EXIF transform so width/height refer
        // to the displayed (possibly rotated) image.
        val rect = RectF(0f, 0f, storedWidth.toFloat(), storedHeight.toFloat())
        orientation?.mapRect(rect)
        var width = rect.width().toInt()
        var height = rect.height().toInt()
        // inSampleSize must be a power of two: halve until both dimensions fit.
        var subSample = 1
        while (width > maxDimension || height > maxDimension) {
            width /= 2
            height /= 2
            subSample *= 2
        }
        if (width == 0 || height == 0)
            throw InvalidObjectException(null)
        val options = BitmapFactory.Options()
        options.inSampleSize = subSample
        // decodeStream() may return null for corrupt data; fail loudly instead
        // of crashing later with a NullPointerException.
        val subSampled = resolver.openInputStream(sourceUri)?.use { stream ->
            BitmapFactory.decodeStream(stream, null, options)
        } ?: throw InvalidObjectException(null)
        // Apply the EXIF transform only when it actually changes the image.
        val transform = orientation
        val picture: Bitmap = if (transform != null && !transform.isIdentity) {
            val corrected = Bitmap.createBitmap(
                subSampled, 0, 0, options.outWidth, options.outHeight,
                transform, false
            )
            subSampled.recycle()
            corrected
        } else {
            subSampled
        }
        return ImageScalingUtils.decodeBitmap(picture, reqWidth, reqHeight, ImageScalingUtils.ScalingLogic.CROP)
    }
}
ImageScalingUtils:=>
import android.graphics.Bitmap
import android.graphics.BitmapFactory
import java.io.ByteArrayOutputStream
/**
* Created by Avinash on 7/8/19.
* ImageScalingUtils responsible for compressing the bitmap efficiently
*/
object ImageScalingUtils {
    /**
     * Utility function for decoding an image resource. The decoded bitmap will
     * be optimized for further scaling to the requested destination dimensions
     * and scaling logic.
     *
     * Note: re-compressing to PNG just to re-decode is memory-heavy; it is done
     * here so that BitmapFactory's inSampleSize down-sampling can be applied to
     * an in-memory bitmap.
     *
     * @param bm Source bitmap to down-sample
     * @param dstWidth Width of destination area
     * @param dstHeight Height of destination area
     * @param scalingLogic Logic to use to avoid image stretching
     * @return Decoded bitmap
     */
    fun decodeBitmap(
        bm: Bitmap, dstWidth: Int, dstHeight: Int,
        scalingLogic: ScalingLogic
    ): Bitmap {
        val stream = ByteArrayOutputStream()
        bm.compress(Bitmap.CompressFormat.PNG, 100, stream)
        val byteArray = stream.toByteArray()
        val options = BitmapFactory.Options()
        // First pass: bounds only, to learn the source dimensions.
        options.inJustDecodeBounds = true
        BitmapFactory.decodeByteArray(byteArray, 0, byteArray.size, options)
        // Second pass: decode for real with the computed sample size.
        options.inJustDecodeBounds = false
        options.inSampleSize = calculateSampleSize(
            options.outWidth, options.outHeight, dstWidth,
            dstHeight, scalingLogic
        )
        return BitmapFactory.decodeByteArray(byteArray, 0, byteArray.size, options)
    }

    /**
     * ScalingLogic defines how scaling should be carried out if source and
     * destination image has different aspect ratio.
     *
     * CROP: Scales the image the minimum amount while making sure that at least
     * one of the two dimensions fit inside the requested destination area.
     * Parts of the source image will be cropped to realize this.
     *
     * FIT: Scales the image the minimum amount while making sure both
     * dimensions fit inside the requested destination area. The resulting
     * destination dimensions might be adjusted to a smaller size than
     * requested.
     */
    enum class ScalingLogic {
        CROP, FIT
    }

    /**
     * Calculate optimal down-sampling factor given the dimensions of a source
     * image, the dimensions of a destination area and a scaling logic.
     *
     * @param srcWidth Width of source image
     * @param srcHeight Height of source image
     * @param dstWidth Width of destination area
     * @param dstHeight Height of destination area
     * @param scalingLogic Logic to use to avoid image stretching
     * @return Optimal down scaling sample size for decoding (always >= 1)
     */
    private fun calculateSampleSize(
        srcWidth: Int, srcHeight: Int, dstWidth: Int, dstHeight: Int,
        scalingLogic: ScalingLogic
    ): Int {
        val srcAspect = srcWidth.toFloat() / srcHeight.toFloat()
        val dstAspect = dstWidth.toFloat() / dstHeight.toFloat()
        val sampleSize = when (scalingLogic) {
            // FIT: sample along the dimension that overflows the destination most.
            ScalingLogic.FIT ->
                if (srcAspect > dstAspect) srcWidth / dstWidth else srcHeight / dstHeight
            // CROP: sample along the dimension that overflows least, so the
            // remainder of the other dimension can be cropped away.
            ScalingLogic.CROP ->
                if (srcAspect > dstAspect) srcHeight / dstHeight else srcWidth / dstWidth
        }
        // Integer division yields 0 when the source is smaller than the
        // destination; BitmapFactory requires a sample size of at least 1.
        return sampleSize.coerceAtLeast(1)
    }
}
You should not use a file and fileprovider. Leave all empty.
Then you will get a Bitmap of a thumbnail in onActivityResult.
Bitmap bitmap = (Bitmap)data.getData();
With Android CameraX Analyzer ImageProxy uses ImageReader under the hood with a default YUV_420_888 image format.
I'd like to convert it in OpenCV Mat in order to use OpenCV inside my analyzer:
/**
 * CameraX [ImageAnalysis.Analyzer] entry point: routes each YUV_420_888 frame
 * to the conversion branch (still a TODO) and ignores other formats.
 */
override fun analyze(imageProxy: ImageProxy, rotationDegrees: Int) {
    try {
        imageProxy.image?.let {
            // ImageProxy uses an ImageReader under the hood:
            // https://developer.android.com/reference/androidx/camera/core/ImageProxy.html
            // That has a default format of YUV_420_888 if not changed that's the default
            // Android camera format.
            // https://developer.android.com/reference/android/graphics/ImageFormat.html#YUV_420_888
            // https://developer.android.com/reference/android/media/ImageReader.html
            // Sanity check
            if (it.format == ImageFormat.YUV_420_888
                && it.planes.size == 3
            ) {
                // TODO - convert ImageProxy.image to Mat
            } else {
                // Manage other image formats
                // TODO - https://developer.android.com/reference/android/media/Image.html
            }
        }
    } catch (ise: IllegalStateException) {
        // The image is no longer valid (e.g. the proxy was closed mid-frame); skip it.
        ise.printStackTrace()
    }
}
How can I do that?
Looking at OpenCV JavaCamera2Frame class in its GitHub repo you can write an Image extension function like that:
(ported to Kotlin)
// Ported from opencv private class JavaCamera2Frame
/**
 * Converts a YUV_420_888 [Image] into an RGBA OpenCV [Mat].
 *
 * Handles both interleaved (NV12/NV21-style, chroma pixel stride 2) and planar
 * (I420-style) chroma layouts. Returns an empty Mat for unsupported formats.
 */
fun Image.yuvToRgba(): Mat {
    val rgbaMat = Mat()
    if (format == ImageFormat.YUV_420_888
        && planes.size == 3) {
        val chromaPixelStride = planes[1].pixelStride
        if (chromaPixelStride == 2) { // Chroma channels are interleaved
            assert(planes[0].pixelStride == 1)
            assert(planes[2].pixelStride == 2)
            val yPlane = planes[0].buffer
            val uvPlane1 = planes[1].buffer
            val uvPlane2 = planes[2].buffer
            // Wrap the plane buffers directly — no copying.
            val yMat = Mat(height, width, CvType.CV_8UC1, yPlane)
            val uvMat1 = Mat(height / 2, width / 2, CvType.CV_8UC2, uvPlane1)
            val uvMat2 = Mat(height / 2, width / 2, CvType.CV_8UC2, uvPlane2)
            // The interleaved chroma buffers overlap by one byte; the sign of the
            // address difference tells whether U leads (NV12) or V leads (NV21).
            val addrDiff = uvMat2.dataAddr() - uvMat1.dataAddr()
            if (addrDiff > 0) {
                assert(addrDiff == 1L)
                Imgproc.cvtColorTwoPlane(yMat, uvMat1, rgbaMat, Imgproc.COLOR_YUV2RGBA_NV12)
            } else {
                assert(addrDiff == -1L)
                Imgproc.cvtColorTwoPlane(yMat, uvMat2, rgbaMat, Imgproc.COLOR_YUV2RGBA_NV21)
            }
        } else { // Chroma channels are not interleaved
            val yuvBytes = ByteArray(width * (height + height / 2))
            val yPlane = planes[0].buffer
            val uPlane = planes[1].buffer
            val vPlane = planes[2].buffer
            yPlane.get(yuvBytes, 0, width * height)
            val chromaRowStride = planes[1].rowStride
            val chromaRowPadding = chromaRowStride - width / 2
            var offset = width * height
            if (chromaRowPadding == 0) {
                // When the row stride of the chroma channels equals their width, we can copy
                // the entire channels in one go
                uPlane.get(yuvBytes, offset, width * height / 4)
                offset += width * height / 4
                vPlane.get(yuvBytes, offset, width * height / 4)
            } else {
                // When not equal, we need to copy the channels row by row
                for (i in 0 until height / 2) {
                    uPlane.get(yuvBytes, offset, width / 2)
                    offset += width / 2
                    // Skip the per-row padding except after the final row.
                    if (i < height / 2 - 1) {
                        uPlane.position(uPlane.position() + chromaRowPadding)
                    }
                }
                for (i in 0 until height / 2) {
                    vPlane.get(yuvBytes, offset, width / 2)
                    offset += width / 2
                    if (i < height / 2 - 1) {
                        vPlane.position(vPlane.position() + chromaRowPadding)
                    }
                }
            }
            // Stack Y plus packed chroma into one I420 buffer and convert in a
            // single pass.
            val yuvMat = Mat(height + height / 2, width, CvType.CV_8UC1)
            yuvMat.put(0, 0, yuvBytes)
            Imgproc.cvtColor(yuvMat, rgbaMat, Imgproc.COLOR_YUV2RGBA_I420, 4)
        }
    }
    return rgbaMat
}
And so you can write:
/**
 * CameraX analyzer that converts each YUV_420_888 frame to an RGBA [Mat]
 * via the [Image.yuvToRgba] extension above.
 */
override fun analyze(imageProxy: ImageProxy, rotationDegrees: Int) {
    try {
        imageProxy.image?.let {
            // ImageProxy uses an ImageReader under the hood:
            // https://developer.android.com/reference/androidx/camera/core/ImageProxy.html
            // That has a default format of YUV_420_888 if not changed that's the default
            // Android camera format.
            // https://developer.android.com/reference/android/graphics/ImageFormat.html#YUV_420_888
            // https://developer.android.com/reference/android/media/ImageReader.html
            // Sanity check
            if (it.format == ImageFormat.YUV_420_888
                && it.planes.size == 3
            ) {
                // NOTE(review): rgbaMat is computed but unused here — presumably a
                // placeholder for the actual OpenCV processing step.
                val rgbaMat = it.yuvToRgba()
            } else {
                // Manage other image formats
                // TODO - https://developer.android.com/reference/android/media/Image.html
            }
        }
    } catch (ise: IllegalStateException) {
        // The frame was closed/invalidated mid-analysis; drop it.
        ise.printStackTrace()
    }
}
private Mat convertYUVtoMat(#NonNull Image img) {
byte[] nv21;
ByteBuffer yBuffer = img.getPlanes()[0].getBuffer();
ByteBuffer uBuffer = img.getPlanes()[1].getBuffer();
ByteBuffer vBuffer = img.getPlanes()[2].getBuffer();
int ySize = yBuffer.remaining();
int uSize = uBuffer.remaining();
int vSize = vBuffer.remaining();
nv21 = new byte[ySize + uSize + vSize];
yBuffer.get(nv21, 0, ySize);
vBuffer.get(nv21, ySize, vSize);
uBuffer.get(nv21, ySize + vSize, uSize);
Mat yuv = new Mat(img.getHeight() + img.getHeight()/2, img.getWidth(), CvType.CV_8UC1);
yuv.put(0, 0, nv21);
Mat rgb = new Mat();
Imgproc.cvtColor(yuv, rgb, Imgproc.COLOR_YUV2RGB_NV21, 3);
Core.rotate(rgb, rgb, Core.ROTATE_90_CLOCKWISE);
return rgb;
}
This method converts a CameraX API YUV_420_888 image to OpenCV's Mat (RGB) object.
(Working 2021)
@shadowsheep's solution is just fine if you need to get an OpenCV Mat.
But if you want to get a Bitmap and don't want to add the OpenCV library to your project, you can take a look at the RenderScript solution in the android/camera-samples repo.
Also, I made a single-Java-file library on GitHub. It will be useful if you want to get a correct ByteBuffer without any row or pixel strides for further processing (for instance with a neural network engine).
I also compared all these approaches. OpenCV is the fastest.
I'm trying to scale the images shown in the TextView, but I just can't.
I'm using this code, but no matter what, it shows the image cropped inside the container or doesn't show it at all.
// NOTE(review): fragment from an Html.ImageGetter callback — scales the fetched
// drawable so it never exceeds the screen width, then forces the container
// TextView to re-layout.
int width, height;
// Read the system metrics directly; the original allocated a throwaway
// `new DisplayMetrics()` that was immediately overwritten — dead allocation removed.
DisplayMetrics metrics = Resources.getSystem().getDisplayMetrics();
int originalWidthScaled = (int) (result.getIntrinsicWidth() * metrics.density);
int originalHeightScaled = (int) (result.getIntrinsicHeight() * metrics.density);
if (originalWidthScaled > metrics.widthPixels) {
    // Too wide for the screen: clamp to screen width, preserve aspect ratio.
    height = result.getIntrinsicHeight() * metrics.widthPixels
            / result.getIntrinsicWidth();
    width = metrics.widthPixels;
} else {
    height = originalHeightScaled;
    width = originalWidthScaled;
}
// change the reference of the current drawable to the result
// from the HTTP call
urlDrawable.drawable = result;
urlDrawable.setBounds(0, 0, 0 + width, 0 + height);
// redraw the image by invalidating the container
container.invalidate();
// For ICS
container.setHeight(
        container.getHeight() +
        result.getIntrinsicHeight());
// Pre ICS
container.setEllipsize(null);
I answered my own question — I changed
if (originalWidthScaled > metrics.widthPixels) {
height = result.getIntrinsicHeight() * metrics.widthPixels
/ result.getIntrinsicWidth();
width = metrics.widthPixels;
}
for
if (originalWidthScaled > (metrics.widthPixels * 70) / 100) {
width = (metrics.widthPixels * 70) / 100;
height = result.getIntrinsicHeight() * width
/ result.getIntrinsicWidth();
}
And now it occupies 70% of the screen's width, which is exactly the max size of the container.
For anyone who is still looking for an answer using the newer APIs, this custom implementation of ImageGetter should allow you to scale the image up to occupy the device display width, scale it down if the given image is larger than the device display width, or retain its original dimensions if it is smaller.
/**
 * Custom ImageGetter for [HtmlCompat.fromHtml] which accepts both a URL and a
 * Base64 payload from an img tag. Images are loaded off the main thread via
 * Glide and swapped into a placeholder drawable once ready.
 *
 * Fixes over the original: the garbled `#DrawableRes` annotation is restored to
 * `@DrawableRes`, and a no-op `* 100 / 100` on the inset sum is removed.
 *
 * @param scope lifecycle-aware scope used to launch the background load
 * @param res resources used for display metrics and the error drawable
 * @param glide Glide request manager that performs the actual fetch
 * @param htmlTextView the TextView whose HTML content hosts the images
 * @param errorImage drawable resource shown on load failure (0 = show nothing)
 * @param matchParent when true, scale the image up to fill the available width;
 *        when false, only scale down images wider than the display
 */
class HtmlImageGetter(
    private val scope: LifecycleCoroutineScope,
    private val res: Resources,
    private val glide: RequestManager,
    private val htmlTextView: AppCompatTextView,
    @DrawableRes
    private val errorImage: Int = 0,
    private val matchParent: Boolean = true
) : ImageGetter {
    override fun getDrawable(source: String): Drawable {
        // Return a placeholder synchronously; the real bitmap is swapped in later.
        val holder = BitmapDrawablePlaceHolder(res, null)
        scope.launch(Dispatchers.IO) {
            runCatching {
                glide
                    .asBitmap()
                    .load(
                        if (source.matches(Regex("data:image.*base64.*")))
                            Base64.decode(
                                source.replace("data:image.*base64".toRegex(), ""),
                                Base64.DEFAULT
                            ) // Image tag used Base64
                        else
                            source // Image tag used URL
                    )
                    .submit()
                    .get()
            }
                .onSuccess { setDrawable(holder, it) }
                .onFailure {
                    // Fall back to the optional error drawable; 0 means "no fallback".
                    if (errorImage != 0)
                        BitmapFactory.decodeResource(res, errorImage)?.let {
                            setDrawable(holder, it)
                        }
                }
        }
        return holder
    }

    /** Sizes [bitmap] for the display, installs it into [holder], and re-triggers layout. */
    private suspend fun setDrawable(holder: BitmapDrawablePlaceHolder, bitmap: Bitmap) {
        val drawable = BitmapDrawable(res, bitmap)
        val width: Int
        val height: Int
        val metrics = res.displayMetrics
        // Usable width = screen width minus the view's horizontal padding and margins.
        // (The original multiplied the inset sum by 100 / 100 — a no-op, removed.)
        val displayWidth = metrics.widthPixels - (htmlTextView.paddingStart + htmlTextView.paddingEnd + htmlTextView.marginStart + htmlTextView.marginEnd)
        val imageWidthScaled = (drawable.intrinsicWidth * metrics.density)
        val imageHeightScaled = (drawable.intrinsicHeight * metrics.density)
        // Scale up if matchParent is true; otherwise only scale down oversized images.
        if (matchParent || imageWidthScaled > displayWidth) {
            width = displayWidth
            height = (drawable.intrinsicHeight * width / drawable.intrinsicWidth)
        } else {
            height = imageHeightScaled.roundToInt()
            width = imageWidthScaled.roundToInt()
        }
        drawable.setBounds(0, 0, width, height)
        holder.setDrawable(drawable)
        holder.setBounds(0, 0, width, height)
        // Re-assigning the text forces the TextView to re-measure its drawables.
        withContext(Dispatchers.Main) { htmlTextView.text = htmlTextView.text }
    }

    /** Drawable that starts empty and delegates drawing to the real image once set. */
    internal class BitmapDrawablePlaceHolder(res: Resources, bitmap: Bitmap?) :
        BitmapDrawable(res, bitmap) {
        private var drawable: Drawable? = null
        override fun draw(canvas: Canvas) {
            drawable?.run { draw(canvas) }
        }
        fun setDrawable(drawable: Drawable) {
            this.drawable = drawable
        }
    }
}