Android: created bitmap has wrong colors

I'm porting an iOS app to Android and have an issue with bitmap colors when creating the bitmap from a byte array. This code works perfectly on iOS (C# - Xamarin):
const int bitsPerComponent = 8;
const int bytePerPixel = 4;
var bytesPerRow = bytePerPixel * BarcodeImageWidth;
var colorSpace = CGColorSpace.CreateDeviceRGB();
var context = new CGBitmapContext(byteArray,
BarcodeImageWidth, BarcodeImageHeight,
bitsPerComponent, bytesPerRow,
colorSpace, CGImageAlphaInfo.NoneSkipFirst);
BarcodeImageView.Image = new UIImage(context.ToImage());
And this code on Android produces a bitmap with wrong colors:
var bitmap = Bitmap.CreateBitmap(barcode.Width, barcode.Height, Bitmap.Config.Argb8888);
bitmap.CopyPixelsFromBuffer(ByteBuffer.Wrap(imageBytes));
barcode.SetImageBitmap(bitmap);

I fixed it by manually building the pixel array, skipping the first (alpha) byte of each pixel:
int pixelsCount = imageBytes.Length / 4;
int[] pixels = new int[pixelsCount];
for (int i = 0; i < pixelsCount; i++)
{
var offset = 4 * i;
int r = imageBytes[offset + 1];
int g = imageBytes[offset + 2];
int b = imageBytes[offset + 3];
pixels[i] = Color.Rgb(r, g, b);
}
var bitmap = Bitmap.CreateBitmap(pixels, barcode.Width, barcode.Height, Bitmap.Config.Argb8888);
barcode.SetImageBitmap(bitmap);
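For reference, the mismatch is in the byte order: Bitmap.Config.Argb8888 reads the buffer as R, G, B, A per pixel, while (as the fix above shows) the byte array coming from the CGBitmapContext stores each pixel as X, R, G, B with the skipped alpha byte first. An alternative to building an int[] of packed colors is to shift the bytes in place and keep CopyPixelsFromBuffer; this is an untested sketch using the same variables as in the question:
for (var offset = 0; offset < imageBytes.Length; offset += 4)
{
// move R, G, B down one byte and force the now-unused alpha byte to opaque
imageBytes[offset] = imageBytes[offset + 1];
imageBytes[offset + 1] = imageBytes[offset + 2];
imageBytes[offset + 2] = imageBytes[offset + 3];
imageBytes[offset + 3] = (byte)0xFF;
}
var bitmap = Bitmap.CreateBitmap(barcode.Width, barcode.Height, Bitmap.Config.Argb8888);
bitmap.CopyPixelsFromBuffer(ByteBuffer.Wrap(imageBytes));
barcode.SetImageBitmap(bitmap);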

Related

Xamarin tf.lite input objects

I'm trying to reproduce TensorFlow object detection on Xamarin.
private MappedByteBuffer LoadModelFile()
{
AssetFileDescriptor fileDescriptor = Assets.OpenFd("detect.tflite");
FileInputStream inputStream = new FileInputStream(fileDescriptor.FileDescriptor);
FileChannel fileChannel = inputStream.Channel;
long startOffset = fileDescriptor.StartOffset;
long declaredLength = fileDescriptor.DeclaredLength;
return fileChannel.Map(FileChannel.MapMode.ReadOnly, startOffset, declaredLength);
}
View view = (View) sender;
MappedByteBuffer buffer = LoadModelFile();
Interpreter interpreter = new Interpreter(buffer);
var sr = new StreamReader(Assets.Open("labels.txt"));
var labels = sr.ReadToEnd()
.Split('\n')
.Select(s => s.Trim())
.Where(s => !string.IsNullOrEmpty(s))
.ToList();
var bitmap = BitmapFactory.DecodeResource(Resources, 2130837608);
var resizedBitmap = Bitmap.CreateScaledBitmap(bitmap, 1000, 750, false)
.Copy(Bitmap.Config.Argb8888, false);
float[][][][] imgData = null;
imgData = new float[1][][][];
imgData[0] = new float[1000][][];
for (int i = 0; i < imgData[0].Length; i++)
{
imgData[0][i] = new float[750][];
for (int j = 0; j < imgData[0][i].Length; j++)
{
imgData[0][i][j] = new float[3];
}
}
var intValuess = new int[1000 * 750];
resizedBitmap.GetPixels(intValuess, 0, 1000, 0, 0, 1000, 750);
int pixels = 0;
for (int i = 0; i < imgData[0].Length; i++)
{
for (int j = 0; j < imgData[0][i].Length; j++)
{
var val = intValuess[pixels++];
imgData[0][i][j][0] = (float)((val >> 16) & 0xFF);
imgData[0][i][j][1] = (float)((val >> 8) & 0xFF);
imgData[0][i][j][2] = (float)(val & 0xFF);
}
}
var outputs = new float[labels.Count];
interpreter.Run(imgData, outputs);
but I get the error "cannot convert float[][][][] to Java.Lang.Object" on the line interpreter.Run(imgData, outputs);
How can I convert float[][][][] to Java.Lang.Object, or where can I find TensorFlow Lite examples for Xamarin?
I know it has been a while since you asked this question but maybe my response can be useful to someone.
I am also trying to use Xamarin with tflite, to run a simple CNN.
Here is my code:
private MappedByteBuffer LoadModelFile()
{
var assets = Application.Context.Assets;
AssetFileDescriptor fileDescriptor = assets.OpenFd("seed_model_no_qt.tflite");
FileInputStream inputStream = new FileInputStream(fileDescriptor.FileDescriptor);
FileChannel fileChannel = inputStream.Channel;
long startOffset = fileDescriptor.StartOffset;
long declaredLength = fileDescriptor.DeclaredLength;
return fileChannel.Map(FileChannel.MapMode.ReadOnly, startOffset, declaredLength);
}
private string Classify(MediaFile mediaFile)
{
var assets = Application.Context.Assets;
Bitmap bp = BitmapFactory.DecodeStream(mediaFile.GetStream());
var resizedBitmap = Bitmap.CreateScaledBitmap(bp, 1280, 1280, false).Copy(Bitmap.Config.Argb8888, false);
var bufint = new int[1280 * 1280];
resizedBitmap.GetPixels(bufint, 0, 1280, 0, 0, 1280, 1280);
int pixels = 0;
var input_buffer = new byte[4 * 1280 * 1280 * 3];
for(int i = 0; i < 1280; i++)
{
for(int k = 0; k < 1280; k++)
{
int val = bufint[pixels++];
Array.Copy(BitConverter.GetBytes(((val >> 16) & 0xFF) * (1f / 255f)), 0, input_buffer, (i * 1280 + k) * 12, 4);
Array.Copy(BitConverter.GetBytes(((val >> 8) & 0xFF) * (1f / 255f)), 0, input_buffer, (i * 1280 + k) * 12 + 4, 4);
Array.Copy(BitConverter.GetBytes((val & 0xFF) * (1f / 255f)), 0, input_buffer, (i * 1280 + k) * 12 + 8, 4);
}
}
var bytebuffer = Java.Nio.ByteBuffer.Wrap(input_buffer);
var output = Java.Nio.ByteBuffer.AllocateDirect(4*160*160);
interpreter.Run(bytebuffer, output);
var buffer = new byte[4 * 160 * 160];
Marshal.Copy(output.GetDirectBufferAddress(), buffer, 0, 4 * 160 * 160);
float sum = 0.0f;
for(int i = 0; i < 160*160; i++)
{
sum += BitConverter.ToSingle(buffer, i * 4);
}
return "Count : " + ((int)(sum/255)).ToString();
}
I reused your LoadModelFile() function as is. The code takes an image from a MediaFile (coming from the phone camera), then resizes it to a 1280x1280 RGB image before feeding it to a CNN as an array of float32 values.
Your float[][][][] to Java.Lang.Object issue comes from the interpreter.Run() method expecting a Java object. Some people online solve it by passing a Java.Nio.ByteBuffer as the parameter instead of an array. It requires some byte-level manipulation, but the Run method does accept the ByteBuffer object.
When filling the ByteBuffer, I advise you not to use its methods such as PutFloat(), but to fill a byte[] buffer and then use the Java.Nio.ByteBuffer.Wrap() method as I did. Using the ByteBuffer's own methods caused large performance problems in my case.
The same thing happens when handling the output of my CNN (a 160x160 heatmap of float32 values). Using the ByteBuffer.Get() method to access the values was very slow. Instead, use Marshal.Copy to copy the values into a byte array, then read back the float values with BitConverter.ToSingle.
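To make those two patterns explicit, here is a condensed, untested sketch; inputValues, inputLength and outputLength are placeholder names, and interpreter is the Interpreter instance created as above (Marshal.Copy needs System.Runtime.InteropServices):
// input: serialize the float32 values into a managed byte[] and wrap it (no PutFloat calls)
var inputBytes = new byte[4 * inputLength];
for (int i = 0; i < inputLength; i++)
{
Array.Copy(BitConverter.GetBytes(inputValues[i]), 0, inputBytes, 4 * i, 4);
}
var inputBuffer = Java.Nio.ByteBuffer.Wrap(inputBytes);
// output: let Run() fill a direct buffer, then read it back with a single Marshal.Copy
var outputBuffer = Java.Nio.ByteBuffer.AllocateDirect(4 * outputLength);
interpreter.Run(inputBuffer, outputBuffer);
var outputBytes = new byte[4 * outputLength];
Marshal.Copy(outputBuffer.GetDirectBufferAddress(), outputBytes, 0, outputBytes.Length);
var outputs = new float[outputLength];
for (int i = 0; i < outputLength; i++)
{
outputs[i] = BitConverter.ToSingle(outputBytes, 4 * i);
}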

How do I edit and save a large bitmap in Android? Currently writing it out as tiles but it is slow to join them back together

I'm using RenderScript on Android to edit photos. Currently, due to the texture size limit and memory limits on Android, the app will crash if I try anything too large, e.g. photos taken with the device's camera.
My first thought to get around this was to use BitmapRegionDecoder to tile the large photo into manageable pieces, edit them through RenderScript and save them one at a time, then stitch it all together using PNGJ - a PNG decoding and encoding library that allows writing PNG images to disk in parts so I don't have the full image in memory.
This works fine but stitching it together takes a rather long time - around 1 minute at a guess.
Are there any other solutions I should consider? I can change to JPEG if there is a solution there, but I haven't found it yet. Basically I'm looking for the other side of a BitmapRegionDecoder, a BitmapRegionEncoder.
Just to be clear, I do not want to resize the image.
Load the image in horizontal stripes using BitmapRegionDecoder. The code below assumes that it is PNG and uses PNGJ to copy the metadata to the new image, but adding support for JPEG should not be too difficult.
Process each stripe with RenderScript.
Save it using PNGJ. Do not use high compression or it will slow down to a crawl.
The PNG version of this image (4850x3635 px) takes 12 seconds on a Nexus 5 with a trivial RS filter (desaturation).
void processPng(String forig,String fdest) {
try {
Allocation inAllocation = null;
Allocation outAllocation = null;
final int block_height = 64;
FileInputStream orig = new FileInputStream(forig);
FileInputStream orig2 = new FileInputStream(forig);
FileOutputStream dest = new FileOutputStream(fdest);
BitmapRegionDecoder decoder = BitmapRegionDecoder.newInstance(orig, false);
Rect blockRect = new Rect();
PngReader pngr = new PngReader(orig2);
PngWriter pngw = new PngWriter(dest, pngr.imgInfo);
pngw.copyChunksFrom(pngr.getChunksList());
// keep compression quick
pngw.getPixelsWriter().setDeflaterCompLevel(1);
int channels = 3; // needless to say, this should not be hardcoded
int width = pngr.imgInfo.samplesPerRow / channels;
int height = pngr.imgInfo.rows;
pngr.close(); // don't need it anymore
blockRect.left = 0;
blockRect.right = width;
BitmapFactory.Options options = new BitmapFactory.Options();
options.inPreferredConfig = Bitmap.Config.ARGB_8888;
Bitmap blockBitmap;
byte[] bytes = new byte[width * block_height * 4];
byte[] byteline = new byte[width * channels];
for (int row = 0; row <= height / block_height; row++) {
int h;
// are we nearing the end?
if((row + 1) * block_height <= height)
h = block_height;
else {
h = height - row * block_height;
// so that new, smaller Allocations are created
inAllocation = outAllocation = null;
}
blockRect.top = row * block_height;
blockRect.bottom = row * block_height + h;
blockBitmap = decoder.decodeRegion(blockRect, options);
if(inAllocation == null)
inAllocation = Allocation.createFromBitmap(mRS, blockBitmap);
if(outAllocation == null)
{
Type.Builder TypeDir = new Type.Builder(mRS, Element.U8_4(mRS));
TypeDir.setX(width).setY(h);
outAllocation = Allocation.createTyped(mRS, TypeDir.create());
}
inAllocation.copyFrom(blockBitmap);
mScript.forEach_saturation(inAllocation, outAllocation);
outAllocation.copyTo(bytes);
int idx = 0;
for(int raster = 0; raster < h; raster++) {
for(int m = 0; m < width; m++)
{
byteline[m * channels] = bytes[idx++];
byteline[m * channels + 1] = bytes[idx++];
byteline[m * channels + 2] = bytes[idx++];
idx++;
}
ImageLineByte line = new ImageLineByte(pngr.imgInfo, byteline);
pngw.writeRow(line);
}
}
pngw.end();
} catch (IOException e)
{
Log.d("BIG", "File io problem");
}
}
Based on @MiloslawSmyk's answer, this is a version that loads a large JPEG and saves it with PNGJ:
fun processPng(forig: String, fdest: String) {
try {
val blockHeight = 64
val orig = FileInputStream(forig)
val dest = FileOutputStream(fdest)
val decoder = BitmapRegionDecoder.newInstance(orig, false)
val blockRect = Rect()
val channels = 3 // needless to say, this should not be hardcoded
val sizeOptions = BitmapFactory.Options().apply {
inJustDecodeBounds = true
}
BitmapFactory.decodeFile(forig, sizeOptions)
val height: Int = sizeOptions.outHeight
val width: Int = sizeOptions.outWidth
val pngw = PngWriter(dest, ImageInfo(width, height, 8, false))
// keep compression quick
pngw.pixelsWriter.deflaterCompLevel = 1
blockRect.left = 0
blockRect.right = width
val options = BitmapFactory.Options().apply {
inPreferredConfig = Bitmap.Config.ARGB_8888
}
var blockBitmap: Bitmap
val byteLine = ByteArray(width * channels)
for (row in 0..height / blockHeight) {
// are we nearing the end?
val h: Int = if ((row + 1) * blockHeight <= height)
blockHeight
else {
height - row * blockHeight
}
blockRect.top = row * blockHeight
blockRect.bottom = row * blockHeight + h
blockBitmap = decoder.decodeRegion(blockRect, options)
// convert bitmap into byte array
val size = blockBitmap.rowBytes * blockBitmap.height
val byteBuffer = ByteBuffer.allocate(size)
blockBitmap.copyPixelsToBuffer(byteBuffer)
val bytes = byteBuffer.array()
var idx = 0
for (raster in 0 until h) {
for (m in 0 until width) {
byteLine[m * channels] = bytes[idx++]
byteLine[m * channels + 1] = bytes[idx++]
byteLine[m * channels + 2] = bytes[idx++]
idx++
}
val line = ImageLineByte(pngw.imgInfo, byteLine)
pngw.writeRow(line)
}
}
pngw.end()
} catch (e: IOException) {
Log.d("BIG", "File io problem")
}
}

Is it possible to set the version while generating a QR code using the ZXing lib in Android?

While generating a QR code for Android using the ZXing library, is it possible to set the version number, like version 4 or any other version?
Any guidance or link would be appreciated.
Thank you.
Yes, check the EncodeHintType map:
private Bitmap stringToQRCode(String text, int width, int height) {
BitMatrix bitMatrix;
try {
HashMap<EncodeHintType, Object> map = new HashMap<>();
map.put(EncodeHintType.CHARACTER_SET, "utf-8");
map.put(EncodeHintType.ERROR_CORRECTION, ErrorCorrectionLevel.M);
map.put(EncodeHintType.QR_VERSION, 9); // (1-40)
map.put(EncodeHintType.MARGIN, 2); // pixels
bitMatrix = new MultiFormatWriter().encode(text, BarcodeFormat.QR_CODE, width, height, map);
int bitMatrixWidth = bitMatrix.getWidth();
int bitMatrixHeight = bitMatrix.getHeight();
int[] pixels = new int[bitMatrixWidth * bitMatrixHeight];
int colorWhite = 0xFFFFFFFF;
int colorBlack = 0xFF000000;
for (int y = 0; y < bitMatrixHeight; y++) {
int offset = y * bitMatrixWidth;
for (int x = 0; x < bitMatrixWidth; x++) {
pixels[offset + x] = bitMatrix.get(x, y) ? colorBlack : colorWhite;
}
}
Bitmap bitmap = Bitmap.createBitmap(bitMatrixWidth, bitMatrixHeight, Bitmap.Config.ARGB_4444);
bitmap.setPixels(pixels, 0, bitMatrixWidth, 0, 0, bitMatrixWidth, bitMatrixHeight);
return bitmap;
} catch (Exception i) {
i.printStackTrace();
return null;
}
}
No. There would be no real point to this. The version can't be lower than what is required to encode the data, and setting it higher just makes a denser QR code that's slightly harder to read.

How to apply the grayscale effect to the Bitmap image?

I want to convert a bitmap image to grayscale. I am using the NDK to boost the app's performance, and I have applied other effects successfully.
Problem:
The code I am using to apply grayscale is taken from a C# example, so I need to convert it to the NDK. I am unable to do that part.
For reference, here is the code showing how I applied the cyan effect to an image.
The code pasted below is working fine.
void applyCyano(Bitmap* bitmap) {
//Cache to local variables
unsigned char* red = (*bitmap).red;
unsigned char* green = (*bitmap).green;
unsigned char* blue = (*bitmap).blue;
unsigned int length = (*bitmap).width * (*bitmap).height;
register unsigned int i;
register unsigned char grey, r, g, b;
for (i = length; i--;) {
grey = ((red[i] * 0.222f) + (green[i] * 0.222f) + (blue[i] * 0.222f));
r = componentCeiling(61.0f + grey);
g = componentCeiling(87.0f + grey);
b = componentCeiling(136.0f + grey);
grey = blackAndWhite(red[i], green[i], blue[i]);
red[i] = overlayPixelComponents(grey, r, 0.9f);
green[i] = overlayPixelComponents(grey, g, 0.9f);
blue[i] = overlayPixelComponents(grey, b, 0.9f);
}
}
The code to apply the grayscale effect (taken from a C# example found online):
unsafe Bitmap applyGrayscaleNatively(Bitmap original)
{
//create an empty bitmap the same size as original
Bitmap newBitmap = new Bitmap(original.Width, original.Height);
//lock the original bitmap in memory
BitmapData originalData = original.LockBits(
new Rectangle(0, 0, original.Width, original.Height),
ImageLockMode.ReadOnly, PixelFormat.Format24bppRgb);
//lock the new bitmap in memory
BitmapData newData = newBitmap.LockBits(
new Rectangle(0, 0, original.Width, original.Height),
ImageLockMode.WriteOnly, PixelFormat.Format24bppRgb);
//set the number of bytes per pixel
int pixelSize = 3;
for (int y = 0; y < original.Height; y++)
{
//get the data from the original image
byte* oRow = (byte*)originalData.Scan0 + (y * originalData.Stride);
//get the data from the new image
byte* nRow = (byte*)newData.Scan0 + (y * newData.Stride);
for (int x = 0; x < original.Width; x++)
{
//create the grayscale version
byte grayScale =
(byte)((oRow[x * pixelSize] * .11) + //B
(oRow[x * pixelSize + 1] * .59) + //G
(oRow[x * pixelSize + 2] * .3)); //R
//set the new image's pixel to the grayscale version
nRow[x * pixelSize] = grayScale; //B
nRow[x * pixelSize + 1] = grayScale; //G
nRow[x * pixelSize + 2] = grayScale; //R
}
}
//unlock the bitmaps
newBitmap.UnlockBits(newData);
original.UnlockBits(originalData);
return newBitmap;
}
What I want to do:
I have taken this project from here, which applies different effects to the image but not grayscale. How can I apply grayscale in this code so that the rest of the project's functionality keeps working?
Let me know if you need anything from me.
Many thanks in advance.
Please help me resolve this issue; I am not able to go further in my project without it.
Use the following function to convert a bitmap to its grayscale equivalent on Android, instead of converting the C# version:
public Bitmap toGrayscale(Bitmap bmpOriginal){
int width, height;
height = bmpOriginal.getHeight();
width = bmpOriginal.getWidth();
Bitmap bmpGrayscale = Bitmap.createBitmap(width, height, Bitmap.Config.RGB_565);
Canvas c = new Canvas(bmpGrayscale);
Paint paint = new Paint();
ColorMatrix cm = new ColorMatrix();
cm.setSaturation(0);
ColorMatrixColorFilter f = new ColorMatrixColorFilter(cm);
paint.setColorFilter(f);
c.drawBitmap(bmpOriginal, 0, 0, paint);
return bmpGrayscale;
}
I solved the issue. Here is my solution:
void applyGrayscaleNatively(Bitmap* bitmap)
{
register unsigned int i;
unsigned int length = (*bitmap).width * (*bitmap).height;
register unsigned char grey;
unsigned char* red = (*bitmap).red;
unsigned char* green = (*bitmap).green;
unsigned char* blue = (*bitmap).blue;
float matrix[4][4];
identMatrix(matrix);
float saturation = 1.0f;
saturateMatrix(matrix, &saturation);
applyMatrix(bitmap, matrix);
for (i = length; i--;) {
float value;
getBrightness(red[i], green[i], blue[i], &value);
grey = grayScale(red[i], green[i], blue[i]);
red[i] = grey;
green[i] = grey;
blue[i] = grey;
}
}

Android byte array to Bitmap How to

How can I convert a byte array received over a socket?
The C++ client sends image data of type uchar.
On the Android side I receive this uchar array as a byte[], whose values range from -128 to +127.
What I want to do is receive this data and display it. I was trying to convert it to a Bitmap using BitmapFactory.decodeByteArray(), but no luck: I get a null Bitmap. Am I doing this right, or is there another method available?
Thanks in advance....
From the comments to the answers above, it seems like you want to create a Bitmap object from a stream of RGB values, not from any image format like PNG or JPEG.
This probably means that you know the image size already. In this case, you could do something like this:
byte[] rgbData = ... // From your server
int nrOfPixels = rgbData.length / 3; // Three bytes per pixel.
int pixels[] = new int[nrOfPixels];
for(int i = 0; i < nrOfPixels; i++) {
int r = rgbData[3*i] & 0xFF; // mask, since Java bytes are signed (-128..127)
int g = rgbData[3*i + 1] & 0xFF;
int b = rgbData[3*i + 2] & 0xFF;
pixels[i] = Color.rgb(r,g,b);
}
Bitmap bitmap = Bitmap.createBitmap(pixels, width, height, Bitmap.Config.ARGB_8888);
I've been using it like below in one of my projects and so far it's been pretty solid. I'm not sure how picky it is as far as it not being compressed as a PNG though.
byte[] bytesImage;
Bitmap bmpOld; // Contains original Bitmap
Bitmap bmpNew;
ByteArrayOutputStream baoStream = new ByteArrayOutputStream();
bmpOld.compress(Bitmap.CompressFormat.PNG, 100, baoStream);
bytesImage = baoStream.toByteArray();
bmpNew = BitmapFactory.decodeByteArray(bytesImage, 0, bytesImage.length);
edit: I've adapted the code from this post to use RGB, so the code below should work for you. I haven't had a chance to test it yet so it may need some adjusting.
byte[] bytesImage = {0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2};
int intByteCount = bytesImage.length;
int[] intColors = new int[intByteCount / 3];
int intWidth = 2;
int intHeight = 2;
final int intAlpha = 255;
if ((intByteCount / 3) != (intWidth * intHeight)) {
throw new ArrayStoreException();
}
for (int intIndex = 0; intIndex < intByteCount - 2; intIndex = intIndex + 3) {
intColors[intIndex / 3] = (intAlpha << 24) | ((bytesImage[intIndex] & 0xFF) << 16) | ((bytesImage[intIndex + 1] & 0xFF) << 8) | (bytesImage[intIndex + 2] & 0xFF); // mask the signed bytes
}
Bitmap bmpImage = Bitmap.createBitmap(intColors, intWidth, intHeight, Bitmap.Config.ARGB_8888);
InputStream is = new java.net.URL(urldisplay).openStream();
byte[] colors = IOUtils.toByteArray(is);
int nrOfPixels = colors.length / 3; // Three bytes per pixel.
int pixels[] = new int[nrOfPixels];
for(int i = 0; i < nrOfPixels; i++) {
int r = (int)(0xFF & colors[3*i]);
int g = (int)(0xFF & colors[3*i+1]);
int b = (int)(0xFF & colors[3*i+2]);
pixels[i] = Color.rgb(r,g,b);
}
imageBitmap = Bitmap.createBitmap(pixels, width, height,Bitmap.Config.ARGB_4444);
bmImage.setImageBitmap(imageBitmap );
