How to solve Index out of bounds? - android

I'm trying to follow an OpenCV tutorial; the only difference is that I'm using Android. This is the link.
I get an out-of-bounds error on the line where it tries to set the new value in the new Mat:
newImageData[(y * mRgba.cols() + x) * mRgba.channels() + c] = saturate(alpha * pixelValue + beta);
Here is the whole method I used:
public Bitmap brightAndContrast(Bitmap bitmap) {
    double alpha = 2.0;
    int beta = 50;
    Mat mRgba = new Mat();
    Mat mResult = Mat.zeros(mRgba.size(), mRgba.type());
    int width = bitmap.getWidth();
    int height = bitmap.getHeight();
    Bitmap resultBitmap = Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_8888);
    Utils.bitmapToMat(bitmap, mRgba);
    byte[] imageData = new byte[(int) (mRgba.total() * mRgba.channels())];
    mRgba.get(0, 0, imageData);
    byte[] newImageData = new byte[(int) (mResult.total() * mResult.channels())];
    for (int y = 0; y < mRgba.rows(); y++) {
        for (int x = 0; x < mRgba.cols(); x++) {
            for (int c = 0; c < mRgba.channels(); c++) {
                double pixelValue = imageData[(y * mRgba.cols() + x) * mRgba.channels() + c];
                pixelValue = pixelValue < 0 ? pixelValue + 256 : pixelValue;
                newImageData[(y * mRgba.cols() + x) * mRgba.channels() + c] = saturate(alpha * pixelValue + beta); // line 101 in the stack trace below
            }
        }
    }
    mResult.put(0, 0, newImageData);
    Utils.matToBitmap(mResult, resultBitmap);
    return resultBitmap;
}

private byte saturate(double val) {
    int iVal = (int) Math.round(val);
    iVal = iVal > 255 ? 255 : (iVal < 0 ? 0 : iVal);
    return (byte) iVal;
}
So why does it go out of bounds?
This is the error I get:
java.lang.ArrayIndexOutOfBoundsException: length=0; index=0
at com.wannatry.tryapps.OpenCVConverter.brightAndContrast(OpenCVConverter.java:101)
at com.wannatry.tryapps.MainActivity$15.onClick(MainActivity.java:232)
at android.view.View.performClick(View.java:5740)
at android.view.View$PerformClick.run(View.java:22947)
EDITED
This is the C++ code from the tutorial:
new_image.at<Vec3b>(y,x)[c] = saturate_cast<uchar>( alpha*image.at<Vec3b>(y,x)[c] + beta );
and I translated it as this code:
double pixelValue = imageData[(y * image.cols() + x) * image.channels() + c];
pixelValue = pixelValue < 0 ? pixelValue + 256 : pixelValue;
newImageData[(y * image.cols() + x) * image.channels() + c] = saturate(alpha * pixelValue + beta);
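For reference, a likely cause (my reading of the stack trace, not a confirmed answer): length=0 in the exception suggests newImageData has length 0, because mResult is sized from mRgba while mRgba is still empty. Creating mResult only after Utils.bitmapToMat() has populated mRgba would give both buffers the correct size:
Mat mRgba = new Mat();
Utils.bitmapToMat(bitmap, mRgba);                    // populate mRgba first
Mat mResult = Mat.zeros(mRgba.size(), mRgba.type()); // size() is now non-zero
byte[] newImageData = new byte[(int) (mResult.total() * mResult.channels())];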

Related

Otsu histogram self implementation

I tried to make my own implementation of Otsu's method. I have already read some Java source code and some sites that explain the formula, and tried to implement it. I want to share this to ask if anyone can help me, or at least tell me what I can do or improve.
I have already coded getting the width and height, and the background and foreground weight, mean, variance, and within-class variance.
Note that I have not implemented how to find the exact threshold, or how to change the picture to black and white (binarize) using the within-class variance. If you can help me, feel welcome to. I have also seen some Java code that has threshold = i or threshold = t, but I can't see how it turns the image black and white.
Here is my code:
Otsu.java
Bitmap tempImg = (Bitmap) original;
Bitmap OImg = Bitmap.createBitmap(tempImg.getWidth(), tempImg.getHeight(), tempImg.getConfig());
int width = tempImg.getWidth();
int height = tempImg.getHeight();
int A, R, G, B, colorPixel;
for (int x = 0; x < width; x++) { // original image to grayscale
    for (int y = 0; y < height; y++) {
        colorPixel = tempImg.getPixel(x, y);
        A = Color.alpha(colorPixel);
        R = Color.red(colorPixel);
        G = Color.green(colorPixel);
        B = Color.blue(colorPixel);
        R = (R + G + B) / 3;
        G = R;
        B = R;
        OImg.setPixel(x, y, Color.argb(A, R, G, B));
    }
}
return OImg;
}
public static Bitmap Botsu(Bitmap gImg) {
    Bitmap tempImg = (Bitmap) gImg;
    Bitmap BWimg = Bitmap.createBitmap(tempImg.getWidth(), tempImg.getHeight(), tempImg.getConfig());
    int width = tempImg.getWidth();
    int height = tempImg.getHeight();
    int A, R, G, B, colorPixel;
    // histo-thresh
    double Wcv = 0;
    int[] Bx = new int[256];
    int[] By = new int[256];
    int[] Fx = new int[256];
    int[] Fy = new int[256];
    double Bw = 0, Bm = 0, Bv = 0, Bp = 0;
    double Fw = 0, Fm = 0, Fv = 0, Fp = 0;
    int c = 0, ImgPix = 0, ImgPixB = 0, ImgPixF = 0, newPixel = 0;
    // pixel check for histogram
    for (int x = 0; x < width; x++) {
        for (int y = 0; y < height; y++) {
            colorPixel = tempImg.getPixel(x, y);
            A = Color.alpha(colorPixel);
            R = Color.red(colorPixel);
            G = Color.green(colorPixel);
            B = Color.blue(colorPixel);
            int gray = (int) (0.2989 * R + 0.5870 * G + 0.1140 * B);
            if (gray > 128) { // white - foreground
                for (int z = 0; z < Fx.length; z++) {
                    if (Fx[z] == gray) {
                        c++;
                    }
                }
                if (c == 1) {
                    Fy[gray] = Fy[gray] + 1; // y axis - counter for pixels for each x
                } else {
                    Fx[x] = gray; // x axis - 0-255
                    Fy[gray] = Fy[gray] + 1;
                }
            } else { // black - background
                for (int z = 0; z < Bx.length; z++) {
                    if (Bx[z] == gray) {
                        c++;
                    }
                }
                if (c == 1) {
                    By[gray] = By[gray] + 1; // y axis - counter for pixels for each x
                } else {
                    Bx[x] = gray; // x axis - 0-255
                    By[gray] = By[gray] + 1;
                }
            }
        }
    }
    for (int b = 0; b < By.length; b++) {
        ImgPixB = ImgPixB + By[b];
    }
    for (int f = 0; f < Fy.length; f++) {
        ImgPixF = ImgPixF + Fy[f];
    }
    ImgPix = ImgPixB + ImgPixF;
    // bg part hist
    for (int i = 0; i < By.length; i++) { // weight
        Bw = Bw + By[i];
    }
    Bw = Bw / ImgPix;
    for (int i = 0; i < By.length; i++) { // pixel sum
        Bp = Bp + By[i];
    }
    for (int i = 0; i < Bx.length; i++) { // mean
        Bm = Bm + (Bx[i] * By[Bx[i]]);
    }
    Bm = Bm / Bp;
    for (int i = 0; i < Bx.length; i++) { // variance
        Bv = Bv + (Math.pow((Bx[i] - Bm), 2) * By[Bx[i]]); // (Bx[i]-Bm) * (Bx[i]-Bm)
    }
    Bv = Bv / Bp;
    // fg part hist
    for (int i = 0; i < Fy.length; i++) { // weight
        Fw = Fw + Fy[i];
    }
    Fw = Fw / ImgPix;
    for (int i = 0; i < Fy.length; i++) { // pixel sum
        Fp = Fp + Fy[i];
    }
    for (int i = 0; i < Fx.length; i++) { // mean
        Fm = Fm + (Fx[i] * Fy[Fx[i]]);
    }
    Fm = Fm / Fp;
    for (int i = 0; i < Fx.length; i++) { // variance
        Fv = Fv + (Math.pow((Fx[i] - Fm), 2) * Fy[Fx[i]]); // (Fx[i]-Fm) * (Fx[i]-Fm)
    }
    Fv = Fv / Fp;
    // within class variance
    Wcv = (Bw * Bv) + (Fw * Fv);
    for (int x = 0; x < width; x++) {
        for (int y = 0; y < height; y++) {
            colorPixel = tempImg.getPixel(x, y);
            A = Color.alpha(colorPixel);
            R = Color.red(colorPixel);
            G = Color.green(colorPixel);
            B = Color.blue(colorPixel);
            //int gray = (int) (0.2989 * R + 0.5870 * G + 0.1140 * B);
            int gray2 = (int) (Wcv * R + Wcv * G + Wcv * B);
            if (gray2 > 128) {
                gray2 = 255;
            } else if (gray2 < 129) {
                gray2 = 0;
            }
            BWimg.setPixel(x, y, Color.argb(A, gray2, gray2, gray2));
        }
    }
    return BWimg;
}
x[z] is for the x-axis and y[gray] is for the y-axis. I based this on the graph in my lab book:
x = 0-255
y = how many pixels are of a certain color shade
Feel free to send more samples that can help me.
OUTPUT: (I added two threshold variants that actually produce an output. Other values only return a few black dots or a plain white image.)
if (gray2 > 128) {
    gray2 = 255;
} else if (gray2 < 129) {
    gray2 = 0;
}

if (gray2 > 64 && gray2 < 129) {
    gray2 = 255;
} else if (gray2 < 65) {
    gray2 = 0;
}
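Since the open part of the question is finding the threshold itself, here is a minimal sketch of the textbook Otsu search (my own illustration, not taken from the code above; hist and totalPixels are assumed inputs): build a single 256-bin histogram, then for each candidate threshold t compute the class weights and means, and keep the t that maximizes the between-class variance, which is equivalent to minimizing the within-class variance. Binarizing is then just comparing each gray value against the returned threshold.
static int otsuThreshold(int[] hist, int totalPixels) {
    // hist is assumed to be a 256-bin grayscale histogram
    double sumAll = 0;
    for (int i = 0; i < 256; i++) sumAll += i * (double) hist[i];
    double sumB = 0; // running sum of i*hist[i] for the background class
    int wB = 0;      // background weight (pixels at or below threshold)
    double bestVar = -1;
    int bestT = 0;
    for (int t = 0; t < 256; t++) {
        wB += hist[t];
        if (wB == 0) continue;
        int wF = totalPixels - wB; // foreground weight
        if (wF == 0) break;
        sumB += t * (double) hist[t];
        double mB = sumB / wB;            // background mean
        double mF = (sumAll - sumB) / wF; // foreground mean
        // between-class variance (up to a constant factor)
        double betweenVar = (double) wB * wF * (mB - mF) * (mB - mF);
        if (betweenVar > bestVar) {
            bestVar = betweenVar;
            bestT = t;
        }
    }
    return bestT;
}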

How to convert & rotate raw NV21 array image (android.media.Image) from front cam portrait mode in onImageAvailable (android Camera2)?

Note: All info in my post applies only to the Samsung Galaxy S7 device. I do not know how emulators and other devices behave.
In onImageAvailable I continuously convert each image to an NV21 byte array and forward it to an API expecting raw NV21 format.
This is how I initialize the image reader and receive the images:
private void openCamera() {
    ...
    mImageReader = ImageReader.newInstance(WIDTH, HEIGHT,
            ImageFormat.YUV_420_888, 1); // only 1 for best performance
    mImageReader.setOnImageAvailableListener(
            mOnImageAvailableListener, mBackgroundHandler);
    ...
}

private final ImageReader.OnImageAvailableListener mOnImageAvailableListener
        = new ImageReader.OnImageAvailableListener() {
    @Override
    public void onImageAvailable(ImageReader reader) {
        Image image = reader.acquireLatestImage();
        if (image != null) {
            byte[] data = convertYUV420ToNV21_ALL_PLANES(image); // this image is turned 90 deg using front cam in portrait mode
            byte[] data_rotated = rotateNV21_working(data, WIDTH, HEIGHT, 270);
            ForwardToAPI(data_rotated); // image data is forwarded to the API and received later on
            image.close();
        }
    }
};
The function converting the image to raw NV21 (from here) works fine, but the image is (due to Android?) turned by 90 degrees when using the front cam in portrait mode:
(I modified it slightly, according to comments from Alex Cohn.)
private byte[] convertYUV420ToNV21_ALL_PLANES(Image imgYUV420) {
    byte[] rez;
    ByteBuffer buffer0 = imgYUV420.getPlanes()[0].getBuffer();
    ByteBuffer buffer1 = imgYUV420.getPlanes()[1].getBuffer();
    ByteBuffer buffer2 = imgYUV420.getPlanes()[2].getBuffer();
    // actually here should be something like each second byte
    // however I simply get the last byte of buffer 2 and the entire buffer 1
    int buffer0_size = buffer0.remaining();
    int buffer1_size = buffer1.remaining(); // / 2 + 1;
    int buffer2_size = 1; // buffer2.remaining(); // / 2 + 1;
    byte[] buffer0_byte = new byte[buffer0_size];
    byte[] buffer1_byte = new byte[buffer1_size];
    byte[] buffer2_byte = new byte[buffer2_size];
    buffer0.get(buffer0_byte, 0, buffer0_size);
    buffer1.get(buffer1_byte, 0, buffer1_size);
    buffer2.get(buffer2_byte, buffer2_size - 1, buffer2_size);
    ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
    try {
        // swap 1 and 2 as blue and red colors are swapped
        outputStream.write(buffer0_byte);
        outputStream.write(buffer2_byte);
        outputStream.write(buffer1_byte);
    } catch (IOException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }
    rez = outputStream.toByteArray();
    return rez;
}
Hence "data" needs to be rotated. Using this function (from here), I get a weird 3-times interlaced picture error:
public static byte[] rotateNV21(byte[] input, int width, int height, int rotation) {
    byte[] output = new byte[input.length];
    boolean swap = (rotation == 90 || rotation == 270);
    // EDIT: in portrait mode & front cam this needs to be set to true:
    boolean yflip = true; // (rotation == 90 || rotation == 180);
    boolean xflip = (rotation == 270 || rotation == 180);
    for (int x = 0; x < width; x++) {
        for (int y = 0; y < height; y++) {
            int xo = x, yo = y;
            int w = width, h = height;
            int xi = xo, yi = yo;
            if (swap) {
                xi = w * yo / h;
                yi = h * xo / w;
            }
            if (yflip) {
                yi = h - yi - 1;
            }
            if (xflip) {
                xi = w - xi - 1;
            }
            output[w * yo + xo] = input[w * yi + xi];
            int fs = w * h;
            int qs = (fs >> 2);
            xi = (xi >> 1);
            yi = (yi >> 1);
            xo = (xo >> 1);
            yo = (yo >> 1);
            w = (w >> 1);
            h = (h >> 1);
            // adjust for interleave here
            int ui = fs + (w * yi + xi) * 2;
            int uo = fs + (w * yo + xo) * 2;
            // and here
            int vi = ui + 1;
            int vo = uo + 1;
            output[uo] = input[ui];
            output[vo] = input[vi];
        }
    }
    return output;
}
Resulting in this picture:
Note: it is still the same cup, but you see it 3-4 times.
Using another suggested rotate function from here gives the proper result:
public static byte[] rotateNV21_working(final byte[] yuv,
                                        final int width,
                                        final int height,
                                        final int rotation) {
    if (rotation == 0) return yuv;
    if (rotation % 90 != 0 || rotation < 0 || rotation > 270) {
        throw new IllegalArgumentException("0 <= rotation < 360, rotation % 90 == 0");
    }
    final byte[] output = new byte[yuv.length];
    final int frameSize = width * height;
    final boolean swap = rotation % 180 != 0;
    final boolean xflip = rotation % 270 != 0;
    final boolean yflip = rotation >= 180;
    for (int j = 0; j < height; j++) {
        for (int i = 0; i < width; i++) {
            final int yIn = j * width + i;
            final int uIn = frameSize + (j >> 1) * width + (i & ~1);
            final int vIn = uIn + 1;
            final int wOut = swap ? height : width;
            final int hOut = swap ? width : height;
            final int iSwapped = swap ? j : i;
            final int jSwapped = swap ? i : j;
            final int iOut = xflip ? wOut - iSwapped - 1 : iSwapped;
            final int jOut = yflip ? hOut - jSwapped - 1 : jSwapped;
            final int yOut = jOut * wOut + iOut;
            final int uOut = frameSize + (jOut >> 1) * wOut + (iOut & ~1);
            final int vOut = uOut + 1;
            output[yOut] = (byte) (0xff & yuv[yIn]);
            output[uOut] = (byte) (0xff & yuv[uIn]);
            output[vOut] = (byte) (0xff & yuv[vIn]);
        }
    }
    return output;
}
The result is fine now:
The top image shows the direct stream using a texture view's surface and adding it to the captureRequestBuilder. The bottom image shows the raw image data after rotating.
The questions are:
Does this hack in "convertYUV420ToNV21_ALL_PLANES" work on any device/emulator?
Why does rotateNV21 not work, while rotateNV21_working works fine?
Edit: The mirror issue is fixed, see the code comment. The squeeze issue is fixed; it was caused by the API the data gets forwarded to.
The actual open issue is a proper, not too expensive function that converts and rotates an image into raw NV21 and works on any device.
Here is the code to convert the Image to NV21 byte[]. This will work when the imgYUV420 U and V planes have pixelStride=1 (as on emulator) or pixelStride=2 (as on Nexus):
private byte[] convertYUV420ToNV21_ALL_PLANES(Image imgYUV420) {
    assert(imgYUV420.getFormat() == ImageFormat.YUV_420_888);
    Log.d(TAG, "image: " + imgYUV420.getWidth() + "x" + imgYUV420.getHeight() + " " + imgYUV420.getFormat());
    Log.d(TAG, "planes: " + imgYUV420.getPlanes().length);
    for (int nplane = 0; nplane < imgYUV420.getPlanes().length; nplane++) {
        Log.d(TAG, "plane[" + nplane + "]: length " + imgYUV420.getPlanes()[nplane].getBuffer().remaining()
                + ", strides: " + imgYUV420.getPlanes()[nplane].getPixelStride()
                + " " + imgYUV420.getPlanes()[nplane].getRowStride());
    }
    byte[] rez = new byte[imgYUV420.getWidth() * imgYUV420.getHeight() * 3 / 2];
    ByteBuffer buffer0 = imgYUV420.getPlanes()[0].getBuffer();
    ByteBuffer buffer1 = imgYUV420.getPlanes()[1].getBuffer();
    ByteBuffer buffer2 = imgYUV420.getPlanes()[2].getBuffer();
    int n = 0;
    assert(imgYUV420.getPlanes()[0].getPixelStride() == 1);
    for (int row = 0; row < imgYUV420.getHeight(); row++) {
        for (int col = 0; col < imgYUV420.getWidth(); col++) {
            rez[n++] = buffer0.get();
        }
    }
    assert(imgYUV420.getPlanes()[2].getPixelStride() == imgYUV420.getPlanes()[1].getPixelStride());
    int stride = imgYUV420.getPlanes()[1].getPixelStride();
    for (int row = 0; row < imgYUV420.getHeight(); row += 2) {
        for (int col = 0; col < imgYUV420.getWidth(); col += 2) {
            rez[n++] = buffer1.get();
            rez[n++] = buffer2.get();
            for (int skip = 1; skip < stride; skip++) {
                if (buffer1.remaining() > 0) {
                    buffer1.get();
                }
                if (buffer2.remaining() > 0) {
                    buffer2.get();
                }
            }
        }
    }
    Log.w(TAG, "total: " + rez.length);
    return rez;
}
Optimized Java code is available here.
As you can see, it is very easy to change this code to produce a rotated image in a single step:
private byte[] rotateYUV420ToNV21(Image imgYUV420) {
    Log.d(TAG, "image: " + imgYUV420.getWidth() + "x" + imgYUV420.getHeight() + " " + imgYUV420.getFormat());
    Log.d(TAG, "planes: " + imgYUV420.getPlanes().length);
    for (int nplane = 0; nplane < imgYUV420.getPlanes().length; nplane++) {
        Log.d(TAG, "plane[" + nplane + "]: length " + imgYUV420.getPlanes()[nplane].getBuffer().remaining()
                + ", strides: " + imgYUV420.getPlanes()[nplane].getPixelStride()
                + " " + imgYUV420.getPlanes()[nplane].getRowStride());
    }
    byte[] rez = new byte[imgYUV420.getWidth() * imgYUV420.getHeight() * 3 / 2];
    ByteBuffer buffer0 = imgYUV420.getPlanes()[0].getBuffer();
    ByteBuffer buffer1 = imgYUV420.getPlanes()[1].getBuffer();
    ByteBuffer buffer2 = imgYUV420.getPlanes()[2].getBuffer();
    int width = imgYUV420.getHeight();
    assert(imgYUV420.getPlanes()[0].getPixelStride() == 1);
    for (int row = imgYUV420.getHeight() - 1; row >= 0; row--) {
        for (int col = 0; col < imgYUV420.getWidth(); col++) {
            rez[col * width + row] = buffer0.get();
        }
    }
    int uv_offset = imgYUV420.getWidth() * imgYUV420.getHeight();
    assert(imgYUV420.getPlanes()[2].getPixelStride() == imgYUV420.getPlanes()[1].getPixelStride());
    int stride = imgYUV420.getPlanes()[1].getPixelStride();
    for (int row = imgYUV420.getHeight() - 2; row >= 0; row -= 2) {
        for (int col = 0; col < imgYUV420.getWidth(); col += 2) {
            rez[uv_offset + col / 2 * width + row] = buffer1.get();
            rez[uv_offset + col / 2 * width + row + 1] = buffer2.get();
            for (int skip = 1; skip < stride; skip++) {
                if (buffer1.remaining() > 0) {
                    buffer1.get();
                }
                if (buffer2.remaining() > 0) {
                    buffer2.get();
                }
            }
        }
    }
    Log.w(TAG, "total rotated: " + rez.length);
    return rez;
}
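Wired into the question's listener, the single-step version would be used roughly like this (a sketch; ForwardToAPI is the question's own method, and this variant bakes in one fixed rotation):
@Override
public void onImageAvailable(ImageReader reader) {
    Image image = reader.acquireLatestImage();
    if (image != null) {
        // convert and rotate in one pass instead of two separate steps
        byte[] dataRotated = rotateYUV420ToNV21(image);
        ForwardToAPI(dataRotated);
        image.close();
    }
}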
I sincerely recommend the site http://rawpixels.net/ to see the actual structure of your raw images.
With OpenCV and the Android Camera2 API this task is very fast; you don't need the YUV420-to-NV21 Java conversion, and with OpenCV the conversion is about 4x faster:
Java side:
// Starts a builtin camera with the Camera2 API
public void startCamera() {
    CameraManager manager = (CameraManager) AppData.getAppContext().getSystemService(Context.CAMERA_SERVICE);
    try {
        String pickedCamera = getCamera(manager);
        manager.openCamera(pickedCamera, cameraStateCallback, null);
        // set image format to YUV
        mImageReader = ImageReader.newInstance(mWidth, mHeight, ImageFormat.YUV_420_888, 4);
        mImageReader.setOnImageAvailableListener(onImageAvailableListener, null);
        Log.d(TAG, "imageReader created");
    } catch (CameraAccessException e) {
        Log.e(TAG, e.getMessage());
    }
}
// Listens for frames and sends them to be processed
protected ImageReader.OnImageAvailableListener onImageAvailableListener = new ImageReader.OnImageAvailableListener() {
    @Override
    public void onImageAvailable(ImageReader reader) {
        Image image = null;
        try {
            image = reader.acquireLatestImage();
            ByteBuffer buffer = image.getPlanes()[0].getBuffer();
            byte[] frameData = new byte[buffer.capacity()];
            buffer.get(frameData);
            // Native processing (see below); the image is closed in finally
            processAndRotateFrame(frameData);
        } catch (Exception e) {
            Logger.e(TAG, "imageReader exception: " + e.getMessage());
        } finally {
            if (image != null) {
                image.close();
            }
        }
    }
};
Native side (NDK/CMake):
JNIEXPORT jint JNICALL Java_com_android_mvf_Utils_processAndRotateFrame
        (JNIEnv *env, jobject object, jint width, jint height, jbyteArray frame, jint rotation) {
    // load data from the Java side
    jbyte *pFrameData = env->GetByteArrayElements(frame, 0);
    // wrap the array in a Mat, for example GRAY (CV_8UC1) or COLOR
    Mat mGray(height, width, CV_8UC1, (unsigned char *) pFrameData);
    // rotate the image
    rotateMat(mGray, rotation);
    int objects = your_function(env, mGray);
    env->ReleaseByteArrayElements(frame, pFrameData, 0);
    return objects;
}
void rotateMat(cv::Mat &matImage, int rotFlag) {
    if (rotFlag != 0 && rotFlag != 360) {
        if (rotFlag == 90) {
            cv::transpose(matImage, matImage);
            cv::flip(matImage, matImage, 1);
        } else if (rotFlag == 270 || rotFlag == -90) {
            cv::transpose(matImage, matImage);
            cv::flip(matImage, matImage, 0);
        } else if (rotFlag == 180) {
            cv::flip(matImage, matImage, -1);
        }
    }
}
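For completeness, the matching Java-side declaration would look something like this (a sketch; the package, class, and library name are assumptions taken from the JNI symbol above):
package com.android.mvf;

public class Utils {
    static {
        System.loadLibrary("native-lib"); // library name is an assumption; it must match your CMake target
    }

    // Matches Java_com_android_mvf_Utils_processAndRotateFrame on the native side
    public native int processAndRotateFrame(int width, int height, byte[] frame, int rotation);
}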

Android JavaCV create IplImage from Camera to use with ColorHistogram

I am using JavaCV in Android.
In my code, I have created an ImageComparator (a class from the OpenCV Cookbook examples:
http://code.google.com/p/javacv/source/browse/OpenCV2_Cookbook/src/opencv2_cookbook/chapter04/ImageComparator.scala?repo=examples
http://code.google.com/p/javacv/wiki/OpenCV2_Cookbook_Examples_Chapter_4) object and use that object to compare images. If I use a file from the SD card, the comparator works.
File referenceImageFile = new File(absPath1); // Read an image.
IplImage reference = Util.loadOrExit(referenceImageFile, CV_LOAD_IMAGE_COLOR);
comparator = new ImageComparator(reference);
But when I create the IplImage from the camera preview, it does not work. I get the following exception during the comparison "score" calculation:
score = referenceComparator.compare(grayImage) / imageSize;
java.lang.RuntimeException: /home/saudet/android/OpenCV-2.4.2/modules/core/src/convert.cpp:1196: error: (-215) i < src.channels() in function void cvSplit(const void*, void*, void*, void*, void*)
For the camera preview I am using the code from FacePreview to create the IplImage, but it creates the image in grayscale:
int f = SUBSAMPLING_FACTOR;
if (grayImage == null || grayImage.width() != width / f
        || grayImage.height() != height / f) {
    grayImage = IplImage.create(width / f, height / f, IPL_DEPTH_8U, 1);
}
int imageWidth = grayImage.width();
int imageHeight = grayImage.height();
int dataStride = f * width;
int imageStride = grayImage.widthStep();
ByteBuffer imageBuffer = grayImage.getByteBuffer();
for (int y = 0; y < imageHeight; y++) {
    int dataLine = y * dataStride;
    int imageLine = y * imageStride;
    for (int x = 0; x < imageWidth; x++) {
        imageBuffer.put(imageLine + x, data[dataLine + f * x]);
    }
}
How can I create a color IplImage from the camera to use with ImageComparator?
The below code seems to be working fine.
public void onPreviewFrame(final byte[] data, final Camera camera) {
    try {
        Camera.Size size = camera.getParameters().getPreviewSize();
        processImage(data, size.width, size.height);
        camera.addCallbackBuffer(data);
    } catch (RuntimeException e) {
        // The camera has probably just been released, ignore.
        Log.d("Exception", " " + e);
    }
}

protected void processImage(byte[] data, int width, int height) {
    score.clear();
    // First, downsample our image
    int f = SUBSAMPLING_FACTOR;
    IplImage _4image = IplImage.create(width, height, IPL_DEPTH_8U, f);
    int[] _temp = new int[width * height];
    if (_4image != null) {
        decodeYUV420SP(_temp, data, width, height);
        _4image.getIntBuffer().put(_temp);
    }
    //bitmap = Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_8888);
    //bitmap.copyPixelsFromBuffer(_4image.getByteBuffer());
    Log.d("CompareAndroid", "processImage");
    int imageSize = _4image.width() * _4image.height();
    Iterator<ImageComparator> iterator = reference_List.iterator();
    // Compute histogram match and normalize by image size.
    // 1 means perfect match.
    while (iterator.hasNext()) {
        score.add(((ImageComparator) iterator.next()).compare(_4image) / imageSize);
    }
    Log.d("CompareImages", "Score Size " + score.size());
    postInvalidate();
}
This code seems to be working fine.
private void decodeYUV420SP(int[] rgb, byte[] yuv420sp, int width, int height) {
    int frameSize = width * height;
    for (int j = 0, yp = 0; j < height; j++) {
        int uvp = frameSize + (j >> 1) * width, u = 0, v = 0;
        for (int i = 0; i < width; i++, yp++) {
            int y = (0xff & ((int) yuv420sp[yp])) - 16;
            if (y < 0)
                y = 0;
            if ((i & 1) == 0) {
                v = (0xff & yuv420sp[uvp++]) - 128;
                u = (0xff & yuv420sp[uvp++]) - 128;
            }
            int y1192 = 1192 * y;
            int r = (y1192 + 1634 * v);
            int g = (y1192 - 833 * v - 400 * u);
            int b = (y1192 + 2066 * u);
            if (r < 0) r = 0; else if (r > 262143) r = 262143;
            if (g < 0) g = 0; else if (g > 262143) g = 262143;
            if (b < 0) b = 0; else if (b > 262143) b = 262143;
            rgb[yp] = 0xff000000 | ((b << 6) & 0xff0000) | ((g >> 2) & 0xff00) | ((r >> 10) & 0xff);
        }
    }
}
I haven't tested it, but something like this should work:
IplImage yuvimage = IplImage.create(width, height * 3 / 2, IPL_DEPTH_8U, 2);
IplImage rgbimage = IplImage.create(width, height, IPL_DEPTH_8U, 3);
cvCvtColor(yuvimage, rgbimage, CV_YUV2BGR_NV21);
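To connect that to the preview callback, the NV21 bytes from onPreviewFrame would first be copied into yuvimage (a sketch, untested like the snippet above; it assumes yuvimage's buffer is laid out to hold exactly the width * height * 3/2 NV21 bytes):
// data is the NV21 byte[] from onPreviewFrame
yuvimage.getByteBuffer().put(data);
cvCvtColor(yuvimage, rgbimage, CV_YUV2BGR_NV21);
// rgbimage is now a 3-channel BGR image suitable for ImageComparator
comparator = new ImageComparator(rgbimage);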

How to save video file using canvas on android?

Currently I'm working on video effects like thermal, mono, etc. For that I'm using the preview callback together with canvas drawing.
Can anyone please tell me how to save this video?
Below I have my callback code:
mCamera.setPreviewCallback(new PreviewCallback() {
    public void onPreviewFrame(byte[] data1, Camera _camera) {
        Camera.Parameters parameters = _camera.getParameters();
        parameters.setColorEffect(Camera.Parameters.EFFECT_NONE);
        previewSize = parameters.getPreviewSize();
        int frameSize = previewSize.width * previewSize.height;
        int height = previewSize.height;
        int width = previewSize.width;
        int pixel;
        int A, R, G, B;
        final double GS_RED = 0.299;
        final double GS_GREEN = 0.587;
        final double GS_BLUE = 0.114;
        int[] rgba = new int[frameSize + 1];
        data = data1;
        for (int i = 0; i < height; i++) {
            for (int j = 0; j < width; j++) {
                int y = (0xff & ((int) data[i * previewSize.width + j]));
                int u = (0xff & ((int) data[frameSize + (i >> 1) * previewSize.width + (j & ~1) + 0]));
                int v = (0xff & ((int) data[frameSize + (i >> 1) * previewSize.width + (j & ~1) + 1]));
                y = y < 16 ? 16 : y;
                int r = Math.round(1.164f * (y - 16) + 1.596f * (v - 128));
                int g = Math.round(1.164f * (y - 16) - 0.813f * (v - 128) - 0.391f * (u - 128));
                int b = Math.round(1.164f * (y - 16) + 2.018f * (u - 128));
                r = r < 0 ? 0 : (r > 255 ? 255 : r);
                g = g < 0 ? 0 : (g > 255 ? 255 : g);
                b = b < 0 ? 0 : (b > 255 ? 255 : b);
                rgba[i * previewSize.width + j] = 0xff000000 + (b << 16) + (g << 8) + r;
            }
        }
        Bitmap bmp = Bitmap.createBitmap(width, height, Bitmap.Config.RGB_565);
        bmp.setPixels(rgba, 0, width, 0, 0, width, height);
        // scan through every single pixel
        for (int x = 0; x < width; ++x) {
            for (int y = 0; y < height; ++y) {
                // get one pixel color
                pixel = bmp.getPixel(x, y);
                // retrieve color of all channels
                A = Color.alpha(pixel);
                R = Color.red(pixel);
                G = Color.green(pixel);
                B = Color.blue(pixel);
                // take conversion up to one single value
                R = G = B = (int) (GS_RED * R + GS_GREEN * G + GS_BLUE * B);
                // set new pixel color to output bitmap
                bmp.setPixel(x, y, Color.argb(A, R, G, B));
            }
        }
        canvas = mHolder.lockCanvas();
        if (canvas != null) {
            canvas.drawBitmap(bmp, (canvas.getWidth() - width) / 4, (canvas.getHeight() - height) / 4, null);
            mHolder.unlockCanvasAndPost(canvas);
        }
        bmp.recycle();
    }
});
You can save each frame and then merge them with FFmpeg, encoding them into a video file.
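A minimal sketch of the saving step (the directory, file naming, and JPEG quality are illustrative assumptions); FFmpeg can then encode the sequence, e.g. ffmpeg -framerate 30 -i frame_%05d.jpg out.mp4:
private int frameIndex = 0;

private void saveFrame(Bitmap bmp, File dir) {
    // write one processed frame as a numbered JPEG
    File out = new File(dir, String.format(Locale.US, "frame_%05d.jpg", frameIndex++));
    try (FileOutputStream fos = new FileOutputStream(out)) {
        bmp.compress(Bitmap.CompressFormat.JPEG, 90, fos);
    } catch (IOException e) {
        Log.e("SaveFrame", "Failed to save " + out, e);
    }
}
Calling saveFrame(bmp, dir) just before bmp.recycle() in the callback above would capture each processed frame.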

How to convert RGB565 to YUV420SP faster on android?

I need to display a JPEG picture and convert it to YUV420SP. First I use SkBitmap to parse the JPEG and display it; then I use the code below to convert RGB565 to YUV420SP on Android. But it takes 75 ms to convert a 640x480 RGB565 picture, so does anybody know a faster way to convert RGB565 to YUV420SP on Android, or a faster way to convert a JPEG file to YUV420SP on Android?
// Convert from RGB to YUV420
int RGB2YUV_YR[256], RGB2YUV_YG[256], RGB2YUV_YB[256];
int RGB2YUV_UR[256], RGB2YUV_UG[256], RGB2YUV_UBVR[256];
int RGB2YUV_VG[256], RGB2YUV_VB[256];

//
// Table used for RGB to YUV420 conversion
//
void InitLookupTable()
{
    static bool hasInited = false;
    if (hasInited)
        return;
    hasInited = true;
    int i;
    for (i = 0; i < 256; i++)
        RGB2YUV_YR[i] = (float) 65.481 * (i << 8);
    for (i = 0; i < 256; i++)
        RGB2YUV_YG[i] = (float) 128.553 * (i << 8);
    for (i = 0; i < 256; i++)
        RGB2YUV_YB[i] = (float) 24.966 * (i << 8);
    for (i = 0; i < 256; i++)
        RGB2YUV_UR[i] = (float) 37.797 * (i << 8);
    for (i = 0; i < 256; i++)
        RGB2YUV_UG[i] = (float) 74.203 * (i << 8);
    for (i = 0; i < 256; i++)
        RGB2YUV_VG[i] = (float) 93.786 * (i << 8);
    for (i = 0; i < 256; i++)
        RGB2YUV_VB[i] = (float) 18.214 * (i << 8);
    for (i = 0; i < 256; i++)
        RGB2YUV_UBVR[i] = (float) 112 * (i << 8);
}
int ConvertRGB5652YUV420SP(int w, int h, unsigned char *bmp, unsigned char *yuv)
{
    unsigned char *u, *v, *y, *uu, *vv;
    unsigned char *pu1, *pu2, *pu3, *pu4;
    unsigned char *pv1, *pv2, *pv3, *pv4;
    unsigned char rValue = 0, gValue = 0, bValue = 0;
    uint16_t *bmpPtr;
    int i, j;
    printf("ConvertRGB5652YUV420SP begin,w=%d,h=%d,bmp=%p,yuv=%p\n", w, h, bmp, yuv);
    struct timeval tpstart, tpend;
    gettimeofday(&tpstart, NULL);
    InitLookupTable();
    gettimeofday(&tpend, NULL);
    float timeuse = 1000000 * (tpend.tv_sec - tpstart.tv_sec) + tpend.tv_usec - tpstart.tv_usec;
    timeuse /= 1000;
    printf("InitLookupTable used time=%f\n", timeuse);
    gettimeofday(&tpstart, NULL);
    uu = new unsigned char[w * h];
    vv = new unsigned char[w * h];
    if (uu == NULL || vv == NULL || yuv == NULL)
        return 0;
    y = yuv;
    u = uu;
    v = vv;
    // Get r,g,b pointers from bmp image data....
    bmpPtr = (uint16_t *) bmp;
    // Get YUV values for rgb values...
    for (i = 0; i < h; i++) {
        for (j = 0; j < w; j++) {
            uint16_t color = *bmpPtr;
            unsigned int r = (color >> 11) & 0x1f;
            unsigned int g = (color >> 5) & 0x3f;
            unsigned int b = (color) & 0x1f;
            rValue = (r << 3) | (r >> 2);
            gValue = (g << 2) | (g >> 4);
            bValue = (b << 3) | (b >> 2);
            *y++ = (RGB2YUV_YR[rValue] + RGB2YUV_YG[gValue] + RGB2YUV_YB[bValue] + 1048576) >> 16;
            *u++ = (-RGB2YUV_UR[rValue] - RGB2YUV_UG[gValue] + RGB2YUV_UBVR[bValue] + 8388608) >> 16;
            *v++ = (RGB2YUV_UBVR[rValue] - RGB2YUV_VG[gValue] - RGB2YUV_VB[bValue] + 8388608) >> 16;
            bmpPtr++;
        }
    }
    gettimeofday(&tpend, NULL);
    timeuse = 1000000 * (tpend.tv_sec - tpstart.tv_sec) + tpend.tv_usec - tpstart.tv_usec;
    timeuse /= 1000;
    printf("Get YUV values used time=%f\n", timeuse);
    gettimeofday(&tpstart, NULL);
    // Now sample the U & V to obtain YUV 4:2:0 format
    // Get the right pointers...
    u = yuv + w * h;
    v = u + 1;
    // For U
    pu1 = uu;
    pu2 = pu1 + 1;
    pu3 = pu1 + w;
    pu4 = pu3 + 1;
    // For V
    pv1 = vv;
    pv2 = pv1 + 1;
    pv3 = pv1 + w;
    pv4 = pv3 + 1;
    // Do sampling....
    for (i = 0; i < h; i += 2) {
        for (j = 0; j < w; j += 2) {
            *u = (*pu1 + *pu2 + *pu3 + *pu4) >> 2;
            u += 2;
            *v = (*pv1 + *pv2 + *pv3 + *pv4) >> 2;
            v += 2;
            pu1 += 2; pu2 += 2; pu3 += 2; pu4 += 2;
            pv1 += 2; pv2 += 2; pv3 += 2; pv4 += 2;
        }
        pu1 += w; pu2 += w; pu3 += w; pu4 += w;
        pv1 += w; pv2 += w; pv3 += w; pv4 += w;
    }
    gettimeofday(&tpend, NULL);
    timeuse = 1000000 * (tpend.tv_sec - tpstart.tv_sec) + tpend.tv_usec - tpstart.tv_usec;
    timeuse /= 1000;
    printf("Do sampling used time=%f\n", timeuse);
    gettimeofday(&tpstart, NULL);
    delete[] uu; // arrays allocated with new[] need delete[]
    delete[] vv;
    return 1;
}

int main(int argc, char **argv)
{
    unsigned char bmp[640 * 480 * 2] = {0};
    unsigned char yuv[(640 * 480 * 3) / 2] = {0};
    struct timeval tpstart, tpend;
    gettimeofday(&tpstart, NULL);
    ConvertRGB5652YUV420SP(640, 480, bmp, yuv);
    gettimeofday(&tpend, NULL);
    float timeuse = 1000000 * (tpend.tv_sec - tpstart.tv_sec) + tpend.tv_usec - tpstart.tv_usec;
    timeuse /= 1000;
    printf("ConvertARGB2YUV420SP used time=%f\n", timeuse);
    return 0;
}
Output on Android (ARMv6):
ConvertRGB5652YUV420SP begin,w=640,h=480,bmp=0xbe7314fc,yuv=0xbe7c74fc
InitLookupTable used time=0.383000
Get YUV values used time=61.394001
Do sampling used time=11.918000
ConvertARGB2YUV420SP used time=74.596001
CPU info:
$ cat /proc/cpuinfo
Processor : ARMv6-compatible processor rev 5 (v6l)
BogoMIPS : 791.34
Features : swp half thumb fastmult vfp edsp java
CPU implementer : 0x41
CPU architecture: 6TEJ
CPU variant : 0x1
CPU part : 0xb36
CPU revision : 5
Hardware : IMAPX200
Revision : 0000
Serial : 0000000000000000
On ARMv7, use NEON. It will do the job in less than 1 ms (VGA).
If you are stuck with ARMv6, optimize it in ARM assembly (about 8 ms on VGA).
Use fixed-point arithmetic instead of the lookup tables; get rid of them.
Make two masks:
0x001f001f : mask1
0x003f003f : mask2
Then load two pixels at once into a 32-bit register (which is a lot faster than a 16-bit read):
and red, mask1, pixel, lsr #11
and grn, mask2, pixel, lsr #5
and blu, mask1, pixel
Now you have three registers, each containing two values: one in the lower and the other in the upper 16 bits.
The smulxy instructions (16-bit multiplies) will do some miracles from here on.
Good luck.
PS: Your lookup tables aren't that good either. Why are they all of length 256?
You could reduce them to 32 (r- and b-related) and 64 (g-related), which would increase the cache hit rate.
That will probably reach the targeted 40 ms without resorting to assembly.
Yes, cache misses are THAT painful.
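To make the two-pixels-at-once masking concrete, here is the same unpacking written out in plain code (a Java sketch for readability; the actual speedup comes from doing this in ARM assembly or NEON as described above):
static void unpackTwoRgb565Pixels(int pixelPair) { // two RGB565 pixels packed in one 32-bit word
    final int MASK1 = 0x001F001F; // two 5-bit fields (red or blue)
    final int MASK2 = 0x003F003F; // two 6-bit fields (green)
    int red = (pixelPair >>> 11) & MASK1; // reds of both pixels, in the lower/upper halves
    int grn = (pixelPair >>> 5) & MASK2;  // greens of both pixels
    int blu = pixelPair & MASK1;          // blues of both pixels
    // Each word now holds two channel values, ready for paired
    // 16-bit multiplies (smulxy on ARM) against the YUV coefficients.
    int r0 = red & 0xFFFF, r1 = red >>> 16;
    int g0 = grn & 0xFFFF, g1 = grn >>> 16;
    int b0 = blu & 0xFFFF, b1 = blu >>> 16;
    System.out.printf("p0: r=%d g=%d b=%d, p1: r=%d g=%d b=%d%n", r0, g0, b0, r1, g1, b1);
}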
I have found a faster way using Skia; it runs in about 40 ms.
#include "SkColorPriv.h"
#include "SkBitmap.h"
#include "SkCanvas.h"
#include "SkStream.h"
using namespace android;
// taken from jcolor.c in libjpeg
#if 0 // 16bit - precise but slow
#define CYR 19595 // 0.299
#define CYG 38470 // 0.587
#define CYB 7471 // 0.114
#define CUR -11059 // -0.16874
#define CUG -21709 // -0.33126
#define CUB 32768 // 0.5
#define CVR 32768 // 0.5
#define CVG -27439 // -0.41869
#define CVB -5329 // -0.08131
#define CSHIFT 16
#else // 8bit - fast, slightly less precise
#define CYR 77 // 0.299
#define CYG 150 // 0.587
#define CYB 29 // 0.114
#define CUR -43 // -0.16874
#define CUG -85 // -0.33126
#define CUB 128 // 0.5
#define CVR 128 // 0.5
#define CVG -107 // -0.41869
#define CVB -21 // -0.08131
#define CSHIFT 8
#endif
static void rgb2yuv_32(uint8_t dst[], SkPMColor c) {
    int r = SkGetPackedR32(c);
    int g = SkGetPackedG32(c);
    int b = SkGetPackedB32(c);
    int y = ( CYR*r + CYG*g + CYB*b ) >> CSHIFT;
    int u = ( CUR*r + CUG*g + CUB*b ) >> CSHIFT;
    int v = ( CVR*r + CVG*g + CVB*b ) >> CSHIFT;
    dst[0] = SkToU8(y);
    dst[1] = SkToU8(u + 128);
    dst[2] = SkToU8(v + 128);
}

static void rgb2yuv_32_x(uint8_t *py, uint8_t *pu, uint8_t *pv, SkPMColor c) {
    int r = SkGetPackedR32(c);
    int g = SkGetPackedG32(c);
    int b = SkGetPackedB32(c);
    if (py != NULL) {
        int y = ( CYR*r + CYG*g + CYB*b ) >> CSHIFT;
        *py = SkToU8(y);
    }
    if (pu != NULL) {
        int u = ( CUR*r + CUG*g + CUB*b ) >> CSHIFT;
        *pu = SkToU8(u + 128);
    }
    if (pv != NULL) {
        int v = ( CVR*r + CVG*g + CVB*b ) >> CSHIFT;
        *pv = SkToU8(v + 128);
    }
}

static void rgb2yuv_4444(uint8_t dst[], U16CPU c) {
    int r = SkGetPackedR4444(c);
    int g = SkGetPackedG4444(c);
    int b = SkGetPackedB4444(c);
    int y = ( CYR*r + CYG*g + CYB*b ) >> (CSHIFT - 4);
    int u = ( CUR*r + CUG*g + CUB*b ) >> (CSHIFT - 4);
    int v = ( CVR*r + CVG*g + CVB*b ) >> (CSHIFT - 4);
    dst[0] = SkToU8(y);
    dst[1] = SkToU8(u + 128);
    dst[2] = SkToU8(v + 128);
}

static void rgb2yuv_4444_x(uint8_t *py, uint8_t *pu, uint8_t *pv, U16CPU c) {
    int r = SkGetPackedR4444(c);
    int g = SkGetPackedG4444(c);
    int b = SkGetPackedB4444(c);
    if (py != NULL) {
        int y = ( CYR*r + CYG*g + CYB*b ) >> (CSHIFT - 4);
        *py = SkToU8(y);
    }
    if (pu != NULL) {
        int u = ( CUR*r + CUG*g + CUB*b ) >> (CSHIFT - 4);
        *pu = SkToU8(u + 128);
    }
    if (pv != NULL) {
        int v = ( CVR*r + CVG*g + CVB*b ) >> (CSHIFT - 4);
        *pv = SkToU8(v + 128);
    }
}

static void rgb2yuv_16(uint8_t dst[], U16CPU c) {
    int r = SkGetPackedR16(c);
    int g = SkGetPackedG16(c);
    int b = SkGetPackedB16(c);
    int y = ( 2*CYR*r + CYG*g + 2*CYB*b ) >> (CSHIFT - 2);
    int u = ( 2*CUR*r + CUG*g + 2*CUB*b ) >> (CSHIFT - 2);
    int v = ( 2*CVR*r + CVG*g + 2*CVB*b ) >> (CSHIFT - 2);
    dst[0] = SkToU8(y);
    dst[1] = SkToU8(u + 128);
    dst[2] = SkToU8(v + 128);
}

static void rgb2yuv_16_x(uint8_t *py, uint8_t *pu, uint8_t *pv, U16CPU c) {
    int r = SkGetPackedR16(c);
    int g = SkGetPackedG16(c);
    int b = SkGetPackedB16(c);
    if (py != NULL) {
        int y = ( 2*CYR*r + CYG*g + 2*CYB*b ) >> (CSHIFT - 2);
        *py = SkToU8(y);
    }
    if (pu != NULL) {
        int u = ( 2*CUR*r + CUG*g + 2*CUB*b ) >> (CSHIFT - 2);
        *pu = SkToU8(u + 128);
    }
    if (pv != NULL) {
        int v = ( 2*CVR*r + CVG*g + 2*CVB*b ) >> (CSHIFT - 2);
        *pv = SkToU8(v + 128);
    }
}
int ConvertRGB5652YUV420SPBySkia(SkBitmap *bmp, unsigned char *dst) {
    if (!bmp || !dst || bmp->getConfig() != SkBitmap::kRGB_565_Config)
        return -1;
    int width = bmp->width();
    int height = bmp->height();
    void *src = bmp->getPixels();
    int src_rowbytes = bmp->rowBytes();
    int stride = width;
    int dstheight = height;
    int i, j;
    uint8_t *y_base = (uint8_t *) dst;
    uint8_t *cb_base = (uint8_t *) ((unsigned int) y_base + stride * dstheight);
    uint8_t *cr_base = cb_base + 1;
    uint8_t yuv[3];
    uint8_t *y = NULL, *cb = NULL, *cr = NULL;
    uint16_t *rgb = (uint16_t *) src;
    for (i = 0; i < height; i++) {
        rgb = (uint16_t *) ((unsigned int) src + i * src_rowbytes);
        y = (uint8_t *) ((unsigned int) y_base + i * stride);
        if ((i & 0x1) == 0) {
            cb = (uint8_t *) ((unsigned int) cb_base + ((i >> 1) * stride));
            cr = cb + 1;
        }
        for (j = 0; j < width; j++) {
            if (i & 0x1) { // valid y and cr
                if (j & 0x01) { // only y
                    rgb2yuv_16_x(y++, NULL, NULL, *rgb++);
                } else { // both y and cr
                    rgb2yuv_16_x(y++, NULL, cr++, *rgb++);
                    cr++;
                }
            } else { // valid y and cb
                if (j & 0x01) { // only y
                    rgb2yuv_16_x(y++, NULL, NULL, *rgb++);
                } else { // both y and cb
                    rgb2yuv_16_x(y++, cb++, NULL, *rgb++);
                    cb++;
                }
            }
        }
    }
    return 0;
}
