I am building a scanner app, and trying to determine the "preview quality" from the preview callback of the camera. I want to customize the camera's AUTO_FLASH_MODE where it will be turned on if the environment is too dark.
How can I detect if there is a high average of dark pixels? This means (in preview) I am getting darkness and therefore need to turn on the camera's flash light.
Either find out how to access the pixel values of your image and calculate the average intensity yourself, or use an image processing library to do so.
Dark pixels have low values, bright pixels have high values.
Calculate the sum of all red, green and blue values and divide it by three times your pixel count.
Define a threshold for when to turn on the flash, but keep in mind that you will need a new exposure time afterwards.
Prefer flash over a longer exposure time, since long exposure times yield higher image noise.
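For illustration, a minimal sketch of that calculation, assuming you already have a Bitmap of the preview frame (the method name is made up):
// Average intensity over all pixels: the sum of R, G and B values
// divided by three times the pixel count, as described above.
private static double averageIntensity(Bitmap bitmap) {
    int w = bitmap.getWidth(), h = bitmap.getHeight();
    int[] pixels = new int[w * h];
    bitmap.getPixels(pixels, 0, w, 0, 0, w, h); // one bulk read beats per-pixel getPixel()
    long sum = 0;
    for (int c : pixels) {
        sum += Color.red(c) + Color.green(c) + Color.blue(c);
    }
    return sum / (3.0 * pixels.length); // 0 = black, 255 = white
}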
I tried this approach, but I think it spends unnecessary time processing the bitmap just to get an average screen color:
@Override
public void onPreviewFrame(byte[] data, Camera camera) {
    Size cameraResolution = resolution;
    PreviewCallback callback = this.callback;
    if (cameraResolution != null && callback != null) {
        int format = camera.getParameters().getPreviewFormat();
        SourceData source = new SourceData(data, cameraResolution.width, cameraResolution.height, format, getCameraRotation());
        callback.onPreview(source);
        final int[] rgb = decodeYUV420SP(data, cameraResolution.width, cameraResolution.height);
        Bitmap bmp = Bitmap.createBitmap(rgb, cameraResolution.width, cameraResolution.height, Bitmap.Config.ARGB_8888);
        if (bmp != null) {
            // Sample a 60x60 patch starting at the center of the frame.
            Bitmap resizebitmap = Bitmap.createBitmap(bmp,
                    bmp.getWidth() / 2, bmp.getHeight() / 2, 60, 60);
            int color = getAverageColor(resizebitmap);
            String strColor = String.format("#%06X", 0xFFFFFF & color);
            Log.d("strColor", strColor);
            if (!mIsOn) {
                // 0xFF000000 (-16777216) is opaque black, the minimum ARGB value.
                if (color <= -16777216) {
                    mIsOn = true;
                    setTorch(true);
                    Log.d("Torch", "average is full dark: " + color);
                }
            }
            Log.i("Pixel Value", "Average color: " + Integer.toHexString(color));
        }
    } else {
        Log.d(TAG, "Got preview callback, but no handler or resolution available");
    }
}
private int[] decodeYUV420SP(byte[] yuv420sp, int width, int height) {
    final int frameSize = width * height;
    int[] rgb = new int[frameSize];
    for (int j = 0, yp = 0; j < height; j++) {
        int uvp = frameSize + (j >> 1) * width, u = 0, v = 0;
        for (int i = 0; i < width; i++, yp++) {
            int y = (0xff & ((int) yuv420sp[yp])) - 16;
            if (y < 0) y = 0;
            if ((i & 1) == 0) {
                v = (0xff & yuv420sp[uvp++]) - 128;
                u = (0xff & yuv420sp[uvp++]) - 128;
            }
            int y1192 = 1192 * y;
            int r = (y1192 + 1634 * v);
            int g = (y1192 - 833 * v - 400 * u);
            int b = (y1192 + 2066 * u);
            if (r < 0) r = 0; else if (r > 262143) r = 262143;
            if (g < 0) g = 0; else if (g > 262143) g = 262143;
            if (b < 0) b = 0; else if (b > 262143) b = 262143;
            rgb[yp] = 0xff000000 | ((r << 6) & 0xff0000) | ((g >> 2) & 0xff00) | ((b >> 10) & 0xff);
        }
    }
    return rgb;
}
private int getAverageColor(Bitmap bitmap) {
    int redBucket = 0;
    int greenBucket = 0;
    int blueBucket = 0;
    int pixelCount = 0;
    for (int y = 0; y < bitmap.getHeight(); y++) {
        for (int x = 0; x < bitmap.getWidth(); x++) {
            int c = bitmap.getPixel(x, y);
            pixelCount++;
            redBucket += Color.red(c);
            greenBucket += Color.green(c);
            blueBucket += Color.blue(c);
            // alpha is ignored: it carries opacity, not brightness
        }
    }
    return Color.rgb(redBucket / pixelCount, greenBucket / pixelCount, blueBucket / pixelCount);
}
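Since the concern is processing time: for a darkness check you do not need the RGB conversion or the Bitmap at all. In the default NV21 preview format, the first width * height bytes of the buffer are the luminance (Y) plane, so you can average those bytes directly. A minimal sketch under that assumption (the threshold value and the mIsOn/setTorch members are placeholders):
// Average the Y (luminance) plane of an NV21 preview frame: one pass
// over the raw bytes, no decodeYUV420SP and no Bitmap allocation.
private static final int DARK_THRESHOLD = 40; // 0..255, tune empirically

private void checkBrightness(byte[] nv21, int width, int height) {
    int pixels = width * height; // the Y plane is the first width*height bytes
    long sum = 0;
    for (int i = 0; i < pixels; i++) {
        sum += (nv21[i] & 0xff); // luma as unsigned 0..255
    }
    int averageLuma = (int) (sum / pixels);
    if (!mIsOn && averageLuma < DARK_THRESHOLD) {
        mIsOn = true;
        setTorch(true);
    }
}
Sampling every 10th pixel instead of all of them cuts the cost further with no practical loss of accuracy for an average.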
Related
In my application, I want to save images to a buffer as soon as the user starts the camera, capturing frames before the user tries to take a picture. Please suggest a specific way to achieve this on Android.
With the old camera API you can access the preview frame data relatively easily.
Android 5.0 introduced camera2, which is a lot more complicated to set up with the correct handlers and states.
Now that Android 7.0 is released, the main question is: are you fine with using old, deprecated code or not? Since some of our Android phones still run 4.2 and 4.4, and others 5.0 and 6.0, it is fine for me, but I don't know your case.
In the example below I simply remove a custom color, similar to removing a green screen, but when the user touches the preview screen it selects the desired color instead, be it a brown, a blue, whatever.
private class CameraPreviewCallback implements Camera.PreviewCallback {
    private int curColor, curRed, curGreen, curBlue;

    @Override
    public void onPreviewFrame(byte[] data, Camera camera) {
        // conversion:
        decodeYUV420SP(mRgb, data, mSurfWidth, mSurfHeight);
        // check if the selected color needs an update
        if (mRequestColorUpdate) {
            int arrayIndex = mTouchY * mSurfWidth + mTouchX;
            mChoosedColor = mRgb[arrayIndex];
            btRecordVideo.setBackgroundColor(mChoosedColor);
            mRequestColorUpdate = false;
            updateBackgroundRemovalLimits();
            // do my stuff, not important here
        }
        // remove background:
        for (int i = 0; i < mRgb.length; i++) {
            curColor = mRgb[i];
            // split:
            curRed = Color.red(curColor);
            curGreen = Color.green(curColor);
            curBlue = Color.blue(curColor);
            if ((minRed <= curRed) && (curRed <= maxRed) && (minGreen <= curGreen) && (curGreen <= maxGreen) && (minBlue <= curBlue) && (curBlue <= maxBlue)) {
                // set alpha to 0 bitwise:
                mRgb[i] = curColor & 0x00FFFFFF;
            }
        }
        // update the modified preview screen:
        mBitmap.setPixels(mRgb, 0, mSurfWidth, 0, 0, mSurfWidth, mSurfHeight);
        if (ivPreviewModified != null) {
            ivPreviewModified.setImageBitmap(mBitmap);
        }
    }
}
You have to attach the callback to the camera:
camera.setPreviewCallback(new CameraPreviewCallback());
And the camera initialized like this:
@Override
public void onResume() {
    super.onResume();
    camera = Camera.open();
    startPreview();
}
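Since the question is about buffering frames before capture, a sketch using setPreviewCallbackWithBuffer may also help: it recycles a pre-allocated buffer instead of allocating a new byte[] per frame (this assumes the NV21 preview format and that mSurfWidth/mSurfHeight hold the preview size):
// Pre-allocate one NV21-sized buffer and hand it back after each frame,
// so preview frames reuse memory instead of triggering GC churn.
int bufferSize = mSurfWidth * mSurfHeight * 3 / 2; // NV21 uses 12 bits per pixel
camera.addCallbackBuffer(new byte[bufferSize]);
camera.setPreviewCallbackWithBuffer(new Camera.PreviewCallback() {
    @Override
    public void onPreviewFrame(byte[] data, Camera camera) {
        // copy or queue 'data' here if you want to keep the frame
        camera.addCallbackBuffer(data); // return the buffer for the next frame
    }
});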
And this one is from Stack Overflow :)
public class YuvToRgb {
    // Byte decoder: -----------------------------------------------------------
    public static final void decodeYUV420SP(int[] rgb, byte[] yuv420sp, int width, int height) {
        // Pulled directly from:
        // http://ketai.googlecode.com/svn/trunk/ketai/src/edu/uic/ketai/inputService/KetaiCamera.java
        final int frameSize = width * height;
        for (int j = 0, yp = 0; j < height; j++) {
            int uvp = frameSize + (j >> 1) * width, u = 0, v = 0;
            for (int i = 0; i < width; i++, yp++) {
                int y = (0xff & ((int) yuv420sp[yp])) - 16;
                if (y < 0) y = 0;
                if ((i & 1) == 0) {
                    v = (0xff & yuv420sp[uvp++]) - 128;
                    u = (0xff & yuv420sp[uvp++]) - 128;
                }
                int y1192 = 1192 * y;
                int r = (y1192 + 1634 * v);
                int g = (y1192 - 833 * v - 400 * u);
                int b = (y1192 + 2066 * u);
                if (r < 0) r = 0; else if (r > 262143) r = 262143;
                if (g < 0) g = 0; else if (g > 262143) g = 262143;
                if (b < 0) b = 0; else if (b > 262143) b = 262143;
                rgb[yp] = 0xff000000 | ((r << 6) & 0xff0000) | ((g >> 2) & 0xff00) | ((b >> 10) & 0xff);
            }
        }
    }
}
I want to capture an image from the camera only when the background color is white.
Can you also suggest how to detect the background color of an image?
Is there any library for this?
Thanks in advance.
This long story cannot be short :)
Make sure you have set the preview format on the camera (note that getParameters() returns a copy, so you have to set the changed parameters back):
Camera.Parameters params = mCamera.getParameters();
params.setPreviewFormat(ImageFormat.NV21);
mCamera.setParameters(params);
You can then get your image in the camera callback:
Camera.PreviewCallback previewCb = new Camera.PreviewCallback() {
    public void onPreviewFrame(byte[] data, Camera camera) {
        if (camera == null || extractedColorsBackground.getVisibility() == View.VISIBLE)
            return;
        Camera.Parameters parameters = camera.getParameters();
        Camera.Size size = parameters.getPreviewSize();
        Camera.CameraInfo info = new Camera.CameraInfo();
        Camera.getCameraInfo(0, info);
        if (mBitmapWidth == 0 || mBitmapHeight == 0) {
            mBitmapWidth = size.width;
            mBitmapHeight = size.height;
        }
        mCurrentImageRGB = new int[mBitmapWidth * mBitmapHeight];
        Recognize.decodeYUV420SP2(mCurrentImageRGB, data, mBitmapWidth, mBitmapHeight);
    }
};
And the transformer:
public static void decodeYUV420SP2(int[] rgb, byte[] yuv420sp, int width, int height) {
    final int frameSize = width * height;
    for (int j = 0, yp = 0; j < height; j++) {
        int uvp = frameSize + (j >> 1) * width, u = 0, v = 0;
        for (int i = 0; i < width; i++, yp++) {
            int y = (0xff & ((int) yuv420sp[yp])) - 16;
            if (y < 0) y = 0;
            if ((i & 1) == 0) {
                v = (0xff & yuv420sp[uvp++]) - 128;
                u = (0xff & yuv420sp[uvp++]) - 128;
            }
            int y1192 = 1192 * y;
            int r = (y1192 + 1634 * v);
            int g = (y1192 - 833 * v - 400 * u);
            int b = (y1192 + 2066 * u);
            if (r < 0) r = 0; else if (r > 262143) r = 262143;
            if (g < 0) g = 0; else if (g > 262143) g = 262143;
            if (b < 0) b = 0; else if (b > 262143) b = 262143;
            rgb[yp] = 0xff000000 | ((r << 6) & 0xff0000) | ((g >> 2) & 0xff00) | ((b >> 10) & 0xff);
        }
    }
}
mCurrentImageRGB will hold the int array of colors for this image. Now you can count how many are white.
More about white: if you get even one exact #FFFFFF pixel, that would be amazing :) You have to count colors that are not pure white but very close to white, for example everything above #BBBBBB, because the camera will not give you an exact #FFFFFF. Look at what you get in the callback and act according to the situation.
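A rough sketch of that count, assuming mCurrentImageRGB has been filled as above and using #BBBBBB as the (tunable) near-white cutoff:
// Fraction of pixels whose R, G and B channels all sit above the cutoff.
static double whiteFraction(int[] rgb) {
    final int cutoff = 0xBB; // everything above #BBBBBB counts as "white enough"
    int white = 0;
    for (int c : rgb) {
        if (Color.red(c) >= cutoff && Color.green(c) >= cutoff && Color.blue(c) >= cutoff) {
            white++;
        }
    }
    return white / (double) rgb.length;
}
You could then trigger the capture when, say, whiteFraction(mCurrentImageRGB) exceeds 0.8.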
I am trying to record video from the camera using JavaCV:
// recorder settings:
private int imageWidth = 320;
private int imageHeight = 240;
private int frameRate = 30;
recorder = new FFmpegFrameRecorder(ffmpeg_link, imageWidth, imageHeight, 1);
recorder.setFormat("mp4");
recorder.setFrameRate(frameRate);
// frame settings:
IplImage yuvIplimage = null;
yuvIplimage = IplImage.create(320, 320, IPL_DEPTH_16U, 1); // 32 not supported
// image reader:
private ImageReader mImageReader;
mImageReader = ImageReader.newInstance(320, 320, ImageFormat.YUV_420_888, 10);
mImageReader.setOnImageAvailableListener(mOnImageAvailableListener, mBackgroundHandler);
private final ImageReader.OnImageAvailableListener mOnImageAvailableListener
        = new ImageReader.OnImageAvailableListener() {
    @Override
    public void onImageAvailable(ImageReader reader) {
        Image image = reader.acquireNextImage(); // acquireLatestImage() also tried
        if (image == null)
            return;
        final ByteBuffer buffer = image.getPlanes()[0].getBuffer();
        byte[] bytes = new byte[buffer.remaining()];
        buffer.get(bytes, 0, bytes.length);
        if (yuvIplimage != null) {
            // OPTION 1
            yuvIplimage.getByteBuffer().put(convertYUV420ToNV21(image));
            // OPTION 2
            //yuvIplimage.getByteBuffer().put(decodeYUV420SP(bytes, 320, 320));
            try {
                if (started) {
                    recorder.record(yuvIplimage);
                }
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
        image.close();
    }
};
Option 1 is to decode the image to NV21 using this code:
private byte[] convertYUV420ToNV21(Image imgYUV420) {
    byte[] rez;
    ByteBuffer buffer0 = imgYUV420.getPlanes()[0].getBuffer();
    ByteBuffer buffer2 = imgYUV420.getPlanes()[2].getBuffer();
    int buffer0_size = buffer0.remaining();
    int buffer2_size = buffer2.remaining();
    rez = new byte[buffer0_size + buffer2_size];
    buffer0.get(rez, 0, buffer0_size);
    buffer2.get(rez, buffer0_size, buffer2_size);
    return rez;
}
Option 2 is to convert to RGB, if I understand correctly:
public byte[] decodeYUV420SP(byte[] yuv420sp, int width, int height) {
    final int frameSize = width * height;
    byte[] rgb = new byte[width * height];
    for (int j = 0, yp = 0; j < height; j++) {
        int uvp = frameSize + (j >> 1) * width, u = 0, v = 0;
        for (int i = 0; i < width; i++, yp++) {
            int y = (0xff & ((int) yuv420sp[yp])) - 16;
            if (y < 0) y = 0;
            if ((i & 1) == 0) {
                v = (0xff & yuv420sp[uvp++]) - 128;
                u = (0xff & yuv420sp[uvp++]) - 128;
            }
            int y1192 = 1192 * y;
            int r = (y1192 + 1634 * v);
            int g = (y1192 - 833 * v - 400 * u);
            int b = (y1192 + 2066 * u);
            if (r < 0) r = 0; else if (r > 262143) r = 262143;
            if (g < 0) g = 0; else if (g > 262143) g = 262143;
            if (b < 0) b = 0; else if (b > 262143) b = 262143;
            rgb[yp] = (byte) (0xff000000 | ((r << 6) & 0xff0000)
                    | ((g >> 2) & 0xff00) | ((b >> 10) & 0xff));
        }
    }
    return rgb;
}
It looks incorrect as well.
Which is the correct way to convert a camera2 image to IplImage?
And is it possible to do it on the fly?
If the recorder requires NV21, then converting the image to that instead of RGB is likely the fastest option.
But why don't you just use android.media.MediaRecorder? It's much more efficient and can use the hardware encoders.
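For reference, a minimal MediaRecorder sketch (untested; the output path and size are placeholders):
// Hardware-encoded MP4 without touching individual frames.
MediaRecorder recorder = new MediaRecorder();
recorder.setVideoSource(MediaRecorder.VideoSource.SURFACE); // pairs with camera2
recorder.setOutputFormat(MediaRecorder.OutputFormat.MPEG_4);
recorder.setVideoEncoder(MediaRecorder.VideoEncoder.H264);
recorder.setVideoSize(320, 240);
recorder.setVideoFrameRate(30);
recorder.setOutputFile("/sdcard/video.mp4"); // placeholder path
recorder.prepare(); // throws IOException
// add recorder.getSurface() as an output target of the camera2 capture session
recorder.start();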
But if you need to stick with ffmpeg, note that your first option is incorrect for many devices. In addition, make sure you remove that earlier buffer.get call: it makes the rest of the reads from plane 0 not work right, which may be your current problem, because once you have read plane 0, .remaining() will return 0.
The YUV image has 3 planes, and unless you've checked that the underlying format is actually NV21, you shouldn't blindly assume that it is, or that the row stride is equal to the width.
To be safe, you need to look at both row and pixel strides when copying the three planes into the semiplanar byte[].
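An untested sketch of such a stride-aware copy, walking the Y plane row by row and interleaving V and U into NV21's VU order:
// Stride-aware YUV_420_888 -> NV21 copy: honors rowStride and pixelStride
// instead of assuming tightly packed planes.
private static byte[] yuv420888ToNv21(Image image) {
    int width = image.getWidth();
    int height = image.getHeight();
    byte[] nv21 = new byte[width * height * 3 / 2];

    // Y plane: rowStride may be larger than width, so copy per row.
    Image.Plane yPlane = image.getPlanes()[0];
    ByteBuffer yBuf = yPlane.getBuffer();
    int yRowStride = yPlane.getRowStride();
    int pos = 0;
    for (int row = 0; row < height; row++) {
        yBuf.position(row * yRowStride);
        yBuf.get(nv21, pos, width);
        pos += width;
    }

    // Chroma planes at quarter resolution; NV21 wants V first, then U.
    ByteBuffer uBuf = image.getPlanes()[1].getBuffer();
    ByteBuffer vBuf = image.getPlanes()[2].getBuffer();
    int uvRowStride = image.getPlanes()[1].getRowStride();     // same for U and V in practice
    int uvPixelStride = image.getPlanes()[1].getPixelStride(); // 1 = planar, 2 = semiplanar
    for (int row = 0; row < height / 2; row++) {
        for (int col = 0; col < width / 2; col++) {
            int uvIndex = row * uvRowStride + col * uvPixelStride;
            nv21[pos++] = vBuf.get(uvIndex);
            nv21[pos++] = uBuf.get(uvIndex);
        }
    }
    return nv21;
}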
If you're willing to move your processing over to C++, you could do the conversion like this:
cv::cvtColor((cv::_InputArray)mNV, (cv::_OutputArray)rgba, CV_YUV2RGBA_NV12, 0);
This will store the image as an RGBA mat and you can save it as normal from there.
I get the images from device camera:
public synchronized void onPreviewFrame(byte[] data, Camera camera) {
//Get image from data
}
Decode the images to RGB:
int[] rgb = decodeYUV420SPtoRGB(data, width, height);
RGB to Bitmap:
Bitmap bitmap = rgbToBitmap(rgb,width, height);
Display the images in an ImageView to make sure the conversion is fine:
iv.setImageBitmap(bitmap);
Detect the number of faces:
FaceDetector faceDet = new FaceDetector(bitmap.getWidth(), bitmap.getHeight(), 1);
Face[] faceList = new Face[1];
int faces = faceDet.findFaces(bitmap, faceList);
Log.e("NumFaces: ",faces + "");
Decode functions:
private int[] decodeYUV420SPtoRGB(byte[] yuv420sp, int width, int height) {
    if (yuv420sp == null) throw new NullPointerException();
    final int frameSize = width * height;
    int[] rgb = new int[frameSize];
    for (int j = 0, yp = 0; j < height; j++) {
        int uvp = frameSize + (j >> 1) * width, u = 0, v = 0;
        for (int i = 0; i < width; i++, yp++) {
            int y = (0xff & (yuv420sp[yp])) - 16;
            if (y < 0) y = 0;
            if ((i & 1) == 0) {
                v = (0xff & yuv420sp[uvp++]) - 128;
                u = (0xff & yuv420sp[uvp++]) - 128;
            }
            int y1192 = 1192 * y;
            int r = (y1192 + 1634 * v);
            int g = (y1192 - 833 * v - 400 * u);
            int b = (y1192 + 2066 * u);
            if (r < 0) r = 0; else if (r > 262143) r = 262143;
            if (g < 0) g = 0; else if (g > 262143) g = 262143;
            if (b < 0) b = 0; else if (b > 262143) b = 262143;
            rgb[yp] = 0xff000000 | ((r << 6) & 0xff0000) | ((g >> 2) & 0xff00) | ((b >> 10) & 0xff);
        }
    }
    return rgb;
}
public static Bitmap rgbToBitmap(int[] rgb, int width, int height) {
    if (rgb == null) throw new NullPointerException();
    Bitmap bitmap = Bitmap.createBitmap(width, height, Bitmap.Config.RGB_565);
    bitmap.setPixels(rgb, 0, width, 0, 0, width, height);
    return bitmap;
}
I always get 0 from Log.e("NumFaces: ", faces + ""), even though I can see faces in the ImageView: real faces and faces in photos. I tried it on physical devices running 2.3 (bad camera) and 4.2 (good HD camera).
Edit: on the 4.2 device I am getting 800x480 frames in onPreviewFrame.
Any idea what is happening?
I have solved the problem by holding the device in my hands horizontally instead of vertically.
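That fits the behavior of android.media.FaceDetector, which only finds roughly upright faces, while preview frames arrive in landscape sensor orientation. An untested alternative is to rotate the bitmap before detection; the 90 degree angle below is an assumption, check Camera.CameraInfo.orientation for your device:
// Rotate the landscape preview frame upright before running FaceDetector.
// FaceDetector requires an RGB_565 bitmap with an even width.
Matrix matrix = new Matrix();
matrix.postRotate(90); // assumed angle; query CameraInfo.orientation
Bitmap upright = Bitmap.createBitmap(bitmap, 0, 0,
        bitmap.getWidth(), bitmap.getHeight(), matrix, false);
FaceDetector detector = new FaceDetector(upright.getWidth(), upright.getHeight(), 1);
FaceDetector.Face[] found = new FaceDetector.Face[1];
int numFaces = detector.findFaces(upright, found);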
I am using JavaCV on Android.
In my code I have created an ImageComparator (a class from the OpenCV Cookbook examples,
http://code.google.com/p/javacv/source/browse/OpenCV2_Cookbook/src/opencv2_cookbook/chapter04/ImageComparator.scala?repo=examples
http://code.google.com/p/javacv/wiki/OpenCV2_Cookbook_Examples_Chapter_4) object and use it to compare images. If I use a file from the SD card, the comparator works:
File referenceImageFile = new File(absPath1); // Read an image.
IplImage reference = Util.loadOrExit(referenceImageFile,CV_LOAD_IMAGE_COLOR);
comparator = new ImageComparator(reference);
But when I create an IplImage from the camera preview, it does not work. I get the following exception during the comparison "score" calculation:
score = referenceComparator.compare(grayImage) / imageSize;
java.lang.RuntimeException: /home/saudet/android/OpenCV-2.4.2/modules/core/src/convert.cpp:1196: error: (-215) i < src.channels() in function void cvSplit(const void*, void*, void*, void*, void*)
For the camera preview I am using the code from FacePreview to create the IplImage, but it creates the image in grayscale:
int f = SUBSAMPLING_FACTOR;
if (grayImage == null || grayImage.width() != width / f
        || grayImage.height() != height / f) {
    grayImage = IplImage.create(width / f, height / f, IPL_DEPTH_8U, 1);
}
int imageWidth = grayImage.width();
int imageHeight = grayImage.height();
int dataStride = f * width;
int imageStride = grayImage.widthStep();
ByteBuffer imageBuffer = grayImage.getByteBuffer();
for (int y = 0; y < imageHeight; y++) {
    int dataLine = y * dataStride;
    int imageLine = y * imageStride;
    for (int x = 0; x < imageWidth; x++) {
        imageBuffer.put(imageLine + x, data[dataLine + f * x]);
    }
}
How can I create a color IplImage from the camera to use with ImageComparator?
The code below seems to be working fine.
public void onPreviewFrame(final byte[] data, final Camera camera) {
    try {
        Camera.Size size = camera.getParameters().getPreviewSize();
        processImage(data, size.width, size.height);
        camera.addCallbackBuffer(data);
    } catch (RuntimeException e) {
        // The camera has probably just been released, ignore.
        Log.d("Exception", " " + e);
    }
}

protected void processImage(byte[] data, int width, int height) {
    score.clear();
    // First, downsample our image
    int f = SUBSAMPLING_FACTOR;
    IplImage _4image = IplImage.create(width, height, IPL_DEPTH_8U, f);
    int[] _temp = new int[width * height];
    if (_4image != null) {
        decodeYUV420SP(_temp, data, width, height);
        _4image.getIntBuffer().put(_temp);
    }
    Log.d("CompareAndroid", "processImage");
    int imageSize = _4image.width() * _4image.height();
    Iterator<ImageComparator> iterator = reference_List.iterator();
    // Compute histogram match and normalize by image size.
    // 1 means perfect match.
    while (iterator.hasNext()) {
        score.add(((ImageComparator) iterator.next()).compare(_4image) / imageSize);
    }
    Log.d("CompareImages", "Score size: " + score.size());
    postInvalidate();
}
This code seems to be working fine.
private void decodeYUV420SP(int[] rgb, byte[] yuv420sp, int width, int height) {
    int frameSize = width * height;
    for (int j = 0, yp = 0; j < height; j++) {
        int uvp = frameSize + (j >> 1) * width, u = 0, v = 0;
        for (int i = 0; i < width; i++, yp++) {
            int y = (0xff & ((int) yuv420sp[yp])) - 16;
            if (y < 0) y = 0;
            if ((i & 1) == 0) {
                v = (0xff & yuv420sp[uvp++]) - 128;
                u = (0xff & yuv420sp[uvp++]) - 128;
            }
            int y1192 = 1192 * y;
            int r = (y1192 + 1634 * v);
            int g = (y1192 - 833 * v - 400 * u);
            int b = (y1192 + 2066 * u);
            if (r < 0) r = 0; else if (r > 262143) r = 262143;
            if (g < 0) g = 0; else if (g > 262143) g = 262143;
            if (b < 0) b = 0; else if (b > 262143) b = 262143;
            // Note: b and r are swapped relative to the ARGB variants above,
            // producing BGR channel order as OpenCV expects.
            rgb[yp] = 0xff000000 | ((b << 6) & 0xff0000) | ((g >> 2) & 0xff00) | ((r >> 10) & 0xff);
        }
    }
}
I haven't tested it, but something like this should work:
IplImage yuvimage = IplImage.create(width, height * 3 / 2, IPL_DEPTH_8U, 1); // NV21 is a single-channel buffer: 1.5 rows of bytes per pixel row
IplImage rgbimage = IplImage.create(width, height, IPL_DEPTH_8U, 3);
cvCvtColor(yuvimage, rgbimage, CV_YUV2BGR_NV21);
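Possible usage from the preview callback, assuming data is a full NV21 frame of matching size (untested, like the snippet above):
// Fill the YUV image with the raw NV21 preview bytes, convert to BGR,
// then hand the color image to the comparator.
yuvimage.getByteBuffer().put(data);
cvCvtColor(yuvimage, rgbimage, CV_YUV2BGR_NV21);
double score = referenceComparator.compare(rgbimage) / (double) imageSize;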