I am capturing frames in onPreviewFrame() and then processing them on a worker thread to check whether they are valid or not.
public void onPreviewFrame(byte[] data, Camera camera) {
    if (imageFormat == ImageFormat.NV21) {
        // We only accept the NV21 (YUV420) format.
        frameCount++;
        if (frameCount > 19 && frameCount % 2 == 0) {
            Camera.Parameters parameters = camera.getParameters();
            FrameModel fModel = new FrameModel(data);
            fModel.setPreviewWidth(parameters.getPreviewSize().width);
            fModel.setPreviewHeight(parameters.getPreviewSize().height);
            fModel.setPicFormat(parameters.getPreviewFormat());
            fModel.setFrameCount(frameCount);
            validateFrame(fModel);
        }
    }
}
In validateFrame(), I submit a ValidatorThread runnable instance to a ThreadPoolExecutor with 4 core and maximum threads, to process the frames in parallel.
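The executor is set up roughly like this (a reconstructed sketch, since the exact setup is not shown here). A bounded queue with a discard policy matters, because an unbounded queue holds on to every pending frame's byte[] and grows without limit:

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

// Sketch (assumed, not the original code): 4 core/max threads,
// at most 8 frames waiting in the queue.
private final ThreadPoolExecutor validatorPool = new ThreadPoolExecutor(
        4, 4,
        30L, TimeUnit.SECONDS,
        new ArrayBlockingQueue<Runnable>(8),
        new ThreadPoolExecutor.DiscardOldestPolicy()); // drop stale frames instead of queueing forever

private void validateFrame(FrameModel fModel) {
    validatorPool.execute(new ValidatorThread(fModel));
}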
public class ValidatorThread implements Runnable {

    private FrameModel frame;

    public ValidatorThread(FrameModel fModel) {
        frame = fModel;
    }

    @Override
    public void run() {
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
        processNV21Data();
    }
    private void processNV21Data() {
        YuvImage yuv = new YuvImage(frame.getData(), frame.getPicFormat(),
                frame.getPreviewWidth(), frame.getPreviewHeight(), null);
        frame.releaseData();

        ByteArrayOutputStream out = new ByteArrayOutputStream();
        yuv.compressToJpeg(new Rect(0, 0, frame.getPreviewWidth(), frame.getPreviewHeight()), 100, out);
        byte[] bytes = out.toByteArray();
        yuv = null;
        try {
            if (out != null)
                out.close();
            out = null;
        } catch (IOException e) {
            e.printStackTrace();
        }

        Bitmap baseBitmap = BitmapFactory.decodeByteArray(bytes, 0, bytes.length);
        bytes = null;

        // rotate bitmap
        baseBitmap = rotateImage(baseBitmap, frame.getRotation());
        // create copy of original bitmap to use later
        Bitmap mCheckedBitmap = baseBitmap.copy(Bitmap.Config.ARGB_8888, true);
        // convert base bitmap to greyscale for validation
        baseBitmap = toGrayscale(baseBitmap);

        boolean isBitmapValid = Util.isBitmapValid(baseBitmap);
        if (isBitmapValid) {
            baseBitmap.recycle();
            mCheckedBitmap.recycle();
            frame = null;
        } else {
            baseBitmap.recycle();
            mCheckedBitmap.recycle();
            frame = null;
        }
    }
    public Bitmap toGrayscale(Bitmap bmpOriginal) {
        int width = bmpOriginal.getWidth();
        int height = bmpOriginal.getHeight();
        Bitmap bmpGrayscale = Bitmap.createBitmap(width, height, Bitmap.Config.RGB_565);
        Canvas c = new Canvas(bmpGrayscale);
        Paint paint = new Paint();
        // Desaturate via a color matrix, then draw the original onto the new bitmap.
        ColorMatrix cm = new ColorMatrix();
        cm.setSaturation(0);
        paint.setColorFilter(new ColorMatrixColorFilter(cm));
        c.drawBitmap(bmpOriginal, 0, 0, paint);
        bmpOriginal.recycle();
        return bmpGrayscale;
    }
    private Bitmap rotateImage(final Bitmap source, float angle) {
        Matrix matrix = new Matrix();
        matrix.postRotate(angle);
        Bitmap rotatedBitmap = Bitmap.createBitmap(source, 0, 0, source.getWidth(),
                source.getHeight(), matrix, true);
        source.recycle();
        return rotatedBitmap;
    }
}
The FrameModel class is declared as follows:

public class FrameModel {

    private byte[] data;
    private int previewWidth;
    private int previewHeight;
    private int picFormat;
    private int frameCount;

    public void releaseData() {
        data = null;
    }

    // constructor, getters and setters
}
I am getting an OutOfMemoryError while processing multiple frames.
Can anyone suggest what memory optimisations this code needs?
You can reduce memory usage if you produce the grayscale bitmap directly from the YUV data, without going through JPEG. This will also be significantly faster.
public Bitmap yuv2grayscale(byte[] yuv, int width, int height) {
    int[] pixels = new int[width * height];
    for (int i = 0; i < width * height; i++) {
        int y = yuv[i] & 0xff; // NV21: the luma plane comes first
        pixels[i] = 0xFF000000 | y << 16 | y << 8 | y;
    }
    return Bitmap.createBitmap(pixels, width, height, Bitmap.Config.RGB_565);
}
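With this, processNV21Data() can drop the whole JPEG round-trip, and toGrayscale() becomes unnecessary. Roughly (a sketch against the question's code):

// Sketch: replaces the YuvImage / compressToJpeg / decodeByteArray sequence.
Bitmap baseBitmap = yuv2grayscale(frame.getData(),
        frame.getPreviewWidth(), frame.getPreviewHeight());
frame.releaseData();
baseBitmap = rotateImage(baseBitmap, frame.getRotation());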
Alternatively, you can create an RGB_565 bitmap without going through the int[width*height] pixels array, and manipulate the bitmap pixels in place using the NDK.
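If you want to stay in Java, a similar saving is possible by packing RGB_565 shorts into a ByteBuffer and copying them into the bitmap in one step (a sketch of the same idea without the int[] array; the NDK route would avoid even this intermediate buffer):

import java.nio.ByteBuffer;
import java.nio.ByteOrder;

public Bitmap yuv2grayscale565(byte[] yuv, int width, int height) {
    ByteBuffer buf = ByteBuffer.allocateDirect(width * height * 2)
            .order(ByteOrder.nativeOrder());
    for (int i = 0; i < width * height; i++) {
        int y = yuv[i] & 0xff;
        // RGB_565: 5 bits red, 6 bits green, 5 bits blue
        buf.putShort((short) (((y >> 3) << 11) | ((y >> 2) << 5) | (y >> 3)));
    }
    buf.rewind();
    Bitmap bmp = Bitmap.createBitmap(width, height, Bitmap.Config.RGB_565);
    bmp.copyPixelsFromBuffer(buf);
    return bmp;
}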
Related
I am using a filter effect to apply effects like changing the background. Here is my processor:
public VideoFrameProcessor build() {
    return new VideoFrameProcessor() {
        private BitmapVideoFrameConversion convertor = null;
        private SelfieSegmentation selfieSegmentation = null;
        private SegmentationMask mask;
        private int[] pixels;
        private int[] bgImagePixels;
        private int width;
        private int height;
        private double backgroundLikelihood;
        private Bitmap bitmap;
        private VideoFrame outputFrame;

        @Override
        public VideoFrame process(VideoFrame frame, SurfaceTextureHelper textureHelper) {
            if (convertor == null) {
                convertor = new BitmapVideoFrameConversion(textureHelper);
            }
            if (selfieSegmentation == null) {
                selfieSegmentation = new SelfieSegmentation();
            }
            bitmap = convertor.videoFrame2Bitmap(frame);
            mask = selfieSegmentation.process(bitmap);
            width = mask.getWidth();
            height = mask.getHeight();
            bitmap = changeBackground(bitmap, mask, width, height);
            outputFrame = convertor.bitmap2VideoFrame(bitmap, width, height);
            if (bitmap != null) {
                bitmap.recycle();
            }
            return outputFrame;
        }

        private Bitmap changeBackground(Bitmap bitmap, SegmentationMask mask, int width, int height) {
            if (bgImage == null) {
                return bitmap;
            }
            ByteBuffer bufferMask = mask.getBuffer();
            backgroundLikelihood = 0;
            int length = width * height;
            pixels = new int[length];
            bgImagePixels = new int[length];
            bitmap.getPixels(pixels, 0, width, 0, 0, width, height);
            bgImage.getPixels(bgImagePixels, 0, width, 0, 0, width, height);
            for (int i = 0; i < length; i++) {
                // gets the likelihood of the background for this pixel
                backgroundLikelihood = 1 - bufferMask.getFloat();
                // sets the pixel to the bgImage pixel value if it is background
                if (backgroundLikelihood > .2) {
                    pixels[i] = bgImagePixels[i];
                }
            }
            bitmap.setPixels(pixels, 0, width, 0, 0, width, height);
            return bitmap;
        }
    };
}
Here is the class BitmapVideoFrameConversion:
public class BitmapVideoFrameConversion {

    private YuvFrame yuvFrame = null;
    private SurfaceTextureHelper textureHelper;
    private YuvConverter yuvConverter = new YuvConverter();
    private Matrix transform = new Matrix();
    private int[] textures;
    private TextureBufferImpl buffer;
    private VideoFrame.I420Buffer i420buffer;
    private Bitmap newBitmap;
    private Matrix matrixToFlip;

    public BitmapVideoFrameConversion(SurfaceTextureHelper textureHelper) {
        this.textureHelper = textureHelper;
        this.textures = new int[1];
    }

    public Bitmap videoFrame2Bitmap(VideoFrame frame) {
        if (yuvFrame == null) {
            yuvFrame = new YuvFrame(frame, YuvFrame.PROCESSING_NONE, frame.getTimestampNs());
        } else {
            yuvFrame.fromVideoFrame(frame, YuvFrame.PROCESSING_NONE, frame.getTimestampNs());
        }
        return createFlippedBitmap(yuvFrame.getBitmap(), true, false);
    }

    public Bitmap createFlippedBitmap(Bitmap source, boolean xFlip, boolean yFlip) {
        matrixToFlip = new Matrix();
        matrixToFlip.postScale(xFlip ? -1 : 1, yFlip ? -1 : 1,
                source.getWidth() / 2f, source.getHeight() / 2f);
        return Bitmap.createBitmap(source, 0, 0, source.getWidth(), source.getHeight(), matrixToFlip, true);
    }

    public VideoFrame bitmap2VideoFrame(Bitmap bitmap, int width, int height) {
        if (bitmap == null) {
            return null;
        }
        textures = new int[1];
        long start = System.nanoTime();
        // Generate one texture name into index 0; the original call,
        // glGenTextures(0, textures, 1), had n and offset swapped and so
        // generated no texture at all.
        GLES20.glGenTextures(1, textures, 0);
        // The texture must be bound before the parameters and texImage2D apply to it.
        GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, textures[0]);
        buffer = new TextureBufferImpl(width, height,
                VideoFrame.TextureBuffer.Type.RGB,
                textures[0],
                transform,
                textureHelper.getHandler(),
                yuvConverter,
                null);
        GLES20.glTexParameteri(GLES20.GL_TEXTURE_2D, GLES20.GL_TEXTURE_MIN_FILTER,
                GLES20.GL_NEAREST);
        GLES20.glTexParameteri(GLES20.GL_TEXTURE_2D, GLES20.GL_TEXTURE_MAG_FILTER,
                GLES20.GL_NEAREST);
        GLUtils.texImage2D(GLES20.GL_TEXTURE_2D, 0, bitmap, 0);
        i420buffer = yuvConverter.convert(buffer);
        long timestamp = System.nanoTime() - start;
        VideoFrame videoFrame = new VideoFrame(i420buffer, 180, timestamp);
        if (bitmap != null) {
            bitmap.recycle();
        }
        // buffer.release();
        return videoFrame;
    }
}
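One thing I am unsure about (an assumption, not verified): in the webrtc Java API, TextureBufferImpl is reference-counted, and the GL texture above is recreated on every frame, so both probably need to be released per frame, roughly like this:

// Sketch: after the I420 conversion inside bitmap2VideoFrame()
i420buffer = yuvConverter.convert(buffer);
buffer.release();                        // drop the ref-counted texture buffer
GLES20.glDeleteTextures(1, textures, 0); // free this frame's GL texture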
I successfully display the effect in the local stream, but the problem is that when the effect is applied via videoSource.setProcessor() (e.g. with ChangeBackgroundProcessor), the local video seems not to send any frames to the remote side, or sends them with an error (format, ...).
On the remote side, it stops displaying frames right after I apply the effect, as you can see here:
https://vimeo.com/794775532
Can someone help? I am really stuck on this problem.
I use camera.addCallbackBuffer(data); to reuse the buffer and avoid running out of memory. My code in the preview callback looks like:
checkFace(data, camera);
camera.addCallbackBuffer(data);
In the method checkFace I just convert the data to a bitmap, then use FaceDetector to check for faces. I have also tried calling camera.addCallbackBuffer(data); after converting the data, but the native memory shown in the Android Studio profiler behaves the same:
After my app has been running for about 10 minutes, the "Native" size increases from 10 MB to 250 MB.
When my app has been running for about 4 hours, it crashes and Logcat prints:
E/IMemory (17967): cannot map BpMemoryHeap (binder=0x11515160), size=462848, fd=70 (Out of memory)
I think this may be caused by the ever-increasing "Native" memory.
CODE:
camera1.setPreviewCallbackWithBuffer(previewCallback1);
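// Sketch (an assumption, not my original code): buffer1 must hold one full
// NV21 preview frame, otherwise the buffered callback never fires.
Camera.Parameters params1 = camera1.getParameters();
Camera.Size previewSize1 = params1.getPreviewSize();
byte[] buffer1 = new byte[previewSize1.width * previewSize1.height
        * ImageFormat.getBitsPerPixel(ImageFormat.NV21) / 8];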
camera1.addCallbackBuffer(buffer1);
camera1.startPreview();
...
private Camera.PreviewCallback previewCallback1 = (data, camera) -> {
    checkFace(data, camera);
    camera.addCallbackBuffer(data);
};

// convert data to bitmap then check face from the bitmap
private void checkFace(byte[] data, Camera camera) {
    ...
    ...run on new Thread...
    Bitmap bitmap = BitmapUtil.ByteToBitmap(data, camera.getParameters().getPreviewSize());
    ...
    FaceDetector detector = new FaceDetector(bitmap.getWidth(), bitmap.getHeight(), numberOfFace);
    ...then get the result of face detection
}
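One thing I suspect (an assumption on my part): since checkFace() hands data to a new thread while the same buffer is immediately re-queued with addCallbackBuffer(data), the worker may still be reading the buffer when the camera overwrites it. A sketch of the safer pattern:

// Sketch: copy the frame before re-queueing the buffer, so the camera cannot
// overwrite it while the face-detection thread is still reading it.
final byte[] frameCopy = java.util.Arrays.copyOf(data, data.length);
camera.addCallbackBuffer(data); // safe to reuse the original buffer now
// ... pass frameCopy to the worker thread instead of data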
// convert frame data to bitmap
public static Bitmap ByteToBitmap(byte[] data, Camera.Size previewSize) {
    ByteArrayOutputStream baos = null;
    Bitmap bitmapOut = null;
    try {
        int w = previewSize.width;
        int h = previewSize.height;
        YuvImage yuvimage = new YuvImage(data, ImageFormat.NV21, w, h, null);
        baos = new ByteArrayOutputStream();
        yuvimage.compressToJpeg(new Rect(0, 0, w, h), 60, baos);
        byte[] jdata = baos.toByteArray();
        bitmapOut = BitmapFactory.decodeByteArray(jdata, 0, jdata.length);
        if (null == bitmapOut) {
            return bitmapOut;
        }
        jdata = null;
        yuvimage = null;
        Matrix matrix = new Matrix();
        matrix.postRotate(90);
        bitmapOut = Bitmap.createBitmap(bitmapOut, 0, 0, w, h, matrix, false);
    } catch (Exception e) {
        e.printStackTrace(); // don't swallow failures silently
    } finally {
        try {
            if (baos != null) {
                baos.flush();
                baos.close();
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
    return bitmapOut;
}
So, what should I do to resolve this?
I used the OCR sample in this link: https://github.com/rmtheis/android-ocr
Everything is working fine, but I want it in portrait view. I followed the steps in this link, Zxing Camera in Portrait mode on Android, to enable the OCR (tess-two) in portrait mode. The view is portrait now, but the camera is still taking the picture in landscape mode.
Any help?
final class PreviewCallback implements Camera.PreviewCallback {

    private static final String TAG = PreviewCallback.class.getSimpleName();

    private final CameraConfigurationManager configManager;
    private Handler previewHandler;
    private int previewMessage;

    PreviewCallback(CameraConfigurationManager configManager) {
        this.configManager = configManager;
    }

    void setHandler(Handler previewHandler, int previewMessage) {
        this.previewHandler = previewHandler;
        this.previewMessage = previewMessage;
    }

    // Since we're not calling setPreviewFormat(int), the data arrives here in
    // the default YCbCr_420_SP (NV21) format.
    @Override
    public void onPreviewFrame(byte[] data, Camera camera) {
        Point cameraResolution = configManager.getCameraResolution();
        Handler thePreviewHandler = previewHandler;
        if (cameraResolution != null && thePreviewHandler != null) {
            Message message = thePreviewHandler.obtainMessage(previewMessage,
                    cameraResolution.x, cameraResolution.y, data);
            message.sendToTarget();
            previewHandler = null;
        } else {
            Log.d(TAG, "Got preview callback, but no handler or resolution available");
        }
    }
}
Are you using the preview data with this method:
public void onPreviewFrame(byte[] data, Camera camera) {}
If yes, then I can help you, since I am working on a very similar project (which will be open-sourced soon).
Here is the code that I am using to rotate the preview image:
public static Bitmap getBitmapImageFromYUV(byte[] data, int width,
        int height, int degree, Rect rect) {
    Bitmap bitmap = getBitmapImageFromYUV(data, width, height, rect);
    return rotateBitmap(bitmap, degree, rect);
}

public static Bitmap rotateBitmap(Bitmap source, float angle, Rect rect) {
    Matrix matrix = new Matrix();
    matrix.postRotate(angle);
    source = Bitmap.createBitmap(source, 0, 0, source.getWidth(),
            source.getHeight(), matrix, true);
    source = Bitmap.createBitmap(source, rect.left, rect.top, rect.width(), rect.height());
    if (mShouldSavePreview)
        saveBitmap(source);
    return source;
}

public static Bitmap getBitmapImageFromYUV(byte[] data, int width,
        int height, Rect rect) {
    YuvImage yuvimage = new YuvImage(data, ImageFormat.NV21, width, height, null);
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    yuvimage.compressToJpeg(new Rect(0, 0, width, height), 90, baos);
    byte[] jdata = baos.toByteArray();
    BitmapFactory.Options bitmapFactoryOptions = new BitmapFactory.Options();
    bitmapFactoryOptions.inPreferredConfig = Bitmap.Config.ARGB_8888;
    Bitmap bmp = BitmapFactory.decodeByteArray(jdata, 0, jdata.length, bitmapFactoryOptions);
    Log.d(TAG, "getBitmapImageFromYUV w:" + bmp.getWidth() + " h:" + bmp.getHeight());
    return bmp;
}
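For context, a hypothetical call site (the names here are assumptions, not from the answer). Note that rotateBitmap() crops with the Rect after rotating, so for a 90-degree rotation the Rect must be sized to the rotated frame:

@Override
public void onPreviewFrame(byte[] data, Camera camera) {
    Camera.Size s = camera.getParameters().getPreviewSize();
    // After a 90-degree rotation the frame is s.height wide and s.width tall.
    Rect rotatedBounds = new Rect(0, 0, s.height, s.width);
    Bitmap portrait = getBitmapImageFromYUV(data, s.width, s.height, 90, rotatedBounds);
    // hand `portrait` to the OCR engine here
}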
Guys, I found the solution!
Replace the following code in the function ocrDecode(byte[] data, int width, int height) in the DecodeHandler.java file:
beepManager.playBeepSoundAndVibrate();
activity.displayProgressDialog();

// *************SHARNOUBY CODE
// Rotate the NV21 luma plane 90 degrees clockwise, then swap width and height.
byte[] rotatedData = new byte[data.length];
for (int y = 0; y < height; y++) {
    for (int x = 0; x < width; x++)
        rotatedData[x * height + height - y - 1] = data[x + y * width];
}
int tmp = width;
width = height;
height = tmp;
//******************************

// Launch OCR asynchronously, so we get the dialog box displayed immediately
new OcrRecognizeAsyncTask(activity, baseApi, rotatedData, width, height)
        .execute();
...The problem was in the switch case in the function handleMessage(Message message): the second case, which calls the rotation code, was never triggered.
I am developing an AutoCAD-like desktop-style app on Android using OpenGL ES 2.0. I have drawn some objects on a GLSurfaceView, like lines, circles, and linear dimensioning. After drawing the objects on the GLSurfaceView, I capture the screen of the GLSurfaceView and convert it to a PDF file. When I open the PDF file, some objects are missing.
This is my output. First image: my original output. Second image: PDF file output.
My Code:
Note: In this code, when I click the button, it takes a screenshot as an image and saves it to the SD card. I used a boolean condition in the onDrawFrame method because the renderer calls onDrawFrame continuously; without the boolean condition this code would execute on every frame and save lots of images to the memory card.
MainActivity class:
protected boolean printOptionEnable = false;

saveImageButton.setOnClickListener(new OnClickListener() {
    @Override
    public void onClick(View v) {
        Log.v("hari", "pan button clicked");
        isSaveClick = true;
        myRenderer.printOptionEnable = isSaveClick;
    }
});
MyRenderer class:
int width_surface, height_surface;

@Override
public void onSurfaceChanged(GL10 gl, int width, int height) {
    Log.i("JO", "onSurfaceChanged");
    // Adjust the viewport based on geometry changes,
    // such as screen rotation
    GLES20.glViewport(0, 0, width, height);
    float ratio = (float) width / height;
    width_surface = width;
    height_surface = height;
}

//---------------------------------------------------------------------
@Override
public void onDrawFrame(GL10 gl) {
    try {
        if (printOptionEnable) {
            printOptionEnable = false;
            Log.i("hari", "printOptionEnable if condition:" + printOptionEnable);
            int w = width_surface;
            int h = height_surface;
            Log.i("hari", "w:" + w + "-----h:" + h);
            int[] b = new int[w * h];
            int[] bt = new int[w * h];
            IntBuffer buffer = IntBuffer.wrap(b);
            buffer.position(0);
            GLES20.glReadPixels(0, 0, w, h, GLES20.GL_RGBA, GLES20.GL_UNSIGNED_BYTE, buffer);
            // The OpenGL image is incompatible with the Android bitmap, so
            // some correction is needed: swap the red and blue channels and
            // flip the image vertically.
            for (int i = 0; i < h; i++) {
                for (int j = 0; j < w; j++) {
                    int pix = b[i * w + j];
                    int pb = (pix >> 16) & 0xff;
                    int pr = (pix << 16) & 0x00ff0000;
                    int pix1 = (pix & 0xff00ff00) | pr | pb;
                    bt[(h - i - 1) * w + j] = pix1;
                }
            }
            Bitmap inBitmap = Bitmap.createBitmap(bt, w, h, Bitmap.Config.ARGB_8888);
            ByteArrayOutputStream bos = new ByteArrayOutputStream();
            inBitmap.compress(CompressFormat.JPEG, 90, bos);
            byte[] bitmapdata = bos.toByteArray();
            ByteArrayInputStream fis = new ByteArrayInputStream(bitmapdata);
            final Calendar c = Calendar.getInstance();
            long mytimestamp = c.getTimeInMillis();
            String timeStamp = String.valueOf(mytimestamp);
            String myfile = "hari" + timeStamp + ".jpeg";
            dir_image = new File(Environment.getExternalStorageDirectory() + File.separator
                    + "printerscreenshots" + File.separator + "image");
            dir_image.mkdirs();
            try {
                File tmpFile = new File(dir_image, myfile);
                FileOutputStream fos = new FileOutputStream(tmpFile);
                byte[] buf = new byte[1024];
                int len;
                while ((len = fis.read(buf)) > 0) {
                    fos.write(buf, 0, len);
                }
                fis.close();
                fos.close();
            } catch (FileNotFoundException e) {
                e.printStackTrace();
            } catch (IOException e) {
                e.printStackTrace();
            }
            Log.v("hari", "screenshots:" + dir_image.toString());
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
}
Please, can anyone help me?
Thanks in advance.
I'm using render-to-texture to render an image. I'm modifying the texture, and I want to save the texture as a bitmap. Currently I'm using the method GLES20.glReadPixels, storing the data in a ByteBuffer and creating the bitmap from that data. However, since I'm rendering to a texture, I already have the "renderText[0]" texture attached to the FBO, so I suppose there is a simpler way to get that texture into a bitmap... is there?
This is my current code:
public void saveChanges() {
    int width = currentBitmapWidth;
    int height = currentBitmapHeight;
    int size = width * height;

    ByteBuffer buf = ByteBuffer.allocateDirect(size * 4);
    buf.order(ByteOrder.nativeOrder());
    GLES20.glReadPixels(0, 0, width, height, GL10.GL_RGBA, GL10.GL_UNSIGNED_BYTE, buf);

    int[] data = new int[size];
    buf.asIntBuffer().get(data);
    buf = null;

    Bitmap createdBitmap = Bitmap.createBitmap(width, height, Bitmap.Config.RGB_565);
    // The negative stride writes the rows bottom-up, flipping the GL image vertically.
    createdBitmap.setPixels(data, size - width, -width, 0, 0, width, height);
    data = null;

    short[] sdata = new short[size];
    ShortBuffer sbuf = ShortBuffer.wrap(sdata);
    createdBitmap.copyPixelsToBuffer(sbuf);
    for (int i = 0; i < size; ++i) {
        // BGR-565 to RGB-565
        short v = sdata[i];
        sdata[i] = (short) (((v & 0x1f) << 11) | (v & 0x7e0) | ((v & 0xf800) >> 11));
    }
    sbuf.rewind();
    createdBitmap.copyPixelsFromBuffer(sbuf);

    try {
        if (true) {
            Matrix flip = new Matrix();
            flip.postScale(1f, -1f);
            temp = Bitmap.createBitmap(createdBitmap, 0, 0, createdBitmap.getWidth(),
                    createdBitmap.getHeight(), null, true);
            System.out.println("In save changes the temp width = " + temp.getWidth()
                    + " height = " + temp.getHeight());
            oldBitmap = createdBitmap;
            oldBitmap = Bitmap.createBitmap(oldBitmap, 0, 0, oldBitmap.getWidth(),
                    oldBitmap.getHeight(), flip, true);
            //currentImage = bmp;
            mOldTextureId = TextureHelper.loadTexture(context, oldBitmap);
            currentTextureModified = true;
            //drawOld = true;
            createdBitmap.recycle();
            createdBitmap = null;
        }
    } catch (Exception e) {
        // handle
        System.out.println("SAVE IMAGE ERRORRRRRR !!!!");
        System.out.println("Exception description !!! " + e.getMessage());
    } finally {
        saving = false;
    }
}
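On the "simpler way" question: while the FBO is bound, a shorter readback is possible by copying the RGBA bytes straight into an ARGB_8888 bitmap and flipping with a Matrix, which skips the manual BGR-565 channel swap above. A sketch, not a drop-in replacement (it assumes opaque alpha and that the FBO is currently bound):

import java.nio.ByteBuffer;
import java.nio.ByteOrder;

public Bitmap readFboToBitmap(int width, int height) {
    // glReadPixels returns R,G,B,A bytes, which matches the in-memory layout
    // of an ARGB_8888 bitmap, so no per-pixel channel swap is needed.
    ByteBuffer buf = ByteBuffer.allocateDirect(width * height * 4)
            .order(ByteOrder.nativeOrder());
    GLES20.glReadPixels(0, 0, width, height,
            GLES20.GL_RGBA, GLES20.GL_UNSIGNED_BYTE, buf);
    buf.rewind();
    Bitmap bmp = Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_8888);
    bmp.copyPixelsFromBuffer(buf);
    // GL's origin is bottom-left, so flip vertically for Android.
    Matrix flip = new Matrix();
    flip.postScale(1f, -1f);
    Bitmap result = Bitmap.createBitmap(bmp, 0, 0, width, height, flip, true);
    bmp.recycle();
    return result;
}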