I need to pass image data, such as a drawable, from the Java side to cocos2d-x through JNI. How do I implement it? What should the parameter of the JNI function be, and how do I cast it on the cocos2d-x side?
Create a Java interface for JNI like:
// JNI bridge: hands raw ARGB pixels (one 32-bit int per pixel) plus the image
// dimensions to native code. Implemented by Java_com_my_company_JniHelper_setBG.
public static native void setBG(int[] raw, int width, int height);
in c++ code do:
//Use static variable here for simplicity
//Use static variable here for simplicity
int *imagedata; // heap copy (new int[]) of the Java pixel array; consumed on the GL thread
int staticwidth; // width in pixels of imagedata
int staticheight; // height in pixels of imagedata
Texture2D *userBackgroundImage; // texture built from imagedata, or NULL if none/failed
// JNI entry point: copies the Java pixel array into a native buffer so the GL
// thread can later build a texture from it (see createImage()).
// raw holds one 32-bit pixel per int; width/height are the image dimensions.
void Java_com_my_company_JniHelper_setBG(JNIEnv* env, jobject thiz, jintArray raw, jint width, jint height)
{
    jint *carr = env->GetIntArrayElements(raw, 0);
    if (carr == NULL) {
        return; /* exception occurred */
    }
    ssize_t dataLen = (ssize_t)width * (ssize_t)height;
    // Make a copy because the texture must be created on the GL thread later.
    int *data = new int[dataLen];
    for (long i = 0; i < dataLen; i++)
    {
        data[i] = carr[i];
    }
    // was: a previously stored, not-yet-consumed buffer leaked here.
    // delete[] on NULL is a no-op, so this is safe on the first call too.
    delete[] imagedata;
    imagedata = data;
    staticwidth = (int)width;
    staticheight = (int)height;
    // JNI_ABORT: we never modified carr, so skip the copy-back into the Java array.
    env->ReleaseIntArrayElements(raw, carr, JNI_ABORT);
    LOGD("set image: %d * %d", width, height);
}
Then call the following method somewhere during layer init or from other cocos2d-x code:
// Builds a Texture2D from the pixel buffer captured by setBG(). Must run on
// the GL thread. dataLen is the buffer length in BYTES, as Texture2D expects.
// On failure all globals are reset so a later setBG()/createImage() can retry.
void createImage(const void *data, ssize_t dataLen, int width, int height)
{
    Texture2D *image = new Texture2D();
    if (!image->initWithData(data, dataLen, Texture2D::PixelFormat::BGRA8888, width, height, Size(width, height)))
    {
        delete image;
        // was: scalar `delete` on a buffer allocated with new[] — undefined behavior.
        delete[] imagedata;
        imagedata = NULL;
        userBackgroundImage = NULL;
        return;
    }
    // The pixel copy is no longer needed once the texture has consumed it.
    delete[] imagedata;
    imagedata = NULL;
    userBackgroundImage = image;
}
You can then use the Texture2D object to create a sprite or do whatever you want
To call this code from java:
/**
 * Extracts the pixels of a bitmap as a packed ARGB int array (one int per pixel),
 * suitable for passing to the native setBG() call.
 * was: an ARGB_8888 copy was made unconditionally and never recycled, doubling
 * peak memory for every conversion. Copy only when the config actually differs,
 * and recycle the temporary copy afterwards.
 */
public static int[] BitmapToRaw(Bitmap bitmap) {
    Bitmap image = bitmap;
    if (bitmap.getConfig() != Bitmap.Config.ARGB_8888) {
        image = bitmap.copy(Bitmap.Config.ARGB_8888, false);
    }
    int width = image.getWidth();
    int height = image.getHeight();
    int[] raw = new int[width * height];
    image.getPixels(raw, 0, width, 0, 0, width, height);
    if (image != bitmap) {
        image.recycle(); // free the temporary conversion copy
    }
    return raw;
}
// Decode the drawable resource and hand its pixels to native code via JNI.
Bitmap image = BitmapFactory.decodeResource(getResources(), R.drawable.bg);
JniHelper.setBG(BitmapToRaw(image), image.getWidth(), image.getHeight());
I've only ever sent image data from cocos2d-x to Java, so you'll need to find a way to reverse this method. It's used to capture a node and pass it through for screenshots.
// Capture a node into an offscreen render texture and ship its pixels to Java.
CCNode* node = <some node>;
const CCSize& size(node->getContentSize());
CCRenderTexture* render = CCRenderTexture::create(size.width, size.height);
// render node to the texture buffer
render->clear(0, 0, 0, 1);
render->begin();
node->visit();
render->end();
CCImage* image = render->newCCImage();
// If we don't clear then the JNI call gets corrupted.
render->clear(0, 0, 0, 1);
// Create the array to pass in.
// was: getDataLen() was used directly as the array length, but it is a BYTE
// count while the Java array holds 32-bit pixels — SetIntArrayRegion then
// copied 4x past the end of the image data. Divide by the pixel size.
jsize length = (jsize)(image->getDataLen() / sizeof(jint));
jintArray imageBytes = t.env->NewIntArray(length);
unsigned char* imageData = image->getData();
t.env->SetIntArrayRegion(imageBytes, 0, length, reinterpret_cast<const jint*>(imageData));
t.env->CallStaticVoidMethod(t.classID, t.methodID, imageBytes, (jint)image->getWidth(), (jint)image->getHeight());
image->release();
t.env->DeleteLocalRef(imageBytes);
t.env->DeleteLocalRef(t.classID);
The Java side looks like this:
import android.graphics.Bitmap;
import android.graphics.Bitmap.Config;
/** Wraps a packed ARGB pixel array received from native code in an ARGB_8888 Bitmap. */
public static Bitmap getImage(int[] imageData, int width, int height) {
    final Bitmap result = Bitmap.createBitmap(width, height, Config.ARGB_8888);
    result.setPixels(imageData, 0, width, 0, 0, width, height);
    return result;
}
I think the best and easiest way to do it would be to save the image to a file in Java, access that file from C++, and delete it after use.
Related
I am capturing frames in OnPreviewFrame() and then processing them in a thread to check if they are valid or not.
// Camera preview callback: throttles frames, wraps each selected frame in a
// FrameModel and hands it to validateFrame() for background processing.
public void onPreviewFrame(byte[] data, Camera camera) {
if (imageFormat == ImageFormat.NV21) {
//We only accept the NV21(YUV420) format.
// Skip the first 19 frames, then take every other frame.
frameCount++;
if (frameCount > 19 && frameCount % 2 == 0) {
Camera.Parameters parameters = camera.getParameters();
// NOTE(review): if the callback is registered with
// setPreviewCallbackWithBuffer, the camera reuses this byte[] for later
// frames; FrameModel should then store a copy. Confirm how the callback
// is registered.
FrameModel fModel = new FrameModel(data);
fModel.setPreviewWidth(parameters.getPreviewSize().width);
fModel.setPreviewHeight(parameters.getPreviewSize().height);
fModel.setPicFormat(parameters.getPreviewFormat());
fModel.setFrameCount(frameCount);
validateFrame(fModel);
}
}
}
In validateFrame(), I submit a ValidatorThread runnable instance to a ThreadPoolExecutor with 4 core and max threads, to process the frames in parallel.
public class ValidatorThread implements Runnable {
private FrameModel frame;
public ValidatorThread(FrameModel fModel) {
frame = fModel;
}
#Override
public void run() {
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
e.printStackTrace();
}
processNV21Data();
}
private void processNV21Data() {
YuvImage yuv = new YuvImage(frame.getData(), frame.getPicFormat(),
frame.getPreviewWidth(), frame.getPreviewHeight(), null);
frame.releaseData();
ByteArrayOutputStream out = new ByteArrayOutputStream();
yuv.compressToJpeg(new Rect(0, 0, frame.getPreviewWidth(), frame.getPreviewHeight()), 100, out);
byte[] bytes = out.toByteArray();
yuv = null;
try {
if (out != null)
out.close();
out = null;
} catch (IOException e) {
e.printStackTrace();
}
Bitmap baseBitmap = BitmapFactory.decodeByteArray(bytes, 0, bytes.length);
bytes = null;
// rotate bitmap
baseBitmap = rotateImage(baseBitmap, frame.getRotation());
//create copy of original bitmap to use later
Bitmap mCheckedBitmap = baseBitmap.copy(Bitmap.Config.ARGB_8888, true);
// convert base bitmap to greyscale for validation
baseBitmap = toGrayscale(baseBitmap);
boolean isBitmapValid = Util.isBitmapValid(baseBitmap);
if (isBitmapValid) {
baseBitmap.recycle();
mCheckedBitmap.recycle();
frame = null;
} else {
baseBitmap.recycle();
mCheckedBitmap.recycle();
frame = null;
}
}
public Bitmap toGrayscale(Bitmap bmpOriginal) {
int width, height;
height = bmpOriginal.getHeight();
width = bmpOriginal.getWidth();
Bitmap bmpGrayscale = Bitmap.createBitmap(width, height, Bitmap.Config.RGB_565);
Canvas c = new Canvas(bmpGrayscale);
Paint paint = new Paint();
bmpOriginal.recycle();
return bmpGrayscale;
}
private Bitmap rotateImage(final Bitmap source, float angle) {
Matrix matrix = new Matrix();
matrix.postRotate(angle);
Bitmap rotatedBitmap = Bitmap.createBitmap(source, 0, 0, source.getWidth(), source.getHeight(), matrix, true);
source.recycle();
return rotatedBitmap;
}
}
The FrameModel class has such declaration :
/**
 * Value holder for one camera preview frame: the raw NV21 bytes plus the
 * metadata needed to decode it later on a worker thread.
 * The getters/setters the original elided behind a comment are implemented
 * here, matching how the class is used by onPreviewFrame() and ValidatorThread
 * (including the byte[] constructor and getRotation()).
 */
public class FrameModel {

    // Raw NV21 preview bytes; released as soon as they have been consumed.
    private byte[] data;
    private int previewWidth;
    private int previewHeight;
    private int picFormat;
    private int frameCount;
    private int rotation;

    public FrameModel() {
    }

    public FrameModel(byte[] data) {
        this.data = data;
    }

    /** Drops the pixel buffer so it can be garbage-collected early. */
    public void releaseData() {
        data = null;
    }

    public byte[] getData() {
        return data;
    }

    public int getPreviewWidth() {
        return previewWidth;
    }

    public void setPreviewWidth(int previewWidth) {
        this.previewWidth = previewWidth;
    }

    public int getPreviewHeight() {
        return previewHeight;
    }

    public void setPreviewHeight(int previewHeight) {
        this.previewHeight = previewHeight;
    }

    public int getPicFormat() {
        return picFormat;
    }

    public void setPicFormat(int picFormat) {
        this.picFormat = picFormat;
    }

    public int getFrameCount() {
        return frameCount;
    }

    public void setFrameCount(int frameCount) {
        this.frameCount = frameCount;
    }

    public int getRotation() {
        return rotation;
    }

    public void setRotation(int rotation) {
        this.rotation = rotation;
    }
}
I am getting an OutOfMemory error while processing multiple frames.
Can anyone help what memory optimisation does the code need?
You can reduce memory usage if you produce grayscale bitmap from YUV data without going through Jpeg. This will also be significantly faster.
/**
 * Builds a grayscale Bitmap directly from the Y (luminance) plane at the start
 * of an NV21/YUV buffer, skipping the JPEG round trip entirely.
 */
public Bitmap yuv2grayscale(byte[] yuv, int width, int height) {
    int[] pixels = new int[width * height];
    for (int i = 0; i < height * width; i++) {
        int y = yuv[i] & 0xff;
        // was: y << 16 | y << 16 | y — the green shift was wrong (16 twice),
        // dropping the green channel. Replicate luma into R, G and B.
        pixels[i] = 0xFF000000 | y << 16 | y << 8 | y;
    }
    return Bitmap.createBitmap(pixels, width, height, Bitmap.Config.RGB_565);
}
Alternatively, you can create an RGB_565 bitmap without going through int[width*height] pixels array, and manipulate the bitmap pixels in place using NDK.
I have a feature request. The current flow is for the user to scan a code (not a QR code, not sure what it is, zxing will scan it), then scan the test card.
The client has asked for me allow the user to import the test from the library. So we need to be able to scan the code off an image.
Is it possible to do this in zxing or am I forced to use the camera / feature is not possible?
Thanks!
Here is my solution. I had to downsize the image and invert the colors for it to work with zxing. I might add a convert-to-grayscale step, but not today.
/**
 * Decodes a barcode from a still image with zxing. The image is color-inverted
 * and scaled first, which zxing copes with much better.
 * Returns the decoded text, or null when nothing could be decoded.
 */
public static String scanDataMatrixImage(Bitmap bitmap) {
    bitmap = doInvert(bitmap);
    double scaling = getScaling(bitmap);
    final Bitmap resized = scaling > 0
            ? Bitmap.createScaledBitmap(bitmap, (int) (bitmap.getWidth() * scaling), (int) (bitmap.getHeight() * scaling), true)
            : bitmap;
    // copy pixel data from the Bitmap into an int array for zxing
    final int w = resized.getWidth();
    final int h = resized.getHeight();
    final int[] intArray = new int[w * h];
    resized.getPixels(intArray, 0, w, 0, 0, w, h);
    final LuminanceSource source = new RGBLuminanceSource(w, h, intArray);
    final BinaryBitmap binaryBitmap = new BinaryBitmap(new HybridBinarizer(source));
    String contents = null;
    try {
        contents = new MultiFormatReader().decode(binaryBitmap).getText();
    } catch (Exception e) {
        Log.e("QrTest", "Error decoding barcode", e);
    }
    return contents;
}
/**
 * Scale factor that maps the image's smaller dimension to roughly 200 px
 * (values above 1 would upscale, below 1 downscale).
 */
private static double getScaling(Bitmap bitmap) {
    final int smallest = Math.min(bitmap.getWidth(), bitmap.getHeight());
    return 200.0 / smallest;
}
/**
 * Returns a new bitmap with every RGB channel inverted; alpha is preserved.
 * was: one getPixel()/setPixel() pair per pixel — a JNI round trip each —
 * which is extremely slow on large images. Bulk-copy the pixels and invert
 * in place: 255 - x per channel is exactly {@code p ^ 0x00FFFFFF}.
 */
public static Bitmap doInvert(Bitmap src) {
    // image size
    int width = src.getWidth();
    int height = src.getHeight();
    // create new bitmap with the same settings as source bitmap
    Bitmap bmOut = Bitmap.createBitmap(width, height, src.getConfig());
    int[] pixels = new int[width * height];
    src.getPixels(pixels, 0, width, 0, 0, width, height);
    for (int i = 0; i < pixels.length; i++) {
        pixels[i] ^= 0x00FFFFFF; // invert R, G, B; keep alpha
    }
    bmOut.setPixels(pixels, 0, width, 0, 0, width, height);
    // return final bitmap
    return bmOut;
}
I used the ocr sample in this link https://github.com/rmtheis/android-ocr
Everything is working fine, but I want it in portrait view. I followed the steps in this link, Zxing Camera in Portrait mode on Android, to enable OCR (Tess-two) in portrait mode. The view is portrait now, but the camera is still taking the picture in landscape mode.
Any help ?
// Receives camera preview frames and forwards exactly one frame to the
// registered handler. (The class's closing brace is outside this excerpt.)
final class PreviewCallback implements Camera.PreviewCallback {
private static final String TAG = PreviewCallback.class.getSimpleName();
private final CameraConfigurationManager configManager; // supplies the preview resolution
private Handler previewHandler; // one-shot target; cleared after a frame is delivered
private int previewMessage; // Message.what code used when delivering a frame
PreviewCallback(CameraConfigurationManager configManager) {
this.configManager = configManager;
}
// Registers the handler/message that should receive the NEXT preview frame.
void setHandler(Handler previewHandler, int previewMessage) {
this.previewHandler = previewHandler;
this.previewMessage = previewMessage;
}
// (NV21) format.
#Override
public void onPreviewFrame(byte[] data, Camera camera) {
Point cameraResolution = configManager.getCameraResolution();
Handler thePreviewHandler = previewHandler;
if (cameraResolution != null && thePreviewHandler != null) {
Message message = thePreviewHandler.obtainMessage(previewMessage, cameraResolution.x,
cameraResolution.y, data);
message.sendToTarget();
previewHandler = null;
} else {
Log.d(TAG, "Got preview callback, but no handler or resolution available");
}
}
Are you using the preview data with this method:
public void onPreviewFrame(byte[] data, Camera camera) {}
If yes, then I can help you, since I am doing very similar project (that will be open sourced soon)
here is the code that I am using to rotate the preview image
/**
 * Decodes an NV21 preview buffer to a Bitmap, then rotates and crops it to
 * the requested orientation and region.
 */
public static Bitmap getBitmapImageFromYUV(byte[] data, int width,
        int height, int degree, Rect rect) {
    final Bitmap decoded = getBitmapImageFromYUV(data, width, height, rect);
    return rotateBitmap(decoded, degree, rect);
}
/**
 * Rotates {@code source} by {@code angle} degrees and crops it to {@code rect},
 * optionally saving the result for debugging.
 * was: the intermediate rotated bitmap leaked. Bitmap.createBitmap may return
 * its input unchanged, so only recycle when a new instance was produced.
 */
public static Bitmap rotateBitmap(Bitmap source, float angle, Rect rect) {
    Matrix matrix = new Matrix();
    matrix.postRotate(angle);
    Bitmap rotated = Bitmap.createBitmap(source, 0, 0, source.getWidth(),
            source.getHeight(), matrix, true);
    Bitmap cropped = Bitmap.createBitmap(rotated, rect.left, rect.top, rect.width(), rect.height());
    if (cropped != rotated && rotated != source) {
        rotated.recycle(); // free the intermediate full-size rotation
    }
    if (mShouldSavePreview)
        saveBitmap(cropped);
    return cropped;
}
/**
 * Converts an NV21 preview buffer to an ARGB_8888 Bitmap by routing it
 * through an in-memory JPEG (YuvImage has no direct Bitmap path).
 * The rect parameter is unused here; cropping happens in rotateBitmap().
 */
public static Bitmap getBitmapImageFromYUV(byte[] data, int width,
        int height, Rect rect) {
    final YuvImage yuvImage = new YuvImage(data, ImageFormat.NV21, width, height, null);
    final ByteArrayOutputStream jpegStream = new ByteArrayOutputStream();
    yuvImage.compressToJpeg(new Rect(0, 0, width, height), 90, jpegStream);
    final byte[] jpegBytes = jpegStream.toByteArray();
    final BitmapFactory.Options options = new BitmapFactory.Options();
    options.inPreferredConfig = Bitmap.Config.ARGB_8888;
    final Bitmap decoded = BitmapFactory.decodeByteArray(jpegBytes, 0, jpegBytes.length, options);
    Log.d(TAG, "getBitmapImageFromYUV w:" + decoded.getWidth() + " h:" + decoded.getHeight());
    return decoded;
}
guys i found the solution!
Replace the next code in function: ocrDecode(byte[] data, int width, int height) in DecodeHandler.java file
beepManager.playBeepSoundAndVibrate();
activity.displayProgressDialog();
// *************SHARNOUBY CODE
// Rotate the frame 90 degrees so the OCR engine sees it in portrait:
// pixel (x, y) maps to row x, column (height-1-y) in the new buffer.
// NOTE(review): only the first width*height bytes (the NV21 luminance plane)
// are rotated; the trailing chroma bytes of rotatedData stay zero. Confirm
// the OCR engine only reads the Y plane.
byte[] rotatedData = new byte[data.length];
for (int y = 0; y < height; y++) {
for (int x = 0; x < width; x++)
rotatedData[x * height + height - y - 1] = data[x + y * width];
}
// Swap dimensions to match the rotated buffer.
int tmp = width;
width = height;
height = tmp;
//******************************
// Launch OCR asynchronously, so we get the dialog box displayed
// immediately
new OcrRecognizeAsyncTask(activity, baseApi, rotatedData, width, height)
.execute();
...the problem was in the switch case in the function handleMessage(Message message)
the second case was never triggered which calls the rotation code
I am developing an AutoCAD-like desktop-style app on Android using OpenGL ES 2.0. I have drawn some objects on a GLSurfaceView — lines, circles, linear dimensioning, etc. After drawing the objects on the GLSurfaceView, I capture a screenshot of the GLSurfaceView and convert it to a PDF file. Then, when I open the PDF file, some objects are missing...
This is my output First-image : my Original output , Second-image : PDF File output...
My Code:
Note: in this code, clicking the button takes a screenshot and saves it to the SD card. I guard the capture with a boolean in the onDrawFrame if-condition because the renderer calls onDrawFrame continuously; without the guard, the code would run on every frame and fill the memory card with images.
MainActivity Class :
// Set from the UI thread on button press; checked (and reset) by the renderer
// on the GL thread inside onDrawFrame().
protected boolean printOptionEnable = false;

saveImageButton.setOnClickListener(new OnClickListener() {
    @Override // was mangled as "#Override" — would not compile
    public void onClick(View v) {
        Log.v("hari", "pan button clicked");
        isSaveClick = true;
        myRenderer.printOptionEnable = isSaveClick;
    }
});
MyRenderer Class :
// Cached surface dimensions, read by onDrawFrame() when capturing a screenshot.
int width_surface , height_surface ;

@Override // was mangled as "#Override" — would not compile
public void onSurfaceChanged(GL10 gl, int width, int height) {
    Log.i("JO", "onSurfaceChanged");
    // Adjust the viewport based on geometry changes, such as screen rotation.
    GLES20.glViewport(0, 0, width, height);
    // was: an unused `ratio` local was computed here — removed.
    width_surface = width;
    height_surface = height;
}
//---------------------------------------------------------------------
#Override
public void onDrawFrame(GL10 gl) {
try {
if ( printOptionEnable ) {
printOptionEnable = false ;
Log.i("hari", "printOptionEnable if condition:"+printOptionEnable);
int w = width_surface ;
int h = height_surface ;
Log.i("hari", "w:"+w+"-----h:"+h);
int b[]=new int[(int) (w*h)];
int bt[]=new int[(int) (w*h)];
IntBuffer buffer=IntBuffer.wrap(b);
buffer.position(0);
GLES20.glReadPixels(0, 0, w, h,GLES20.GL_RGBA,GLES20.GL_UNSIGNED_BYTE, buffer);
for(int i=0; i<h; i++)
{
//remember, that OpenGL bitmap is incompatible with Android bitmap
//and so, some correction need.
for(int j=0; j<w; j++)
{
int pix=b[i*w+j];
int pb=(pix>>16)&0xff;
int pr=(pix<<16)&0x00ff0000;
int pix1=(pix&0xff00ff00) | pr | pb;
bt[(h-i-1)*w+j]=pix1;
}
}
Bitmap inBitmap = null ;
if ( inBitmap == null || !inBitmap.isMutable() ||
inBitmap.getWidth() != w || inBitmap.getHeight() != h) {
inBitmap = Bitmap.createBitmap(w, h, Bitmap.Config.ARGB_8888);
}
//Bitmap.createBitmap(w, h, Bitmap.Config.ARGB_8888);
inBitmap.copyPixelsFromBuffer(buffer);
//return inBitmap ;
// return Bitmap.createBitmap(bt, w, h, Bitmap.Config.ARGB_8888);
inBitmap = Bitmap.createBitmap(bt, w, h, Bitmap.Config.ARGB_8888);
ByteArrayOutputStream bos = new ByteArrayOutputStream();
inBitmap.compress(CompressFormat.JPEG, 90, bos);
byte[] bitmapdata = bos.toByteArray();
ByteArrayInputStream fis = new ByteArrayInputStream(bitmapdata);
final Calendar c=Calendar.getInstance();
long mytimestamp=c.getTimeInMillis();
String timeStamp=String.valueOf(mytimestamp);
String myfile="hari"+timeStamp+".jpeg";
dir_image=new File(Environment.getExternalStorageDirectory()+File.separator+
"printerscreenshots"+File.separator+"image");
dir_image.mkdirs();
try {
File tmpFile = new File(dir_image,myfile);
FileOutputStream fos = new FileOutputStream(tmpFile);
byte[] buf = new byte[1024];
int len;
while ((len = fis.read(buf)) > 0) {
fos.write(buf, 0, len);
}
fis.close();
fos.close();
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
Log.v("hari", "screenshots:"+dir_image.toString());
}
} catch(Exception e) {
e.printStackTrace() ;
}
}
Please, can anyone help me?
Thanks in advance.
I'm using render-to-texture to render an image. I'm modifying the texture, and I want to save the texture as a bitmap. Currently I'm using GLES20.glReadPixels, storing the data in a ByteBuffer and creating the bitmap from that data. However, since I'm rendering to a texture, I already have the "renderText[0]" texture attached to the FBO, so I suppose there is a simpler way to get that texture into a bitmap — is there?
This is my current code :
/**
 * Reads the current framebuffer back with glReadPixels, converts the channel
 * order in-place, flips the result, and re-uploads it as a GL texture.
 * NOTE(review): since the scene is already rendered into a texture attached
 * to the FBO, sampling that texture directly would avoid this readback —
 * confirm whether renderText[0] can be used instead.
 */
public void saveChanges()
{
int width = currentBitmapWidth;
int height = currentBitmapHeight;
int size = width * height;
// Direct buffer in native byte order, as glReadPixels requires.
ByteBuffer buf = ByteBuffer.allocateDirect(size * 4);
buf.order(ByteOrder.nativeOrder());
GLES20.glReadPixels(0, 0, width, height, GL10.GL_RGBA, GL10.GL_UNSIGNED_BYTE, buf);
int data[] = new int[size];
buf.asIntBuffer().get(data);
buf = null;
Bitmap createdBitmap = Bitmap.createBitmap(width, height, Bitmap.Config.RGB_565);
// Offset size-width with stride -width walks the rows backwards,
// flipping the (bottom-up) GL image vertically while copying.
createdBitmap.setPixels(data, size-width, -width, 0, 0, width, height);
data = null;
// Round-trip the bitmap's RGB_565 pixels through a short[] so the
// swapped R/B channels can be corrected in place.
short sdata[] = new short[size];
ShortBuffer sbuf = ShortBuffer.wrap(sdata);
createdBitmap.copyPixelsToBuffer(sbuf);
for (int i = 0; i < size; ++i) {
//BGR-565 to RGB-565
short v = sdata[i];
sdata[i] = (short) (((v&0x1f) << 11) | (v&0x7e0) | ((v&0xf800) >> 11));
}
sbuf.rewind();
createdBitmap.copyPixelsFromBuffer(sbuf);
try {
if(true)
{
Matrix flip = new Matrix();
flip.postScale(1f, -1f);
// NOTE(review): `flip` is built but `null` is passed here, so temp is an
// unflipped copy; only oldBitmap below gets the flip. Confirm intent.
temp = Bitmap.createBitmap(createdBitmap, 0, 0, createdBitmap.getWidth(), createdBitmap.getHeight(), null, true);
System.out.println("In save changes the temp width = "+temp.getWidth() + " height = "+temp.getHeight());
oldBitmap = createdBitmap;
oldBitmap = Bitmap.createBitmap(oldBitmap, 0, 0, oldBitmap.getWidth(), oldBitmap.getHeight(), flip, true);
//currentImage = bmp;
mOldTextureId = TextureHelper.loadTexture(context, oldBitmap);
currentTextureModified = true;
//drawOld = true;
createdBitmap.recycle();
createdBitmap = null;
}
} catch (Exception e) {
// handle
System.out.println("SAVE IMAGE ERRORRRRRR !!!!");
System.out.println("Exception description !!! "+e.getMessage());
}finally
{
saving = false;
}
}