I'm rendering sprites (game cards with one image and a table) on Android for a game and packing them with a PixmapPacker afterwards. When I run the code in the Android emulator, the sprites are drawn correctly onto the atlas PNG, but the background of the file is completely black, with some glitchy yellow pixels along the edges of the sprites. Running the same code on my phone produces an almost completely black PNG in which all my sprites are black (only my "back.png" is drawn).
Have I cleared all buffers correctly, or am I doing something wrong with the order of m_fbo.begin() and m_fbo.end()? I am also not sure whether it is necessary to call spriteBatch.begin() and spriteBatch.end() every time inside the loop.
Any help would be very much appreciated.
Sprite sprite;
BitmapFont font;
Texture texture;
OrthographicCamera cam;
FrameBuffer m_fbo = null;
SpriteBatch spriteBatch;
Pixmap pixmap;
Skin skin;
public AtlasGenerator() {
    cam = new OrthographicCamera(w, h);
    cam.position.set(new Vector2(w / 2, h / 2), 1);
    cam.update();

    PixmapPacker packer = new PixmapPacker(packer_width, packer_height, Pixmap.Format.RGB565, padding, true);
    //packer.setTransparentColor(Color.BLACK); // Is this necessary?

    font = new BitmapFont();
    spriteBatch = new SpriteBatch();
    cam.update();
    spriteBatch.setProjectionMatrix(cam.combined);

    Table table = new Table(skin);

    m_fbo = new FrameBuffer(Pixmap.Format.RGB565, w, h, false);
    m_fbo.begin();

    // clear the FBO (note: glClear is called before glClearColor here)
    Gdx.gl.glClear(GL20.GL_COLOR_BUFFER_BIT);
    Gdx.gl.glClearColor(1f, 1f, 1f, 1);

    Gdx.gl.glEnable(GL20.GL_BLEND);
    Gdx.gl.glBlendFunc(GL20.GL_SRC_ALPHA, GL20.GL_ONE_MINUS_SRC_ALPHA);
    //Gdx.gl.glColorMask(false, false, false, true);

    int cardsCounter = game.getCounter();
    for (int cardID = 0; cardID < cardsCounter; cardID++) {
        System.out.println("Generating card " + (cardID + 1) + " of " + cardsCounter);
        texture = new Texture(Gdx.files.internal(game.get(cardID).getImages().get(0).getFilename()));
        sprite = new Sprite(texture);

        /* adding data to table here... */

        /* Rendering */
        spriteBatch.begin();
        Gdx.gl.glClear(GL20.GL_COLOR_BUFFER_BIT);
        Gdx.gl.glClearColor(1f, 1, 1, 1f);
        spriteBatch.draw(sprite, 0, h - newh, neww, newh);
        table.draw(spriteBatch, 1f);
        spriteBatch.end();

        // read the FBO contents back into a Pixmap
        ByteBuffer buf;
        pixmap = new Pixmap(w, h, Pixmap.Format.RGB888);
        buf = pixmap.getPixels();
        Gdx.gl.glReadPixels(1, 1, w, h, GL20.GL_RGB, GL20.GL_UNSIGNED_BYTE, buf);
        pixmap = flipPixmap(pixmap);

        // convert pixmap to RGB565
        Pixmap.Format format = Pixmap.Format.RGB565; // desired format
        if (pixmap.getFormat() != format) { // perform conversion if necessary
            Pixmap tmp = new Pixmap(pixmap.getWidth(), pixmap.getHeight(), format);
            tmp.drawPixmap(pixmap, 0, 0); // copy pixmap to tmp
            pixmap.dispose(); // dispose of the old pixmap
            pixmap = tmp; // swap values
        }

        //packer.updatePageTextures();
        //packer.updateTextureAtlas();
        //packer.updateTextureRegions();
        packer.pack(String.valueOf(cardID), pixmap);
    }
    m_fbo.end();

    System.out.println("Finished generating cards");
    System.out.println("Start generating card pack...");

    PixmapPackerIO.SaveParameters saveParameters = new PixmapPackerIO.SaveParameters();
    saveParameters.magFilter = Texture.TextureFilter.Nearest;
    saveParameters.minFilter = Texture.TextureFilter.Nearest;

    packer.pack("back", new Pixmap(Gdx.files.internal("back.png")));

    PixmapPackerIO pixmapPackerIO = new PixmapPackerIO();
    try {
        pixmapPackerIO.save(Gdx.files.external("cards.atlas"), packer, saveParameters);
    } catch (IOException e) {
        e.printStackTrace();
    }
    System.out.println("Finished generating card pack");

    packer.dispose();
    dispose();
}
I fixed it by removing the OpenGL code and working with a TextureRegion:
m_fboRegion = new TextureRegion(m_fbo.getColorBufferTexture());
m_fboRegion.flip(false, true);
m_fbo.begin();
Gdx.gl.glClearColor(1f, 1f, 1f, 1);
Gdx.gl.glClear(GL_COLOR_BUFFER_BIT);
/* adding some data to sprite and table */
spriteBatch.begin();
Gdx.gl.glClear(GL_COLOR_BUFFER_BIT);
Gdx.gl.glClearColor(1f, 1, 1, 1f);
spriteBatch.draw(sprite, 0, h - newh, neww, newh);
table.draw(spriteBatch, 1f);
spriteBatch.end();
pixmap = ScreenUtils.getFrameBufferPixmap(0, 0, w, h);
pixmap = flipPixmap(pixmap);
m_fbo.end();
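For reference, here is a condensed sketch of how the corrected capture loop fits together (w, h, neww, newh, packer, and flipPixmap come from the code above; the clear color is now set before glClear, and the pixels are read back while the FBO is still bound):

m_fbo.begin();
for (int cardID = 0; cardID < cardsCounter; cardID++) {
    Gdx.gl.glClearColor(1f, 1f, 1f, 1f);       // set the clear color first...
    Gdx.gl.glClear(GL20.GL_COLOR_BUFFER_BIT);  // ...then clear the FBO
    /* build sprite and table for this card */
    spriteBatch.begin();
    spriteBatch.draw(sprite, 0, h - newh, neww, newh);
    table.draw(spriteBatch, 1f);
    spriteBatch.end();
    // read back while the FBO is still bound
    pixmap = ScreenUtils.getFrameBufferPixmap(0, 0, w, h);
    packer.pack(String.valueOf(cardID), flipPixmap(pixmap));
}
m_fbo.end();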
I am working on marker-based AR with the native Android Vuforia SDK. What I am trying to do is move my video object according to the camera (it should always face the camera). I tried the following, but it is not working reliably: sometimes it works, but it is not consistent.
public void renderFrame(State state, float[] projectionMatrix) {
    mSampleAppRenderer.renderVideoBackground();
    GLES20.glEnable(GLES20.GL_DEPTH_TEST);
    isTracking = false;

    for (int tIdx = 0; tIdx < state.getNumTrackableResults(); tIdx++) {
        TrackableResult trackableResult = state.getTrackableResult(tIdx);
        ImageTarget imageTarget = (ImageTarget) trackableResult.getTrackable();
        imageTarget.startExtendedTracking();
        isTracking = true;

        float[] modelViewMatrixVideo = Tool.convertPose2GLMatrix(trackableResult.getPose()).getData();
        float[] modelViewProjectionVideo = new float[16];
        Matrix44F invTranspMV = SampleMath.Matrix44FTranspose(SampleMath.Matrix44FInverse(Tool.convertPose2GLMatrix(trackableResult.getPose())));
        Matrix.translateM(modelViewMatrixVideo, 0, 0f, 0f, 1f);
        Matrix.rotateM(modelViewMatrixVideo, 0, (float) Math.toDegrees(Math.asin(-invTranspMV.getData()[6])), 0.0f, 0.f, 1.0f);
        Matrix.multiplyMM(modelViewProjectionVideo, 0, projectionMatrix, 0, modelViewMatrixVideo, 0);

        GLES20.glEnable(GLES20.GL_BLEND);
        GLES20.glBlendFunc(GLES20.GL_SRC_ALPHA, GLES20.GL_ONE_MINUS_SRC_ALPHA);
        GLES20.glUseProgram(videoPlaybackShaderID);
        GLES20.glVertexAttribPointer(videoPlaybackVertexHandle, 3, GLES20.GL_FLOAT, false, 0, quadVertices);
        GLES20.glVertexAttribPointer(videoPlaybackTexCoordHandle, 2, GLES20.GL_FLOAT, false, 0, fillBuffer(videoQuadTextureCoordsTransformedStones));
        GLES20.glEnableVertexAttribArray(videoPlaybackVertexHandle);
        GLES20.glEnableVertexAttribArray(videoPlaybackTexCoordHandle);
        GLES20.glActiveTexture(GLES20.GL_TEXTURE0);
        GLES20.glBindTexture(GLES11Ext.GL_TEXTURE_EXTERNAL_OES, videoPlaybackTextureID);
        GLES20.glUniformMatrix4fv(videoPlaybackMVPMatrixHandle, 1, false, modelViewProjectionVideo, 0);
        GLES20.glDrawElements(GLES20.GL_TRIANGLES, NUM_QUAD_INDEX, GLES20.GL_UNSIGNED_SHORT, quadIndices);
        GLES20.glDisableVertexAttribArray(videoPlaybackVertexHandle);
        GLES20.glDisableVertexAttribArray(videoPlaybackTexCoordHandle);
        GLES20.glUseProgram(0);
        GLES20.glDisable(GLES20.GL_BLEND);
        SampleUtils.checkGLError("VideoPlayback renderFrame");
    }

    GLES20.glDisable(GLES20.GL_DEPTH_TEST);
    Renderer.getInstance().end();
}
I have tried the above so far, and it works sometimes, but not properly. Please help me; I have been trying to do this for a week.
"What I am trying to do is move my video object according to the camera (it should always face the camera)."
If you want an object to face the camera (billboarding), then you have to use a model matrix which is the inverse of the view matrix, but without the translation part.
Use Matrix44FInverse to get the inverse of a Matrix44F:
public void renderFrame(State state, float[] projectionMatrix) {
    .....

    // get the view matrix and set the translation part to (0, 0, 0)
    float[] tempViewMat = Tool.convertPose2GLMatrix(trackableResult.getPose()).getData();
    tempViewMat[12] = 0;
    tempViewMat[13] = 0;
    tempViewMat[14] = 0;

    // create the billboard matrix
    Matrix44F billboardMatrix = new Matrix44F();
    billboardMatrix.setData(tempViewMat);
    billboardMatrix = SampleMath.Matrix44FInverse(billboardMatrix);

    // calculate the model view projection matrix
    float[] viewMatrixVideo = Tool.convertPose2GLMatrix(trackableResult.getPose()).getData();
    float[] modelViewVideo = new float[16];
    Matrix.multiplyMM(modelViewVideo, 0, viewMatrixVideo, 0, billboardMatrix.getData(), 0);
    float[] modelViewProjectionVideo = new float[16];
    Matrix.multiplyMM(modelViewProjectionVideo, 0, projectionMatrix, 0, modelViewVideo, 0);

    .....
}
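As an aside (an editor's note, not part of the original answer), the index choice above follows the column-major layout that android.opengl.Matrix and OpenGL use:

// Column-major 4x4 layout used by android.opengl.Matrix / OpenGL:
//
//   [ m0  m4  m8   m12 ]
//   [ m1  m5  m9   m13 ]     indices 12, 13, 14 hold the translation;
//   [ m2  m6  m10  m14 ]     the upper-left 3x3 block holds the rotation.
//   [ m3  m7  m11  m15 ]
//
// With the translation zeroed, the matrix is a pure rotation, and its
// inverse exactly counter-rotates the camera, so the quad always faces it.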
I used your code, but my AR object rotates in every direction. I want something different: I have one video that I want to place vertically on the marker, and only if the user moves left or right do I want it to rotate to face the camera, so that the vertical AR video looks the same from any viewpoint.
What you want to do is fix the upward direction, but orient the normal vector toward the line of sight.
The line of sight is the inverse Z-axis of the view space in a right-handed coordinate system.
Matrix44F inverse_view = SampleMath.Matrix44FInverse(
        Tool.convertPose2GLMatrix(trackableResult.getPose()));
float[] iv = inverse_view.getData();

// line of sight
Vec3F los = new Vec3F(-iv[8], -iv[9], -iv[10]);
Either the Y-axis (0, 1, 0) or the Z-axis (0, 0, 1) of the model has to be the Z-axis of the orientation matrix.
The X-axis is the cross product (Vec3FCross) of the line of sight and the Z-axis of the orientation matrix.
e.g.
Vec3F z_axis = new Vec3F(0, 0, 1);
Vec3F x_axis = Vec3FNormalize(Vec3FCross(los, z_axis));
Vec3F y_axis = Vec3FCross(z_axis, x_axis);

float[] orientationMatrix = new float[]{
    x_axis.getData()[0], x_axis.getData()[1], x_axis.getData()[2], 0,
    y_axis.getData()[0], y_axis.getData()[1], y_axis.getData()[2], 0,
    z_axis.getData()[0], z_axis.getData()[1], z_axis.getData()[2], 0,
    0, 0, 0, 1
};
// calculate the model view projection matrix
float[] viewMatrixVideo = Tool.convertPose2GLMatrix(trackableResult.getPose()).getData();
float[] modelViewVideo = new float[16];
Matrix.multiplyMM(modelViewVideo, 0, viewMatrixVideo, 0, orientationMatrix, 0);
float[] modelViewProjectionVideo = new float[16];
Matrix.multiplyMM(modelViewProjectionVideo, 0, projectionMatrix, 0, modelViewVideo, 0);
I am working on an OpenCV eye-detection project, and I have successfully detected the rectangular regions of both eyes with a Haar cascade for both eyes. Now I want to detect the eyeballs within both eyes; the problem is that I have no Haar cascade for eyeball tracking. Kindly help me if any of you have this XML, or suggest another solution.
Here is my eye-detection code:
private Mat get_template(CascadeClassifier clasificator, Rect area, int size) {
    Mat eye = new Mat();
    Mat template = new Mat();
    Mat mROI = mGray.submat(area);
    MatOfRect eyes = new MatOfRect();
    Point iris = new Point();
    Rect eye_template = new Rect();

    clasificator.detectMultiScale(mROI, eyes, 1.15, 2,
            Objdetect.CASCADE_FIND_BIGGEST_OBJECT | Objdetect.CASCADE_SCALE_IMAGE,
            new Size(30, 30), new Size());

    Rect[] eyesArray = eyes.toArray();
    for (int i = 0; i < eyesArray.length; i++) {
        Rect e = eyesArray[i];
        e.x = area.x + e.x;
        e.y = area.y + e.y;
        Core.rectangle(mROI, e.tl(), e.br(), new Scalar(25, 50, 0, 255));

        Rect eye_only_rectangle = new Rect((int) e.tl().x, (int) (e.tl().y + e.height * 0.4),
                (int) e.width, (int) (e.height * 0.6));

        // reduce ROI
        mROI = mGray.submat(eye_only_rectangle);
        Mat vyrez = mRgba.submat(eye_only_rectangle);

        // the darkest point of the ROI is taken as the iris position
        Core.MinMaxLocResult mmG = Core.minMaxLoc(mROI);

        // Draw pink circle on eyeball
        int radius = vyrez.height() / 2;
        // Core.circle(vyrez, mmG.minLoc, 2, new Scalar(0, 255, 0, 1), radius);
        // Core.circle(vyrez, mmG.minLoc, 2, new Scalar(255, 0, 255), 1);

        iris.x = mmG.minLoc.x + eye_only_rectangle.x;
        iris.y = mmG.minLoc.y + eye_only_rectangle.y;
        eye_template = new Rect((int) iris.x - size / 2, (int) iris.y - size / 2, size, size);

        // draw red rectangle around eyeball
        // Core.rectangle(mRgba, eye_template.tl(), eye_template.br(), new Scalar(255, 0, 0, 255), 2);

        eye = (mRgba.submat(eye_only_rectangle));
        template = (mGray.submat(eye_template)).clone();
        // return template;

        Mat eyeball_HSV = new Mat();
        Mat dest = new Mat();
        // Mat eye = new Mat();
        // eye = mRgba.submat(eye_only_rectangle);
        List<Mat> hsv_channel = new ArrayList<Mat>();

        // convert image to HSV
        Imgproc.cvtColor(eye, eyeball_HSV, Imgproc.COLOR_RGB2HSV, 0);

        // split HSV channels:
        // hsv_channel.get(0) is hue
        // hsv_channel.get(1) is saturation
        // hsv_channel.get(2) is value
        Core.split(eyeball_HSV, hsv_channel);

        try {
            hsv_channel.get(0).setTo(new Scalar(145));
            Log.v(TAG, "Got the Channel!");
        } catch (Exception ex) {
            ex.printStackTrace();
            Log.v(TAG, "Didn't get any channel");
        }

        Core.merge(hsv_channel, eyeball_HSV);
        Imgproc.cvtColor(eyeball_HSV, dest, Imgproc.COLOR_HSV2RGB);
        Imgproc.cvtColor(dest, eye, Imgproc.COLOR_RGB2RGBA);
    }
    return eye;
}
If you are willing to consider solutions other than Haar cascades, you can use facial landmark detection code. Facial landmark packages can give the location of the eyes in the image (usually the center of the eye and its left and right borders).
Examples of landmark detection packages:
STASM:
http://www.milbo.users.sonic.net/stasm/
Flandmark detector:
http://cmp.felk.cvut.cz/~uricamic/flandmark/
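If you would rather stay inside OpenCV, another cascade-free option is to estimate the pupil inside each detected eye rectangle with a Hough circle transform. This is only a rough sketch against the OpenCV 2.4 Java API used in the question (findPupil and all parameter values are illustrative, not tested):

private Point findPupil(Mat gray, Rect eyeArea) {
    Mat roi = gray.submat(eyeArea);
    Mat blurred = new Mat();
    Imgproc.GaussianBlur(roi, blurred, new Size(9, 9), 2, 2);

    Mat circles = new Mat();
    // radius limits assume the pupil spans roughly an eighth to a third
    // of the eye-region height
    Imgproc.HoughCircles(blurred, circles, Imgproc.CV_HOUGH_GRADIENT,
            1, roi.rows() / 4.0, 100, 20, roi.rows() / 8, roi.rows() / 3);

    if (circles.cols() > 0) {
        double[] c = circles.get(0, 0); // strongest circle: x, y, radius
        return new Point(eyeArea.x + c[0], eyeArea.y + c[1]);
    }
    return null; // nothing found; fall back to minMaxLoc as in the question
}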
This might be a simple question that has been addressed somewhere, but I can't find it; I hope someone can point me down the right path. The design resolution for my application is 800x480. To maintain the correct aspect ratio on devices with a higher resolution, I followed this post and managed to get the "black bars" (I used blue for testing) on both sides on a bigger screen (Nexus 7). However, it seems that the stage is not scaled to cover the screen. Please see the screenshot below: the blue colour is the Gdx.gl.glClearColor(0.5f, 1, 1, 1); the black rectangle (800x480) is the actual Sprite.
Link to image
I am not sure where I went wrong. Any help is much appreciated. Code below:
private SpriteBatch batch;
private Texture splashTexture;
private Sprite splashSp;
TextureRegion splashTr;
Stage stage;

@Override
public void create() {
    stage = new Stage();
    batch = new SpriteBatch();
    splashTexture = new Texture(Gdx.files.internal("img/splashTexture.png"));
    splashTexture.setFilter(TextureFilter.Linear, TextureFilter.Linear);
    TextureRegion splashTr = new TextureRegion(splashTexture, 0, 0, 800, 480);
    splashSp = new Sprite(splashTr);
    Gdx.app.log("myapp", "Creating game");
}

@Override
public void render() {
    Gdx.gl.glClearColor(0.5f, 1, 1, 1);
    Gdx.gl.glClear(GL10.GL_COLOR_BUFFER_BIT);
    stage.draw();
    batch.begin();
    batch.draw(splashSp, 0, 0);
    batch.end();
}

@Override
public void resize(int width, int height) {
    Gdx.app.log("myapp", "width:" + width + " height:" + height);
    Vector2 size = Scaling.fit.apply(800, 480, width, height);
    int viewportX = (int) (width - size.x) / 2;
    int viewportY = (int) (height - size.y) / 2;
    int viewportWidth = (int) size.x;
    int viewportHeight = (int) size.y;
    Gdx.app.log("myapp", "viewportWidth:" + viewportWidth + " viewportHeight:" + viewportHeight);
    Gdx.app.log("myapp", "viewportX:" + viewportX + " viewportY:" + viewportY);
    Gdx.gl.glViewport(viewportX, viewportY, viewportWidth, viewportHeight);
    stage.setViewport(800, 480, true);
    Gdx.app.log("myapp", "Resizing game");
}
You need to set up the camera and the stage.
First, declare a camera variable like this:

OrthographicCamera camera;

Then, in the create() method, do this:

camera = new OrthographicCamera();
camera.setToOrtho(false, 800, 480);
camera.update();
mystage = new Stage(800, 480, false);

And in the render() method, update the camera:

camera.update();

Worked fine for me.
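For completeness, here is a minimal sketch (against the older libGDX 0.9.x API that the question uses; splashSp, batch, and stage are the question's own fields) of how the camera then feeds the renderer:

@Override
public void render() {
    camera.update();
    batch.setProjectionMatrix(camera.combined); // draw in 800x480 world units
    Gdx.gl.glClearColor(0.5f, 1, 1, 1);
    Gdx.gl.glClear(GL10.GL_COLOR_BUFFER_BIT);
    batch.begin();
    batch.draw(splashSp, 0, 0);
    batch.end();
    stage.draw();
}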
I want to mask a Bitmap with a black-and-white alpha mask. My mask image is black and white: the BLACK area means TRANSPARENT and the WHITE area means OPAQUE.
What I need is:
When I use this mask image to mask any other image, the area of the resulting image should be TRANSPARENT where the corresponding area of the mask image is BLACK. Otherwise, the area of the resulting image should be OPAQUE.
I have attached the sample images. Please help me with this, guys.
Sample Image:
Sample Image for Masking
What I have tried so far:
The following methods work fine, but they are very slow. I need a solution that is more efficient in terms of speed and memory than these methods.
First Method:
int width = rgbDrawable.getWidth();
int height = rgbDrawable.getHeight();
if (width != alphaDrawable.getWidth() || height != alphaDrawable.getHeight()) {
    throw new IllegalStateException("image size mismatch!");
}

Bitmap destBitmap = Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_8888);
int[] pixels = new int[width];
int[] alpha = new int[width];

for (int y = 0; y < height; y++) {
    rgbDrawable.getPixels(pixels, 0, width, 0, y, width, 1);
    alphaDrawable.getPixels(alpha, 0, width, 0, y, width, 1);
    for (int x = 0; x < width; x++) {
        // Replace the alpha channel with the r value from the bitmap.
        pixels[x] = (pixels[x] & 0x00FFFFFF) | ((alpha[x] << 8) & 0xFF000000);
    }
    destBitmap.setPixels(pixels, 0, width, 0, y, width, 1);
}

alphaDrawable.recycle();
rgbDrawable.recycle();
return destBitmap;
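To make the bit arithmetic in the inner loop explicit, here is a worked example for one pixel (an editor's note, not part of the original method):

// ARGB int layout: 0xAARRGGBB. The mask's red byte becomes the photo's alpha:
//   mask pixel  0xFF808080 (mid grey) -> (0xFF808080 << 8) & 0xFF000000 = 0x80000000
//   photo pixel 0xFF123456            ->  0xFF123456 & 0x00FFFFFF       = 0x00123456
//   combined: 0x80000000 | 0x00123456 =  0x80123456 (photo at ~50% opacity)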
Second Method:
float[] nlf = new float[] {
    0, 0, 0, 0, 0,
    0, 0, 0, 0, 0,
    0, 0, 0, 0, 0,
    1, 0, 0, 0, 0};
ColorMatrix sRedToAlphaMatrix = new ColorMatrix(nlf);
ColorMatrixColorFilter sRedToAlphaFilter = new ColorMatrixColorFilter(sRedToAlphaMatrix);

// Load RGB data
Bitmap rgb = rgbDrawable;

// Prepare result Bitmap
Bitmap target = Bitmap.createBitmap(rgb.getWidth(), rgb.getHeight(), Bitmap.Config.ARGB_8888);
Canvas c = new Canvas(target);
c.setDensity(Bitmap.DENSITY_NONE);

// Draw RGB data on our result bitmap
c.drawBitmap(rgb, 0, 0, null);

// At this point, we don't need the RGB data any more: discard!
rgb.recycle();
rgb = null;

// Load alpha data
Bitmap alpha = alphaDrawable;

// Draw alpha data on our result bitmap
final Paint grayToAlpha = new Paint();
grayToAlpha.setColorFilter(sRedToAlphaFilter);
grayToAlpha.setXfermode(new PorterDuffXfermode(PorterDuff.Mode.DST_IN));
c.drawBitmap(alpha, 0, 0, grayToAlpha);

// Don't need the alpha data any more: discard!
alpha.recycle();
alpha = null;

return target;
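A note on why the matrix in the second method works (an editor's aside): Android's ColorMatrix is a 4x5 matrix whose rows are dotted with [R G B A 1] to produce the output channels, so:

// R' = 0, G' = 0, B' = 0            (three rows of zeros)
// A' = 1*R + 0*G + 0*B + 0*A + 0    (last row {1, 0, 0, 0, 0})
// -> a white mask pixel (R = 255) yields full alpha, black yields zero.
// PorterDuff.Mode.DST_IN then keeps the destination (the photo) only
// where this source alpha is set, producing the desired transparency.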
I am trying to convert a color image into grayscale using the average of the red, green, and blue channels, but it comes out with errors.
Here is my code:
imgWidth = myBitmap.getWidth();
imgHeight = myBitmap.getHeight();

for (int i = 0; i < imgWidth; i++) {
    for (int j = 0; j < imgHeight; j++) {
        int s = myBitmap.getPixel(i, j) / 3;
        myBitmap.setPixel(i, j, s);
    }
}
ImageView img = (ImageView)findViewById(R.id.image1);
img.setImageBitmap(myBitmap);
But when I run my application on the emulator, it force closes. Any idea?
I solved my problem using the following code. (Note that the output bitmap must be mutable: calling setPixel on an immutable bitmap, for example one decoded straight from resources, throws an IllegalStateException, which is one likely cause of the force close above.)

// src is the input bitmap; bmOut must be a mutable bitmap of the same size
int width = src.getWidth(), height = src.getHeight();
Bitmap bmOut = Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_8888);
int pixel, A, R, G, B;

for (int x = 0; x < width; ++x) {
    for (int y = 0; y < height; ++y) {
        // get one pixel color
        pixel = src.getPixel(x, y);

        // retrieve the color of all channels
        A = Color.alpha(pixel);
        R = Color.red(pixel);
        G = Color.green(pixel);
        B = Color.blue(pixel);

        // collapse the conversion to one single value (standard luma weights)
        R = G = B = (int) (0.299 * R + 0.587 * G + 0.114 * B);

        // set the new pixel color on the output bitmap
        bmOut.setPixel(x, y, Color.argb(A, R, G, B));
    }
}
You can also do this (note it only changes how the ImageView renders the bitmap; the bitmap data itself is unchanged):
ColorMatrix matrix = new ColorMatrix();
matrix.setSaturation(0);
imageview.setColorFilter(new ColorMatrixColorFilter(matrix));
Try the solution from this previous answer by leparlon:
public Bitmap toGrayscale(Bitmap bmpOriginal) {
    int width, height;
    height = bmpOriginal.getHeight();
    width = bmpOriginal.getWidth();

    Bitmap bmpGrayscale = Bitmap.createBitmap(width, height, Bitmap.Config.RGB_565);
    Canvas c = new Canvas(bmpGrayscale);
    Paint paint = new Paint();
    ColorMatrix cm = new ColorMatrix();
    cm.setSaturation(0);
    ColorMatrixColorFilter f = new ColorMatrixColorFilter(cm);
    paint.setColorFilter(f);
    c.drawBitmap(bmpOriginal, 0, 0, paint);
    return bmpGrayscale;
}
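A quick usage sketch (variable names are illustrative). Note that Bitmap.Config.RGB_565 halves the memory footprint compared to ARGB_8888 but drops the alpha channel, so switch to ARGB_8888 if the source has transparency:

Bitmap grey = toGrayscale(originalBitmap);
imageView.setImageBitmap(grey);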
Lalit has the most practical answer. However, since you wanted the resulting grey to be the average of the red, green, and blue channels, you should set up your matrix like so:
float oneThird = 1 / 3f;
float[] mat = new float[]{
    oneThird, oneThird, oneThird, 0, 0,
    oneThird, oneThird, oneThird, 0, 0,
    oneThird, oneThird, oneThird, 0, 0,
    0, 0, 0, 1, 0};
ColorMatrixColorFilter filter = new ColorMatrixColorFilter(mat);
paint.setColorFilter(filter);
c.drawBitmap(original, 0, 0, paint);
And finally, since I have faced the problem of converting an image to grayscale before: the most visually pleasing result is usually achieved not by taking the average, but by giving each colour a different weight according to its perceived brightness (these are approximately the standard Rec. 601 luma weights). I tend to use these values:
float[] mat = new float[]{
    0.3f, 0.59f, 0.11f, 0, 0,
    0.3f, 0.59f, 0.11f, 0, 0,
    0.3f, 0.59f, 0.11f, 0, 0,
    0, 0, 0, 1, 0};
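These weights drop into the same Canvas/Paint scaffold shown in the toGrayscale method above (a sketch, reusing the names from that answer):

ColorMatrix cm = new ColorMatrix(mat); // luma weights instead of setSaturation(0)
paint.setColorFilter(new ColorMatrixColorFilter(cm));
c.drawBitmap(bmpOriginal, 0, 0, paint);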