Changing multiple textures in scene - android

Following the examples from the AndEngine examples site, I've created my own project, but with more than one object. Let's say it is a chess board: I have an 8 by 8 grid of rectangles in 8 colors. Now I'm stuck, because if one rectangle with a specific color is set to false (I have a variable isClicked = false/true), all rectangles with the same color change as well.
How should I fix this problem?
I wrote the same code as the source from the link above. I store the colors in a single image which is then split into tiles.
Here are the pics:
not clicked colors
clicked colors
in game (none was clicked)
in game (one was clicked)
Here is my code; I'd appreciate any help.
public class Main extends SimpleBaseGameActivity {
private static final int SIZE = 50;
private static final int IMAGES_COUNT = 8;
private static int CAMERA_WIDTH = 400;
private static int CAMERA_HEIGHT = 600;
private ITextureRegion[] mColorsRegion = new ITextureRegion[8];
//
private BitmapTextureAtlas mColorsTextureAtlas;
private TiledTextureRegion mColorsTextureRegion;
private TextureRegion mBackgroundTextureRegion;
private BitmapTextureAtlas mBackgroundTextureAtlas;
private BitmapTextureAtlas mPanelTextureAtlas;
private TextureRegion mPanelTextureRegion;
@Override
public EngineOptions onCreateEngineOptions() {
final Camera camera = new Camera(0, 0, CAMERA_WIDTH, CAMERA_HEIGHT);
return new EngineOptions(true, ScreenOrientation.PORTRAIT_FIXED, new RatioResolutionPolicy(CAMERA_WIDTH, CAMERA_HEIGHT), camera);
}
@Override
protected void onCreateResources() {
BitmapTextureAtlasTextureRegionFactory.setAssetBasePath("gfx/");
// stuff with colors
this.mColorsTextureAtlas = new BitmapTextureAtlas(this.getTextureManager(), 320, 40, TextureOptions.BILINEAR);
this.mColorsTextureRegion = BitmapTextureAtlasTextureRegionFactory.createTiledFromAsset(this.mColorsTextureAtlas, this, "bar.png",
0, 0, 8, 1);
this.mColorsTextureAtlas.load();
// woohoo! stuff with background
this.mBackgroundTextureAtlas = new BitmapTextureAtlas(getTextureManager(), 400, 800, TextureOptions.BILINEAR_PREMULTIPLYALPHA);
this.mBackgroundTextureRegion = BitmapTextureAtlasTextureRegionFactory.createFromAsset(this.mBackgroundTextureAtlas, this,
"bg.jpg", 0, 0);
this.mBackgroundTextureAtlas.load();
// stuff with panel
this.mPanelTextureAtlas = new BitmapTextureAtlas(getTextureManager(), 400, 800, TextureOptions.BILINEAR_PREMULTIPLYALPHA);
this.mPanelTextureRegion = BitmapTextureAtlasTextureRegionFactory.createFromAsset(this.mPanelTextureAtlas, this, "panel.png", 0, 0);
this.mPanelTextureAtlas.load();
}
@Override
protected Scene onCreateScene() {
this.mEngine.registerUpdateHandler(new FPSLogger());
final Scene scene = new Scene();
scene.setBackground(new Background(5.F, 5.F, 5.F));
// show background
Sprite background = new Sprite(0, 0, mBackgroundTextureRegion, getVertexBufferObjectManager());
scene.attachChild(background);
// show panel
Sprite panel = new Sprite(0, 400, mPanelTextureRegion, getVertexBufferObjectManager());
scene.attachChild(panel);
// show minirectangles
// Init generating color numbers
MyColors colors = new MyColors(IMAGES_COUNT);
// Init minirectangles with randomed images
MiniRectangle[] minirectangle = new MiniRectangle[IMAGES_COUNT * IMAGES_COUNT];
for (int i = 0; i < IMAGES_COUNT; i++) {
for (int j = 0; j < IMAGES_COUNT; j++) {
final int index = i * IMAGES_COUNT + j;
minirectangle[index] = new MiniRectangle(j * SIZE + 2, i * SIZE + 2, SIZE - 4, SIZE - 4,
mColorsTextureRegion.getTextureRegion(colors.getRan(index)), getVertexBufferObjectManager()) {
@Override
public boolean onAreaTouched(TouchEvent pSceneTouchEvent, float pTouchAreaLocalX, float pTouchAreaLocalY) {
if (this.isVisible()) {
setClicked();
togglex(this.isClicked());
}
return true;
}
};
// further setting for minirectangle
minirectangle[index].setIndexX(j);
minirectangle[index].setIndexY(i);
minirectangle[index].setNumber(index);
minirectangle[index].setClicked(false);
minirectangle[index].setColorNumber(colors.getRan(index));
minirectangle[index].addColors(mColorsRegion);
// attach to scene and register touch arena
scene.attachChild(minirectangle[index]);
scene.registerTouchArea(minirectangle[index]);
}
}
return scene;
}
protected void togglex(boolean clicked) {
this.mColorsTextureAtlas.clearTextureAtlasSources();
boolean xclicked = clicked;
BitmapTextureAtlasTextureRegionFactory.createTiledFromAsset(this.mColorsTextureAtlas, this, xclicked ? "bar2.png"
: "bar.png", 0, 0, 8, 1);
}
}

You can't change the contents of the texture atlas - all of your texture regions are referencing it, so they are all changed.
Think of a texture atlas as a large array. The texture regions are like pointers to different regions in this array. So if you want to update your sprite's texture region, you should point it to another area in the texture. But instead, you are changing the contents of this large array, the texture, so all of the texture regions which reference it change, too.
Solution:
You should load both images to your atlas, and change the texture regions only.
Loading both images:
this.mColorsTextureAtlas = new BitmapTextureAtlas(this.getTextureManager(), 320, 80, TextureOptions.BILINEAR); //Note that I doubled the height of the texture.
this.mColorsNotClickedTextureRegion = BitmapTextureAtlasTextureRegionFactory.createTiledFromAsset(this.mColorsTextureAtlas, this, "bar.png", 0, 0, 8, 1);
this.mColorsClickedTextureRegion = BitmapTextureAtlasTextureRegionFactory.createTiledFromAsset(this.mColorsTextureAtlas, this, "bar2.png", 0, 40, 8, 1); //The position of bar2.png is not 0,0 because it'll override bar.png. If the height is 40, we position it 40 units below the position of bar.png.
this.mColorsTextureAtlas.load();
Now, when a rectangle is clicked, change its texture region.
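If your AndEngine version's Sprite doesn't expose a texture-region setter, one way to get per-rectangle switching is to pack both bars into a single 16-tile TiledTextureRegion and make MiniRectangle a TiledSprite, so a click only flips that one sprite's tile index. A minimal sketch, assuming a combined "bars_combined.png" (both bars stacked in one image; that name and the getColorNumber() accessor are illustrative):
// Tiles 0-7 = not-clicked colors, tiles 8-15 = clicked colors.
this.mColorsTextureRegion = BitmapTextureAtlasTextureRegionFactory.createTiledFromAsset(
        this.mColorsTextureAtlas, this, "bars_combined.png", 0, 0, 8, 2);
// Inside MiniRectangle, which now extends TiledSprite:
@Override
public boolean onAreaTouched(TouchEvent pSceneTouchEvent, float pTouchAreaLocalX, float pTouchAreaLocalY) {
    if (this.isVisible()) {
        setClicked(!isClicked());
        // Flip only this sprite's tile; +8 selects the "clicked" row.
        setCurrentTileIndex(isClicked() ? getColorNumber() + 8 : getColorNumber());
    }
    return true;
}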

Related

Drawing many actors takes long time in scene2d

I have a problem with drawing many actors: it takes a long time when testing with the desktop project and doesn't work at all on my Android device.
I have a play button that, when clicked, should show 100 levels for the player to choose from.
Here is my code:
stage = new Stage(new ScalingViewport(Scaling.fill, 800, 1280));
Gdx.input.setInputProcessor(stage);
skin = new Skin(Gdx.files.internal("data/uiskin.json"));
Image play = new Image(new Texture(Gdx.files.internal("play.png")));
stage.addActor(play);
play.addListener(new ClickListener() {
@Override
public void clicked(InputEvent event, float x, float y) {
Table container = new Table();
stage.addActor(container);
container.setFillParent(true);
Table table = new Table();
Puzzle[] puzzles = new Puzzle[100];
for (int i=0; i<puzzles.length; i++) {
table.padTop(60);
table.padBottom(60);
puzzles[i] = new Puzzle(i, false);
if (i%6 == 0) table.row();
table.add(puzzles[i]).pad(5);
}
ScrollPane scroll = new ScrollPane(table, skin);
container.add(scroll).expand().fill().colspan(4);
}
});
Here is the Puzzle class, which simply shows a rectangle with the puzzle number; if the puzzle is solved its color should be blue, and if not the color should be white.
private class Puzzle extends Actor {
TextureRegion rect;
BitmapFont font;
float w,h;
boolean solved;
int drawNum;
public Puzzle(int number, boolean solved) {
rect = new TextureRegion(new Texture(Gdx.files.internal("rect.png")));
setSize(rect.getRegionWidth(), rect.getRegionHeight());
this.drawNum = number + 1;
this.solved = solved;
if (solved) font = HelpingMethods.createFont(38, Color.GOLD);
else font = HelpingMethods.createFont(38, Color.DARK_GRAY);
GlyphLayout layout = new GlyphLayout();
layout.setText(font, "" + this.drawNum);
w = layout.width;
h = layout.height;
}
@Override
public void draw(Batch batch, float parentAlpha) {
Color color = getColor();
if (!solved) batch.setColor(1, 1, 1, color.a * parentAlpha);
else batch.setColor(0, 0, 1, color.a * parentAlpha);
font.setColor(color.r, color.g, color.b, color.a * parentAlpha);
batch.draw(rect, getX(), getY());
font.draw(batch, "" + drawNum, getX() + getWidth()/2 - w/2,
getY() + h + getHeight()/2 - h/2);
}
}
Here is the createFont() method:
public static BitmapFont createFont(int size, Color color) {
FreeTypeFontGenerator generator = new FreeTypeFontGenerator
(Gdx.files.internal("fonts/font.ttf"));
FreeTypeFontGenerator.FreeTypeFontParameter parameter =
new FreeTypeFontGenerator.FreeTypeFontParameter();
parameter.size = size;
parameter.color = color;
parameter.minFilter = Texture.TextureFilter.Linear;
parameter.magFilter = Texture.TextureFilter.Linear;
BitmapFont font = generator.generateFont(parameter);
return font;
}
Any solutions?
The problem is that whenever play is clicked, you create 100 new Puzzle objects. In the Puzzle constructor you generate a BitmapFont with FreeTypeFontGenerator, which is an expensive operation, and you do that 100 times. Instead, you should generate your BitmapFont objects once (for example, when you initialize your game) and pass a reference to them to every Puzzle object. Also reuse the Table container and GlyphLayout objects.
In game development in general you should avoid creating new objects where possible and reuse them instead. The reason is not only that allocation can be slow, but also that, as in your case, creating new Puzzle objects instead of reusing the old ones creates a lot of work for the garbage collector, which can cause stutters.
Don't forget to dispose of the BitmapFont objects when they are no longer needed.
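A minimal sketch of that change, reusing the question's own createFont() helper (where exactly you keep the shared fields is up to you; the extra constructor parameters are an assumption):
// Created once, e.g. in create() or show():
BitmapFont solvedFont = HelpingMethods.createFont(38, Color.GOLD);
BitmapFont unsolvedFont = HelpingMethods.createFont(38, Color.DARK_GRAY);
// The rect texture should likewise be loaded once, not once per Puzzle:
TextureRegion rectRegion = new TextureRegion(new Texture(Gdx.files.internal("rect.png")));
...
puzzles[i] = new Puzzle(i, solved, rectRegion, solved ? solvedFont : unsolvedFont);
...
public Puzzle(int number, boolean solved, TextureRegion rect, BitmapFont font) {
    this.rect = rect; // shared, loaded once
    this.font = font; // shared, generated once
    setSize(rect.getRegionWidth(), rect.getRegionHeight());
    this.drawNum = number + 1;
    this.solved = solved;
    // The GlyphLayout can also be one shared instance instead of a new one per Puzzle.
}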

trying to develop typing game using libgdx in android

I want to create multiple independent Texture objects with key words on them falling from top to bottom, and a keyboard displayed at the bottom for typing the letter on a texture object to capture it, with new objects generated repeatedly at a given time interval. I have gone through the wiki's code for help, but when I try to display the words on the texture objects, the letter changes on every Texture object in the Batch.
package com.example.jtech.bubbletypinggame;
import com.badlogic.gdx.ApplicationAdapter;
import com.badlogic.gdx.Gdx;
import com.badlogic.gdx.audio.Music;
import com.badlogic.gdx.audio.Sound;
import com.badlogic.gdx.graphics.Color;
import com.badlogic.gdx.graphics.GL20;
import com.badlogic.gdx.graphics.OrthographicCamera;
import com.badlogic.gdx.graphics.Texture;
import com.badlogic.gdx.graphics.g2d.BitmapFont;
import com.badlogic.gdx.graphics.g2d.Sprite;
import com.badlogic.gdx.graphics.g2d.SpriteBatch;
import com.badlogic.gdx.math.MathUtils;
import com.badlogic.gdx.math.Rectangle;
import com.badlogic.gdx.utils.Array;
import com.badlogic.gdx.utils.TimeUtils;
import java.util.Iterator;
public class BubbleTypingGame extends ApplicationAdapter {
private Texture [] dropImage = new Texture[5];
private Texture bucketImage;
private Texture background;
private Sound dropSound;
private Music rainMusic;
private SpriteBatch batch;
private OrthographicCamera camera;
private Rectangle bucket;
private Array<CustomRectangle> raindrops;
private Array<String> rainKeyWords;
private Sprite mySprite;
CustomRectangle raindrop;
private long lastDropTime;
String[] chars = {"a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z"};
int i = 0;
String captionString;
private BitmapFont font;
int j = 0;
int x = 10, y = 740;
@Override
public void create() {
font = new BitmapFont();
font.setColor(Color.RED);
//load backgroud image for the game
background = new Texture("background_nebula.jpg");
// load the images for the droplet and the bucket, 64x64 pixels each
dropImage [0] = new Texture(Gdx.files.internal("balloon_a.png"));
dropImage [1] = new Texture(Gdx.files.internal("balloon_b.png"));
dropImage [2] = new Texture(Gdx.files.internal("balloon_c.png"));
dropImage [3] = new Texture(Gdx.files.internal("balloon_d.png"));
dropImage [4] = new Texture(Gdx.files.internal("balloon_e.png"));
// 25JANwORKING//
//25JANWORKING//
bucketImage = new Texture(Gdx.files.internal("bucket.png"));
// load the drop sound effect and the rain background "music"
dropSound = Gdx.audio.newSound(Gdx.files.internal("drop.mp3"));
rainMusic = Gdx.audio.newMusic(Gdx.files.internal("rain.mp3"));
// start the playback of the background music immediately
rainMusic.setLooping(true);
rainMusic.play();
// create the camera and the SpriteBatch
camera = new OrthographicCamera();
camera.setToOrtho(false, 800, 480);
batch = new SpriteBatch();
// create a Rectangle to logically represent the bucket
bucket = new Rectangle();
bucket.x = 800 / 2 - 64 / 2; // center the bucket horizontally
bucket.y = 20; // bottom left corner of the bucket is 20 pixels above the bottom screen edge
bucket.width = 64;
bucket.height = 64;
// create the raindrops array and spawn the first raindrop
raindrops = new Array<CustomRectangle>();
rainKeyWords = new Array<String>();
spawnRaindrop();
}
private void spawnRaindrop() {
raindrop = new CustomRectangle();
raindrop.x = MathUtils.random(0, 800-100);
raindrop.y = 480;
raindrop.width = 100;
raindrop.height = 50;
raindrop.keyWord = "k";
raindrops.add(raindrop);
lastDropTime = TimeUtils.nanoTime();
if(i==chars.length){
i=0;
}
captionString = chars[i];
rainKeyWords.add(captionString);
i++;
}
private void displayKeyWord(){
if(i==chars.length){
i=0;
}
captionString = chars[i];
rainKeyWords.add(captionString);
lastDropTime = TimeUtils.nanoTime();
i++;
}
@Override
public void render() {
// clear the screen with a dark blue color. The
// arguments to glClearColor are the red, green
// blue and alpha component in the range [0,1]
// of the color to be used to clear the screen.
Gdx.gl.glClearColor(0, 0, 0.2f, 1);
Gdx.gl.glClear(GL20.GL_COLOR_BUFFER_BIT);
// tell the camera to update its matrices.
camera.update();
// tell the SpriteBatch to render in the
// coordinate system specified by the camera.
batch.setProjectionMatrix(camera.combined);
// begin a new batch and draw the bucket and
// all drops
batch.begin();
if(j==5){
j=0;
}
batch.draw(background,0,0);
batch.draw(bucketImage, bucket.x, bucket.y);
batch.draw(dropImage[j], raindrop.x, 400);
/*for(Rectangle raindrop: raindrops) {
batch.draw(dropImage[i], raindrop.x, raindrop.y);
//font.draw(batch, captionString, raindrop.x+45, raindrop.y+30);
}*/
batch.end();
/*// process user input
if(Gdx.input.isTouched()) {
Vector3 touchPos = new Vector3();
touchPos.set(Gdx.input.getX(), Gdx.input.getY(), 0);
camera.unproject(touchPos);
bucket.x = touchPos.x - 64 / 2;
}
if(Gdx.input.isKeyPressed(Keys.LEFT)) bucket.x -= 200 * Gdx.graphics.getDeltaTime();
if(Gdx.input.isKeyPressed(Keys.RIGHT)) bucket.x += 200 * Gdx.graphics.getDeltaTime();
// make sure the bucket stays within the screen bounds
if(bucket.x < 0) bucket.x = 0;
if(bucket.x > 800 - 64) bucket.x = 800 - 64;
// check if we need to create a new raindrop*/
if(TimeUtils.nanoTime() - lastDropTime > 1000000000) spawnRaindrop();
// move the raindrops, remove any that are beneath the bottom edge of
// the screen or that hit the bucket. In the later case we play back
// a sound effect as well.
Iterator<CustomRectangle> iter = raindrops.iterator();
while(iter.hasNext()) {
Rectangle raindrop = iter.next();
raindrop.y -= 100 * Gdx.graphics.getDeltaTime();
if(raindrop.y + 64 < 0) iter.remove();
if(raindrop.overlaps(bucket)) {
dropSound.play();
iter.remove();
}
}
}
@Override
public void dispose() {
// dispose of all the native resources
dropImage[j].dispose();
bucketImage.dispose();
dropSound.dispose();
rainMusic.dispose();
batch.dispose();
}
}
Now, the code where it looks like you draw the letters is commented out, but I assume this is what you intend to use.
for(Rectangle raindrop: raindrops) {
batch.draw(dropImage[i], raindrop.x, raindrop.y);
font.draw(batch, captionString, raindrop.x+45, raindrop.y+30);
}
What this means: for each object in the raindrops array, you draw captionString. So if you have 20 objects in raindrops and captionString = "a", then you will draw the letter "a" 20 times.
You are using a single string to represent 20 strings. A String can only hold a single value, so the last value you give captionString is the only value you can display using it, and you don't change the value in the loop.
Every raindrop object needs to have its own string value, and that value is the one you need to draw, not captionString.
[edit]
Looks like you already have this in place. Just use the right string.
Try:
for(CustomRectangle raindrop: raindrops) { // iterate as CustomRectangle so keyWord is accessible
batch.draw(dropImage[i], raindrop.x, raindrop.y);
font.draw(batch, raindrop.keyWord, raindrop.x+45, raindrop.y+30);
}
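By the same logic, spawnRaindrop() should give each new drop its own letter (and its own image index, since dropImage[i] above suffers from the same shared-index problem) instead of the fixed "k". A sketch, assuming CustomRectangle can carry an extra imageIndex field:
private void spawnRaindrop() {
    raindrop = new CustomRectangle();
    raindrop.x = MathUtils.random(0, 800 - 100);
    raindrop.y = 480;
    raindrop.width = 100;
    raindrop.height = 50;
    if (i == chars.length) i = 0;
    raindrop.keyWord = chars[i];               // each drop keeps its own letter
    raindrop.imageIndex = MathUtils.random(4); // hypothetical field: each drop keeps its own balloon (0..4)
    i++;
    raindrops.add(raindrop);
    lastDropTime = TimeUtils.nanoTime();
}
Then draw with dropImage[raindrop.imageIndex] in the loop above.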

Running OpenCV eye detection from within Android service

I want to run eye detection with OpenCV4Android from an Android background service. I have a piece of code that runs well, but as an Activity, not a service. I understand that the Android camera must have a preview to open, so I have created a preview (a small one, to make it look hidden, since I want the processing to happen in the background) and started the camera for recording. The camera starts successfully, but OpenCV doesn't detect eyes or faces; it only loads the XML classifiers. I expected the OpenCV callbacks like onCameraViewStarted and onCameraFrame to be called when I open the camera for recording, but they weren't.
Here is the code:
public class BackgroundService extends Service implements SurfaceHolder.Callback, CameraBridgeViewBase.CvCameraViewListener2 {
private static final String TAG = "OCVSample::Activity";
private static final Scalar FACE_RECT_COLOR = new Scalar(0, 255, 0, 255);
public static final int JAVA_DETECTOR = 0;
private static final int TM_SQDIFF = 0;
private static final int TM_SQDIFF_NORMED = 1;
private static final int TM_CCOEFF = 2;
private static final int TM_CCOEFF_NORMED = 3;
private static final int TM_CCORR = 4;
private static final int TM_CCORR_NORMED = 5;
private int learn_frames = 0;
private Mat templateR;//right eye template
private Mat templateL; // left eye template
int method = 0;
private MenuItem mItemFace50;
private MenuItem mItemFace40;
private MenuItem mItemFace30;
private MenuItem mItemFace20;
private MenuItem mItemType;
private Mat mRgba;
private Mat mGray;
// matrix for zooming
private Mat mZoomWindow;
private Mat mZoomWindow2;
private File mCascadeFile;
private CascadeClassifier mJavaDetector;
private CascadeClassifier mJavaDetectorEye;
private int mDetectorType = JAVA_DETECTOR;
private String[] mDetectorName;
private float mRelativeFaceSize = 0.2f;
private int mAbsoluteFaceSize = 0;
private CameraBridgeViewBase mOpenCvCameraView;
private SeekBar mMethodSeekbar;
private TextView mValue;
double xCenter = -1;
double yCenter = -1;
MediaRecorder mediaRecorder;
// Binder given to clients
private final IBinder mBinder = new LocalBinder();
public class LocalBinder extends Binder {
BackgroundService getService() {
// Return this instance of this service so clients can call public methods
return BackgroundService.this;
}
}//end inner class that returns an instance of the service.
@Override
public IBinder onBind(Intent intent) {
return mBinder;
}//end onBind.
private WindowManager windowManager;
private SurfaceView surfaceView;
private Camera camera = null;
@Override
public void onCreate() {
// Start foreground service to avoid unexpected kill
Notification notification = new Notification.Builder(this)
.setContentTitle("Background Video Recorder")
.setContentText("")
.setSmallIcon(R.drawable.vecsat_logo)
.build();
startForeground(1234, notification);
// Create new SurfaceView, set its size to 1x1, move it to the top left corner and set this service as a callback
windowManager = (WindowManager) this.getSystemService(Context.WINDOW_SERVICE);
surfaceView = new SurfaceView(this);
WindowManager.LayoutParams layoutParams = new WindowManager.LayoutParams(
100, 100,
WindowManager.LayoutParams.TYPE_SYSTEM_OVERLAY,
WindowManager.LayoutParams.FLAG_WATCH_OUTSIDE_TOUCH,
PixelFormat.TRANSLUCENT
);
Log.i(TAG, "100 x 100 executed");
layoutParams.gravity = Gravity.LEFT | Gravity.TOP;
windowManager.addView(surfaceView, layoutParams);
surfaceView.getHolder().addCallback(this);
//constructor:
mDetectorName = new String[2];// contains 3 positions..
mDetectorName[JAVA_DETECTOR] = "Java"; //let the detector be of type java detector, specify that in the JAVA_DETECTOR index.
Log.i(TAG, "Instantiated new " + ((Object) this).getClass().getSimpleName());
OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION_2_4_11, this,
mLoaderCallback); //once the application is resumed reload the library.
}
// Method called right after Surface created (initializing and starting MediaRecorder)
@Override
public void surfaceCreated(SurfaceHolder surfaceHolder) {
Log.i(TAG, "surfaceCreated method");
camera = Camera.open(1);
camera.unlock();
mediaRecorder = new MediaRecorder();
mediaRecorder.setPreviewDisplay(surfaceHolder.getSurface());
mediaRecorder.setCamera(camera);
mediaRecorder.setAudioSource(MediaRecorder.AudioSource.CAMCORDER);
mediaRecorder.setVideoSource(MediaRecorder.VideoSource.CAMERA);
mediaRecorder.setProfile(CamcorderProfile.get(CamcorderProfile.QUALITY_HIGH));
mediaRecorder.setOutputFile(
Environment.getExternalStorageDirectory()+"/"+
DateFormat.format("yyyy-MM-dd_kk-mm-ss", new Date().getTime())+
".mp4"
);
try { mediaRecorder.prepare(); } catch (Exception e) {}
mediaRecorder.start();
}
// Stop recording and remove SurfaceView
@Override
public void onDestroy() {
Log.i(TAG, "surfaceDestroyed method");
camera.lock();
camera.release();
windowManager.removeView(surfaceView);
}
@Override
public void surfaceChanged(SurfaceHolder surfaceHolder, int format, int width, int height) {}
@Override
public void surfaceDestroyed(SurfaceHolder surfaceHolder) {
}
private BaseLoaderCallback mLoaderCallback = new BaseLoaderCallback(this) {
@Override
public void onManagerConnected(int status) {
//int status, status of initialization, sucess or not..
//now make a switch for the status cases: under success case do the work, load the classifiers..
switch (status) {
case LoaderCallbackInterface.SUCCESS: {
Log.i(TAG, "OpenCV loaded successfully"); // was loaded and initialized successfully..
try {
// load cascade file from application resources
InputStream is = getResources().openRawResource(
R.raw.lbpcascade_frontalface); // get the face classifier from the resource.
File cascadeDir = getDir("cascade", Context.MODE_PRIVATE);
mCascadeFile = new File(cascadeDir,
"lbpcascade_frontalface.xml"); // create a directory inside your app, and a file inside it to store the
FileOutputStream os = new FileOutputStream(mCascadeFile); // prepare an output stream that will write the classifier's code on the file in the app.
//read and write
byte[] buffer = new byte[4096];
int bytesRead;
while ((bytesRead = is.read(buffer)) != -1) {
os.write(buffer, 0, bytesRead);
}
is.close();
os.close();
// --------------------------------- load left eye
// classificator -----------------------------------
InputStream iser = getResources().openRawResource(
R.raw.haarcascade_lefteye_2splits);
File cascadeDirER = getDir("cascadeER",
Context.MODE_PRIVATE);
File cascadeFileER = new File(cascadeDirER,
"haarcascade_eye_right.xml");
FileOutputStream oser = new FileOutputStream(cascadeFileER);
byte[] bufferER = new byte[4096];
int bytesReadER;
while ((bytesReadER = iser.read(bufferER)) != -1) {
oser.write(bufferER, 0, bytesReadER);
}
iser.close();
oser.close();
//check if you can load the classifier.
mJavaDetector = new CascadeClassifier(
mCascadeFile.getAbsolutePath());
if (mJavaDetector.empty()) {
Toast.makeText(getApplicationContext(), "face classifier error", Toast.LENGTH_LONG).show();
Log.e(TAG, "Failed to load cascade face classifier");
mJavaDetector = null;
} else
Log.i(TAG, "Loaded cascade classifier from "
+ mCascadeFile.getAbsolutePath());
mJavaDetectorEye = new CascadeClassifier(
cascadeFileER.getAbsolutePath());
if (mJavaDetectorEye.empty()) {
Toast.makeText(getApplicationContext(), "eye classifer error", Toast.LENGTH_LONG).show();
Log.e(TAG, "Failed to load cascade eye classifier");
mJavaDetectorEye = null;
} else
Log.i(TAG, "Loaded cascade classifier from "
+ mCascadeFile.getAbsolutePath());
cascadeDir.delete();
} catch (IOException e) {
e.printStackTrace();
Log.e(TAG, "Failed to load cascade. Exception thrown: " + e);
}
//Whether classifiers are opened or not, open the front camera.
// mOpenCvCameraView.setCameraIndex(1);
//mOpenCvCameraView.enableFpsMeter(); // What is this? This method enables label with fps value on the screen
// mOpenCvCameraView.enableView(); // What? This means enable connecting to the camera.
}
break;
default: {
//When the loading of the library has failed
super.onManagerConnected(status);
}
break;
}
}
}; // end the class.
public void onCameraViewStarted(int width, int height) {
Log.i(TAG, "onCameraViewStarted method");
//onCameraViewStarted callback will be delivered only after enableView is called and surface is available
//This method is a member of CvCameraViewListener2, and we must implement it.
mGray = new Mat(); //initialize new gray scale matrix to contain the img pixels.
mRgba = new Mat(); //initialize new rgb matrix to contain the img pixels.
}
public void onCameraViewStopped() {
Log.i(TAG, "onCameraViewStopped method");
//Release the allocated memory
//release the matrix, this releases the allocated space in memory, since mat contains a header that contains img info and a pointer that points to the matrix in the memory.
mGray.release();
mRgba.release();
mZoomWindow.release();
mZoomWindow2.release();
}
public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
Log.i(TAG, "onCameraFrame method");
//This method is a member of CvCameraViewListener2, and we must implement it.
// In this method we get every frame from the camera and process it in order to track the objects.
//inputFrame is the received frame from the camera.
mRgba = inputFrame.rgba(); //convert the frame to rgba scale, then assign this value to the rgba Mat img matrix.
mGray = inputFrame.gray(); //convert the frame to gray scale, then assign this value to the gray Mat img matrix.
//Shall we consider Flipping the camera img horizontally.
if (mAbsoluteFaceSize == 0) {
int height = mGray.rows(); //get the height of the captured frame stored in mgray Mat array (rows), why gray to rgb???
if (Math.round(height * mRelativeFaceSize) > 0) { //multiply that height with 0.2... Is the result > 0?
//if yes this indicates that there is a frame that was captured (it's height is not zero), so set the face size to
// Math.round(height * mRelativeFaceSize)
mAbsoluteFaceSize = Math.round(height * mRelativeFaceSize);
}
}
if (mZoomWindow == null || mZoomWindow2 == null)
CreateAuxiliaryMats();
MatOfRect faces = new MatOfRect(); //a matrix that will contain rectangles around the face (including the faces inside the rectangles), it will be filled by detectMultiScale method.
//if mJavaDetector is not null, this contains the face classifier that we have loaded previously
if (mJavaDetector != null)
//if not null, use this classifier to detect faces.
mJavaDetector.detectMultiScale(mGray, faces, 1.1, 2,
2, // TODO: objdetect.CV_HAAR_SCALE_IMAGE
new Size(mAbsoluteFaceSize, mAbsoluteFaceSize),
new Size());
//in th function detectMultiScale above,
// faces is the array that will contain the rectangles around the detected face.
// the 3rd param: specifies how much the image size is reduced at each image scale.
//4th param: Parameter specifying how many neighbors each candidate rectangle should have to retain it.
//5: :)
//6: Minimum possible object size. Objects smaller than that are ignored (if you set a very small minimum value, your app will run heavily).
//7: Maximum possible object size. Objects larger than that are ignored. Both minimum and maximum should be set carefully to avoid slow running of the app.
Rect[] facesArray = faces.toArray(); //array of faces
for (int i = 0; i < facesArray.length; i++) {
/* Imgproc.rectangle(mRgba, facesArray[i].tl(), facesArray[i].br(),
FACE_RECT_COLOR, 3);*/
//Now draw rectangles around the obtained faces, and a circle at each rectangle center.
//mrgba in the line bellow means that the rectangle should be drawn on the colored img.
//facesArray[i].tl() returns a Point: Template class for 2D points specified by its coordinates x and y -> Template class
// facesArray[i].x and facesArray[i].y are the x and y coords of the top left top corner.
Core.rectangle(mRgba, facesArray[i].tl(), facesArray[i].br(), FACE_RECT_COLOR, 3);
//calculate the center in x and y coords.
xCenter = (facesArray[i].x + facesArray[i].width + facesArray[i].x) / 2;
yCenter = (facesArray[i].y + facesArray[i].y + facesArray[i].height) / 2;
Point center = new Point(xCenter, yCenter); //store the center.
//Imgproc.circle(mRgba, center, 10, new Scalar(255, 0, 0, 255), 3);
Core.circle(mRgba, center, 10, new Scalar(255, 0, 0, 255), 3); //draw a red circle at the center of the face rectangle.
/*Imgproc.putText(mRgba, "[" + center.x + "," + center.y + "]",
new Point(center.x + 20, center.y + 20),
Core.FONT_HERSHEY_SIMPLEX, 0.7, new Scalar(255, 255, 255,
255));*/
//write the coordinates of the rectangle center:
Core.putText(mRgba, "[" + center.x + "," + center.y + "]",
new Point(center.x + 20, center.y + 20) , // this is the bottom left corner of the text string
Core.FONT_HERSHEY_SIMPLEX, 0.7, new Scalar(255, 255, 255,
255));
Rect r = facesArray[i]; //get the current face, we want to use it to detect the eyes inside it.
// compute the eye area
//Rect (x, y, w, h)
Rect eyearea = new Rect(r.x + r.width / 8,
(int) (r.y + (r.height / 4.5)), r.width - 2 * r.width / 8,
(int) (r.height / 3.0));
// split it
Rect eyearea_right = new Rect(r.x + r.width / 16,
(int) (r.y + (r.height / 4.5)),
(r.width - 2 * r.width / 16) / 2, (int) (r.height / 3.0));
Rect eyearea_left = new Rect(r.x + r.width / 16
+ (r.width - 2 * r.width / 16) / 2,
(int) (r.y + (r.height / 4.5)),
(r.width - 2 * r.width / 16) / 2, (int) (r.height / 3.0));
// draw the area - mGray is working grayscale mat, if you want to
// see area in rgb preview, change mGray to mRgba
/*Imgproc.rectangle(mRgba, eyearea_left.tl(), eyearea_left.br(),
new Scalar(255, 0, 0, 255), 2);
Imgproc.rectangle(mRgba, eyearea_right.tl(), eyearea_right.br(),
new Scalar(255, 0, 0, 255), 2);*/
Core.rectangle(mRgba, eyearea_left.tl(), eyearea_left.br(),
new Scalar(255, 0, 0, 255), 2);
Core.rectangle(mRgba, eyearea_right.tl(), eyearea_right.br(),
new Scalar(255, 0, 0, 255), 2);
if (learn_frames < 5) {
// no learned frames -> Learn templates from at least 5 frames..
templateR = get_template(mJavaDetectorEye, eyearea_right, 24);
templateL = get_template(mJavaDetectorEye, eyearea_left, 24);
learn_frames++;
} else {
// Learning finished, use the new templates for template
// matching
match_eye(eyearea_right, templateR, method);
match_eye(eyearea_left, templateL, method);
}
// cut eye areas and put them to zoom windows
Imgproc.resize(mRgba.submat(eyearea_left), mZoomWindow2,
mZoomWindow2.size());
Imgproc.resize(mRgba.submat(eyearea_right), mZoomWindow,
mZoomWindow.size());
}
return mRgba;
}
private void setMinFaceSize(float faceSize) {
mRelativeFaceSize = faceSize;
mAbsoluteFaceSize = 0;
}
private void CreateAuxiliaryMats() {
if (mGray.empty())
return;
int rows = mGray.rows();
int cols = mGray.cols();
if (mZoomWindow == null) {
mZoomWindow = mRgba.submat(rows / 2 + rows / 10, rows, cols / 2
+ cols / 10, cols);
mZoomWindow2 = mRgba.submat(0, rows / 2 - rows / 10, cols / 2
+ cols / 10, cols);
}
}
private void match_eye(Rect area, Mat mTemplate, int type) {
Point matchLoc;
Mat mROI = mGray.submat(area);
int result_cols = mROI.cols() - mTemplate.cols() + 1;
int result_rows = mROI.rows() - mTemplate.rows() + 1;
// Check for bad template size
if (mTemplate.cols() == 0 || mTemplate.rows() == 0) {
return ;
}
Mat mResult = new Mat(result_cols, result_rows, CvType.CV_8U);
switch (type) {
case TM_SQDIFF:
Imgproc.matchTemplate(mROI, mTemplate, mResult, Imgproc.TM_SQDIFF);
break;
case TM_SQDIFF_NORMED:
Imgproc.matchTemplate(mROI, mTemplate, mResult,
Imgproc.TM_SQDIFF_NORMED);
break;
case TM_CCOEFF:
Imgproc.matchTemplate(mROI, mTemplate, mResult, Imgproc.TM_CCOEFF);
break;
case TM_CCOEFF_NORMED:
Imgproc.matchTemplate(mROI, mTemplate, mResult,
Imgproc.TM_CCOEFF_NORMED);
break;
case TM_CCORR:
Imgproc.matchTemplate(mROI, mTemplate, mResult, Imgproc.TM_CCORR);
break;
case TM_CCORR_NORMED:
Imgproc.matchTemplate(mROI, mTemplate, mResult,
Imgproc.TM_CCORR_NORMED);
break;
}
Core.MinMaxLocResult mmres = Core.minMaxLoc(mResult);
// there is difference in matching methods - best match is max/min value
if (type == TM_SQDIFF || type == TM_SQDIFF_NORMED) {
matchLoc = mmres.minLoc;
} else {
matchLoc = mmres.maxLoc;
}
Point matchLoc_tx = new Point(matchLoc.x + area.x, matchLoc.y + area.y);
Point matchLoc_ty = new Point(matchLoc.x + mTemplate.cols() + area.x,
matchLoc.y + mTemplate.rows() + area.y);
/*Imgproc.rectangle(mRgba, matchLoc_tx, matchLoc_ty, new Scalar(255, 255, 0,
255));*/
Core.rectangle(mRgba, matchLoc_tx, matchLoc_ty, new Scalar(255, 255, 0,
255));
Rect rec = new Rect(matchLoc_tx,matchLoc_ty);
}
private Mat get_template(CascadeClassifier clasificator, Rect area, int size) {
Mat template = new Mat(); //prepare a Mat which will serve as a template for eyes.
Mat mROI = mGray.submat(area); //detect only region of interest which is represented by the area. So, from the total Mat get only the submat that represent roi.
MatOfRect eyes = new MatOfRect(); //will be around eyes (including eyes), this will be filled by detectMultiScale
Point iris = new Point(); //to identify iris.
Rect eye_template = new Rect();
clasificator.detectMultiScale(mROI, eyes, 1.15, 2,
Objdetect.CASCADE_FIND_BIGGEST_OBJECT
| Objdetect.CASCADE_SCALE_IMAGE, new Size(30, 30),
new Size());
Rect[] eyesArray = eyes.toArray(); //get the detected eyes
for (int i = 0; i < eyesArray.length;) {
Rect e = eyesArray[i];
e.x = area.x + e.x; //the starting x coordinates of the rect (area) around the eye + the area
e.y = area.y + e.y;
Rect eye_only_rectangle = new Rect((int) e.tl().x,
(int) (e.tl().y + e.height * 0.4), (int) e.width,
(int) (e.height * 0.6));
mROI = mGray.submat(eye_only_rectangle);
Mat vyrez = mRgba.submat(eye_only_rectangle);
Core.MinMaxLocResult mmG = Core.minMaxLoc(mROI);
// Imgproc.circle(vyrez, mmG.minLoc, 2, new Scalar(255, 255, 255, 255), 2);
Core.circle(vyrez, mmG.minLoc, 2, new Scalar(255, 255, 255, 255), 2);
iris.x = mmG.minLoc.x + eye_only_rectangle.x;
iris.y = mmG.minLoc.y + eye_only_rectangle.y;
eye_template = new Rect((int) iris.x - size / 2, (int) iris.y
- size / 2, size, size);
/*Imgproc.rectangle(mRgba, eye_template.tl(), eye_template.br(),
new Scalar(255, 0, 0, 255), 2);*/
Core.rectangle(mRgba, eye_template.tl(), eye_template.br(),
new Scalar(255, 0, 0, 255), 2);
template = (mGray.submat(eye_template)).clone();
return template;
}
return template;
}
public void onRecreateClick(View v)
{
learn_frames = 0;
}
}
Notice that the camera opens successfully for recording and the XML files are loaded, but nothing happens after that. I made the window size 100 x 100 just for testing purposes; I know it should be 1 x 1.
Can anyone please tell me how to solve this problem? How can I run the OpenCV camera for face and eye tracking from a background service?
I tried to get the OpenCV camera working in a service as you are doing, but I was unable to get either the onCameraFrame or the onCameraViewStarted callback, which meant that the camera was not getting initialized. After a bunch of tries:
Setting the preview to INVISIBLE/GONE -> not working
Setting the preview size to 1x1, or to a size respecting the camera's 4:3 aspect ratio -> not working
Setting the preview outside the screen -> not working
I found out that the OpenCV camera needs to be previewed at the view's full size; only that way was I able to get the onCameraFrame callback.
Fortunately, I could place another element on top of the camera preview to hide it, and show only the alarms.
You can find a simple CameraInService example here; I hope it is useful for you.
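For reference, a rough sketch of that setup (the sizes, flags, and camera index are illustrative, not taken from the linked example): attach a full-size OpenCV camera view through the WindowManager instead of the 1x1 SurfaceView, then cover it with an opaque view.
// Inside the service, once OpenCV has loaded successfully:
JavaCameraView cameraView = new JavaCameraView(this, 1); // 1 = front camera
cameraView.setCvCameraViewListener(this);                // the service implements CvCameraViewListener2
WindowManager.LayoutParams previewParams = new WindowManager.LayoutParams(
        640, 480, // a real preview size; a 1x1 preview never delivered frames
        WindowManager.LayoutParams.TYPE_SYSTEM_OVERLAY,
        WindowManager.LayoutParams.FLAG_NOT_FOCUSABLE,
        PixelFormat.TRANSLUCENT);
windowManager.addView(cameraView, previewParams);
cameraView.enableView(); // onCameraViewStarted/onCameraFrame fire only after this
// Add an opaque view on top of cameraView to hide the preview from the user.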

libgdx pixel issues between desktop and android projects

UPDATE: Looks like this is a problem caused by the static notification bar on the tablet, present because of the lack of hardware buttons; I just didn't think about that. Anyway, in the case of the TF101 it reports a resolution of 1280x752, so about a 1.702 (80:47) ratio. If I use a suitable unit size, like 33.5 or 11.75 vertical tiles, I get proper scaling, and this seems to fix the problem of skewed pixels.
END UPDATE
I've been setting up a game using 16x16 units for my tiled maps. I am using a resolution of 1280x800 on both my desktop and Android projects; I'm testing this to get a sense of how it will look on my Asus TF101 tablet. I currently use a camera of 20x12.5 (w x h) units and notice no pixel scaling in my desktop project, but when I run the game on my Android I get weird scaling and a green horizontal line. I can also move about a quarter cell further along the x-axis on the tablet, as shown in the screen shots. The pixels in the Android project don't seem uniform at all.
I set the verticalTiles amount to 12.5f, then calculate the horizontalTiles amount as:
verticalTiles = 12.5f;
...
horizontalTiles = (float) width / (float) height * verticalTiles;
camera = new OrthographicCamera(horizontalTiles, verticalTiles);
I'm aiming for devices with different aspect ratios to simply see more or less of the map, but I can't seem to get this working correctly. Any help would be appreciated.
Android Capture - http://imageshack.us/f/7/dsvg.png/ - notice the highlights on the roof edges; they are not uniform at all.
Desktop Capture - http://imageshack.us/f/853/5itv.png/
Current MainGame class
package com.bitknight.bqex;
/* Bunch of imports */
public class MainGame implements ApplicationListener {
private OrthographicCamera camera;
private SpriteBatch spriteBatch;
private TiledMap map;
private OrthogonalTiledMapRenderer mapRenderer;
private Texture texture;
private Texture clothArmor;
private Sprite sprite;
private BitmapFont font;
private float horizontalTiles = 0;
private float verticalTiles = 12.5f;
private int hoverTileX = 0;
private int hoverTileY = 0;
private TiledMapTileLayer layer;
private Cell cell;
private TiledMapTile canMoveToTile;
private TiledMapTile cannotMoveToTile;
private AnimatedTiledMapTile animatedStopTile;
private AnimatedTiledMapTile animatedGoTile;
private Texture spriteSheet;
private TextureRegion region;
private Player player;
float h, w;
float ppuX, ppuY;
@Override
public void create() {
// Setup the animated tiles
Array<StaticTiledMapTile> tileArray;
// Start position on the sheet
int startX = 192;
int startY = 1568;
spriteSheet = new Texture(Gdx.files.internal("data/maps/tilesheet.png"));
spriteSheet.setFilter(TextureFilter.Nearest, TextureFilter.Nearest);
// We are trying to load two strips of 4 frames, 8 total
for( int i = 0; i < 2; ++i ) {
tileArray = new Array<StaticTiledMapTile>(4);
for( int j = 0; j < 4; ++j ) {
region = new TextureRegion(spriteSheet, startX, startY, 16, 16);
tileArray.add(new StaticTiledMapTile(region));
startX += 16;
}
if( i == 0 ) {
animatedStopTile = new AnimatedTiledMapTile(1/10f, tileArray);
} else {
animatedGoTile = new AnimatedTiledMapTile(1/10f, tileArray);
}
}
// Load the map
map = new TmxMapLoader().load("data/maps/base.tmx");
// Setup the two tiles that show movable and not movable sprites
canMoveToTile = map.getTileSets().getTileSet(0).getTile(1959);
canMoveToTile.setBlendMode(BlendMode.ALPHA);
cannotMoveToTile = map.getTileSets().getTileSet(0).getTile(1958);
cannotMoveToTile.setBlendMode(BlendMode.ALPHA);
// Manually create the layer used to show the cursor sprites
layer = new TiledMapTileLayer(100, 100, 16, 16);
layer.setName("display");
cell = new Cell();
cell.setTile(canMoveToTile);
layer.setOpacity(1f);
mapRenderer = new OrthogonalTiledMapRenderer(map, 1/16f);
spriteBatch = new SpriteBatch();
font = new BitmapFont(Gdx.files.internal("data/consolas.fnt"), false);
font.setScale(0.6f);
texture = new Texture(Gdx.files.internal("data/maps/tilesheet.png"));
texture.setFilter(TextureFilter.Linear, TextureFilter.Linear);
clothArmor = new Texture(Gdx.files.internal("data/img/native/clotharmor.png"));
region = new TextureRegion(clothArmor, 32, 256, 32, 32);
sprite = new Sprite(region);
sprite.setOrigin(0.5f, 0.5f);
sprite.setPosition(0f - 0.5f, 0f);
sprite.setSize(2, 2);
// Setup player and associated animations
Array<TextureRegion> regions = new Array<TextureRegion>();
player = new Player();
}
@Override
public void dispose() {
spriteBatch.dispose();
texture.dispose();
clothArmor.dispose();
spriteSheet.dispose();
}
@Override
public void render() {
player.update(Gdx.graphics.getDeltaTime());
camera.update();
Gdx.gl.glClearColor(0, 0, 0, 1);
Gdx.gl.glClear(GL20.GL_COLOR_BUFFER_BIT);
if( Gdx.input.isKeyPressed(Input.Keys.ESCAPE) ) {
Gdx.app.exit();
}
// Clear the last cell
layer.setCell(hoverTileX, hoverTileY, null);
// Convert screen coordinates to world coordinates
Vector3 worldCoordinates = new Vector3(Gdx.input.getX(0), Gdx.input.getY(0), 0);
camera.unproject(worldCoordinates);
hoverTileX = (int)(worldCoordinates.x);
hoverTileY = (int)(worldCoordinates.y);
TiledMapTileLayer layer = (TiledMapTileLayer)map.getLayers().get("collision");
if( Gdx.input.isTouched(0) ) {
//sprite.setPosition(hoverTileX - 0.5f, hoverTileY);
player.pos.x = hoverTileX - 0.5f;
player.pos.y = hoverTileY - 0.25f;
cell.setTile(animatedGoTile);
} else {
if (layer.getCell(hoverTileX, hoverTileY) != null) {
cell.setTile(cannotMoveToTile);
} else {
cell.setTile(canMoveToTile);
}
}
layer.setCell(hoverTileX, hoverTileY, cell);
mapRenderer.setView(camera);
mapRenderer.render();
mapRenderer.getSpriteBatch().begin();
mapRenderer.renderTileLayer(layer);
mapRenderer.getSpriteBatch().end();
spriteBatch.setProjectionMatrix(camera.combined);
spriteBatch.begin();
player.render(spriteBatch);
spriteBatch.end();
}
@Override
public void resize(int width, int height) {
horizontalTiles = (float) width / (float) height * verticalTiles;
camera = new OrthographicCamera(horizontalTiles, verticalTiles);
w = width;
h = height;
}
@Override
public void pause() {
}
@Override
public void resume() {
}
}
Looks like this is a problem caused by the static notification bar on the tablet, present because of the lack of hardware buttons; I just didn't think about that. In the case of the TF101 it reports a resolution of 1280x752, so about a 1.702 (80:47) ratio. If I use a suitable unit size, like 33.5 or 11.75 vertical tiles, I get proper scaling, and this seems to fix the problem of skewed pixels.
Also, while this is good for the TF101 tablet in my case, it's not really a great general solution. Here is a Gemserk series that talks about a nicer approach:
http://blog.gemserk.com/2013/01/22/our-solution-to-handle-multiple-screen-sizes-in-android-part-one/
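To make the arithmetic concrete, a worked check of the numbers above (nothing here beyond the question's own values):
// TF101 usable resolution: 1280x752 (the system bar takes the rest of the 800).
float verticalTiles = 11.75f;                          // 752 px / 11.75 tiles = 64 px per tile
float horizontalTiles = 1280f / 752f * verticalTiles;  // = 20.0 tiles, and 1280 px / 20 = 64 px per tile
camera = new OrthographicCamera(horizontalTiles, verticalTiles);
// Both axes now land on a whole number of pixels per tile, so the 16x16
// tiles are scaled by an exact factor of 4 and no longer render skewed.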

libgdx and motion actions

I want to create a game, but I have a problem. I want to draw two images, and I used Texture and SpriteBatch to draw them. Now I want to implement some actions: I want to show a message to the user. If he clicks on the first image he gets the message "your choice is picture1", and for the other image the algorithm is the same.
public void create() {
background = new Texture(Gdx.files.internal("backg.png"));
germanFlag = new Texture(Gdx.files.internal("german.png"));
englishFlag = new Texture(Gdx.files.internal("english.png"));
batch = new SpriteBatch();
}
public void render() {
batch.begin();
batch.draw(background, 0, 0, 480, 320);
batch.draw(germanFlag, 140,80, 90, 60);
batch.draw(englishFlag, 260,80, 90, 60);
batch.end();
}
How can I implement this functionality? I want this solution to work on the Android platform. Any ideas?
Take a look at the InputProcessor interface. (Alternatively, use an InputMultiplexer.)
Using an InputProcessor you could do something like:
public class YourGame implements InputProcessor {
    com.badlogic.gdx.math.Rectangle touchBounds;
    String message = "";
    BitmapFont font;
    // ...the remaining InputProcessor methods must be implemented too; they can simply return false

    @Override
    public boolean touchDown(int screenX, int screenY, int pointer, int button) {
        // Input y is measured from the top; the default SpriteBatch y runs from the bottom.
        float y = Gdx.graphics.getHeight() - screenY;
        touchBounds.x = 140;
        if (touchBounds.contains(screenX, y)) {
            message = "your choice is picture1";
        } else {
            touchBounds.x = 260;
            if (touchBounds.contains(screenX, y))
                message = "your choice is picture2";
        }
        return true;
    }

    public void create() {
        background = new Texture(Gdx.files.internal("backg.png"));
        germanFlag = new Texture(Gdx.files.internal("german.png"));
        englishFlag = new Texture(Gdx.files.internal("english.png"));
        batch = new SpriteBatch();
        touchBounds = new Rectangle();
        touchBounds.width = 90;
        touchBounds.height = 60;
        touchBounds.y = 80;
        font = new BitmapFont();
        Gdx.input.setInputProcessor(this); // without this, touchDown is never called
    }

    public void render() {
        batch.begin();
        batch.draw(background, 0, 0, 480, 320);
        batch.draw(germanFlag, 140, 80, 90, 60);
        batch.draw(englishFlag, 260, 80, 90, 60);
        font.draw(batch, message, 10, 10);
        batch.end();
    }
}
