Problems Using Wavefront .obj's texture coordinates in Android OpenGL ES

I'm writing an Android app using OpenGL ES. I followed some online tutorials and managed to load up a textured cube using hard-coded vertices/indices/texture coordinates.
As a next step I wrote a parser for Wavefront .obj files. I made a mock file using the vertices etc. from the tutorial, which loads fine.
However, when I use a file made with a 3D modelling package, all the textures get messed up.
Below is how I'm currently getting the texture coordinates:
First I load all the texture coordinates (the vt lines) into a big vector.
Next I find the texture coordinates of the first two vertices of each f triangle (so for f 1/2/3 2/5/2 3/4/1 I take the 2nd and 5th texture coordinates). Since .obj starts counting from 1 rather than 0, I subtract 1 from the index, then multiply by 2 to get the x coordinate's position in my vt array, and add 1 to that for the y coordinate's position.
I take the texture coordinates I just found and add them to another vector.
Once I've gone through all the faces, I turn the vector into a FloatBuffer and pass it to glTexCoordPointer in my draw method.
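(As a concrete example of that arithmetic: the face vertex 2/5/2 references vt number 5, which lives at positions (5 - 1) * 2 = 8 and 9 of the flattened texture-coordinate vector.)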
Here is the code for parsing the file:
private void openObjFile(String filename, Context context, GL10 gl){
    Vector<String> lines = openFile(filename, context); // opens the file
    Vector<String[]> tokens = new Vector<String[]>();
    Vector<Float> vertices = new Vector<Float>();
    Vector<Float> textureCoordinates = new Vector<Float>();
    Vector<Float> vertexNormals = new Vector<Float>();

    // tokenise
    for(int i = 0; i < lines.size(); i++){
        String line = lines.get(i);
        tokens.add(line.split(" "));
    }

    for(int j = 0; j < tokens.size(); j++){
        String[] linetokens = tokens.get(j);

        // get rid of comments
        //if(linetokens[0].equalsIgnoreCase("#")){
        //    tokens.remove(j);
        //}

        // get texture from .mtl file
        if(linetokens[0].equalsIgnoreCase("mtllib")){
            parseMaterials(linetokens[1], context, gl);
        }

        // vertices
        if(linetokens[0].equalsIgnoreCase("v")){
            vertices.add(Float.valueOf(linetokens[1]));
            vertices.add(Float.valueOf(linetokens[2]));
            vertices.add(Float.valueOf(linetokens[3]));
        }

        // texture coordinates
        if(linetokens[0].equalsIgnoreCase("vt")){
            textureCoordinates.add(Float.valueOf(linetokens[1]));
            textureCoordinates.add(Float.valueOf(linetokens[2]));
        }

        // vertex normals
        if(linetokens[0].equalsIgnoreCase("vn")){
            vertexNormals.add(Float.valueOf(linetokens[1]));
            vertexNormals.add(Float.valueOf(linetokens[2]));
            vertexNormals.add(Float.valueOf(linetokens[3]));
        }
    }

    // vertices
    this.vertices = GraphicsUtil.getFloatBuffer(vertices);

    Mesh mesh = null;
    Vector<Short> indices = null;
    Vector<Float> textureCoordinatesMesh = null;
    Vector<Float> vertexNormalsMesh = null;

    for(int j = 0; j < tokens.size(); j++){
        String[] linetokens = tokens.get(j);
        if(linetokens[0].equalsIgnoreCase("g")){
            if(mesh != null){
                mesh.setIndices(GraphicsUtil.getShortBuffer(indices));
                mesh.setNumindices(indices.size());
                mesh.setNormals(GraphicsUtil.getFloatBuffer(vertexNormalsMesh));
                mesh.setTextureCoordinates(GraphicsUtil.getFloatBuffer(textureCoordinatesMesh));
                meshes.add(mesh);
            }
            mesh = new Mesh();
            indices = new Vector<Short>();
            textureCoordinatesMesh = new Vector<Float>();
            vertexNormalsMesh = new Vector<Float>();
        } else if(linetokens[0].equalsIgnoreCase("usemtl")){
            String material_name = linetokens[1];
            for(int mn = 0; mn < materials.size(); mn++){
                if(materials.get(mn).getName().equalsIgnoreCase(material_name)){
                    mesh.setTextureID(materials.get(mn).getTextureID());
                    mn = materials.size();
                }
            }
        } else if(linetokens[0].equalsIgnoreCase("f")){
            for(int v = 1; v < linetokens.length; v++){
                String[] vvtvn = linetokens[v].split("/");

                short index = Short.parseShort(vvtvn[0]);
                index -= 1;
                indices.add(index);

                if(v != 3){
                    int texturePosition = (Integer.parseInt(vvtvn[1]) - 1) * 2;
                    float xcoord = (textureCoordinates.get(texturePosition));
                    float ycoord = (textureCoordinates.get(texturePosition + 1));
                    // normalise
                    if(xcoord > 1 || ycoord > 1){
                        xcoord = xcoord / Math.max(xcoord, ycoord);
                        ycoord = ycoord / Math.max(xcoord, ycoord);
                    }
                    textureCoordinatesMesh.add(xcoord);
                    textureCoordinatesMesh.add(ycoord);
                }

                int normalPosition = (Integer.parseInt(vvtvn[2]) - 1) * 3;
                vertexNormalsMesh.add(vertexNormals.get(normalPosition));
                vertexNormalsMesh.add(vertexNormals.get(normalPosition) + 1);
                vertexNormalsMesh.add(vertexNormals.get(normalPosition) + 2);
            }
        }
    }

    if(mesh != null){
        mesh.setIndices(GraphicsUtil.getShortBuffer(indices));
        mesh.setNumindices(indices.size());
        mesh.setNormals(GraphicsUtil.getFloatBuffer(vertexNormalsMesh));
        mesh.setTextureCoordinates(GraphicsUtil.getFloatBuffer(textureCoordinatesMesh));
        meshes.add(mesh);
    } // Adding the final mesh
}
And here is the code for drawing (first the renderer's draw method, then the Mesh's draw method it calls):
public void draw(GL10 gl){
    gl.glEnableClientState(GL10.GL_VERTEX_ARRAY);

    // Counter-clockwise winding.
    gl.glFrontFace(GL10.GL_CCW);
    gl.glEnable(GL10.GL_CULL_FACE);
    gl.glCullFace(GL10.GL_BACK);

    // Pass the vertex buffer in
    gl.glVertexPointer(3, GL10.GL_FLOAT, 0, vertices);

    for(int i = 0; i < meshes.size(); i++){
        meshes.get(i).draw(gl);
    }

    // Disable the buffers
    gl.glDisableClientState(GL10.GL_VERTEX_ARRAY);
}

public void draw(GL10 gl){
    if(textureID >= 0){
        // Enable Textures
        gl.glEnable(GL10.GL_TEXTURE_2D);
        // Get specific texture.
        gl.glBindTexture(GL10.GL_TEXTURE_2D, textureID);
        // Use UV coordinates.
        gl.glEnableClientState(GL10.GL_TEXTURE_COORD_ARRAY);
        // Pass in texture coordinates
        gl.glTexCoordPointer(2, GL10.GL_FLOAT, 0, textureCoordinates);
    }

    // Pass in texture normals
    gl.glNormalPointer(GL10.GL_FLOAT, 0, normals);
    gl.glEnableClientState(GL10.GL_NORMAL_ARRAY);

    gl.glDrawElements(GL10.GL_TRIANGLES, numindices, GL10.GL_UNSIGNED_SHORT, indices);

    if(textureID >= 0){
        // Disable buffers
        gl.glDisableClientState(GL10.GL_NORMAL_ARRAY);
        gl.glDisableClientState(GL10.GL_TEXTURE_COORD_ARRAY);
    }
}
I'd really appreciate any help with this. It's frustrating to be not quite able to load the model from file, and I'm really not sure what I'm doing wrong or missing here.

I have to admit to being a little confused by the framing of your code. Specific things I would expect to be an issue:
you decline to copy a texture coordinate to the final mesh list for the third vertex associated with any face; this should put all of your coordinates out of sync after the first two
your texture coordinate normalisation step is unnecessary — to the extent that I'm not sure why it's in there — and probably broken (what if xcoord is larger than ycoord on the first line, then smaller on the second?)
OBJ considers (0, 0) to be the top left of a texture, OpenGL considers it to be the bottom left, so unless you've set the texture matrix stack to invert texture coordinates in code not shown, you need to invert them yourself, e.g. textureCoordinatesMesh.add(1.0 - ycoord);
Besides that, generic OBJ comments that I'm sure you're already well aware of and don't relate to the problem here are that you should expect to handle files that don't supply normals and files that don't supply either normals or texture coordinates (you currently assume both are present), and OBJ can hold faces with an arbitrary number of vertices, not just triangles. But they're always planar and convex, so you can just draw them as a fan or break them into triangles as though they were a fan.
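Pulling those points together, a rough sketch of the face branch with the fixes applied might look like the following. It keeps the question's variable names and parallel-list structure, copies a texture coordinate for every corner, drops the normalisation, flips V, and splits larger polygons into a triangle fan; treat it as an outline rather than a drop-in replacement. (The +1/+2 in the normal lookup are also moved inside get(), which looks like a separate slip in the original.)

} else if (linetokens[0].equalsIgnoreCase("f")) {
    int corners = linetokens.length - 1;        // 3 for a triangle, more for polygons
    short[] cornerIndices = new short[corners];

    for (int c = 0; c < corners; c++) {
        String[] vvtvn = linetokens[c + 1].split("/");

        // position index (OBJ counts from 1)
        cornerIndices[c] = (short) (Short.parseShort(vvtvn[0]) - 1);

        // texture coordinate for EVERY corner, with no "normalisation"
        int texturePosition = (Integer.parseInt(vvtvn[1]) - 1) * 2;
        float u = textureCoordinates.get(texturePosition);
        float vCoord = textureCoordinates.get(texturePosition + 1);
        textureCoordinatesMesh.add(u);
        textureCoordinatesMesh.add(1.0f - vCoord); // flip V: OBJ's origin is top-left, OpenGL's is bottom-left

        // normal lookup, with the offsets inside get()
        int normalPosition = (Integer.parseInt(vvtvn[2]) - 1) * 3;
        vertexNormalsMesh.add(vertexNormals.get(normalPosition));
        vertexNormalsMesh.add(vertexNormals.get(normalPosition + 1));
        vertexNormalsMesh.add(vertexNormals.get(normalPosition + 2));
    }

    // break the (planar, convex) polygon into a fan of triangles
    for (int c = 1; c < corners - 1; c++) {
        indices.add(cornerIndices[0]);
        indices.add(cornerIndices[c]);
        indices.add(cornerIndices[c + 1]);
    }
}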

Related

Writing Wavefront OBJ Parser For Use with OpenGL Indexed Vertex Buffer Objects

I'm trying to write a basic Wavefront OBJ loader for my Android OpenGL ES 2.0 program. For now, I'm ignoring everything in an OBJ file except for vertices, normals, and faces. Here's what I've written so far:
InputStream inputStream = context.getResources().openRawResource(resourceID);
BufferedReader reader = new BufferedReader(new InputStreamReader(inputStream));
String line;
while((line = reader.readLine()) != null)
{
    if(line.startsWith("v "))
    {
        Float x = Float.valueOf(line.split(" ")[1]);
        Float y = Float.valueOf(line.split(" ")[2]);
        Float z = Float.valueOf(line.split(" ")[3]);

        verticesArrayList.add(x);
        verticesArrayList.add(y);
        verticesArrayList.add(z);
    }
    else if(line.startsWith("vn "))
    {
        Float x = Float.valueOf(line.split(" ")[1]);
        Float y = Float.valueOf(line.split(" ")[2]);
        Float z = Float.valueOf(line.split(" ")[3]);

        normalsArrayList.add(x);
        normalsArrayList.add(y);
        normalsArrayList.add(z);
    }
    else if(line.startsWith("f "))
    {
        // Loop 3 times for the 3 vertices/textures/normals associated with each face
        for(int i = 1; i <= 3; i++)
        {
            Short vertex = (short) (Short.valueOf(line.split(" ")[i].split("/")[0]) - 1);
            indicesArrayList.add(vertex);

            // Make a copy of my normals array list
            if(normalsArrayList2.size() == 0)
                normalsArrayList2 = new ArrayList<Float>(normalsArrayList);

            // Attempt to re-arrange the normals to match the order of the vertices
            int normal = Integer.valueOf(line.split(" ")[i].split("/")[2]) - 1;
            normalsArrayList2.add(vertex * 3, normalsArrayList.get(normal * 3));
            normalsArrayList2.add((vertex * 3) + 1, normalsArrayList.get((normal * 3) + 1));
            normalsArrayList2.add((vertex * 3) + 2, normalsArrayList.get((normal * 3) + 2));
        }
    }
}
reader.close();
However, I'm not sure if I'm setting my normals correctly. You see what I'm doing there at the bottom of my code? I'm trying to re-arrange the normals so that they match the order of the vertices. I'm doing this because I'm using the GLES20.glDrawElements() method like so:
// Get handle to vertex shader's aPosition member, enable the handle, and prepare the vertex data
mPositionHandle = GLES20.glGetAttribLocation(mProgram, "aPosition");
GLES20.glEnableVertexAttribArray(mPositionHandle);
GLES20.glVertexAttribPointer(mPositionHandle, COORDINATES_PER_VERTEX, GLES20.GL_FLOAT, false, VERTEX_STRIDE, verticesBuffer);
// Get handle to vertex shader's aNormal member, enable the handle, and prepare the vertex data
mNormalHandle = GLES20.glGetAttribLocation(mProgram, "aNormal");
GLES20.glEnableVertexAttribArray(mNormalHandle);
GLES20.glVertexAttribPointer(mNormalHandle, COORDINATES_PER_VERTEX, GLES20.GL_FLOAT, false, VERTEX_STRIDE, normalsBuffer);
// Draw the cube
GLES20.glDrawElements(GLES20.GL_TRIANGLES, numberOfIndices, GLES20.GL_UNSIGNED_SHORT, indicesBuffer);
I got the idea for this re-arranging from this site, after countless hours of Googling this subject.
However, something isn't going right. The lighting in my shader works for all the shapes I created manually with OpenGL, just not for the models I read in using my OBJ parser, so I don't think my lighting calculations are the problem, just how the normals are being calculated in my OBJ parser. Also, I opened my test OBJ files with Maya and used the "Vertex Normal Edit Tool" to visually verify that all of the normals are pointing the right way. Are there any glaring problems you can see with any of my code?
Use ArrayList's set() method instead of add(). add(index, element) shifts every element after that index one slot to the right, so each face you process pushes the previously placed normals out of position, whereas set(index, element) overwrites the slot in place, which is what the re-arranging needs.
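A minimal sketch of that change inside the face loop, keeping the names from the question; the only new assumption is that normalsArrayList2 is pre-sized so set() always has a slot to overwrite:

// Build the copy once, with one slot per vertex component (3 floats per vertex),
// so set() always has an existing element to overwrite
// (java.util.Collections.nCopies pre-fills the list with zeros).
if(normalsArrayList2.size() == 0)
    normalsArrayList2 = new ArrayList<Float>(Collections.nCopies(verticesArrayList.size(), 0f));

int normal = Integer.valueOf(line.split(" ")[i].split("/")[2]) - 1;
normalsArrayList2.set(vertex * 3, normalsArrayList.get(normal * 3));
normalsArrayList2.set((vertex * 3) + 1, normalsArrayList.get((normal * 3) + 1));
normalsArrayList2.set((vertex * 3) + 2, normalsArrayList.get((normal * 3) + 2));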

How to make a cylinder in renderscript

I have been trying to make a cylinder in renderscript. This is the code I've tried:
public Mesh cylinder(){
    float radius = 1.25f, halfLength = 5;
    int slices = 16;

    Mesh.TriangleMeshBuilder mbo = new TriangleMeshBuilder(mRSGL, 3, Mesh.TriangleMeshBuilder.TEXTURE_0);

    for(int i = 0; i < slices; i++) {
        float theta = (float) (((float)i) * 2.0 * Math.PI);
        float nextTheta = (float) (((float)i + 1) * 2.0 * Math.PI);

        /* vertex at middle of end */
        mbo.addVertex(0.0f, halfLength, 0.0f);

        /* vertices at edges of circle */
        mbo.addVertex((float)(radius * Math.cos(theta)), halfLength, (float)(radius * Math.sin(theta)));
        mbo.addVertex((float)(radius * Math.cos(nextTheta)), halfLength, (float)(radius * Math.sin(nextTheta)));

        /* the same vertices at the bottom of the cylinder */
        mbo.addVertex((float)(radius * Math.cos(nextTheta)), -halfLength, (float)(radius * Math.sin(nextTheta)));
        mbo.addVertex((float)(radius * Math.cos(theta)), halfLength, (float)(radius * Math.sin(theta)));
        mbo.addVertex(0.0f, -halfLength, 0.0f);

        mbo.addTriangle(0, 1, 2);
        mbo.addTriangle(3, 4, 5);
    }
    return mbo.create(true);
}
But this code gives me a rectangle of length 5. Any ideas where I'm going wrong?
You actually have a few problems here. First, your angles are always equal to multiples of 2pi. You need to divide by the number of sectors when you calculate your angles. Additionally, in this step you have an unnecessary explicit type conversion; Java will handle the conversion of int to double for you.
Second, you are constantly adding the same two triangles to the mesh and not adding any triangles for the side of the cylinder, just the two end faces. In your loop when calling addTriangle() you should use indices, for example addTriangle(n, n+1, n+2).
Finally, you were missing a negative sign when you created your 4th vertex, so it was actually at halfLength, not -halfLength.
Try this:
public Mesh cylinder(){
    float radius = 1.25f, halfLength = 5;
    int slices = 16;

    Mesh.TriangleMeshBuilder mbo = new TriangleMeshBuilder(mRSGL, 3, Mesh.TriangleMeshBuilder.TEXTURE_0);

    /* vertex at middle of end */
    mbo.addVertex(0.0f, halfLength, 0.0f);
    mbo.addVertex(0.0f, -halfLength, 0.0f);

    for(int i = 0; i < slices; i++) {
        float theta = (float) (i * 2.0 * Math.PI / slices);
        float nextTheta = (float) ((i + 1) * 2.0 * Math.PI / slices);

        /* vertices at edges of circle */
        mbo.addVertex((float)(radius * Math.cos(theta)), halfLength, (float)(radius * Math.sin(theta)));
        mbo.addVertex((float)(radius * Math.cos(nextTheta)), halfLength, (float)(radius * Math.sin(nextTheta)));

        /* the same vertices at the bottom of the cylinder */
        mbo.addVertex((float)(radius * Math.cos(nextTheta)), -halfLength, (float)(radius * Math.sin(nextTheta)));
        mbo.addVertex((float)(radius * Math.cos(theta)), -halfLength, (float)(radius * Math.sin(theta)));

        /* Add the faces for the ends, ordered for back face culling */
        mbo.addTriangle(4*i + 3, 4*i + 2, 0);
        // The offsets here are to adjust for the first two indices being the center points.
        // The sector number (i) is multiplied by 4 because the way you are building this mesh,
        // there are 4 vertices added with each sector.
        mbo.addTriangle(4*i + 5, 4*i + 4, 1);

        /* Add the faces for the side */
        mbo.addTriangle(4*i + 2, 4*i + 4, 4*i + 5);
        mbo.addTriangle(4*i + 4, 4*i + 2, 4*i + 3);
    }
    return mbo.create(true);
}
I have also added a slight optimization where the vertices for the centers of the circles are created only once, thus saving memory. The order of indices here is for back face culling. Reverse it if you want front face. Should your needs require a more efficient method eventually, allocation builders allow for using trifans and tristrips, but for a mesh of this complexity the ease of triangle meshes is merited. I have run this code on my own system to verify that it works.

How can I render a 2D graph/figure using OpenGL on Android?

I'm making a simple fractal viewing app for Android, just for fun. I'm also using it as an opportunity to learn OpenGL since I've never worked with it before. Using the Android port of the NeHe tutorials as a starting point, my approach is to have one class (FractalModel) which does all the math to create the fractal, and FractalView which does all the rendering.
The difficulty I'm having is in getting the rendering to work. Since I'm essentially plotting a graph of points of different colors where each point should correspond to 1 pixel, I thought I'd handle this by rendering 1x1 rectangles over the entire screen, using the dimensions to calculate the offsets so that there's a 1:1 correspondence between the rectangles and the physical pixels. Since the color of each pixel will be calculated independently, I can re-use the same rendering code to render different parts of the fractal (I want to add panning and zooming later on).
Here is the view class I wrote:
public class FractalView extends GLSurfaceView implements Renderer {
    private float[] mVertices;
    private FloatBuffer[][] mVBuffer;
    private ByteBuffer[][] mBuffer;
    private int mScreenWidth;
    private int mScreenHeight;
    private float mXOffset;
    private float mYOffset;
    private int mNumPixels;

    //references to current vertex coordinates
    private float xTL;
    private float yTL;
    private float xBL;
    private float yBL;
    private float xBR;
    private float yBR;
    private float xTR;
    private float yTR;

    public FractalView(Context context, int w, int h){
        super(context);
        setEGLContextClientVersion(1);
        mScreenWidth = w;
        mScreenHeight = h;
        mNumPixels = mScreenWidth * mScreenHeight;
        mXOffset = (float)1.0 / mScreenWidth;
        mYOffset = (float)1.0 / mScreenHeight;
        mVertices = new float[12];
        mVBuffer = new FloatBuffer[mScreenHeight][mScreenWidth];
        mBuffer = new ByteBuffer[mScreenHeight][mScreenWidth];
    }

    public void onDrawFrame(GL10 gl){
        int i, j;
        gl.glClear(GL10.GL_COLOR_BUFFER_BIT | GL10.GL_DEPTH_BUFFER_BIT);
        gl.glLoadIdentity();

        mapVertices();
        gl.glColor4f(0.0f, 1.0f, 0.0f, .5f);

        for(i = 0; i < mScreenHeight; i++){
            for(j = 0; j < mScreenWidth; j++){
                gl.glFrontFace(GL10.GL_CW);
                gl.glVertexPointer(3, GL10.GL_FLOAT, 0, mVBuffer[i][j]);
                gl.glEnableClientState(GL10.GL_VERTEX_ARRAY);
                gl.glDrawArrays(GL10.GL_TRIANGLE_STRIP, 0, mVertices.length / 3);
                gl.glDisableClientState(GL10.GL_VERTEX_ARRAY);
            }
        }
    }

    public void onSurfaceChanged(GL10 gl, int w, int h){
        if(h == 0) {        //Prevent A Divide By Zero By
            h = 1;          //Making Height Equal One
        }

        gl.glViewport(0, 0, w, h);              //Reset The Current Viewport
        gl.glMatrixMode(GL10.GL_PROJECTION);    //Select The Projection Matrix
        gl.glLoadIdentity();                    //Reset The Projection Matrix

        //Calculate The Aspect Ratio Of The Window
        GLU.gluPerspective(gl, 45.0f, (float)w / (float)h, 0.1f, 100.0f);

        gl.glMatrixMode(GL10.GL_MODELVIEW);     //Select The Modelview Matrix
        gl.glLoadIdentity();
    }

    public void onSurfaceCreated(GL10 gl, EGLConfig config){
        gl.glShadeModel(GL10.GL_SMOOTH);            //Enable Smooth Shading
        gl.glClearColor(0.0f, 0.0f, 0.0f, 0.5f);    //Black Background
        gl.glClearDepthf(1.0f);                     //Depth Buffer Setup
        gl.glEnable(GL10.GL_DEPTH_TEST);            //Enables Depth Testing
        gl.glDepthFunc(GL10.GL_LEQUAL);             //The Type Of Depth Testing To Do

        //Really Nice Perspective Calculations
        gl.glHint(GL10.GL_PERSPECTIVE_CORRECTION_HINT, GL10.GL_NICEST);
    }

    private void mapVertices(){
        int i, j;
        xTL = -1;
        yTL = 1;
        xTR = -1 + mXOffset;
        yTR = 1;
        xBL = -1;
        yBL = 1 - mYOffset;
        xBR = -1 + mXOffset;
        yBR = 1 - mYOffset;

        for(i = 0; i < mScreenHeight; i++){
            for (j = 0; j < mScreenWidth; j++){
                //assign coords to vertex array
                mVertices[0] = xBL;
                mVertices[1] = yBL;
                mVertices[2] = 0f;
                mVertices[3] = xBR;
                mVertices[4] = xBR;
                mVertices[5] = 0f;
                mVertices[6] = xTL;
                mVertices[7] = yTL;
                mVertices[8] = 0f;
                mVertices[9] = xTR;
                mVertices[10] = yTR;
                mVertices[11] = 0f;

                //add doubleBuffer
                mBuffer[i][j] = ByteBuffer.allocateDirect(mVertices.length * 4);
                mBuffer[i][j].order(ByteOrder.nativeOrder());
                mVBuffer[i][j] = mBuffer[i][j].asFloatBuffer();
                mVBuffer[i][j].put(mVertices);
                mVBuffer[i][j].position(0);

                //transform right
                transformRight();
            }
            //transform down
            transformDown();

            //reset x
            xTL = -1;
            xTR = -1 + mXOffset;
            xBL = -1;
            xBR = -1 + mXOffset;
        }
    }

    //transform all the coordinates 1 "pixel" to the right
    private void transformRight(){
        xTL = xTL + mXOffset; //TL
        xBL = xBL + mXOffset; //BL
        xBR = xBR + mXOffset; //BR
        xTR = xTR + mXOffset; //TR;
    }

    //transform all of the coordinates 1 pixel down;
    private void transformDown(){
        yTL = yTL - mYOffset;
        yBL = yBL - mYOffset;
        yBR = yBR - mYOffset;
        yTR = yTR - mYOffset;
    }
}
Basically I'm trying to do it the same way as this (the square in lesson 2) but with far more objects. I'm assuming 1 and -1 roughly correspond to screen edges (I know this isn't totally true, but I don't really understand how to use projection matrices and want to keep this as simple as possible unless there's a good resource out there I can learn from), but I understand that OpenGL's coordinates are separate from real screen coordinates. When I run my code I just get a black screen (it should be green) but LogCat shows the garbage collector working away so I know something is happening. I'm not sure if it's just a bug caused by my just not doing something right, or if it's just REALLY slow. In either case, what should I do differently? I feel like I may be going about this all wrong. I've looked around and most of the tutorials and examples are based on the link above.
Edit: I know I could go about this by generating a texture that fills up the entire screen and just drawing that, though the link I read which mentioned it said it would be slower since you're not supposed to redraw a texture every frame. That said, I only really need to redraw the texture when the perspective changes, so I could write my code to take this into account. The main difficulty I'm having currently is drawing the bitmap, and getting it to display correctly.
I would imagine that the blank screen is due to the fact that you are swapping buffers so many times, and also the fact that you are generating all your vertex buffers every frame. Thousands of buffer swaps AND thousands of buffer creations in a single frame would be INCREDIBLY slow.
One thing to mention is that Android devices have limited memory, so the garbage collector working away is probably an indication that your buffer creation code is eating up a lot of the available memory and the device is trying to free up some for the creation of new buffers.
I would suggest creating a texture that you fill with your pixel data each frame and then render to a single square that fills the screen. This will increase your speed by a huge amount, and also make your program more flexible.
Edit:
Look at the tutorial here: http://www.nullterminator.net/gltexture.html to get an idea of how to create textures and load them. You will basically need to fill BYTE* data with your own data.
If you are changing the data dynamically, you will need to update the texture data. Use the information here: http://www.opengl.org/wiki/Texture, in the section about texture image modification.
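As a rough illustration of that approach on the GL10 path used in the question (not the author's code; width, height and pixelBuffer are placeholder names for the fractal's pixel data), the texture can be created once and then refilled whenever the fractal changes:

// One-time setup, e.g. in onSurfaceCreated
int[] textureIds = new int[1];
gl.glGenTextures(1, textureIds, 0);
gl.glBindTexture(GL10.GL_TEXTURE_2D, textureIds[0]);
gl.glTexParameterf(GL10.GL_TEXTURE_2D, GL10.GL_TEXTURE_MIN_FILTER, GL10.GL_NEAREST);
gl.glTexParameterf(GL10.GL_TEXTURE_2D, GL10.GL_TEXTURE_MAG_FILTER, GL10.GL_NEAREST);
// pixelBuffer is a direct ByteBuffer holding width * height RGBA bytes
gl.glTexImage2D(GL10.GL_TEXTURE_2D, 0, GL10.GL_RGBA, width, height, 0,
        GL10.GL_RGBA, GL10.GL_UNSIGNED_BYTE, pixelBuffer);

// Whenever the fractal data changes, overwrite the existing texture...
gl.glBindTexture(GL10.GL_TEXTURE_2D, textureIds[0]);
gl.glTexSubImage2D(GL10.GL_TEXTURE_2D, 0, 0, 0, width, height,
        GL10.GL_RGBA, GL10.GL_UNSIGNED_BYTE, pixelBuffer);
// ...and then draw a single textured quad covering the whole screen
// instead of one quad per pixel.

Keep in mind that many GLES 1.x devices require power-of-two texture dimensions, so it may be necessary to allocate the next power-of-two size and only update and draw the used portion.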

Android Opengl ES tiling engine, smooth scrolling

Following this: Best approach for oldschool 2D zelda-like game
I got a simple 2D tile generator working: I'm reading an int map[100][100] filled with either 1s or 0s and drawing tiles according to their tile id (0 is water, 1 is grass).
I'm using a basic numpad control handler; using a camIncr (32.0f), I set the camera position according to the movement:
case KeyEvent.KEYCODE_DPAD_RIGHT:
    cameraPosX = (float)(cameraPosX + camIncr);
    break;
In my draw loop, I'm just drawing enough tiles to fit on my screen, and I track the top-left tile using cameraOffsetX and cameraOffsetY (the camera position / tile size).
I'm using GLU.gluOrtho2D for my projection.
Here is the draw loop inside my custom renderer :
gl.glClear(GL10.GL_COLOR_BUFFER_BIT | GL10.GL_DEPTH_BUFFER_BIT);
gl.glMatrixMode( GL10.GL_PROJECTION );
gl.glLoadIdentity( );
GLU.gluOrtho2D(gl, 0, scrWidth, scrHeight, 0);
repere.draw(gl, 100.0f); // this is just a helper, draw 2 lines at the origin
//Call the drawing methods
gl.glMatrixMode(GL10.GL_MODELVIEW);
gl.glLoadIdentity();
tiledBackground.draw(gl, filtering);
My tiledBackground draw function:
int cols = (569 / 32) + 2;  // how many columns can fit on the screen
int rows = (320 / 32) + 1;  // how many rows can fit on the screen

int cameraPosX = (int) Open2DRenderer.getCameraPosX();
int cameraPosY = (int) Open2DRenderer.getCameraPosY();

tileOffsetX = (int) (cameraPosX / 32);
tileOffsetY = (int) (cameraPosY / -32);

gl.glPushMatrix();
for (int y = 0; y < rows; y++) {
    for (int x = 0; x < cols; x++) {
        try {
            tile = map[y + tileOffsetY][x + tileOffsetX];
        } catch (Exception e) {
            e.printStackTrace(); // when out of array
            tile = 0;
        }
        gl.glPushMatrix();
        if (tile == 0){
            waterTile.draw(gl, filter);
        }
        if (tile == 4) {
            grassTile.draw(gl, filter);
        }
        gl.glTranslatef(32.0f, 0.0f, 0.0f);
    }
    gl.glPopMatrix();
    gl.glTranslatef(0.0f, 32.0f, 0.0f);
}
gl.glPopMatrix();
}
The waterTile and grassTile draw functions draw a 32x32 textured tile; I can post the code if relevant.
Everything is fine: I can move using the numpad arrows, and my map 'moves' with me. Since I'm only drawing what I can see, it's fast (see android OpenGL ES simple Tile generator performance problem, where Aleks pointed me to a simple 'culling' idea).
I would like my engine to 'smooth scroll' now. I've tried tweaking the camIncr variable, the GLU.gluOrtho2D call, etc.; nothing worked.
Any ideas? :)
I finally figured it out.
I added a glTranslatef call right before entering the loop:
gl.glPushMatrix();
gl.glTranslatef(-cameraPosX%32, -cameraPosY%32, 0);
for (int y = 0; y < rows; y++) {
...
At first I was trying to translate the scene using a brute cameraPosX / TILE_HEIGHT division, which didn't work.
We have to translate the offset by which the tile extends beyond the screen, not the total cameraPosX offset, so we're using the Mod (%) operator instead of division.
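(For example, with 32-pixel tiles and cameraPosX = 70, tileOffsetX is 70 / 32 = 2, so drawing starts at the third map column, and 70 % 32 = 6 is how far that column sticks out past the left edge of the screen, so the whole grid is shifted left by 6 pixels.)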
Sorry for my bad English ^^

android OpenGL ES simple Tile generator performance problem

Following this question: Best approach for oldschool 2D zelda-like game
Thanks to the previous replies, and with major inspiration from http://insanitydesign.com/wp/projects/nehe-android-ports/ , I started to build a simple tile generator for my simple 2D zelda-like game project.
I can now generate a map with the same textured tile, using two nested for(..) loops to draw horizontal and vertical tiles, and I have some basic DPAD key input listeners to scroll over the x and y axes.
But now I'm running into my first performance problems, with just one texture and one model.
When trying to build a 10x10 map, scrolling is fine and smooth.
When trying with 50x50, things get worse, and with a 100x100, it's unacceptable.
Is there a way to tell OpenGL to render only the 'visible' part of my map and ignore the hidden tiles? I'm totally new to this.
I'm using
GLU.gluLookAt(gl, cameraPosX, cameraPosY, 10.0f,cameraPosX, cameraPosY, 0.0f, 0.0f, 1.0f, 0.0f);
to set the camera and point of view for a 2D-style feeling.
Any help? :)
for (int j = 0; j < 10; j++) {
    for (int i = 0; i < 10; i++) {
        gl.glPushMatrix(); // Save the matrix on the stack

        //Bind the texture according to the set texture filter
        gl.glBindTexture(GL10.GL_TEXTURE_2D, textures[filter]);

        //Set the face rotation
        gl.glFrontFace(GL10.GL_CW);

        //Enable texture state
        gl.glEnableClientState(GL10.GL_TEXTURE_COORD_ARRAY);
        //Enable vertex state
        gl.glEnableClientState(GL10.GL_VERTEX_ARRAY);

        //Point to our vertex buffer
        gl.glVertexPointer(3, GL10.GL_FLOAT, 0, vertexBuffer);
        //Point to our texture buffer
        gl.glTexCoordPointer(2, GL10.GL_FLOAT, 0, textureBuffer);

        //Draw the vertices as triangle strip
        gl.glDrawArrays(GL10.GL_TRIANGLE_STRIP, 0, vertices.length / 3);

        //Disable the client state before leaving
        gl.glDisableClientState(GL10.GL_VERTEX_ARRAY);
        gl.glDisableClientState(GL10.GL_TEXTURE_COORD_ARRAY);

        gl.glTranslatef(1.0f, 0.0f, 0.0f); // move one tile to the right
    }
    // start drawing the next row
    gl.glPopMatrix(); // Restore the matrix from the stack
    gl.glTranslatef(0.0f, -1.0f, 0.0f);
}
The reason why the loop gets slow is that it makes OpenGL do lots of unnecessary work. This is because there are lots of redundant state changes.
That means that you are calling gl functions with parameters that don't have any effect. Calling these functions eats up a lot of CPU time and might cause the whole OpenGL pipeline to stall, as it cannot work very effectively.
For example you should call glBindTexture only if you want to change the texture used. The above code binds the same texture over and over again in the inner loop which is very expensive. Similarly you don't need to enable and disable texture coordinate and vertex arrays in the inner loop. Even setting texture coordinate pointer and vertex pointer in the inner loop is unnecessary as they don't change between subsequent loops.
The bottom line is, that in the inner loop you should only change translation and call glDrawArrays. Everything else just eats up resources for nothing.
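To make that concrete, here is a rough sketch of the question's loop with the state setup hoisted out (the same calls, just moved; treat the exact placement as an outline rather than a drop-in replacement):

// Set up state once, outside the tile loops
gl.glBindTexture(GL10.GL_TEXTURE_2D, textures[filter]);
gl.glFrontFace(GL10.GL_CW);
gl.glEnableClientState(GL10.GL_TEXTURE_COORD_ARRAY);
gl.glEnableClientState(GL10.GL_VERTEX_ARRAY);
gl.glVertexPointer(3, GL10.GL_FLOAT, 0, vertexBuffer);
gl.glTexCoordPointer(2, GL10.GL_FLOAT, 0, textureBuffer);

for (int j = 0; j < 10; j++) {
    gl.glPushMatrix();
    for (int i = 0; i < 10; i++) {
        // Only the draw call and the translation remain in the inner loop
        gl.glDrawArrays(GL10.GL_TRIANGLE_STRIP, 0, vertices.length / 3);
        gl.glTranslatef(1.0f, 0.0f, 0.0f);
    }
    gl.glPopMatrix();
    gl.glTranslatef(0.0f, -1.0f, 0.0f);
}

// Disable the client state once, after all tiles are drawn
gl.glDisableClientState(GL10.GL_VERTEX_ARRAY);
gl.glDisableClientState(GL10.GL_TEXTURE_COORD_ARRAY);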
There are more advanced things you can do to speed this up even more. Tile background can be drawn so that it causes only one call to glDrawArrays (or glDrawElements). If you are interested in, you should Google topics like batching and texture atlases.
You can easily make your loop draw only the visible area.
Here is an example of how it can be done. I don't know the Android API, so treat my example as metacode.
int cols = SCREEN_WIDTH / TILE_SIZE + 1;      // how many columns can fit on the screen
int rows = SCREEN_HEIGHT / TILE_SIZE + 1;     // how many rows can fit on the screen

int firstVisibleCol = cameraPosX / TILE_SIZE; // first column we need to draw
int firstVisibleRow = cameraPosY / TILE_SIZE; // first row we need to draw

// and now the loop becomes (the upper bounds are offset by the first visible row/column)
for (int j = firstVisibleRow; j < firstVisibleRow + rows; j++) {
    for (int i = firstVisibleCol; i < firstVisibleCol + cols; i++) {
        ...
    }
}
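A small extra safeguard worth considering (a sketch, with MAP_ROWS and MAP_COLS standing in for your map's dimensions, which the answer above does not spell out): near the edges of the map the computed range can run past the array, so clamping it before the loop keeps the indices in bounds without needing a try/catch:

// Clamp the visible range to the map bounds
int startRow = Math.max(0, firstVisibleRow);
int endRow   = Math.min(MAP_ROWS, firstVisibleRow + rows);
int startCol = Math.max(0, firstVisibleCol);
int endCol   = Math.min(MAP_COLS, firstVisibleCol + cols);

for (int j = startRow; j < endRow; j++) {
    for (int i = startCol; i < endCol; i++) {
        // draw tile map[j][i]
    }
}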
