I'm reading the awesome Beginning Android Games book, and I'm now trying to implement some tests myself.
I'm using OpenGL ES 1.0, and I'm comfortable manipulating the view frustum, projections, translation, rotation, scale, etc.
What I'm trying to do:
a) Render a rocket to the screen and give it some velocity and acceleration (using Euler integration: add the acceleration to the velocity, and the velocity to the position) to simulate a path (a parabola). This is done, implemented without any issue.
b) Rotate the rocket so that it also simulates the inclination of the object along its path. That's the problem.
To be clear, I'm adding the image below.
I can't figure out what's the correct angle to add to the rocket, between one frame and the next one.
I tried to get that with some geometry.
Obj Pos 1 is the rocket representation at frame 1.
Obj Pos 2 is the rocket representation, at the next frame (frame 2).
V1 is the vector that holds the center X and Y coordinates of the Obj Pos 1.
V2 is the vector that holds the center X and Y coordinates of the Obj Pos 2.
Tangent line 1 is the line tangent to the parabola at the point V1 points to.
Tangent line 2 is the line tangent to the parabola at the point V2 points to.
A1 is the angle between both vectors.
A2 is the angle between both tangent lines.
As far as I can see, the correct angle to apply to the rocket from frame 1 to frame 2 is angle A2. But how can I calculate it?
And is this correct for game purposes? I mean, we don't need to be exact on the physics; we just need to be good enough to simulate the animation and 'cheat' the user.
Here is the code:
public class PersonalTest008Rocket extends GLGame {
@Override
public Screen getStartScreen() {
return new RocketScreen(this);
}
class RocketScreen extends Screen {
GLGraphics glGraphics;
Camera2D camera;
final float WORLD_WIDTH = 60;
final float WORLD_HEIGHT = 36;
float[] rocketRawData;
short[] rocketRawIndices;
BindableVertices rocketVertices;
DynamicGameObject rocket;
float angle;
Vector2 gravity;
public RocketScreen(Game game) {
super(game);
glGraphics = ((GLGame) game).getGLGraphics();
camera = new Camera2D(glGraphics, WORLD_WIDTH, WORLD_HEIGHT);
rocketRawData = new float[]{
// x, y, r, g, b, a
+4.0f, +0.0f, 1.0f, 0.0f, 0.0f, 1, // 0
+2.0f, +1.0f, 0.5f, 0.0f, 0.0f, 1, // 1
+2.0f, -1.0f, 0.5f, 0.0f, 0.0f, 1, // 2
-2.0f, +1.0f, 0.0f, 0.5f, 0.5f, 1, // 3
-2.0f, -1.0f, 0.0f, 0.5f, 0.5f, 1, // 4
-3.0f, +1.0f, 0.0f, 0.5f, 0.5f, 1, // 5
-3.0f, -1.0f, 0.0f, 0.5f, 0.5f, 1, // 6
-4.0f, +3.0f, 0.0f, 0.0f, 1.0f, 1, // 7
-5.0f, +0.0f, 0.0f, 0.0f, 1.0f, 1, // 8
-4.0f, -3.0f, 0.0f, 0.0f, 1.0f, 1 // 9
};
rocketRawIndices = new short[]{
0, 1, 2,
1, 4, 2,
1, 3, 4,
3, 4, 6,
3, 5, 6,
3, 7, 5,
5, 8, 6,
6, 9, 4
};
rocketVertices = new BindableVertices(glGraphics, 10, 3 * 8, true, false);
rocketVertices.setVertices(rocketRawData, 0, rocketRawData.length);
rocketVertices.setIndices(rocketRawIndices, 0, rocketRawIndices.length);
int velocity = 30;
angle = 45;
rocket = new DynamicGameObject(0, 0, 9, 6);
rocket.position.add(1, 1);
rocket.velocity.x = (float) Math.cos(Math.toRadians(angle)) * velocity;
rocket.velocity.y = (float) Math.sin(Math.toRadians(angle)) * velocity;
gravity = new Vector2(0, -10);
}
@Override
public void update(float deltaTime) {
rocket.velocity.add(gravity.x * deltaTime, gravity.y * deltaTime);
rocket.position.add(rocket.velocity.x * deltaTime, rocket.velocity.y * deltaTime);
}
@Override
public void present(float deltaTime) {
GL10 gl = glGraphics.getGL();
gl.glClearColor(0.5f, 0.5f, 0.5f, 1);
gl.glClear(GL10.GL_COLOR_BUFFER_BIT);
camera.setViewportAndMatrices();
gl.glTranslatef(rocket.position.x, rocket.position.y, 0);
gl.glRotatef(angle, 0, 0, 1);
rocketVertices.bind();
rocketVertices.draw(GL10.GL_TRIANGLES, 0, rocketRawIndices.length);
rocketVertices.unbind();
}
@Override
public void pause() {
}
@Override
public void resume() {
}
@Override
public void dispose() {
}
}
}
You can calculate the angle based on the instantaneous velocity vector:
// Get direction to point the ship
mangleInDeg = (float) (Math.atan2(mRelSpeed.y, mRelSpeed.x) * 180 / Math.PI);
mangleInDeg += 90.0; // offset the angle to coincide with angle of the base ship image
I'm trying to build an Augmented Reality application in Android using BoofCV (an OpenCV alternative for Java) and OpenGL ES 2.0. I have a marker from which I can get the image points, and a "world to cam" transformation using BoofCV's solvePnP function. I want to be able to draw the marker in 3D using OpenGL. Here's what I have so far:
On every frame of the camera, I call solvePnP
Se3_F64 worldToCam = MathUtils.worldToCam(__qrWorldPoints, imagePoints);
mGLAssetSurfaceView.setWorldToCam(worldToCam);
This is what I have defined as the world points
static float qrSideLength = 79.365f; // mm
private static final double[][] __qrWorldPoints = {
{qrSideLength * -0.5, qrSideLength * 0.5, 0},
{qrSideLength * -0.5, qrSideLength * -0.5, 0},
{qrSideLength * 0.5, qrSideLength * -0.5, 0},
{qrSideLength * 0.5, qrSideLength * 0.5, 0}
};
I'm feeding it a square that has origin at its center, with a sidelength in millimeters.
I can confirm that the rotation vector and translation vector I'm getting back from solvePnP are reasonable, so I don't know if there's a problem here.
I pass the result from solvePnP into my renderer
public void setWorldToCam(Se3_F64 worldToCam) {
DenseMatrix64F _R = worldToCam.R;
Vector3D_F64 _T = worldToCam.T;
// Concatenating the rotation and translation vector into
// a View matrix
double[][] __view = {
{_R.get(0, 0), _R.get(0, 1), _R.get(0, 2), _T.getX()},
{_R.get(1, 0), _R.get(1, 1), _R.get(1, 2), _T.getY()},
{_R.get(2, 0), _R.get(2, 1), _R.get(2, 2), _T.getZ()},
{0, 0, 0, 1}
};
DenseMatrix64F _view = new DenseMatrix64F(__view);
// Matrix to convert from BoofCV (OpenCV) coordinate system to OpenGL coordinate system
double[][] __cv_to_gl = {
{1, 0, 0, 0},
{0, -1, 0, 0},
{0, 0, -1, 0},
{0, 0, 0, 1}
};
DenseMatrix64F _cv_to_gl = new DenseMatrix64F(__cv_to_gl);
// Multiply the View Matrix by the BoofCV to OpenGL matrix to apply the coordinate transform
DenseMatrix64F view = new SimpleMatrix(__view).mult(new SimpleMatrix(__cv_to_gl)).getMatrix();
// BoofCV stores matrices in row major order, but OpenGL likes column major order
// I transpose the view matrix and get a flattened list of 16,
// Then I convert them to floating point
double[] viewd = new SimpleMatrix(view).transpose().getMatrix().getData();
for (int i = 0; i < mViewMatrix.length; i++) {
mViewMatrix[i] = (float) viewd[i];
}
}
I'm also using the camera intrinsics I get from camera calibration to feed into the projection matrix of OpenGL
@Override
public void onSurfaceChanged(GL10 gl, int width, int height) {
// this projection matrix is applied to object coordinates
// in the onDrawFrame() method
double fx = MathUtils.fx;
double fy = MathUtils.fy;
float fovy = (float) (2 * Math.atan(0.5 * height / fy) * 180 / Math.PI);
float aspect = (float) ((width * fy) / (height * fx));
// be careful with this, it could explain why you don't see certain objects
float near = 0.1f;
float far = 100.0f;
Matrix.perspectiveM(mProjectionMatrix, 0, fovy, aspect, near, far);
GLES20.glViewport(0, 0, width, height);
}
The square I'm drawing is the one defined in this Google example.
@Override
public void onDrawFrame(GL10 gl) {
// redraw background color
GLES20.glClear(GLES20.GL_COLOR_BUFFER_BIT);
// Set the camera position (View matrix)
// Matrix.setLookAtM(mViewMatrix, 0, 0, 0, -3, 0f, 0f, 0f, 0f, 1.0f, 0.0f);
// Combine the rotation matrix with the projection and camera view
// Note that the mMVPMatrix factor *must be the first* in order
// for matrix multiplication product to be correct
// Calculate the projection and view transformation
Matrix.multiplyMM(mMVPMatrix, 0, mProjectionMatrix, 0, mViewMatrix, 0);
// Draw shape
mSquare.draw(mMVPMatrix);
}
I believe the problem has to do with the fact that this definition of a square in Google's example code doesn't take the real-world side length into account. I understand that the OpenGL coordinate system has the corners (-1, 1), (-1, -1), (1, -1), (1, 1), which doesn't correspond to the millimeter object points I have defined for use in BoofCV, even though they are in the right order.
static float squareCoords[] = {
-0.5f, 0.5f, 0.0f, // top left
-0.5f, -0.5f, 0.0f, // bottom left
0.5f, -0.5f, 0.0f, // bottom right
0.5f, 0.5f, 0.0f }; // top right
I am not getting the expected coordinate values from the gluUnProject function.
I will put some code first. Here is the function which gets called on a touch event:
public float[] getWorldSpaceFromMouseCoordinates(float mouseX, float mouseY)
{
float[] finalCoord = { 0.0f, 0.0f, 0.0f, 0.0f };
// mouse Y needs to be inverted
mouseY = (float)_viewport[3] - mouseY;
float[] mouseZ = new float[1];
FloatBuffer fb = FloatBuffer.allocate(1);
GLES20.glReadPixels((int)mouseX, (int)mouseY, 1, 1, GLES20.GL_DEPTH_COMPONENT, GLES20.GL_FLOAT, fb);
int result = GLU.gluUnProject(mouseX, mouseY, fb.get(0), mViewMatrix, 0, mProjectionMatrix, 0, _viewport, 0, finalCoord, 0);
float[] temp2 = new float[4];
Matrix.multiplyMV(temp2, 0, mViewMatrix, 0, finalCoord, 0);
if(result == GL10.GL_TRUE){
finalCoord[0] = temp2[0] / temp2[3];
finalCoord[1] = temp2[1] / temp2[3];
finalCoord[2] = temp2[2] / temp2[3];
}
Log.d("Coordinate:", "" + temp2[0] + "," + temp2[1] + "," + temp2[2]);
return finalCoord;
}
Here is how the matrices are set up:
@Override
public void onSurfaceChanged(GL10 unused, int width, int height)
{
// Adjust the viewport based on geometry changes,
// such as screen rotation
GLES20.glViewport(0, 0, width, height);
_viewport = new int[] { 0, 0, width, height };
float ratio = (float) width / height;
// this projection matrix is applied to object coordinates
// in the onDrawFrame() method
Matrix.frustumM(mProjectionMatrix, 0, -ratio, ratio, -1, 1, 2, 7);
}
Setting up the model-view matrix (note that the model matrix is just the identity):
// Set the camera position (View matrix)
Matrix.setLookAtM(mViewMatrix, 0, 0, 0, -3, 0f, 0f, 0f, 0f, 1.0f, 0.0f);
As per my understanding, this function should give me world coordinates relative to the origin, but that is not happening. I am creating a square with the following coordinates:
_vertices = new float [] { -0.5f, 0.5f, 0.0f, // top left
-0.5f, -0.5f, 0.0f, // bottom left
0.5f, -0.5f, 0.0f, // bottom right
0.5f, 0.5f, 0.0f }; // top right
However, I am getting X values ranging over (0.3, -0.3), Y values over (0.5, -0.5), and Z always -1.0 across the whole viewport; when touching the corners of the square, the X values are in (0.2, -0.2) and the Y values in (0.15, -0.15).
Let me know if any more code is required.
So I found out what the problem was: glReadPixels() with GL_DEPTH_COMPONENT is not supported in OpenGL ES 2.0. That is why I was always getting a wrong depth value, and hence wrong coordinates. I then had two choices: use an FBO and store the depth using a shader, or do ray picking (since I had only one object in the scene, I was hoping gluUnProject() would do). I chose the latter. Here is my code; I hope it helps somebody (it's not generic, and the geometry is hard-coded):
public float[] getWorldSpaceFromMouseCoordinates(float mouseX, float mouseY)
{
float[] farCoord = { 0.0f, 0.0f, 0.0f, 0.0f };
float[] nearCoord = { 0.0f, 0.0f, 0.0f, 0.0f };
// mouse Y needs to be inverted
//mouseY = (float) _viewport[3] - mouseY;
// calling glReadPixels() with GL_DEPTH_COMPONENT is not supported in
// GLES so now i will try to implement ray picking
int result = GLU.gluUnProject(mouseX, mouseY, 1.0f, mViewMatrix, 0, mProjectionMatrix, 0, _viewport, 0,
farCoord, 0);
if (result == GL10.GL_TRUE)
{
farCoord[0] = farCoord[0] / farCoord[3];
farCoord[1] = farCoord[1] / farCoord[3];
farCoord[2] = farCoord[2] / farCoord[3];
}
result = GLU.gluUnProject(mouseX, mouseY, 0.0f, mViewMatrix, 0, mProjectionMatrix, 0, _viewport, 0, nearCoord,
0);
if (result == GL10.GL_TRUE)
{
nearCoord[0] = nearCoord[0] / nearCoord[3];
nearCoord[1] = nearCoord[1] / nearCoord[3];
nearCoord[2] = nearCoord[2] / nearCoord[3];
}
float [] dirVector = Vector.normalize(Vector.minus(farCoord, nearCoord));
float [] rayOrigin = {0.0f, 0.0f, 3.0f};
Log.d("Far Coordinate:", "" + farCoord[0] + "," + farCoord[1] + "," + farCoord[2]);
Log.d("Near Coordinate:", "" + nearCoord[0] + "," + nearCoord[1] + "," + nearCoord[2]);
float [] vertices = { -0.5f, 0.5f, 0.0f, // top left
-0.5f, -0.5f, 0.0f, // bottom left
0.5f, -0.5f, 0.0f, // bottom right
0.5f, 0.5f, 0.0f }; // top right
// calculate normal for square
float[] v1 = { vertices[3] - vertices[0], vertices[4] - vertices[1], vertices[5] - vertices[2]};
float[] v2 = { vertices[9] - vertices[0], vertices[10] - vertices[1], vertices[11] - vertices[2]};
float[] n = Vector.normalize(Vector.crossProduct(v1, v2));
// now calculate intersection point as per following link
// http://antongerdelan.net/opengl/raycasting.html
// our plane passes through the origin, so 't' will be
float t = -(Vector.dot(rayOrigin, n) / Vector.dot(dirVector, n));
// now substitute above t in ray equation gives us intersection point
float [] intersectionPoint = Vector.addition(rayOrigin, Vector.scalarProduct(t, dirVector));
Log.d("Ipoint:", "" + intersectionPoint[0] + "," + intersectionPoint[1] + "," + intersectionPoint[2]);
return intersectionPoint;
}
In OpenGL there is a technique called picking, which is used to determine which object on the screen was selected. Can someone explain to me the difference between using picking and putting a touch-based listener in each and every instance of an object, e.g. a Cube class?
Hypothetically, what I want to do is display multiple cubes at random positions on the screen. I figured that if I give the Cube class a listener, then upon touching a cube the listener should fire accordingly for each cube pressed.
This is the code I would add the listener to.
Would this be possible, or is picking necessary?
public class Cube extends Shapes {
private FloatBuffer mVertexBuffer;
private FloatBuffer mColorBuffer;
private ByteBuffer mIndexBuffer;
private Triangle[] normTris = new Triangle[12];
private Triangle[] transTris = new Triangle[12];
// every 3 entries represent the position of one vertex
private float[] vertices =
{
-1.0f, -1.0f, -1.0f,
1.0f, -1.0f, -1.0f,
1.0f, 1.0f, -1.0f,
-1.0f, 1.0f, -1.0f,
-1.0f, -1.0f, 1.0f,
1.0f, -1.0f, 1.0f,
1.0f, 1.0f, 1.0f,
-1.0f, 1.0f, 1.0f
};
// every 4 entries represent the color (r,g,b,a) of the corresponding vertex in vertices
private float[] colors =
{
1.0f, 0.0f, 0.0f, 1.0f,
0.0f, 1.0f, 0.0f, 1.0f,
0.0f, 0.0f, 1.0f, 1.0f,
1.0f, 0.0f, 0.0f, 1.0f,
0.0f, 1.0f, 0.0f, 1.0f,
0.0f, 0.0f, 1.0f, 1.0f,
0.0f, 0.0f, 0.0f, 1.0f,
1.0f, 1.0f, 1.0f, 1.0f
};
// every 3 entries make up a triangle, every 6 entries make up a side
private byte[] indices =
{
0, 4, 5, 0, 5, 1,
1, 5, 6, 1, 6, 2,
2, 6, 7, 2, 7, 3,
3, 7, 4, 3, 4, 0,
4, 7, 6, 4, 6, 5,
3, 0, 1, 3, 1, 2
};
private float[] createVertex(int Index)
{
float[] vertex = new float[3];
int properIndex = Index * 3;
vertex[0] = vertices[properIndex];
vertex[1] = vertices[properIndex + 1];
vertex[2] = vertices[properIndex + 2];
return vertex;
}
public Triangle getTriangle(int index){
Triangle tri = null;
//if(index >= 0 && index < indices.length){
float[] v1 = createVertex(indices[(index * 3) + 0]);
float[] v2 = createVertex(indices[(index * 3) + 1]);
float[] v3 = createVertex(indices[(index * 3) + 2]);
tri = new Triangle(v1, v2, v3);
// }
return tri;
}
public int getNumberOfTriangles(){
return indices.length / 3;
}
public boolean checkCollision(Ray r, OpenGLRenderer renderer){
boolean isCollide = false;
int i = 0;
while(i < getNumberOfTriangles() && !isCollide){
float[] I = new float[3];
if(Shapes.intersectRayAndTriangle(r, transTris[i], I) > 0){
isCollide = true;
}
i++;
}
return isCollide;
}
public void translate(float[] trans){
for(int i = 0; i < getNumberOfTriangles(); i++){
transTris[i].setV1(Vector.addition(transTris[i].getV1(), trans));
transTris[i].setV2(Vector.addition(transTris[i].getV2(), trans));
transTris[i].setV3(Vector.addition(transTris[i].getV3(), trans));
}
}
public void scale(float[] scale){
for(int i = 0; i < getNumberOfTriangles(); i++){
transTris[i].setV1(Vector.scalePoint(transTris[i].getV1(), scale));
transTris[i].setV2(Vector.scalePoint(transTris[i].getV2(), scale));
transTris[i].setV3(Vector.scalePoint(transTris[i].getV3(), scale));
}
}
public void resetTransfomations(){
for(int i = 0; i < getNumberOfTriangles(); i++){
transTris[i].setV1(normTris[i].getV1().clone());
transTris[i].setV2(normTris[i].getV2().clone());
transTris[i].setV3(normTris[i].getV3().clone());
}
}
public Cube()
{
Buffer[] buffers = super.getBuffers(vertices, colors, indices);
mVertexBuffer = (FloatBuffer) buffers[0];
mColorBuffer = (FloatBuffer) buffers[1];
mIndexBuffer = (ByteBuffer) buffers[2];
}
public Cube(float[] vertices, float[] colors, byte[] indices)
{
if(vertices != null) {
this.vertices = vertices;
}
if(colors != null) {
this.colors = colors;
}
if(indices != null) {
this.indices = indices;
}
Buffer[] buffers = getBuffers(this.vertices, this.colors, this.indices);
mVertexBuffer = (FloatBuffer) buffers[0];
mColorBuffer = (FloatBuffer) buffers[1];
mIndexBuffer = (ByteBuffer) buffers[2];
for(int i = 0; i < getNumberOfTriangles(); i++){
normTris[i] = getTriangle(i);
transTris[i] = getTriangle(i);
}
}
public void draw(GL10 gl)
{
gl.glFrontFace(GL10.GL_CW);
gl.glVertexPointer(3, GL10.GL_FLOAT, 0, mVertexBuffer);
gl.glColorPointer(4, GL10.GL_FLOAT, 0, mColorBuffer);
gl.glEnableClientState(GL10.GL_VERTEX_ARRAY);
gl.glEnableClientState(GL10.GL_COLOR_ARRAY);
// draw all 12 triangles (36 indices)
gl.glDrawElements(GL10.GL_TRIANGLES, 36, GL10.GL_UNSIGNED_BYTE, mIndexBuffer);
gl.glDisableClientState(GL10.GL_VERTEX_ARRAY);
gl.glDisableClientState(GL10.GL_COLOR_ARRAY);
}
}
Using a listener does not work in this case.
Take the onTouchListener, for example. It is basically an interface providing just a single method, onTouch(). When Android processes touch input and the target view is touched, it knows your listener can be informed about the touch by calling its onTouch() method.
When using OpenGL you have the problem that no one handles the touch input inside your OpenGL surface; you have to do it yourself. So there is no one who will call your listener.
Why? What you render inside your GL surface is up to you. Only you know what the actual geometry is, and therefore you are the only one who can decide which object was selected.
You basically have two options to do the selection:
Ray shooting - shoot a ray from the eye of the viewer through the touched point into the scene and check which object was hit.
Color picking - assign an id to each object, encode the id as a color, and render the scene with these colors. Finally, check the color at the touch position and decode it back into the id of the object.
For most applications I would prefer the second solution; a minimal sketch follows below.
I have a simple 2D engine that renders 2D textured quads. Right now I can scale the quad or rotate it, but when I try to translate it I get a strange distortion (the quad is squashed into the left half of the screen with an infinite-perspective effect). Here's the code:
private final float quad_vertex[] = {
-0.5f, 0.5f, 0.0f, // top left
-0.5f, -0.5f, 0.0f, // bottom left
0.5f, -0.5f, 0.0f, // bottom right
0.5f, 0.5f, 0.0f // top right
};
final float left = -width/2.0f;
final float right = width/2.0f;
final float bottom = -height/2.0f;
final float top = height/2.0f;
final float near = 0.1f;
final float far = 200.0f;
Matrix.orthoM(projection_matrix, 0, left, right, top, bottom, near, far);
Matrix.setLookAtM(view_matrix, 0, 0, 0, 1.0f, 0.0f, 0f, 0f, 0f, 1.0f, 0.0f);
...
Matrix.setIdentityM(model_matrix, 0);
Matrix.scaleM(model_matrix, 0, scale_width, scale_height, 1.0f);
Matrix.translateM(model_matrix, 0, x, 0, 0);
//Matrix.rotateM(model_matrix, 0, x, 1, 0, 0);
x = x + 1.0f;
Matrix.multiplyMM(viewprojection_matrix, 0, projection_matrix, 0, view_matrix, 0);
Matrix.multiplyMM(modelviewprojection_matrix, 0, viewprojection_matrix, 0, model_matrix, 0);
So, any idea what the problem is? Thanks in advance :)
Sounds like a problem similar to one I ran into. I was using the tutorial code at http://developer.android.com/training/graphics/opengl/index.html. Changing a line in the shader code from gl_Position = vPosition * uMVPMatrix; to gl_Position = uMVPMatrix * vPosition; fixed the problem.
Matrix multiplication is a non-commutative operation - the order of the operands is important!
I am starting work on an Android game and am learning OpenGL ES. I have used OpenGL a bit, though it was quite some time ago by now. I have mostly used DirectX with C++ lately, so I understand graphics API concepts fairly well.
When playing with the API on my own I was unable to get the results I anticipated, so I turned to a tutorial I found online that seemed fairly comprehensive. Though I understood it easily and followed it fairly strictly, I still can't get the screen to display a simple square (currently using nothing other than a vertex array with no colour).
Below is the code for my renderer class, which I have been staring at for some time; I am starting to go a little crazy with my inability to find my mistake. I have done far more complicated things with graphics APIs (in both DirectX and OpenGL), so I find this kind of embarrassing and just need somebody to point out my probably glaringly obvious oversight.
Thank you in advance!
public class GameRenderer implements Renderer {
private float red, green, blue = 0.0f;
private final float vertices[] = {
0.5f, -0.5f, 0.0f, // 0, Bottom Right
0.5f, 0.5f, 0.0f, // 1, Top Right
-0.5f, 0.5f, 0.0f, // 2, Top Left
-0.5f, -0.5f, 0.0f, // 3, Bottom Left
};
private final short indices[] = {
0, 1, 2, 0, 2, 3
};
private final float colours[] = {
1.0f, 1.0f, 1.0f,
0.5f, 0.5f, 0.5f,
0.5f, 0.5f, 0.5f,
0.0f, 0.0f, 0.0f
};
FloatBuffer vFBuff;
FloatBuffer cFBuff;
ShortBuffer iSBuff;
public GameRenderer(){
super();
}
@Override
public void onDrawFrame(GL10 gl) {
gl.glClearColor(red, green, blue, 0.5f);
gl.glClear(GL10.GL_COLOR_BUFFER_BIT | GL10.GL_DEPTH_BUFFER_BIT);
gl.glLoadIdentity();
gl.glScalef(2.0f, 2.0f, 0.0f);
gl.glTranslatef(0.0f, 0.0f, -4f);
gl.glDrawElements(GL10.GL_TRIANGLES, indices.length, GL10.GL_UNSIGNED_SHORT, iSBuff);
}
@Override
public void onSurfaceChanged(GL10 gl, int width, int height) {
// TODO Auto-generated method stub
// set viewport
gl.glViewport(0, 0, width, height);
gl.glMatrixMode(GL10.GL_PROJECTION);
gl.glLoadIdentity();
GLU.gluPerspective(gl, 45.0f, (float)width / (float)height, 0.0f, 100.0f);
gl.glMatrixMode(GL10.GL_MODELVIEW);
}
@Override
public void onSurfaceCreated(GL10 gl, EGLConfig config) {
gl.glShadeModel(GL10.GL_SMOOTH);
gl.glClearDepthf(1.0f);
gl.glEnable(GL10.GL_DEPTH_TEST);
gl.glDepthFunc(GL10.GL_LEQUAL);
gl.glHint(GL10.GL_PERSPECTIVE_CORRECTION_HINT, GL10.GL_NICEST);
ByteBuffer vBBuff = ByteBuffer.allocateDirect(vertices.length * 4);
ByteBuffer cBBuff = ByteBuffer.allocateDirect(colours.length * 4);
ByteBuffer iBBuff = ByteBuffer.allocateDirect(indices.length * 2);
vFBuff = vBBuff.asFloatBuffer();
vFBuff.put(vertices);
vFBuff.position(0);
cFBuff = cBBuff.asFloatBuffer();
cFBuff.put(colours);
cFBuff.position(0);
iSBuff = iBBuff.asShortBuffer();
iSBuff.put(indices);
iSBuff.position(0);
gl.glEnableClientState(GL10.GL_VERTEX_ARRAY);
gl.glVertexPointer(3, GL10.GL_FLOAT, 0, vFBuff);
gl.glFrontFace(GL10.GL_CCW);
gl.glEnable(GL10.GL_CULL_FACE);
gl.glCullFace(GL10.GL_BACK);
}
public void setColour(float r, float g, float b) {
red = r;
blue = b;
green = g;
}
GLU.gluPerspective(gl, 45.0f, (float)width / (float)height, 0.0f, 100.0f);
Don't set your zNear to zero:
If r = zFar / zNear, roughly log2(r) bits of depth buffer precision are lost. Because r approaches infinity as zNear approaches 0, zNear must never be set to 0.