I have a little labyrinth game. It looks like this: http://i.imgur.com/VEy54Lc.png
However, I cannot figure out how to keep the camera from moving through the boxes.
Here is the code:
public class EulerCameraTest extends GLGame {
@Override
public Screen getStartScreen() {
return new EulerCameraScreen(this);
}
class EulerCameraScreen extends GLScreen {
Texture crateTexture;
Vertices3 cube;
PointLight light;
AmbientLight aLight;
EulerCamera camera;
Texture buttonTexture;
SpriteBatcher batcher;
Camera2D guiCamera;
TextureRegion buttonRegion;
Vector2 touchPos;
float lastX = -1;
float lastY = -1;
public EulerCameraScreen(Game game) {
super(game);
crateTexture = new Texture(glGame, "crate.png", true);
cube = createCube();
light = new PointLight();
light.setPosition(3, 3, -3);
aLight = new AmbientLight();
aLight.setColor(128, 128, 128, 255);
camera = new EulerCamera(67, glGraphics.getWidth() / (float)glGraphics.getHeight(), 0.1f, 100);
camera.getPosition().set(0, 0, 3);
buttonTexture = new Texture(glGame, "button.png");
batcher = new SpriteBatcher(glGraphics, 1);
guiCamera = new Camera2D(glGraphics, 480, 320);
buttonRegion = new TextureRegion(buttonTexture, 0, 0, 64, 64);
touchPos = new Vector2();
}
private Vertices3 createCube() {
float[] vertices = { -0.5f, -0.5f, 0.5f, 0, 1, 0, 0, 1,
0.5f, -0.5f, 0.5f, 1, 1, 0, 0, 1,
0.5f, 0.5f, 0.5f, 1, 0, 0, 0, 1,
-0.5f, 0.5f, 0.5f, 0, 0, 0, 0, 1,
0.5f, -0.5f, 0.5f, 0, 1, 1, 0, 0,
0.5f, -0.5f, -0.5f, 1, 1, 1, 0, 0,
0.5f, 0.5f, -0.5f, 1, 0, 1, 0, 0,
0.5f, 0.5f, 0.5f, 0, 0, 1, 0, 0,
0.5f, -0.5f, -0.5f, 0, 1, 0, 0, -1,
-0.5f, -0.5f, -0.5f, 1, 1, 0, 0, -1,
-0.5f, 0.5f, -0.5f, 1, 0, 0, 0, -1,
0.5f, 0.5f, -0.5f, 0, 0, 0, 0, -1,
-0.5f, -0.5f, -0.5f, 0, 1, -1, 0, 0,
-0.5f, -0.5f, 0.5f, 1, 1, -1, 0, 0,
-0.5f, 0.5f, 0.5f, 1, 0, -1, 0, 0,
-0.5f, 0.5f, -0.5f, 0, 0, -1, 0, 0,
-0.5f, 0.5f, 0.5f, 0, 1, 0, 1, 0,
0.5f, 0.5f, 0.5f, 1, 1, 0, 1, 0,
0.5f, 0.5f, -0.5f, 1, 0, 0, 1, 0,
-0.5f, 0.5f, -0.5f, 0, 0, 0, 1, 0,
-0.5f, -0.5f, -0.5f, 0, 1, 0, -1, 0,
0.5f, -0.5f, -0.5f, 1, 1, 0, -1, 0,
0.5f, -0.5f, 0.5f, 1, 0, 0, -1, 0,
-0.5f, -0.5f, 0.5f, 0, 0, 0, -1, 0 };
short[] indices = { 0, 1, 2, 2, 3, 0,
4, 5, 6, 6, 7, 4,
8, 9, 10, 10, 11, 8,
12, 13, 14, 14, 15, 12,
16, 17, 18, 18, 19, 16,
20, 21, 22, 22, 23, 20,
24, 25, 26, 26, 27, 24 };
Vertices3 cube = new Vertices3(glGraphics, vertices.length / 8, indices.length, false, true, true);
cube.setVertices(vertices, 0, vertices.length);
cube.setIndices(indices, 0, indices.length);
return cube;
}
@Override
public void resume() {
crateTexture.reload();
}
@Override
public void update(float deltaTime) {
game.getInput().getTouchEvents();
float x = game.getInput().getTouchX(0);
float y = game.getInput().getTouchY(0);
guiCamera.touchToWorld(touchPos.set(x, y));
if(game.getInput().isTouchDown(0)) {
if(touchPos.x < 64 && touchPos.y < 64) {
Vector3 direction = camera.getDirection();
direction.y = 0;
camera.getPosition().add(direction.mul(deltaTime));
} else {
if(lastX == -1) {
lastX = x;
lastY = y;
} else {
camera.rotate((x - lastX) / 10, (y - lastY) / 10);
lastX = x;
lastY = y;
}
}
} else {
lastX = -1;
lastY = -1;
}
}
@Override
public void present(float deltaTime) {
GL10 gl = glGraphics.getGL();
gl.glClear(GL10.GL_COLOR_BUFFER_BIT | GL10.GL_DEPTH_BUFFER_BIT);
gl.glViewport(0, 0, glGraphics.getWidth(), glGraphics.getHeight());
camera.setMatrices(gl);
gl.glEnable(GL10.GL_DEPTH_TEST);
gl.glEnable(GL10.GL_TEXTURE_2D);
gl.glEnable(GL10.GL_LIGHTING);
crateTexture.bind();
cube.bind();
light.enable(gl, GL10.GL_LIGHT0);
aLight.enable(gl);
int[] matrix = new int[]
{0,0,0,1,0,
0,1,1,1,0,
0,1,0,0,0
};
int step = 0;
for(int z = 0; z >= -4/2; z-=2/2)
{
for(int x = -4/2; x <=4/2; x+=2/2 )
{
if(matrix[step++] == 1)
continue;
gl.glPushMatrix();
gl.glTranslatef(x, 0, z);
cube.draw(GL10.GL_TRIANGLES, 0, 6 * 2 * 3);
gl.glPopMatrix();
}
}
cube.unbind();
gl.glDisable(GL10.GL_LIGHTING);
gl.glDisable(GL10.GL_DEPTH_TEST);
gl.glEnable(GL10.GL_BLEND);
gl.glBlendFunc(GL10.GL_SRC_ALPHA, GL10.GL_ONE_MINUS_SRC_ALPHA);
guiCamera.setViewportAndMatrices();
batcher.beginBatch(buttonTexture);
batcher.drawSprite(32, 32, 64, 64, buttonRegion);
batcher.endBatch();
gl.glDisable(GL10.GL_BLEND);
gl.glDisable(GL10.GL_TEXTURE_2D);
}
}
}
I believe that the present() method is where I should be looking. Does anyone have an idea how to keep the camera from walking through the boxes?
I believe update(float deltaTime) is where you should be looking. If the camera should not go through the boxes, then you should simply not move it when it hits a box. You need a system that controls your movement around the scene, and you will probably have to convert some values into the scene's coordinate system. More generally, you should separate the physics from the graphics.
Consider an object that represents your scene and holds data such as:
int width;             // the width of the scene, in grid cells
int height;            // the height of the scene, in grid cells
boolean[][] boxes;     // the grid of boxes; boxes[x][y] == true means a box is drawn at that cell
float playerPositionX;
float playerPositionY;
Now your draw method would look something like:
for(int x = 0; x < scene.width; x++)
{
    for(int y = 0; y < scene.height; y++)
    {
        if(!scene.boxes[x][y])
            continue;
        gl.glPushMatrix();
        // sceneScale is just some constant to make things as large as you want
        // (most likely the box size in your case)
        gl.glTranslatef(x * sceneScale, 0, y * sceneScale);
        cube.draw(GL10.GL_TRIANGLES, 0, 6 * 2 * 3);
        gl.glPopMatrix();
    }
}
Then, in your update method, you would simply call something like:
scene.movePlayer(x, y);
So the scene has a method
public void movePlayer(float x, float y) {
    // first get the current position indices:
    int previousX = (int)playerPositionX;
    int previousY = (int)playerPositionY;
    // and the target position indices:
    int targetX = (int)(playerPositionX + x);
    int targetY = (int)(playerPositionY + y);
    // now check for collisions
    if(targetX < 0 || targetX >= width || boxes[targetX][previousY]) { // the previousY is NOT a bug
        // we hit a wall in the X direction, so do not move along X
        x = 0.0f;
    }
    if(targetY < 0 || targetY >= height || boxes[previousX][targetY]) { // the previousX is NOT a bug
        // we hit a wall in the Y direction, so do not move along Y
        y = 0.0f;
    }
    playerPositionX += x;
    playerPositionY += y;
}
Now all you need to do in update() is set the camera position based on the scene's player position (again applying whatever factors you need, such as the scene scale).
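For example, here is a rough sketch of how update() could drive the scene instead of moving the camera directly. The scene field, the sceneScale constant, and the mapping of the grid's Y axis onto the world Z axis are illustrative assumptions, not code from the question:
@Override
public void update(float deltaTime) {
    game.getInput().getTouchEvents();
    float x = game.getInput().getTouchX(0);
    float y = game.getInput().getTouchY(0);
    guiCamera.touchToWorld(touchPos.set(x, y));
    if(game.getInput().isTouchDown(0) && touchPos.x < 64 && touchPos.y < 64) {
        Vector3 direction = camera.getDirection();
        direction.y = 0;
        // convert the camera-space step into grid units and let the scene resolve collisions
        scene.movePlayer(direction.x * deltaTime / sceneScale,
                         direction.z * deltaTime / sceneScale);
        // the camera simply follows the (collision-checked) player position
        camera.getPosition().set(scene.playerPositionX * sceneScale, 0,
                                 scene.playerPositionY * sceneScale);
    }
    // (the rotation handling from the original update() is omitted here for brevity)
}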
Related
I need to plot a few coordinates on screen. The coordinates without a Z value get displayed, but the coordinates with non-zero Z values are not. I tried normalizing the coordinates before plotting them, which worked: when normalized, all the coordinates get plotted. But with unnormalized coordinates, the vertices with non-zero Z are hidden.
OpenGL Version : ES 2.0
Coordinates:
float squareCoords[] = {
202.00002f, 244.00002f, 0.0f,
440.00003f, 625.00006f, 0.0f,
440.00003f, 625.00006f, 0.0f,
690.00006f, 186.0f,0.0f,
202.00002f, 244.00002f, 50.0f,
440.00003f, 625.00006f, 50.0f,
440.00003f, 625.00006f, 50.0f,
690.00006f, 186.0f, 50.0f
};
indices:
short[] drawOrder = {
0,1,2,3,
0,4,
1,5,
2,6,
4,5,6,7
};
Draw Code:
GLES20.glDrawElements(
GLES20.GL_LINES, drawOrder.length,
GLES20.GL_UNSIGNED_SHORT, drawListBuffer);
On Surface Changed code:
public void onSurfaceChanged(GL10 unused, int width, int height) {
mWidth = width;
mHeight = height;
GLES20.glViewport(0, 0, mWidth, mHeight);
float ratio = (float) mWidth / mHeight;
// this projection matrix is applied to object coordinates
// in the onDrawFrame() method
Matrix.orthoM(mProjMatrix, 0, 0f, width, 0.0f, height, 0, 50);
}
OnDraw:
public void onDrawFrame(GL10 unused) {
Square square = new Square();
GLES20.glClear(GLES20.GL_COLOR_BUFFER_BIT|GLES20.GL_DEPTH_BUFFER_BIT);
if (mFirstDraw)
mFirstDraw = false;
long time = SystemClock.uptimeMillis() % 4000L;
float angle = 0.090f * ((int) time);
// float angle = 90;
// Matrix.setRotateM(mRotationMatrix, 0, angle, 0, 0, -1.0f);
// angle += 0.7f;
if (angle > 360f)
angle = 0f;
Matrix.setLookAtM(mVMatrix, 0, 0f, 0f, 4f, 0f, 0f, 0f, 0f, 1f, 0f);
// projection x view = modelView
Matrix.multiplyMM(mMVPMatrix, 0, mProjMatrix, 0, mVMatrix, 0);
// Creating rotation matrix
Matrix.setRotateM(rotationMatrix, 0, angle, -1f, 0f, 0f);
// rotation x camera = modelView
float[] duplicateMatrix = Arrays.copyOf(mMVPMatrix, 16);
Matrix.multiplyMM(mMVPMatrix, 0, duplicateMatrix, 0, rotationMatrix, 0);
square.draw(mMVPMatrix);
}
I'm rotating the diagram to figure out whether the vertices with non-zero Z are drawn.
I personally think this line is the culprit; here I've given a far value of 50 and a near value of 0. I wonder what these values should be:
Matrix.orthoM(mProjMatrix, 0, 0f, width, 0.0f, height, 0, 50);
The problem here was that the value of far wasn't high enough. I set far to 500:
Matrix.orthoM(mProjectionMatrix, 0, 0f, width, 0.0f, height,0, 500);
and changed the coordinates to:
float squareCoords[] = {
202.00002f, 244.00002f, 0.0f,
440.00003f, 625.00006f, 0.0f,
440.00003f, 625.00006f, 0.0f,
690.00006f, 186.0f,0.0f,
202.00002f, 244.00002f, 200.0f,
440.00003f, 625.00006f, 200.0f,
440.00003f, 625.00006f, 200.0f,
690.00006f, 186.0f, 200.0f
};
It's working now.
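In other words, the far plane has to be large enough to contain the deepest Z the rotated geometry can reach in front of the camera. A rough sketch of deriving it from the geometry instead of hard-coding it (the extent value below is an assumption based on the coordinates above):
float eyeZ = 4f;                  // matches the setLookAtM() call above
float maxModelExtent = 700f;      // roughly the largest coordinate used in squareCoords
// keep near at 0 as before, but derive far from the scene's extent plus the camera offset
Matrix.orthoM(mProjMatrix, 0, 0f, width, 0.0f, height, 0, eyeZ + maxModelExtent);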
I have tried a lot of color matrices to make an ImageView look like white text on a black background, but I am unable to find a suitable solution.
I tried this color matrix, but it gives a colored negative effect:
float[] mx = new float[]{
-1.0f, 0, 0, 0, 255, //red
0, -1.0f, 0, 0, 255, //green
0, 0, -1.0f, 0, 255, //blue
0, 0, 0, 1.0f, 0 //alpha
};
And I tried this one, which gives a grayscale effect:
mx = new float[]{
0.5f, 0.5f, 0.5f, 0, 0,
0.5f, 0.5f, 0.5f, 0, 0,
0.5f, 0.5f, 0.5f, 0, 0,
0, 0, 0, 1, 0,
-1, -1, -1, 0, 1
};
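One approach that might give the desired effect (a hedged sketch, not from the original post): first drop the colour with setSaturation, then invert, so dark text on a light background comes out as white text on black. Here imageView is assumed to be the target view:
ColorMatrix grayscale = new ColorMatrix();
grayscale.setSaturation(0f);                      // remove all colour information
ColorMatrix invert = new ColorMatrix(new float[] {
        -1,  0,  0, 0, 255,
         0, -1,  0, 0, 255,
         0,  0, -1, 0, 255,
         0,  0,  0, 1,   0 });
grayscale.postConcat(invert);                     // apply the inversion after the grayscale
imageView.setColorFilter(new ColorMatrixColorFilter(grayscale));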
I am not getting the expected coordinate values from the gluUnProject function.
I will put some code first. Here is the function that gets called on a touch event:
public float[] getWorldSpaceFromMouseCoordinates(float mouseX, float mouseY)
{
float[] finalCoord = { 0.0f, 0.0f, 0.0f, 0.0f };
// mouse Y needs to be inverted
mouseY = (float)_viewport[3] - mouseY;
float[] mouseZ = new float[1];
FloatBuffer fb = FloatBuffer.allocate(1);
GLES20.glReadPixels((int)mouseX, (int)mouseY, 1, 1, GLES20.GL_DEPTH_COMPONENT, GLES20.GL_FLOAT, fb);
int result = GLU.gluUnProject(mouseX, mouseY, fb.get(0), mViewMatrix, 0, mProjectionMatrix, 0, _viewport, 0, finalCoord, 0);
float[] temp2 = new float[4];
Matrix.multiplyMV(temp2, 0, mViewMatrix, 0, finalCoord, 0);
if(result == GL10.GL_TRUE){
finalCoord[0] = temp2[0] / temp2[3];
finalCoord[1] = temp2[1] / temp2[3];
finalCoord[2] = temp2[2] / temp2[3];
}
Log.d("Coordinate:", "" + temp2[0] + "," + temp2[1] + "," + temp2[2]);
return finalCoord;
}
Here is how the matrices are set up:
@Override
public void onSurfaceChanged(GL10 unused, int width, int height)
{
// Adjust the viewport based on geometry changes,
// such as screen rotation
GLES20.glViewport(0, 0, width, height);
_viewport = new int[] { 0, 0, width, height };
float ratio = (float) width / height;
// this projection matrix is applied to object coordinates
// in the onDrawFrame() method
Matrix.frustumM(mProjectionMatrix, 0, -ratio, ratio, -1, 1, 2, 7);
}
Here is how the view matrix is set up (note that the model matrix is just the identity):
// Set the camera position (View matrix)
Matrix.setLookAtM(mViewMatrix, 0, 0, 0, -3, 0f, 0f, 0f, 0f, 1.0f, 0.0f);
As per my understanding, my expectation from this function is that it will give me world coordinates w.r.t. the origin, which is not happening. I am creating a square with the following coordinates:
_vertices = new float [] { -0.5f, 0.5f, 0.0f, // top left
-0.5f, -0.5f, 0.0f, // bottom left
0.5f, -0.5f, 0.0f, // bottom right
0.5f, 0.5f, 0.0f }; // top right
However, I am getting X values ranging over (-0.3, 0.3), Y values ranging over (-0.5, 0.5), and Z always -1.0 for the whole viewport; touching the corners of the square gives X values in (-0.2, 0.2) and Y values in (-0.15, 0.15).
Let me know if any more code is required.
So I found out what the problem was: glReadPixels() with GL_DEPTH_COMPONENT is not supported in OpenGL ES 2.0. That is why I was always getting a wrong depth value and hence wrong coordinates. I then had two choices: use an FBO and store the depth with a shader, or do ray picking (since I had only one object in the scene, I was hoping gluUnProject() would do). I chose the latter; here is my code, I hope it helps somebody (it's not generic, and the geometry is hard-coded):
public float[] getWorldSpaceFromMouseCoordinates(float mouseX, float mouseY)
{
float[] farCoord = { 0.0f, 0.0f, 0.0f, 0.0f };
float[] nearCoord = { 0.0f, 0.0f, 0.0f, 0.0f };
// mouse Y needs to be inverted
//mouseY = (float) _viewport[3] - mouseY;
// calling glReadPixels() with GL_DEPTH_COMPONENT is not supported in
// GLES so now i will try to implement ray picking
int result = GLU.gluUnProject(mouseX, mouseY, 1.0f, mViewMatrix, 0, mProjectionMatrix, 0, _viewport, 0,
farCoord, 0);
if (result == GL10.GL_TRUE)
{
farCoord[0] = farCoord[0] / farCoord[3];
farCoord[1] = farCoord[1] / farCoord[3];
farCoord[2] = farCoord[2] / farCoord[3];
}
result = GLU.gluUnProject(mouseX, mouseY, 0.0f, mViewMatrix, 0, mProjectionMatrix, 0, _viewport, 0, nearCoord,
0);
if (result == GL10.GL_TRUE)
{
nearCoord[0] = nearCoord[0] / nearCoord[3];
nearCoord[1] = nearCoord[1] / nearCoord[3];
nearCoord[2] = nearCoord[2] / nearCoord[3];
}
float [] dirVector = Vector.normalize(Vector.minus(farCoord, nearCoord));
float [] rayOrigin = {0.0f, 0.0f, 3.0f};
Log.d("Far Coordinate:", "" + farCoord[0] + "," + farCoord[1] + "," + farCoord[2]);
Log.d("Near Coordinate:", "" + nearCoord[0] + "," + nearCoord[1] + "," + nearCoord[2]);
float [] vertices = { -0.5f, 0.5f, 0.0f, // top left
-0.5f, -0.5f, 0.0f, // bottom left
0.5f, -0.5f, 0.0f, // bottom right
0.5f, 0.5f, 0.0f }; // top right
// calculate normal for square
float[] v1 = { vertices[3] - vertices[0], vertices[4] - vertices[1], vertices[5] - vertices[2]};
float[] v2 = { vertices[9] - vertices[0], vertices[10] - vertices[1], vertices[11] - vertices[2]};
float[] n = Vector.normalize(Vector.crossProduct(v1, v2));
// now calculate the intersection point as described in the following link:
// http://antongerdelan.net/opengl/raycasting.html
// our plane passes through the origin, so finding 't' reduces to:
float t = -(Vector.dot(rayOrigin, n) / Vector.dot(dirVector, n));
// now substitute above t in ray equation gives us intersection point
float [] intersectionPoint = Vector.addition(rayOrigin, Vector.scalarProduct(t, dirVector));
Log.d("Ipoint:", "" + intersectionPoint[0] + "," + intersectionPoint[1] + "," + intersectionPoint[2]);
return intersectionPoint;
}
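The code above relies on a small Vector helper class that is not shown in the post. A minimal sketch of the operations it would need (an assumption: it works on 3-component float arrays, so the w component of the unprojected points is simply ignored):
class Vector {
    static float[] minus(float[] a, float[] b) {
        return new float[] { a[0] - b[0], a[1] - b[1], a[2] - b[2] };
    }
    static float[] addition(float[] a, float[] b) {
        return new float[] { a[0] + b[0], a[1] + b[1], a[2] + b[2] };
    }
    static float[] scalarProduct(float s, float[] a) {
        return new float[] { s * a[0], s * a[1], s * a[2] };
    }
    static float dot(float[] a, float[] b) {
        return a[0] * b[0] + a[1] * b[1] + a[2] * b[2];
    }
    static float[] crossProduct(float[] a, float[] b) {
        return new float[] { a[1] * b[2] - a[2] * b[1],
                             a[2] * b[0] - a[0] * b[2],
                             a[0] * b[1] - a[1] * b[0] };
    }
    static float[] normalize(float[] a) {
        float length = (float) Math.sqrt(dot(a, a));
        return new float[] { a[0] / length, a[1] / length, a[2] / length };
    }
}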
Having constructed the following OpenGL ES program, which renders a simple textured cube using ambient light alone, I have come across a strange anomaly whilst attempting to use the glMaterialfv function with either the GL_FRONT or GL_BACK parameter. Whilst the material is correctly processed in conjunction with the GL_FRONT_AND_BACK parameter, neither GL_FRONT nor GL_BACK appears to produce the correct results. As my normals appear to work in the presence of a directional light source, I can only assume that I'm missing something quite obvious. Could this possibly be an issue with the Android emulator itself?
package tal.cube1;
import javax.microedition.khronos.egl.EGLConfig;
import javax.microedition.khronos.opengles.GL10;
import javax.microedition.khronos.opengles.GL11;
import android.graphics.BitmapFactory;
import android.opengl.GLU;
import android.opengl.GLUtils;
import android.opengl.GLSurfaceView.Renderer;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.FloatBuffer;
import java.nio.ShortBuffer;
import android.content.res.*;
import android.graphics.Bitmap;
public class OpenGLRenderer implements Renderer
{
private final float mf_textureCoordinates[] =
{
0.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f,
0.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f,
0.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f,
0.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f,
0.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f,
0.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f
};
private final float mf_normals[] =
{ 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1,
0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0,
0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1,
0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0,
-1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0,
1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0
};
private final float mf_vertices[] =
{-1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, 1,
-1, 1, -1, -1, 1, 1, 1, 1, 1, 1, 1, -1,
1, 1, -1, 1, -1, -1, -1, -1, -1, -1, 1, -1,
-1, -1, 1, -1, -1, -1, 1, -1, -1, 1, -1, 1,
-1, 1, -1, -1, -1, -1, -1, -1, 1, -1, 1, 1,
1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1
};
private final short mf_indices[] =
{ 0, 1, 2, 0, 2, 3,
4, 5, 6, 4, 6, 7,
8, 9, 10, 8, 10, 11,
12, 13, 14, 12, 14, 15,
16, 17, 18, 16, 18, 19,
20, 21, 22, 20, 22, 23
};
private final float mf_ambientLight[] =
{
1.0f, 1.0f, 1.0f, 1.0f
};
private final float mf_ambientMaterial[] =
{
1.0f, 0.0f, 0.0f, 1.0f
};
private FloatBuffer m_vertexBuffer;
private FloatBuffer m_normalBuffer;
private FloatBuffer m_textureBuffer;
private ShortBuffer m_indexBuffer;
private Bitmap m_texture;
private int m_textures[];
private float m_angle = 0.0f;
public OpenGLRenderer(Resources p_resources)
{
super();
m_texture = BitmapFactory.decodeResource(p_resources, R.drawable.crate);
}
@Override public void onSurfaceCreated(GL10 p_gl, EGLConfig p_config)
{
ByteBuffer vbb = ByteBuffer.allocateDirect(mf_vertices.length * 4);
vbb.order(ByteOrder.nativeOrder());
m_vertexBuffer = vbb.asFloatBuffer();
m_vertexBuffer.put(mf_vertices);
m_vertexBuffer.position(0);
ByteBuffer nbb = ByteBuffer.allocateDirect(mf_normals.length * 4);
nbb.order(ByteOrder.nativeOrder());
m_normalBuffer = nbb.asFloatBuffer();
m_normalBuffer.put(mf_normals);
m_normalBuffer.position(0);
ByteBuffer tbb = ByteBuffer.allocateDirect(mf_textureCoordinates.length * 4);
tbb.order(ByteOrder.nativeOrder());
m_textureBuffer = tbb.asFloatBuffer();
m_textureBuffer.put(mf_textureCoordinates);
m_textureBuffer.position(0);
ByteBuffer ibb = ByteBuffer.allocateDirect(mf_indices.length * 2);
ibb.order(ByteOrder.nativeOrder());
m_indexBuffer = ibb.asShortBuffer();
m_indexBuffer.put(mf_indices);
m_indexBuffer.position(0);
m_textures = new int[1];
p_gl.glBindTexture(GL10.GL_TEXTURE_2D, m_textures[0]);
p_gl.glGenTextures(1, m_textures, 0);
GLUtils.texImage2D(GL10.GL_TEXTURE_2D, 0, m_texture, 0);
p_gl.glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
p_gl.glClearDepthf(1.0f);
p_gl.glShadeModel(GL10.GL_FLAT);
p_gl.glDepthFunc(GL10.GL_LEQUAL);
p_gl.glFrontFace(GL10.GL_CCW);
p_gl.glCullFace(GL10.GL_BACK);
p_gl.glHint(GL10.GL_PERSPECTIVE_CORRECTION_HINT, GL10.GL_NICEST);
p_gl.glDisable(GL10.GL_DITHER);
p_gl.glEnable(GL10.GL_DEPTH_TEST);
p_gl.glEnable(GL10.GL_CULL_FACE);
p_gl.glEnable(GL10.GL_LIGHTING);
p_gl.glEnable(GL10.GL_TEXTURE_2D);
p_gl.glEnableClientState(GL10.GL_TEXTURE_COORD_ARRAY);
p_gl.glEnableClientState(GL10.GL_VERTEX_ARRAY);
p_gl.glEnableClientState(GL11.GL_NORMAL_ARRAY);
p_gl.glTexCoordPointer(2, GL10.GL_FLOAT, 0, m_textureBuffer);
p_gl.glNormalPointer(GL10.GL_FLOAT, 0, m_normalBuffer);
p_gl.glVertexPointer(3, GL10.GL_FLOAT, 0, m_vertexBuffer);
p_gl.glLightModelfv(GL10.GL_LIGHT_MODEL_AMBIENT, mf_ambientLight, 0);
p_gl.glMaterialfv(GL10.GL_FRONT_AND_BACK, GL10.GL_AMBIENT, mf_ambientMaterial, 0);
}
@Override public void onDrawFrame(GL10 p_gl)
{
p_gl.glClear(GL10.GL_COLOR_BUFFER_BIT |
GL10.GL_DEPTH_BUFFER_BIT);
p_gl.glLoadIdentity();
p_gl.glTranslatef(0.0f, 0.0f, -8);
p_gl.glRotatef(m_angle, 1.0f, 1.0f, 1.0f);
p_gl.glDrawElements(GL10.GL_TRIANGLES, m_indexBuffer.capacity(),
GL10.GL_UNSIGNED_SHORT, m_indexBuffer);
m_angle += 1.0f;
}
@Override public void onSurfaceChanged(GL10 p_gl, int p_width, int p_height)
{
p_gl.glViewport(0, 0, p_width, p_height);
p_gl.glMatrixMode(GL10.GL_PROJECTION);
p_gl.glLoadIdentity();
GLU.gluPerspective(p_gl, 45.0f, (float)p_width / (float)p_height,
0.1f, 100.0f);
p_gl.glMatrixMode(GL10.GL_MODELVIEW);
p_gl.glLoadIdentity();
}
}
Having now reviewed the OpenGL ES 1.1 documentation, I can confirm that the GL_FRONT and GL_BACK parameters are only supported under the full OpenGL 1.1 specification. My advice for anyone currently developing for the OpenGL ES platform is to ensure that their documentation specifically covers the "ES" subset of OpenGL. Due to some erroneous assumptions, not to mention a fair measure of lethargy, I have spent many hours needlessly testing features which are not supported by the target platform. Just like baking a cake, preparation is everything, and I deserve nothing less than a swift kick in the knackers for adopting such a school-boy approach to an otherwise solid API. To reiterate: whilst this may be common sense for most people, the full OpenGL specification is of little value if you're developing for its lesser "ES" counterpart.
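As a practical note (an addition, not part of the original answer): on OpenGL ES 1.x the face parameter of glMaterialfv only accepts GL_FRONT_AND_BACK, and passing GL_FRONT or GL_BACK is expected to set GL_INVALID_ENUM and be ignored. A quick glGetError() check after the call would have caught this early:
p_gl.glMaterialfv(GL10.GL_FRONT_AND_BACK, GL10.GL_AMBIENT, mf_ambientMaterial, 0);
int error = p_gl.glGetError();
if (error != GL10.GL_NO_ERROR) {
    // e.g. GL_INVALID_ENUM if an unsupported face parameter was passed
    android.util.Log.e("OpenGLRenderer", "glMaterialfv error: 0x" + Integer.toHexString(error));
}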
I'm having a problem where my application looks right on my emulator, but on my phone it only displays a fragment of my scene.
Images here (the emulator is the one on the right).
My renderer code is below. (This class is abstract, but all the implementing class does is draw the polygons.)
public abstract class AbstractRenderer implements Renderer {
float x = 0.5f;
float y = 1f;
float z = 3;
boolean displayCoordinateSystem = true;
public void onSurfaceCreated(GL10 gl, EGLConfig eglConfig) {
gl.glDisable(GL10.GL_DITHER);
gl.glHint(GL10.GL_PERSPECTIVE_CORRECTION_HINT, GL10.GL_FASTEST);
gl.glClearColor(.5f, .5f, .5f, 1);
gl.glShadeModel(GL10.GL_SMOOTH);
gl.glEnable(GL10.GL_DEPTH_TEST);
}
public void onSurfaceChanged(GL10 gl, int w, int h) {
gl.glViewport(0, 0, w, h);
float ratio = (float) w / h;
gl.glMatrixMode(GL10.GL_PROJECTION);
gl.glLoadIdentity();
gl.glFrustumf(-ratio, ratio, -1, 1, 0, 10);
}
public void onDrawFrame(GL10 gl) {
gl.glDisable(GL10.GL_DITHER);
gl.glClear(GL10.GL_COLOR_BUFFER_BIT | GL10.GL_DEPTH_BUFFER_BIT);
gl.glMatrixMode(GL10.GL_MODELVIEW);
gl.glLoadIdentity();
GLU.gluLookAt(gl, x, y, z, 0f, 0, 0f, 0f, 1f, 0f);
gl.glEnableClientState(GL10.GL_VERTEX_ARRAY);
if(displayCoordinateSystem) {
drawCoordinateSystem(gl);
}
draw(gl);
// gl.glFlush();
}
private void drawCoordinateSystem(GL10 gl) {
ByteBuffer vbb = ByteBuffer.allocateDirect(6*3*4);
vbb.order(ByteOrder.nativeOrder());
FloatBuffer vertices = vbb.asFloatBuffer();
ByteBuffer ibb = ByteBuffer.allocateDirect(6*2);
ibb.order(ByteOrder.nativeOrder());
ShortBuffer indexes = ibb.asShortBuffer();
final float coordLength = 27f;
//add point (-1, 0, 0)
vertices.put(-coordLength);
vertices.put(0);
vertices.put(0);
//add point (1, 0, 0)
vertices.put(coordLength);
vertices.put(0);
vertices.put(0);
//add point (0, -1, 0)
vertices.put(0);
vertices.put(-coordLength);
vertices.put(0);
//add point (0, 1, 0)
vertices.put(0);
vertices.put(coordLength);
vertices.put(0);
//add point (0, 0, -1)
vertices.put(0);
vertices.put(0);
vertices.put(-coordLength);
//add point (0, 0, 1)
vertices.put(0);
vertices.put(0);
vertices.put(coordLength);
for(int i = 0; i < 6; i++) {
indexes.put((short)i);
}
vertices.position(0);
indexes.position(0);
gl.glColor4f(1, 1, 0, 0.5f);
gl.glVertexPointer(3, GL10.GL_FLOAT, 0, vertices);
gl.glDrawElements(GL10.GL_LINES, 2, GL10.GL_UNSIGNED_SHORT, indexes);
indexes.position(2);
gl.glColor4f(0, 1, 0, 0.5f);
gl.glDrawElements(GL10.GL_LINES, 2, GL10.GL_UNSIGNED_SHORT, indexes);
indexes.position(4);
gl.glColor4f(0, 0, 1, 0.5f);
gl.glDrawElements(GL10.GL_LINES, 2, GL10.GL_UNSIGNED_SHORT, indexes);
}
protected abstract void draw(GL10 gl);
}
My guess is that I'm not setting some value that is set by default by the emulator implementation. The only thing is, I have no clue what that value might be.
Hoping to hear from you dudes and dudettes!
It's a depth buffer problem: From the "notes" section in the man page of glFrustum:
near must never be set to 0.
You should calculate the near value to be as far from the camera as possible, and the far to be as close as possible, while still encompassing the things you want to draw.
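A minimal sketch of the corrected onSurfaceChanged() (the near and far values here are assumptions; far = 50 generously covers the coordinate axes of length 27 drawn above):
public void onSurfaceChanged(GL10 gl, int w, int h) {
    gl.glViewport(0, 0, w, h);
    float ratio = (float) w / h;
    gl.glMatrixMode(GL10.GL_PROJECTION);
    gl.glLoadIdentity();
    // near must be greater than 0; keep the near/far range as tight as the scene allows
    gl.glFrustumf(-ratio, ratio, -1, 1, 1, 50);
}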