Android OpenGL: set rotation speed of cube

I have a cube that rotates around the center of the coordinate system, but the problem is that it rotates very slowly. How can I set the rotation speed?
The following three methods update the mCurrentModelMatrix with the given model transformation. These are stateful accumulative methods.
public void translate(float x, float y, float z)
{
float[] tempModelMatrix = new float[16];
Matrix.setIdentityM(tempModelMatrix, 0);
Matrix.translateM(tempModelMatrix,0,x,y,z);
Matrix.multiplyMM(this.mCurrentModelMatrix, 0,
tempModelMatrix, 0, this.mCurrentModelMatrix, 0);
}
public void rotate(float angle, float x, float y, float z)
{
float[] tempModelMatrix = new float[16];
Matrix.setIdentityM(tempModelMatrix, 0);
Matrix.rotateM(tempModelMatrix,0,angle,x,y,z);
Matrix.multiplyMM(this.mCurrentModelMatrix, 0,
tempModelMatrix, 0, this.mCurrentModelMatrix, 0);
}
public void scale(float xFactor, float yFactor, float zFactor)
{
float[] tempModelMatrix = new float[16];
Matrix.setIdentityM(tempModelMatrix, 0);
Matrix.scaleM(tempModelMatrix,0,xFactor,yFactor,zFactor);
Matrix.multiplyMM(this.mCurrentModelMatrix, 0,
tempModelMatrix, 0, this.mCurrentModelMatrix, 0);
}
/*
* Calculate the final model-view-projection matrix
* 1. Order of matrix multiplication is important
* 2. MVPmatrix = proj * view * model;
* 3. Setup the MVP matrix in the vertex shader memory
*/
protected void setupMatrices()
{
float[] tempModelMatrix = new float[16];
Matrix.setIdentityM(tempModelMatrix, 0);
//translate the model combo next
Matrix.multiplyMM(mMVPMatrix, 0, //matrix and offset
mCurrentModelMatrix, 0,
tempModelMatrix, 0);
//translate eye coordinates first
Matrix.multiplyMM(mMVPMatrix, 0,
this.mVMatrix, 0,
mMVPMatrix, 0);
//Project it: screen coordinates
Matrix.multiplyMM(mMVPMatrix, 0,
mProjMatrix, 0,
mMVPMatrix, 0);
//Set the vertex uniform handler representing the MVP matrix
GLES20.glUniformMatrix4fv(muMVPMatrixHandle, //uniform handle
1, //number of uniforms. 1 if it is not an array
false, //transpose: must be false
mMVPMatrix, //client matrix memory pointer
0); //offset
}
The draw method:
// Drawing operation
@Override
protected void draw(GL10 gl, int positionHandle) {
// Hide the hidden surfaces using these APIs
GLES20.glEnable(GLES20.GL_DEPTH_TEST);
GLES20.glDepthFunc(GLES20.GL_LESS);
// Transfer vertices to the shader
transferVertexPoints(positionHandle);
// Transfer texture points to the shader
transferTexturePoints(getTextureHandle());
// Implement rotation from 0 to 360 degrees
// Stop when asked and restart when the stopFlag
// is set to false.
// Decide what the current angle to apply
// for rotation is.
if (stopFlag == true) {
// stop rotation
curAngle = stoppedAtAngle;
} else {
curAngle += 1.0f;
}
if (curAngle > 360) {
curAngle = 0;
}
// Tell the base class to reset its
// matrices to identity matrices.
this.initializeMatrices();
// The order of these model transformations matters.
// Each model transformation is specified with
// respect to the last one, and not the very first.
// Center the cube
this.translate(0, 0, -1);
// Rotate it around y axis
this.rotate(curAngle, 0, -1, 0);
// Decenter it to wherever you want
this.translate(0, -2, 2);
// Go ahead calculate the ModelViewMatrix as
// we are done with ALL of our model transformations
this.setupMatrices();
// Call glDrawArrays to use the vertices and draw
int vertexCount = mTriangleVerticesData.length / 3;
GLES20.glDrawArrays(GLES20.GL_TRIANGLES, // what primitives to use
0, // at what point to start
vertexCount); // Starting there how many points to use
// Check if there are errors
checkGlError("glDrawArrays");
}
Thanks in advance!

You are rotating at 1 degree per frame, so it will take 360 frames to do a complete rotation.
If you wanted it to rotate in 2 seconds and you were running at 30 frames per second, you would want to rotate by 6 degrees per frame, by changing this section:
if (stopFlag == true) {
// stop rotation
curAngle = stoppedAtAngle;
} else {
curAngle += 6.0f;
}
if (curAngle > 360) {
curAngle = 0;
}
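If you want the rotation speed to be independent of the frame rate, you can also derive the angle from elapsed time instead of incrementing it once per frame. A minimal sketch, assuming SystemClock.uptimeMillis() as the time source and a 2000 ms period per revolution (both illustrative choices, not from the original code):
// Rotate a full 360 degrees every periodMs milliseconds, regardless of frame rate.
final float periodMs = 2000f;
if (stopFlag) {
    // stop rotation
    curAngle = stoppedAtAngle;
} else {
    long t = SystemClock.uptimeMillis() % (long) periodMs;
    curAngle = (360.0f / periodMs) * t; // 0..360 degrees
}
This requires importing android.os.SystemClock; the rest of the draw method stays unchanged.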

Related

How can I make an AR video always face the user's camera with a marker (Vuforia, native Android)

I am working on marker-based AR with native Android Vuforia. What I am trying to do is move my video object according to the camera (it should always face the camera). I tried the following, but it is not working reliably; sometimes it works, but it is not consistent.
public void renderFrame(State state, float[] projectionMatrix) {
mSampleAppRenderer.renderVideoBackground();
GLES20.glEnable(GLES20.GL_DEPTH_TEST);
isTracking = false;
for (int tIdx = 0; tIdx < state.getNumTrackableResults(); tIdx++) {
TrackableResult trackableResult = state.getTrackableResult(tIdx);
ImageTarget imageTarget = (ImageTarget) trackableResult
.getTrackable();
imageTarget.startExtendedTracking();
isTracking = true;
float[] modelViewMatrixVideo = Tool.convertPose2GLMatrix(
trackableResult.getPose()).getData();
float[] modelViewProjectionVideo = new float[16];
Matrix44F invTranspMV = SampleMath.Matrix44FTranspose(SampleMath.Matrix44FInverse(Tool.convertPose2GLMatrix(trackableResult.getPose())));
Matrix.translateM(modelViewMatrixVideo, 0, 0f, 0f, 1f);
Matrix.rotateM(modelViewMatrixVideo, 0, (float) Math.toDegrees(Math.asin(-invTranspMV.getData()[6])), 0.0f, 0.f, 1.0f);
Matrix.multiplyMM(modelViewProjectionVideo, 0,
projectionMatrix, 0, modelViewMatrixVideo, 0);
GLES20.glEnable(GLES20.GL_BLEND);
GLES20.glBlendFunc(GLES20.GL_SRC_ALPHA, GLES20.GL_ONE_MINUS_SRC_ALPHA);
GLES20.glUseProgram(videoPlaybackShaderID);
GLES20.glVertexAttribPointer(videoPlaybackVertexHandle, 3,
GLES20.GL_FLOAT, false, 0, quadVertices);
GLES20.glVertexAttribPointer(videoPlaybackTexCoordHandle,
2, GLES20.GL_FLOAT, false, 0,
fillBuffer(videoQuadTextureCoordsTransformedStones));
GLES20.glEnableVertexAttribArray(videoPlaybackVertexHandle);
GLES20.glEnableVertexAttribArray(videoPlaybackTexCoordHandle);
GLES20.glActiveTexture(GLES20.GL_TEXTURE0);
GLES20.glBindTexture(GLES11Ext.GL_TEXTURE_EXTERNAL_OES,
videoPlaybackTextureID);
GLES20.glUniformMatrix4fv(videoPlaybackMVPMatrixHandle, 1,
false, modelViewProjectionVideo, 0);
GLES20.glDrawElements(GLES20.GL_TRIANGLES, NUM_QUAD_INDEX,
GLES20.GL_UNSIGNED_SHORT, quadIndices);
GLES20.glDisableVertexAttribArray(videoPlaybackVertexHandle);
GLES20.glDisableVertexAttribArray(videoPlaybackTexCoordHandle);
GLES20.glUseProgram(0);
GLES20.glDisable(GLES20.GL_BLEND);
SampleUtils.checkGLError("VideoPlayback renderFrame");
}
GLES20.glDisable(GLES20.GL_DEPTH_TEST);
Renderer.getInstance().end();
}
I have tried the above so far and it works sometimes, but not properly.
Please help me; I have been trying to do this for a week.
What I am trying to do is move my video object according to the camera (it should always face the camera)
If you want an object to face the camera (billboarding), then you have to use a model matrix that is the inverse view matrix, but without the translation part.
Use Matrix44FInverse to get the inverse of a Matrix44F:
public void renderFrame(State state, float[] projectionMatrix) {
.....
// get the view matrix and set translation part to (0, 0, 0)
float[] tempViewMat = Tool.convertPose2GLMatrix(trackableResult.getPose()).getData();
tempViewMat[12] = 0;
tempViewMat[13] = 0;
tempViewMat[14] = 0;
// create the billboard matrix
Matrix44F billboardMatrix = new Matrix44F();
billboardMatrix.setData(tempViewMat);
billboardMatrix = SampleMath.Matrix44FInverse(billboardMatrix);
// calculate the model view projection matrix
float[] viewMatrixVideo = Tool.convertPose2GLMatrix(trackableResult.getPose()).getData();
float[] modelViewVideo = new float[16];
Matrix.multiplyMM(modelViewVideo, 0, viewMatrixVideo, 0, billboardMatrix.getData(), 0);
float[] modelViewProjectionVideo = new float[16];
Matrix.multiplyMM(modelViewProjectionVideo, 0, projectionMatrix, 0, modelViewVideo, 0);
.....
}
I used your code, but my AR object rotates in every direction, and I want something different: I have one video that I want to place vertically on the marker, and only when the user moves left or right do I want it to rotate to face the camera, so that I see my AR video upright from any viewpoint.
What you want to do is fix the upward direction, but orient the normal vector towards the line of sight.
The line of sight is the inverse Z-axis of the view space in a Right-Handed Coordinate System.
Matrix44F inverse_view = SampleMath.Matrix44FInverse(
Tool.convertPose2GLMatrix(trackableResult.getPose()));
// line of sight
Vec3F los = new Vec3F(-inverse_view.getData()[8], -inverse_view.getData()[9], -inverse_view.getData()[10]);
Either the Y-axis (0, 1, 0) or the Z-axis (0, 0, 1) of the model has to become the Z-axis of the orientation matrix.
The X-axis is the cross product (Vec3FCross) of the line of sight and the Z-axis of the orientation matrix.
e.g.
Vec3F z_axis = new Vec3F(0, 0, 1);
Vec3F x_axis = Vec3FNormalize(Vec3FCross(los, z_axis));
Vec3F y_axis = Vec3FCross(z_axis, x_axis);
float[] orientationMatrix = new float[]{
x_axis.getData()[0], x_axis.getData()[1], x_axis.getData()[2], 0,
y_axis.getData()[0], y_axis.getData()[1], y_axis.getData()[2], 0,
z_axis.getData()[0], z_axis.getData()[1], z_axis.getData()[2], 0,
0, 0, 0, 1
};
// calculate the model view projection matrix
float[] viewMatrixVideo = Tool.convertPose2GLMatrix(trackableResult.getPose()).getData();
float[] modelViewVideo = new float[16];
Matrix.multiplyMM(modelViewVideo, 0, viewMatrixVideo, 0, orientationMatrix, 0);
float[] modelViewProjectionVideo = new float[16];
Matrix.multiplyMM(modelViewProjectionVideo, 0, projectionMatrix, 0, modelViewVideo, 0);

OpenGL ES object position changes despite 0 translation value

@Override
public void onDrawFrame(GL10 gl) {
float[] scratch = new float[16];
GLES20.glClear(GLES20.GL_COLOR_BUFFER_BIT);
// Set the camera position (View matrix)
Matrix.setLookAtM(mViewMatrix, 0, 0, 0, -3, 0f, 0f, 0f, 0f, 1.0f, 0.0f);
// Calculate the projection and view transformation
Matrix.multiplyMM(mMVPMatrix, 0, mProjectionMatrix, 0, mViewMatrix, 0);
if(move_x) x=-tiltx/100;
else{
x=0;
if(side_x) {
if (tiltx < -0.1f) {
x = -tiltx / 100;
move_x = true;
}
}
if(!side_x) {
if (tiltx > 0.1f) {
x = -tiltx / 100;
move_x = true;
}
}
}
if(move_y) y=-tilty/100;
else{
y=0;
if(side_y){
if(tilty <-0.1f) {
y=-tilty/100;
move_y=true;
}
}
if(!side_y) {
if(tilty >0.1f) {
y = -tilty / 100;
move_y = true;
}
}
}
The part above checks whether the object is allowed to move; if it is not, it assigns 0 to the translation values (x and y). Then (below) the translation is applied. Despite the 0 x or y values it moves the object a little further, which causes the object to end up partly outside the view. It also causes the condition
if (Math.abs(scratch[12])>=2.4 && move_x)
to trigger again on the way back, which means the sound plays again.
Matrix.translateM(mBall.mModelMatrix, 0, (float)x*1f, (float)y*1f, 0f);
// Combine the rotation matrix with the projection and camera view
// Note that the mMVPMatrix factor *must be first* in order
// for the matrix multiplication product to be correct.
Matrix.multiplyMM(scratch, 0, mMVPMatrix, 0, mBall.mModelMatrix, 0);
if (Math.abs(scratch[12])>=2.4 && move_x){
setMyBoolean(true);
move_x=false;
if(scratch[12]<0) {
side_x=false;
scratch[12]=-(float)2.4;
}
else {
side_x=true;
scratch[12]=(float)2.4;
}
// true - right side, false - left side
}
if (Math.abs(scratch[13])>=1.8 && move_y){
move_y=false;
setMyBoolean(true);
if(scratch[13]<0){
side_y=true;
scratch[13]=-(float)1.8;
}
else {
side_y=false;
scratch[13]=(float)1.8;
}
// true - right side, false - left side
}
mBall.draw(scratch);
The code above checks whether the object has hit the wall; if it has, it blocks further movement, plays the sound via setMyBoolean, and, most importantly, sets the object to exactly touch the wall by changing the scratch[12] and scratch[13] values.
As explained above, in the next loop the object position is calculated differently (despite the translation variable being set to 0) and the ball ends up in the wall again.
It's worth mentioning that it does not change the position further in the next loops. It stays in the wall until I tilt the phone to the other side, triggering the if statement that allows the object to move away from the wall.
The answer is actually simple: my method did work, but only for one frame, because I didn't correct the wrong position by translating the object. What I did was only correct the value of the matrix that was used to draw the object once. But since the ball remembers its position, it would still draw itself in the wall in the next frame.
What I did to fix this issue is adjust the position of the ball when I detect that it has just hit the wall and has not yet gone back. Now the adjustment is made by actually translating the object, and it is done every frame.
Here's the code that does that:
if(!move_x) {
if(side_x) {
Matrix.translateM(mBall.mModelMatrix, 0, (scratch[12] - (float) 2.4) * 1f, 0, 0f);
Matrix.multiplyMM(scratch, 0, mMVPMatrix, 0, mBall.mModelMatrix, 0);
} else {
Matrix.translateM(mBall.mModelMatrix, 0, (scratch[12] + (float) 2.4) * 1f, 0, 0f);
Matrix.multiplyMM(scratch, 0, mMVPMatrix, 0, mBall.mModelMatrix, 0);
}
}
if(!move_y) {
if(side_y){
Matrix.translateM(mBall.mModelMatrix, 0, 0, (1.8f - scratch[13]) * 0.5f, 0f);
Matrix.multiplyMM(scratch, 0, mMVPMatrix, 0, mBall.mModelMatrix, 0);
} else {
Matrix.translateM(mBall.mModelMatrix, 0, 0, -(1.8f + scratch[13]) * 0.5f, 0f);
Matrix.multiplyMM(scratch, 0, mMVPMatrix, 0, mBall.mModelMatrix, 0);
}
}

Using OpenCV solvePnP for Augmented Reality in OpenGL

I'm trying to build an Augmented Reality application on Android using BoofCV (an OpenCV alternative for Java) and OpenGL ES 2.0. I have a marker whose image points I can detect, and I get the "world to cam" transformation using BoofCV's solvePnP function. I want to be able to draw the marker in 3D using OpenGL. Here's what I have so far:
On every frame of the camera, I call solvePnP
Se3_F64 worldToCam = MathUtils.worldToCam(__qrWorldPoints, imagePoints);
mGLAssetSurfaceView.setWorldToCam(worldToCam);
This is what I have defined as the world points
static float qrSideLength = 79.365f; // mm
private static final double[][] __qrWorldPoints = {
{qrSideLength * -0.5, qrSideLength * 0.5, 0},
{qrSideLength * -0.5, qrSideLength * -0.5, 0},
{qrSideLength * 0.5, qrSideLength * -0.5, 0},
{qrSideLength * 0.5, qrSideLength * 0.5, 0}
};
I'm feeding it a square with its origin at its center and a side length in millimeters.
I can confirm that the rotation vector and translation vector I'm getting back from solvePnP are reasonable, so I don't know if there's a problem here.
I pass the result from solvePnP into my renderer
public void setWorldToCam(Se3_F64 worldToCam) {
DenseMatrix64F _R = worldToCam.R;
Vector3D_F64 _T = worldToCam.T;
// Concatenating the rotation matrix and translation vector into
// a View matrix
double[][] __view = {
{_R.get(0, 0), _R.get(0, 1), _R.get(0, 2), _T.getX()},
{_R.get(1, 0), _R.get(1, 1), _R.get(1, 2), _T.getY()},
{_R.get(2, 0), _R.get(2, 1), _R.get(2, 2), _T.getZ()},
{0, 0, 0, 1}
};
DenseMatrix64F _view = new DenseMatrix64F(__view);
// Matrix to convert from BoofCV (OpenCV) coordinate system to OpenGL coordinate system
double[][] __cv_to_gl = {
{1, 0, 0, 0},
{0, -1, 0, 0},
{0, 0, -1, 0},
{0, 0, 0, 1}
};
DenseMatrix64F _cv_to_gl = new DenseMatrix64F(__cv_to_gl);
// Multiply the View Matrix by the BoofCV to OpenGL matrix to apply the coordinate transform
DenseMatrix64F view = new SimpleMatrix(__view).mult(new SimpleMatrix(__cv_to_gl)).getMatrix();
// BoofCV stores matrices in row major order, but OpenGL likes column major order
// I transpose the view matrix and get a flattened list of 16,
// Then I convert them to floating point
double[] viewd = new SimpleMatrix(view).transpose().getMatrix().getData();
for (int i = 0; i < mViewMatrix.length; i++) {
mViewMatrix[i] = (float) viewd[i];
}
}
I'm also using the camera intrinsics I get from camera calibration to build the OpenGL projection matrix:
@Override
public void onSurfaceChanged(GL10 gl, int width, int height) {
// this projection matrix is applied to object coordinates
// in the onDrawFrame() method
double fx = MathUtils.fx;
double fy = MathUtils.fy;
float fovy = (float) (2 * Math.atan(0.5 * height / fy) * 180 / Math.PI);
float aspect = (float) ((width * fy) / (height * fx));
// be careful with this, it could explain why you don't see certain objects
float near = 0.1f;
float far = 100.0f;
Matrix.perspectiveM(mProjectionMatrix, 0, fovy, aspect, near, far);
GLES20.glViewport(0, 0, width, height);
}
The square I'm drawing is the one defined in this Google example.
@Override
public void onDrawFrame(GL10 gl) {
// redraw background color
GLES20.glClear(GLES20.GL_COLOR_BUFFER_BIT);
// Set the camera position (View matrix)
// Matrix.setLookAtM(mViewMatrix, 0, 0, 0, -3, 0f, 0f, 0f, 0f, 1.0f, 0.0f);
// Combine the rotation matrix with the projection and camera view
// Note that the mMVPMatrix factor *must be the first* in order
// for matrix multiplication product to be correct
// Calculate the projection and view transformation
Matrix.multiplyMM(mMVPMatrix, 0, mProjectionMatrix, 0, mViewMatrix, 0);
// Draw shape
mSquare.draw(mMVPMatrix);
}
I believe the problem has to do with the fact that this definition of a square in Google's example code doesn't take the real-world side length into account. I understand that the OpenGL coordinate system has the corners (-1, 1), (-1, -1), (1, -1), (1, 1), which doesn't correspond to the millimeter object points I have defined for use in BoofCV, even though they are in the right order.
static float squareCoords[] = {
-0.5f, 0.5f, 0.0f, // top left
-0.5f, -0.5f, 0.0f, // bottom left
0.5f, -0.5f, 0.0f, // bottom right
0.5f, 0.5f, 0.0f }; // top right
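A sketch of what taking the side length into account could look like, reusing the qrSideLength constant defined above; this follows the asker's own hypothesis and is not a confirmed fix:
// Scale the unit square to the marker's physical size (millimeters),
// so it matches the world points passed to solvePnP.
static float squareCoords[] = {
        qrSideLength * -0.5f, qrSideLength *  0.5f, 0.0f,  // top left
        qrSideLength * -0.5f, qrSideLength * -0.5f, 0.0f,  // bottom left
        qrSideLength *  0.5f, qrSideLength * -0.5f, 0.0f,  // bottom right
        qrSideLength *  0.5f, qrSideLength *  0.5f, 0.0f   // top right
};
With millimeter units, the near and far planes passed to Matrix.perspectiveM (0.1 and 100 above) may also need to be rescaled, otherwise the marker can fall outside the view frustum.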

Android: performance issue with multiple textures (OpenGL ES)

The game I made on Android is based on a Canvas, and for performance reasons I am porting it to OpenGL ES instead.
The problem is still performance. I have a background and a sprite bouncing around the screen; this works very well, but it seems to have the same performance issue as with the Canvas. Now and then the sprite animation is not perfectly smooth. I guess my design is bad, or really bad.
Is it wrong to call texImage2D for every frame? What are the alternatives to this?
Thanks!!!
@Override
public void onDrawFrame(GL10 unused) {
GLES20.glClear(GLES20.GL_COLOR_BUFFER_BIT | GLES20.GL_DEPTH_BUFFER_BIT);
long speedFactor = 5000;
long time = SystemClock.uptimeMillis() % speedFactor;
float angleInDegrees = (360.0f / speedFactor) * ((int) time);
GLES20.glUseProgram(mProgramHandle);
mMVPMatrixHandle = GLES20.glGetUniformLocation(mProgramHandle, "u_MVPMatrix");
mMVMatrixHandle = GLES20.glGetUniformLocation(mProgramHandle, "u_MVMatrix");
mColorHandle = GLES20.glGetAttribLocation(mProgramHandle, "a_Color");
mNormalHandle = GLES20.glGetAttribLocation(mProgramHandle, "a_Normal");
mTextureCoordinateHandle = GLES20.glGetAttribLocation(mProgramHandle, "a_TexCoordinate");
GLES20.glEnable(GLES20.GL_BLEND);
GLES20.glBlendFunc(GLES20.GL_ONE, GLES20.GL_ONE_MINUS_SRC_ALPHA);
//BACKGROUND
GLUtils.texImage2D(GLES20.GL_TEXTURE_2D, 0, background, 0);
//GLUtils.texSubImage2D(GLES20.GL_TEXTURE_2D, 0, 0, 0, background);
Matrix.setIdentityM(mModelMatrix, 0);
Matrix.translateM(mModelMatrix, 0, 0, 0.0f, -2.00001f);
drawTriangle(mCubePositions);
// THE MOVING SPRITE.
int rest = 0;
if (modulo > 0) {
rest = frameCntr % modulo;
}
GLUtils.texImage2D(GLES20.GL_TEXTURE_2D, 0, alien[bitmapIndex], 0);
//GLUtils.texSubImage2D(GLES20.GL_TEXTURE_2D, 0, 0, 0, alien[bitmapIndex]);
if (rest == 0) {
bitmapIndex++;
}
if (bitmapIndex == alien.length) {
bitmapIndex = 0;
}
frameCntr++;
float deltaX = (motion.get_velocityX() * motion.get_xDirection());
float deltaY = (motion.get_velocityY() * motion.get_yDirection());
anim_x += deltaX;
anim_y += deltaY;
Matrix.setIdentityM(mModelMatrix, 0);
Matrix.translateM(mModelMatrix, 0, anim_x, anim_y, -7f);
//Matrix.rotateM(mModelMatrix, 0, angleInDegrees, 0.0f, 1.0f, 0.0f);
drawTriangle(mAlienPositions);
calcPos();
collision.collisionWalls(motion, xPos, yPos, game_width, game_height);
}
Actually, GLUtils.texImage2D() calls glTexImage2D().
glTexImage2D(), in turn, is what loads the bitmap. You only need to call it once to load the texture; it loads the bitmap into the currently bound texture object.
In the draw call you should bind to the needed texture by calling GLES20.glBindTexture( target, texture ). target is either GL_TEXTURE_2D or GL_TEXTURE_CUBE_MAP. texture is a texture name that was bound before calling GLUtils.texImage2D().
Also, don't forget to unbind the current texture using GLES20.glBindTexture( target, 0 ) after each draw call and after initialization.
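A minimal sketch of that split, using the background bitmap and drawTriangle call from the question; the textureHandles field is an illustrative name, not from the original code. The upload happens once (e.g. in onSurfaceCreated), and onDrawFrame only binds:
// One-time setup: create a texture object and upload the bitmap into it.
int[] textureHandles = new int[1];
GLES20.glGenTextures(1, textureHandles, 0);
GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, textureHandles[0]);
GLES20.glTexParameteri(GLES20.GL_TEXTURE_2D, GLES20.GL_TEXTURE_MIN_FILTER, GLES20.GL_LINEAR);
GLES20.glTexParameteri(GLES20.GL_TEXTURE_2D, GLES20.GL_TEXTURE_MAG_FILTER, GLES20.GL_LINEAR);
GLUtils.texImage2D(GLES20.GL_TEXTURE_2D, 0, background, 0);  // upload once
GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, 0);               // unbind after init

// Per frame, in onDrawFrame: bind the already-uploaded texture, then draw.
GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, textureHandles[0]);
drawTriangle(mCubePositions);
GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, 0);
Doing the same with one texture object per alien[] frame reduces the per-frame texture work to a glBindTexture call.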

Convert OpenGL 3D point to 2D using GLU.gluProject

I have an OpenGL scene with a sphere having a radius of 1, and the camera being at the center of the sphere (it's a 360° picture viewer). The user can rotate the sphere by panning.
Now I need to display 2D pins "attached" to some parts of the picture. To do so, I want to convert the 3D coordinates of my pins into 2D screen coordinates, so I can add the pin image at those screen coordinates.
I'm using GLU.gluProject and the following classes from android-apidemo:
MatrixGrabber
MatrixStack
MatrixTrackingGL
I save the projection matrix in the onSurfaceChanged method and the model-view matrix in the onDrawFrame method (after having drawn my sphere). Then I feed them to GLU.gluProject when the user rotates the sphere, to update the pins' positions.
When I pan horizontally, the pins pan correctly, but when I pan vertically, the texture pans "faster" than the pin image (as if the pin were closer to the camera than the sphere).
Here are some relevant parts of my code:
public class CustomRenderer implements GLSurfaceView.Renderer {
MatrixGrabber mMatrixGrabber = new MatrixGrabber();
private float[] mModelView = null;
private float[] mProjection = null;
[...]
@Override
public void onSurfaceChanged(GL10 gl, int width, int height) {
// Get the sizes:
float side = Math.max(width, height);
int x = (int) (width - side) / 2;
int y = (int) (height - side) / 2;
// Set the viewport:
gl.glViewport(x, y, (int) side, (int) side);
// Set the perspective:
gl.glMatrixMode(GL10.GL_PROJECTION);
gl.glLoadIdentity();
GLU.gluPerspective(gl, FIELD_OF_VIEW_Y, 1, Z_NEAR, Z_FAR);
// Grab the projection matrix:
mMatrixGrabber.getCurrentProjection(gl);
mProjection = mMatrixGrabber.mProjection;
// Set to MODELVIEW mode:
gl.glMatrixMode(GL10.GL_MODELVIEW);
gl.glLoadIdentity();
}
@Override
public void onDrawFrame(GL10 gl) {
// Load the texture if needed:
if(mTextureToLoad != null) {
mSphere.loadGLTexture(gl, mTextureToLoad);
mTextureToLoad = null;
}
// Clear:
gl.glClearColor(0.5f, 0.5f, 0.5f, 0.0f);
gl.glClear(GL10.GL_COLOR_BUFFER_BIT | GL10.GL_DEPTH_BUFFER_BIT);
gl.glLoadIdentity();
// Rotate the scene:
gl.glRotatef( (1 - mRotationY + 0.25f) * 360, 1, 0, 0); // 0.25 is used to adjust the texture position
gl.glRotatef( (1 - mRotationX + 0.25f) * 360, 0, 1, 0); // 0.25 is used to adjust the texture position
// Draw the sphere:
mSphere.draw(gl);
// Grab the model-view matrix:
mMatrixGrabber.getCurrentModelView(gl);
mModelView = mMatrixGrabber.mModelView;
}
public float[] getScreenCoords(float x, float y, float z) {
if(mModelView == null || mProjection == null) return null;
float[] result = new float[3];
int[] view = new int[] {0, 0, (int) mSurfaceViewSize.getWidth(), (int) mSurfaceViewSize.getHeight()};
GLU.gluProject(x, y, z,
mModelView, 0,
mProjection, 0,
view, 0,
result, 0);
result[1] = mSurfaceViewSize.getHeight() - result[1];
return result;
}
}
I use the result of the getScreenCoords method to display my pins. The y value is wrong.
What am I doing wrong?
