I'm drawing a simple quad using OpenGL ES 2.0 in C++. If I draw it with a basic vertex shader there is no problem and the quad renders as expected. This is the vertex shader:
uniform mat4 u_mvp;
attribute vec4 a_Position;
void main(){
gl_Position = a_Position;
}
If I add the MVP matrix:
gl_Position = u_mvp * a_Position;
then there's nothing on screen.
This is the perspective matrix:
Matrix4 Matrix4::getPerspective(float angle, float ratio, float near, float far) {
// angle = 45; ratio = 1.438; near = 1; far = 100
Matrix4 matrix;
float top = (float)(near * Math::tangentDegrees(angle / 2.0f));
float bottom = -top;
float right = top * ratio;
float left = -right;
matrix.m[0] = (2 * near) / (right - left);
matrix.m[1] = 0.0f;
matrix.m[2] = (right + left) / (right - left);
matrix.m[3] = 0.0f;
matrix.m[4] = 0.0f;
matrix.m[5] = (2 * near) / (top - bottom);
matrix.m[6] = (top + bottom) / (top - bottom);
matrix.m[7] = 0.0f;
matrix.m[8] = 0.0f;
matrix.m[9] = 0.0f;
matrix.m[10] = -(far + near) / (far - near);
matrix.m[11] = -(2 * far * near) / (far - near);
matrix.m[12] = 0.0f;
matrix.m[13] = 0.0f;
matrix.m[14] = -1.0f;
matrix.m[15] = 0.0f;
return matrix;
}
The modelView matrix is obtained as follows:
modelView = translation * rotation * scale
mvp = perspective * modelView
Each element of this multiplication is obtained following this example:
http://www.opengl-tutorial.org/beginners-tutorials/tutorial-3-matrices/
and all of them are initialized to the identity matrix.
With this command I obtain the handle:
transformUniformHandle = (GLuint)glGetUniformLocation(getProgramId(), "u_mvp");
With this command I set the MVP matrix in the shader (I tried both GL_FALSE and GL_TRUE):
glUniformMatrix4fv(transformUniformHandle, 1, GL_TRUE, modelViewProjection.m);
OpenGL uses column-major storage for matrices, while your projection matrix is set up in row-major order. So you need to transpose the order of the matrix elements:
matrix.m[0] = (2 * near) / (right - left);
matrix.m[4] = 0.0f;
matrix.m[8] = (right + left) / (right - left);
matrix.m[12] = 0.0f;
matrix.m[1] = 0.0f;
matrix.m[5] = (2 * near) / (top - bottom);
matrix.m[9] = (top + bottom) / (top - bottom);
matrix.m[13] = 0.0f;
matrix.m[2] = 0.0f;
matrix.m[6] = 0.0f;
matrix.m[10] = -(far + near) / (far - near);
matrix.m[14] = -(2 * far * near) / (far - near);
matrix.m[3] = 0.0f;
matrix.m[7] = 0.0f;
matrix.m[11] = -1.0f;
matrix.m[15] = 0.0f;
You tried to transpose the matrix by passing GL_TRUE as the 3rd argument here:
glUniformMatrix4fv(transformUniformHandle, 1, GL_TRUE, modelViewProjection.m);
This is not supported in ES 2.0, the only valid argument value is GL_FALSE. You will see a GL_INVALID_VALUE error when you call glGetError() after this. Calling glGetError() should be routine if you have any problems with your OpenGL code.
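As a minimal sketch of what that routine check could look like (checkGlError is a hypothetical helper name, not something from your code; it assumes a GLES2 header is available):

#include <GLES2/gl2.h>
#include <cstdio>

// Drain and print all pending GL errors; call after any suspect GL call.
static void checkGlError(const char* op) {
    for (GLenum error = glGetError(); error != GL_NO_ERROR; error = glGetError()) {
        std::printf("after %s: glError 0x%x\n", op, error);
    }
}

Calling checkGlError("glUniformMatrix4fv") right after the line above would report 0x0501 (GL_INVALID_VALUE) when GL_TRUE is passed.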
You could keep the matrix row-major if you really wanted to, and change the shader code accordingly by multiplying the vector from the right:
gl_Position = a_Position * u_mvp;
But it's probably better to just use column-major order for your matrices.
Also, you're not showing your model-view matrix. You'll have to make sure that the view transformation translates the geometry in the negative z-direction, so that it's placed between the near and far planes of the projection transformation you are using.
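As a minimal sketch (assuming the same column-major float[16] layout as the corrected projection above; the -5 is just an arbitrary distance between your near = 1 and far = 100), a view translation could be as simple as:

// Column-major identity with a translation of (0, 0, -5) in elements 12-14,
// pushing the quad in front of the camera and inside the near/far range.
float view[16] = {
    1.0f, 0.0f, 0.0f, 0.0f,
    0.0f, 1.0f, 0.0f, 0.0f,
    0.0f, 0.0f, 1.0f, 0.0f,
    0.0f, 0.0f, -5.0f, 1.0f
};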
Given that you haven't shown your whole code I can't be sure, but it could be that you haven't called glUseProgram before calling glUniformMatrix4fv. It might also be that your triangles are clipped out by the projection matrix. Make sure the points are within the clipping ranges. If that doesn't fix it, check for errors from glGetError. This page will explain its use: OpenGL error checking
I'm quite new to OpenGL, and I'm trying to display a square with OpenGL ES 2.0 with an orthographic projection (on Android using C++ and the NDK), but all I get is a blank screen.
If I don't use the projection matrix I can see the square, but it's stretched depending on the surface's aspect ratio.
I'm building the matrix with the following code (referring to https://www.opengl.org/sdk/docs/man2/xhtml/glOrtho.xml):
GLfloat projMat[16];
void ortho(float left, float right, float top, float bottom, float near, float far){
float tx = ((right+left)/(right-left))*-1;
float ty = ((top+bottom)/(top-bottom))*-1;
float tz = ((far+near)/(far-near))*-1;
projMat[0] = 2/right-left;
projMat[1] = 0.0f;
projMat[2] = 0.0f;
projMat[3] = 0.0f;
projMat[4] = 0.0f;
projMat[5] = 2/top-bottom;
projMat[6] = 0.0f;
projMat[7] = 0.0f;
projMat[8] = 0.0f;
projMat[9] = 0.0f;
projMat[10] = -2/far-near;
projMat[11] = 0.0f;
projMat[12] = tx;
projMat[13] = ty;
projMat[14] = tz;
projMat[15] = 1.0f;
}
And I call this function with ortho(0.0, width, 0.0, height, -1.0, 1.0);, where width and height are the surface's width and height.
My vertex shader :
attribute vec4 vPosition;
uniform mat4 projMatrix;
void main() {
gl_Position = projMatrix * vPosition;
};
My fragment shader :
precision mediump float;
void main() {
gl_FragColor = vec4(0.0, 1.0, 0.0, 1.0);
}
And my draw function :
// 3D drawing
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glUseProgram(mProgram);
glUniformMatrix4fv(mPerspectivehandler, 1, GL_FALSE, projMat);
glEnableVertexAttribArray(mvPositionHandle);
glVertexAttribPointer(mvPositionHandle, 3, GL_FLOAT, GL_FALSE, 0, quadverts);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
glDisableVertexAttribArray(mvPositionHandle);
Do you know where the mistake is?
Edit :
I added parentheses as suggested, but it still doesn't work
GLfloat projMat[16];
void ortho(float left, float right, float top, float bottom, float near, float far){
float tx = ((right+left)/(right-left))*-1;
float ty = ((top+bottom)/(top-bottom))*-1;
float tz = ((far+near)/(far-near))*-1;
projMat[0] = 2/(right-left);
projMat[1] = 0.0f;
projMat[2] = 0.0f;
projMat[3] = 0.0f;
projMat[4] = 0.0f;
projMat[5] = 2/(top-bottom);
projMat[6] = 0.0f;
projMat[7] = 0.0f;
projMat[8] = 0.0f;
projMat[9] = 0.0f;
projMat[10] = -2/(far-near);
projMat[11] = 0.0f;
projMat[12] = tx;
projMat[13] = ty;
projMat[14] = tz;
projMat[15] = 1.0f;
}
You're missing parentheses in the matrix calculation for the diagonal elements. They should be:
projMat[0] = 2.0f / (right - left);
projMat[5] = 2.0f / (top - bottom);
projMat[10] = -2.0f / (far - near);
It can be hard to debug issues where nothing is rendered. You should simplify until you can isolate what is going wrong.
You've declared vPosition as a vec4, but in your comment your vertex position array only has 12 floats (3 per vertex). By itself that is usually fine, since a vec4 attribute supplied with 3 components gets w = 1.0 filled in, but the count does have to stay consistent with this call:
glVertexAttribPointer(mvPositionHandle, 3, GL_FLOAT, GL_FALSE, 0, quadverts);
You use 3 as the second parameter in the line above, so be sure that quadverts really holds 3 floats per vertex; if you ever store 4 floats per vertex, change this parameter to 4 as well.
To debug, don't use a projection matrix - just render your geometry directly to clip-coordinates in your shader like so:
attribute vec4 vPosition;
uniform mat4 projMatrix;
void main() {
gl_Position = vPosition;
};
If the above shader renders (after you fix your vec4 problem), then you can start to debug your projection matrix.
Does your glClear() call actually take effect? Try changing the color with glClearColor() and make sure the background color changes accordingly. If it doesn't, you might have set up your GL window incorrectly.
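For example, a minimal sanity check (nothing here is specific to your code) would be:

// If the background doesn't turn red, the EGL surface/context setup is
// the problem, not the projection matrix or the geometry.
glClearColor(1.0f, 0.0f, 0.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);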
This question is 4 months old and I don't know if you still need an answer.
I also had a lot of problems with ortho projection matrices in OpenGL, but this works (for me):
result[0] = 2.0 / (right - left);
result[1] = 0.0;
result[2] = 0.0;
result[3] = 0.0;
result[4] = 0.0;
result[5] = 2.0 / (top - bottom);
result[6] = 0.0;
result[7] = 0.0;
result[8] = 0.0;
result[9] = 0.0;
result[10] = (1.0f / (near - far));
result[11] = 0.0;
result[12] = ((left + right) / (left - right));
result[13] = ((top + bottom) / (bottom - top));
result[14] = (near / (near - far));
result[15] = 1;
I've just started learning OpenGL ES 2.0 on Android and I ran into a problem that I don't know how to solve.
I want to create a large plane (a field). I created it and put a texture on it, but here comes my problem: it doesn't draw all of it, it only displays about 10 units on the Z axis. X is fine.
So I want to create a big square, but it displays a rectangle. It is like someone took scissors and cut it off at a certain Z coordinate.
I don't even know which part of my code I should put here: the shader? The plane coordinates? The camera settings?
Thank you for your patience.
It sounds like your plane is getting clipped by the frustum or viewing volume. That is typically set by either glOrtho() or gluPerspective(). Try increasing the distance between the near and far plane parameters to these functions.
If you are relying on a default frustum provided by Android, you may have to construct your own frustum, which would look something like this for glOrtho():
typedef struct
{
float f0;
float f1;
float f2;
float f3;
float f4;
float f5;
float f6;
float f7;
float f8;
float f9;
float f10;
float f11;
float f12;
float f13;
float f14;
float f15;
} Mat4;
void Ortho(Mat4 * pMat4, float left, float top, float right, float bottom, float nearPlane, float farPlane)
{
float rcplmr = 1.0f / (left - right);
float rcpbmt = 1.0f / (bottom - top);
float rcpnmf = 1.0f / (nearPlane - farPlane);
pMat4->f0 = -2.0f * rcplmr;
pMat4->f1 = 0.0f;
pMat4->f2 = 0.0f;
pMat4->f3 = 0.0f;
pMat4->f4 = 0.0f;
pMat4->f5 = -2.0f * rcpbmt;
pMat4->f6 = 0.0f;
pMat4->f7 = 0.0f;
pMat4->f8 = 0.0f;
pMat4->f9 = 0.0f;
pMat4->f10 = -2.0f * rcpnmf;
pMat4->f11 = 0.0f;
pMat4->f12 = (right + left) * rcplmr;
pMat4->f13 = (top + bottom) * rcpbmt;
pMat4->f14 = (nearPlane + farPlane) * rcpnmf;
pMat4->f15 = 1.0f;
}
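A hedged usage sketch (surfaceWidth, surfaceHeight and projMatrixHandle are placeholders, not names from your code; the wide -100/100 range is only there to show the idea of pushing the planes apart):

// Build an orthographic projection with generous near/far planes.
Mat4 proj;
Ortho(&proj, 0.0f, 0.0f, (float)surfaceWidth, (float)surfaceHeight, -100.0f, 100.0f);
// On ES 2.0 you would then upload it to your shader, for example:
// glUniformMatrix4fv(projMatrixHandle, 1, GL_FALSE, (const GLfloat*)&proj);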
I'm implementing cubic Bézier curve logic in one of my Android applications.
I've implemented the cubic Bézier curve code on a canvas in onDraw of a custom view.
// Path to draw cubic bezier curve
Path cubePath = new Path();
// Move to startPoint(200,200) (P0)
cubePath.moveTo(200,200);
// Cubic to with ControlPoint1(200,100) (C1), ControlPoint2(300,100) (C2) , EndPoint(300,200) (P1)
cubePath.cubicTo(200,100,300,100,300,200);
// Draw on Canvas
canvas.drawPath(cubePath, paint);
I visualized the above code in the following image:
[Updated]
Logic for selecting the first control points: I've taken
baseX = 200, baseY = 200 and curve_size = X of EndPoint - X of StartPoint
Start Point : x = baseX and y = baseY
Control Point 1 : x = baseX and y = baseY - curve_size
Control Point 2 : x = baseX + curve_size and y = baseY - curve_size
End Point : x = baseX + curve_size and y = baseY
I want to allow the user to change the EndPoint of the above curve and, based on the new end point, invalidate the canvas.
But the problem is that the curve is maintained by two control points, which need to be recalculated when the EndPoint changes.
For example, I just want to find the new control points when the EndPoint changes from (300,200) to (250,250),
like in the following image:
Please help me calculate the two new control points based on the new end point so that the curve keeps the same shape as with the previous end point.
I referred to the following links while searching:
http://pomax.github.io/bezierinfo/
http://jsfiddle.net/hitesh24by365/jHbVE/3/
http://en.wikipedia.org/wiki/B%C3%A9zier_curve
http://cubic-bezier.com/
Any reference link is also appreciated in an answer to this question.
Changing the endpoint means two things: a rotation around p0 and a scaling factor.
The scaling factor (let's call it s) is len(p2 - p0) / len(p1 - p0).
For the rotation angle (let's call it r) I defer you to Calculating the angle between three points in android, which also gives a platform-specific implementation; you can check correctness by scaling/rotating p1 in relation to p0, and you should get p2 as a result.
Next, apply the scaling and rotation with respect to p0 to c1 and c2. For convenience I will call the new c1 'd1' and the new c2 'd2':
d1 = rot(c1 - p0, r) * s + p0
d2 = rot(c2 - p0, r) * s + p0
To define some pseudocode for rot() (rotation, see http://en.wikipedia.org/wiki/Rotation_%28mathematics%29):
point rot(point p, double angle){
point q;
q.x = p.x * cos(angle) - p.y * sin(angle);
q.y = p.x * sin(angle) + p.y * cos(angle);
return q;
}
Your Bézier curve is now scaled and rotated in relation to p0, with p1 moved to p2.
Firstly, I would ask you to look into the following articles:
Bezier Curves
Why B-Spline Curve
B-Spline Curve Summary
What you are trying to implement is a piecewise composite Bézier curve. From the Summary page, for n control points (including start/end) you get (n - 1)/3 piecewise Bézier curves.
The control points literally shape the curve. If you don't give proper control points along with the new point, you will not be able to create a smoothly connected Bézier curve. Generating them will not work, as it is too complex and there is no universally accepted way.
If you don't have (or don't want to give) extra control points, you should use a Catmull-Rom spline, which passes through all control points and is C1 continuous (the derivative is continuous at any point on the curve).
Links for the Catmull-Rom spline in Java/Android:
http://hawkesy.blogspot.in/2010/05/catmull-rom-spline-curve-implementation.html
https://github.com/Dongseob-Park/catmull-rom-spline-curve-android
catmull-rom splines for Android (similar to your question)
The bottom line is: if you don't have the control points, don't use a cubic Bézier curve. Generating them is a problem, not the solution.
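If you do go the Catmull-Rom route but still want to draw with Path.cubicTo(), the standard conversion for a uniform Catmull-Rom segment through points P0, P1, P2, P3 is: the piece from P1 to P2 is the cubic Bézier with control points C1 = P1 + (P2 - P0) / 6 and C2 = P2 - (P3 - P1) / 6 (applied per coordinate). That way the spline passes through your points and you still only ever feed Android cubic Béziers.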
It seems that what you are doing here is rotating and scaling a square where you know the bottom two points and need to calculate the other two. The two known points form two triangles with the other two, so we just need to find the third point of each triangle. Suppose the end point is x1, y1:
PointF c1 = calculateTriangle(x0, y0, x1, y1, true); //find left third point
PointF c2 = calculateTriangle(x0, y0, x1, y1, false); //find right third point
cubePath.reset();
cubePath.moveTo(x0, y0);
cubePath.cubicTo(c1.x, c1.y, c2.x, c2.y, x1, y1);
private PointF calculateTriangle(float x1, float y1, float x2, float y2, boolean left) {
PointF result = new PointF(0,0);
float dy = y2 - y1;
float dx = x2 - x1;
float dangle = (float) (Math.atan2(dy, dx) - Math.PI /2f);
float sideDist = (float) Math.sqrt(dx * dx + dy * dy); //square
if (left){
result.x = (int) (Math.cos(dangle) * sideDist + x1);
result.y = (int) (Math.sin(dangle) * sideDist + y1);
}else{
result.x = (int) (Math.cos(dangle) * sideDist + x2);
result.y = (int) (Math.sin(dangle) * sideDist + y2);
}
return result;
}
...
There is another way to do this where it does not matter how many points you have between the first and the last point of the path, or even what its shape is.
//Find scale
Float oldDist = (float) Math.sqrt((x1 - x0) * (x1 - x0) + (y1 - y0) * (y1 - y0));
Float newDist = (float) Math.sqrt((x2 - x0) * (x2 - x0) + (y2 - y0) * (y2 - y0));
Float scale = newDist/oldDist;
//find angle
Float oldAngle = (float) (Math.atan2(y1 - y0, x1 - x0) - Math.PI /2f);
Float newAngle = (float) (Math.atan2(y2 - y0, x2 - x0) - Math.PI /2f);
Float angle = newAngle - oldAngle;
//set matrix
Matrix matrix = new Matrix();
matrix.postScale(scale, scale, x0, y0);
matrix.postRotate((float) Math.toDegrees(angle), x0, y0); // postRotate expects degrees, atan2 gives radians
//transform the path
cubePath.transform(matrix);
A small variant on the suggestion by Lumis
// Find scale
Float oldDist = (float) Math.sqrt((x1 - x0) * (x1 - x0) + (y1 - y0) * (y1 - y0));
Float newDist = (float) Math.sqrt((x2 - x0) * (x2 - x0) + (y2 - y0) * (y2 - y0));
Float scale = newDist/oldDist;
// Find angle
Float oldAngle = (float) (Math.atan2(y1 - y0, x1 - x0));
Float newAngle = (float) (Math.atan2(y2 - y0, x2 - x0));
Float angle = newAngle - oldAngle;
Matrix matrix = new Matrix();
matrix.postScale(scale, scale);
matrix.postRotate((float) Math.toDegrees(angle)); // postRotate expects degrees, atan2 gives radians
float[] p = { c1.x, c1.y, c2.x, c2.y };
matrix.mapVectors(p);
PointF newC1 = new PointF(p[0], p[1]);
PointF newC2 = new PointF(p[2], p[3]);
I'm working on an Android OpenGL ES 1.1 2D game with a top view of a vehicle and a camera zoom relative to the vehicle's speed. When the speed increases the camera zooms out to offer the player better road visibility.
I'm having a little trouble finding the exact way to detect whether a sprite is visible or not, given its position and the current camera zoom.
An important precision: all of my game's objects are at the same z coordinate. I use 3D just for the camera effect (that's why I do not need complicated frustum calculations).
Here is a sample of my GLSurfaceView.Renderer class:
public static float fov_degrees = 45f;
public static float fov_radians = fov_degrees / 180 * (float) Math.PI;
public static float aspect; //1.15572 on my device
public static float camZ; //927 on my device
@Override
public void onSurfaceChanged(GL10 gl, int x, int y) {
aspect = (float) x / (float) y;
camZ = y / 2 / (float) Math.tan(fov_radians / 2);
Camera.MINIDECAL = y / 4; // minimum cam zoom out (192 on my device)
if (x == 0) { // Prevent A Divide By Zero By
x = 1; // Making Height Equal One
}
gl.glViewport(0, 0, x, y); // Reset The Current Viewport
gl.glMatrixMode(GL10.GL_PROJECTION); // Select The Projection Matrix
gl.glLoadIdentity(); // Reset The Projection Matrix
// Calculate The Aspect Ratio Of The Window
GLU.gluPerspective(gl, fov_degrees, aspect , camZ / 10, camZ * 10);
GLU.gluLookAt(gl, 0, 0, camZ, 0, 0, 0, 0, 1, 0); // move camera back
gl.glMatrixMode(GL10.GL_MODELVIEW); // Select The Modelview Matrix
gl.glLoadIdentity(); // Reset The Modelview Matrix
When I draw any camera-relative object I use this translation:
gl.glTranslatef(position.x - camera.centerPosition.x , position.y -camera.centerPosition.y , - camera.zDecal);
Everything is displayed fine; the problem comes from my physics thread when it checks whether an object is visible or not:
public static boolean isElementVisible(Element element) {
xDelta = (float) ((camera.zDecal + GameRenderer.camZ) * GameRenderer.aspect * Math.atan(GameRenderer.fov_radians));
yDelta = (float) ((camera.zDecal + GameRenderer.camZ)* Math.atan(GameRenderer.fov_radians));
//(xDelta and yDelta are in reality updated only once per frame or when the camera zoom changes)
Camera camera = ObjectRegistry.INSTANCE.camera;
float xMin = camera.centerPosition.x - xDelta/2;
float xMax = camera.centerPosition.x + xDelta/2;
float yMin = camera.centerPosition.y - yDelta/2;
float yMax = camera.centerPosition.y + yDelta/2;
//xMin and yMin are supposed to be the lower bounds x and y of the visible plan
// same for yMax and xMax
// then i just check that my sprite is visible on this rectangle.
Vector2 phD = element.getDimToTestIfVisibleOnScreen();
int sizeXd2 = (int) phD.x / 2;
int sizeYd2 = (int) phD.y / 2;
return (element.position.x + sizeXd2 > xMin)
&& (element.position.x - sizeXd2 < xMax)
&& (element.position.y - sizeYd2 < yMax)
&& (element.position.y + sizeYd2 > yMin);
}
Unfortunately the objects were disappearing too soon and appearing too late, so I manually added some zoom-out on the camera for test purposes.
I did some manual tests and found that by adding approximately 260 to the camera z index while calculating xDelta and yDelta, it was good.
So the lines are now:
xDelta = (float) ((camera.zDecal + GameRenderer.camZ + 260) * GameRenderer.aspect * Math.atan(GameRenderer.fov_radians));
yDelta = (float) ((camera.zDecal + GameRenderer.camZ + 260)* Math.atan(GameRenderer.fov_radians));
Because it's a hack and the magic number may not work on every device, I would like to understand what I missed. I guess there is something in that "260" magic number that comes from the fov or the width/height ratio and that could be turned into a formula parameter for pixel-perfect detection.
Any guess?
My guess is that you should be using Math.tan(GameRenderer.fov_radians) instead of Math.atan(GameRenderer.fov_radians).
Reasoning:
If you used a camera with 90 degree fov, then xDelta and yDelta should be infinitely large, right? Since the camera would have to view the entire infinite plane.
tan(pi/2) is infinite (and negative infinity). atan(pi/2) is merely 1.00388...
tan(pi/4) is 1, compared to atan(pi/4) of 0.66577...
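For what it's worth, the usual frustum-extent formula for a plane at distance d in front of the camera is height = 2 * d * tan(fov / 2) and width = height * aspect. With d = camera.zDecal + GameRenderer.camZ (and your camZ = y / 2 / tan(fov / 2)) that gives exactly the screen height in world units at zDecal = 0, which should remove the need for the 260 magic number.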
I want to animate a 2D sprite sheet. I have a sprite sheet with a lot of character animations with different frame sizes. For a single animation, I scale a vertex quad to fit one frame and then change the texture position for the animation. This works pretty well for one animation, but when switching to another animation with a different frame size, scaling the vertex quad and fitting the texture again, I get a side effect where the texture is stretched and doesn't fit. It's just on one animation frame, but it makes the change between two animations look very bad.
I think that is because of the vertex-size change. So my idea is to have a fixed vertex size and fit the texture onto it without stretching it over the full vertex (the height for every animation is fixed).
Maybe an image will help, so I created one:
Here is my code, I hope it is enough:
public boolean nextFrame() {
float textureWidth = textureMap()[currentAnimation][0];
float frameCount = textureMap()[currentAnimation][1];
float frameWidth = textureWidth / frameCount;
if (loop) {
if (currentFrame == frameCount)
currentFrame = 0;
} else {
if (currentFrame == frameCount) {
setAnimation(AnimationConstants.IDLE);
loop = true;
return false;
}
}
float x_left = (float) currentFrame * frameWidth / textureWidth;
float x_right = (float) (currentFrame * frameWidth + frameWidth)
/ textureWidth;
texture[0] = x_left; // top left x
texture[1] = 1.0f; // top left y
texture[2] = x_left; // bottom left x
texture[3] = 0.0f; // bottom left y
texture[4] = x_right; // top right x
texture[5] = 1.0f; // top right y
texture[6] = x_right; // bottom right x
texture[7] = 0.0f; // bottom right y
ByteBuffer byteBuffer = ByteBuffer.allocateDirect(texture.length * 4);
byteBuffer.order(ByteOrder.nativeOrder());
textureBuffer = byteBuffer.asFloatBuffer();
textureBuffer.put(texture);
textureBuffer.position(0);
currentFrame++;
return true;
}
private void newVertex() {
float textureWidth = textureMap()[currentAnimation][0];
float frameCount = textureMap()[currentAnimation][1];
float frameWidth = textureWidth / frameCount;
float width = (float) frameWidth / (float) frameHeight;
vertices[0] = pos_x; // bottom left x
vertices[1] = pos_y; // bottom left y
vertices[3] = pos_x; // top left x
vertices[4] = pos_y + (1.0f * scale); // top left y
vertices[6] = pos_x + (width * scale); // bottom right x
vertices[7] = pos_y; // bottom right y
vertices[9] = pos_x + (width * scale); // top right x
vertices[10] = pos_y + (1.0f * scale); // top right y
// z values
vertices[2] = -0.2f; // bottom left z
vertices[5] = -0.2f; // top left z
vertices[8] = -0.2f; // bottom right z
vertices[11] = -0.2f; // top right z
ByteBuffer byteBuffer = ByteBuffer.allocateDirect(vertices.length * 4);
byteBuffer.order(ByteOrder.nativeOrder());
vertexBuffer = byteBuffer.asFloatBuffer();
vertexBuffer.put(vertices);
vertexBuffer.position(0);
}
So for every new animation, I call newVertex().
Check this out.
I suppose you should use the same general idea. If needed, I can describe in detail how to place the texture correctly in your case.