I'm new to OpenGL.
My goal is to render an alpha video with OpenGL inside a TextureView.
I started with the Video Effects sample and tried to modify some colors (to start with: black to transparent) using a custom [EDITED] fragment shader:
#extension GL_OES_EGL_image_external : require
precision mediump float;
uniform samplerExternalOES sTexture;
vec3 first;
vec4 second;
varying vec2 vTextureCoord;
vec2 oTextureCoord;
void main() {
first[0] = 0.0;
first[1] = 0.0;
first[2] = 0.0;
second[0] = 0.0;
second[1] = 1.0;
second[2] = 0.0;
second[3] = 1.0;
vec4 color = texture2D(sTexture, vTextureCoord);
oTextureCoord = vec2(vTextureCoord.x ,vTextureCoord.y + 0.5);
vec4 color2 = texture2D(sTexture, oTextureCoord);
if(vTextureCoord.y < 0.5){
gl_FragColor = vec4(color.r , color.g, color.b, color2.b);
}else{
gl_FragColor = color;
}
}
But I never saw the background under the view.
After some research I added
GLES20.glEnable(GLES20.GL_BLEND);
But now the "transparent color" is a green one :/
To add more information: the source video is a combination of colored frames on the upper part and the alpha mask on the bottom one.
Am I doing it wrong?
Thanks in advance.
[EDIT]
Here is my actual onDrawFrame method:
@Override
public void onDrawFrame(GL10 glUnused) {
synchronized (this) {
if (updateSurface) {
mSurface.updateTexImage();
mSurface.getTransformMatrix(mSTMatrix);
updateSurface = false;
}
}
GLES20.glEnable(GLES20.GL_BLEND);
GLES20.glBlendFunc(GLES20.GL_SRC_ALPHA, GLES20.GL_ONE_MINUS_SRC_ALPHA);
GLES20.glClearColor(1.0f,1.0f, 1.0f, 0.0f);
GLES20.glClear(GLES20.GL_DEPTH_BUFFER_BIT
| GLES20.GL_COLOR_BUFFER_BIT);
GLES20.glUseProgram(mProgram);
checkGlError("glUseProgram");
GLES20.glActiveTexture(GLES20.GL_TEXTURE0);
GLES20.glBindTexture(GL_TEXTURE_EXTERNAL_OES, mTextureID[0]);
mTriangleVertices.position(TRIANGLE_VERTICES_DATA_POS_OFFSET);
GLES20.glVertexAttribPointer(maPositionHandle, 3, GLES20.GL_FLOAT,
false, TRIANGLE_VERTICES_DATA_STRIDE_BYTES,
mTriangleVertices);
checkGlError("glVertexAttribPointer maPosition");
GLES20.glEnableVertexAttribArray(maPositionHandle);
checkGlError("glEnableVertexAttribArray maPositionHandle");
mTriangleVertices.position(TRIANGLE_VERTICES_DATA_UV_OFFSET);
GLES20.glVertexAttribPointer(maTextureHandle, 3, GLES20.GL_FLOAT,
false, TRIANGLE_VERTICES_DATA_STRIDE_BYTES,
mTriangleVertices);
checkGlError("glVertexAttribPointer maTextureHandle");
GLES20.glEnableVertexAttribArray(maTextureHandle);
checkGlError("glEnableVertexAttribArray maTextureHandle");
Matrix.setIdentityM(mMVPMatrix, 0);
GLES20.glUniformMatrix4fv(muMVPMatrixHandle, 1, false, mMVPMatrix,
0);
GLES20.glUniformMatrix4fv(muSTMatrixHandle, 1, false, mSTMatrix, 0);
GLES20.glDrawArrays(GLES20.GL_TRIANGLE_STRIP, 0, 4);
checkGlError("glDrawArrays");
GLES20.glEnable(GLES20.GL_BLEND);
GLES20.glBlendFunc(GLES20.GL_SRC_ALPHA, GLES20.GL_ONE_MINUS_SRC_ALPHA);
GLES20.glFinish();
}
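checkGlError is called throughout but never shown; a minimal sketch of such a helper (its exact behavior, plus the Log/TAG usage, is an assumption inferred from the calls above) could be:
private void checkGlError(String op) {
    int error;
    // Drain every pending error flag; GL can queue more than one.
    while ((error = GLES20.glGetError()) != GLES20.GL_NO_ERROR) {
        Log.e(TAG, op + ": glError 0x" + Integer.toHexString(error));
        throw new RuntimeException(op + ": glError " + error);
    }
}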
--[EDIT]--
Here is my vertexShader :
uniform mat4 uMVPMatrix;
uniform mat4 uSTMatrix;
attribute vec4 aPosition;
attribute vec4 aTextureCoord;
varying vec2 vTextureCoord;
void main() {
gl_Position = uMVPMatrix * aPosition;
vTextureCoord = (uSTMatrix * aTextureCoord).xy;
}
To improve my question, here are some details.
Here is my source:
And here is my goal (alpha channel):
When I set the clear color with:
GLES20.glClearColor(1f, 1f, 0.9f, 0f);
the yellow background still shows through, as you can see! How can I get a transparent view so that I see the blue one in this picture?
[ANSWER : HOW TO RESOLVE]
1. First, you need to set up your GLSurfaceView to allow transparency in your constructor:
this.getHolder().setFormat(PixelFormat.RGB_565);
this.getHolder().setFormat(PixelFormat.TRANSPARENT);
setEGLConfigChooser(8,8,8,8,16,0);
setEGLContextClientVersion(2); // all this before setRenderer()
2. Ask for transparency in your onDrawFrame method:
GLES20.glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
GLES20.glClear(GLES20.GL_DEPTH_BUFFER_BIT|GLES20.GL_COLOR_BUFFER_BIT);
GLES20.glEnable(GLES20.GL_BLEND);
GLES20.glBlendFunc(GLES20.GL_SRC_ALPHA, GLES20.GL_ONE_MINUS_SRC_ALPHA);
For me, only a black transparent clear color, rgba(0, 0, 0, 0), worked.
Thanks again to the contributors who helped me.
--[EDIT : REMAINING PROBLEM]--
Here is an example:
On top there is a GLSurfaceView with an alpha video, and below it a custom GLTextureView; both are in a horizontal scroll view.
The top one looks just as I want! But look what happens when I scroll to the right:
The top view still appears, while the bottom one hides as it should!
Just a quick look through your code already shows quite a few strange pieces. I wish you had included your vertex data (the positions and texture coordinates) at least. First, please check the line
GLES20.glVertexAttribPointer(maTextureHandle, 3, GLES20.GL_FLOAT,
false, TRIANGLE_VERTICES_DATA_STRIDE_BYTES,
mTriangleVertices);
These look like texture coordinates, yet you are passing 3 components? Is that correct? Fortunately, if the stride is correct, the component count will have no effect, but still.
So if I understand correctly that the top part of the video frame represents the color and the bottom part the alpha, then your shader makes little sense to me. Could you show the vertex shader as well? Anyway, what I would expect you to do is use texture coordinates in the range [0, 1], representing the whole texture. Then, in the vertex shader, compute two varying vec2 texture coordinates (declared as varyings at the top and assigned inside main(), since varyings cannot be initialized at declaration):
colorTextureCoordinates = vec2(textureCoordinates.x, textureCoordinates.y * .5); // representing the top part
alphaTextureCoordinates = vec2(textureCoordinates.x, textureCoordinates.y * .5 + .5); // representing the bottom part
Then in the fragment shader all you do is mix the 2:
vec3 color = texture2D(sTexture, colorTextureCoordinates).rgb;
float alpha = texture2D(sTexture, alphaTextureCoordinates).b; // Why .b?
gl_FragColor = vec4(color, alpha);
Or am I missing something? Why is there an IF statement in your fragment shader suggesting that the top of the texture would use the color and the bottom part would use the color plus the alpha (or the other way around)?
I suggest you verify each piece in isolation:
1. Draw the image you receive from the video with the most common texture shader to confirm the whole frame is being drawn correctly.
2. Check the alpha is working correctly by feeding it a semitransparent output: gl_FragColor = vec4(color.rgb, .5).
3. Try drawing only the top part of the frame (the color) by using colorTextureCoordinates to get the expected results (the transparent parts should be black).
4. Check the alpha part by using alphaTextureCoordinates and gl_FragColor = vec4(alpha, alpha, alpha, 1.0), which should produce a grayscale image where white is full alpha and black is no alpha.
After all of these are confirmed, try putting it all together as written above.
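Putting the pieces together, a minimal sketch of the complete shader pair could look like the following (assuming the external texture and uSTMatrix setup from the question; depending on the SurfaceTexture transform, the two half offsets may need swapping):
Vertex shader:
uniform mat4 uMVPMatrix;
uniform mat4 uSTMatrix;
attribute vec4 aPosition;
attribute vec4 aTextureCoord;
varying vec2 colorTextureCoordinates;
varying vec2 alphaTextureCoordinates;
void main() {
    gl_Position = uMVPMatrix * aPosition;
    vec2 tc = (uSTMatrix * aTextureCoord).xy;
    colorTextureCoordinates = vec2(tc.x, tc.y * .5); // top half: color
    alphaTextureCoordinates = vec2(tc.x, tc.y * .5 + .5); // bottom half: alpha mask
}
Fragment shader:
#extension GL_OES_EGL_image_external : require
precision mediump float;
uniform samplerExternalOES sTexture;
varying vec2 colorTextureCoordinates;
varying vec2 alphaTextureCoordinates;
void main() {
    vec3 color = texture2D(sTexture, colorTextureCoordinates).rgb;
    float alpha = texture2D(sTexture, alphaTextureCoordinates).b;
    gl_FragColor = vec4(color, alpha);
}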
To make the surface view blend with other views you need to set the transparency, and you need to fill the transparent pixels with a transparent color.
Since you are using blending, you will need to clear the background to the clear color (.0f, .0f, .0f, .0f). Simply disabling blending should produce the same result in your case.
After doing so you will still see a black background, because the surface view does not implicitly blend with other views (the background); only the RGB part is used to fill the rendered view. To enable the transparency, take a look at the other answers on making a GLSurfaceView transparent.
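For reference, a sketch of the usual GLSurfaceView transparency setup (to be done in the view's constructor, before setRenderer(); which z-order call you want depends on your layout):
setEGLContextClientVersion(2);
setEGLConfigChooser(8, 8, 8, 8, 16, 0); // request an alpha channel in the EGL surface
getHolder().setFormat(PixelFormat.TRANSLUCENT);
setZOrderOnTop(true); // blends over everything, but also covers sibling views
// setZOrderMediaOverlay(true); // alternative: stays below sibling views
Note that setZOrderOnTop(true) places the surface above the whole window, which would match the remaining problem described above, where the top view keeps showing while scrolling.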
Related
I am currently rendering a camera preview using GL ES 2.0 on Android to a SurfaceTexture, rendering it with OpenGL, then transferring it to a MediaCodec's input surface for recording. It is displayed to the user in a SurfaceView, and by setting that SurfaceView's aspect ratio, the camera preview is not distorted based on screen size.
The recording is in portrait, but at some point the incoming texture will start coming in landscape, at which point I'd like to zoom out and display it as a "movie" stretched wide to fit the edge of the screen horizontally, with black bars on the top and bottom to maintain the aspect ratio of the texture.
The drawing code in onDrawFrame is pretty simple. The link has the rest of the setup code for shaders and the like but it's just setting up a triangle strip to draw.
private final float[] mTriangleVerticesData = {
// X, Y, Z, U, V
-1.f, -1.f, 0, 0.f, 0.f,
1.f, -1.f, 0, 1.f, 0.f,
-1.f, 1.f, 0, 0.f, 1.f,
1.f, 1.f, 0, 1.f, 1.f,
};
public static final String VERTEX_SHADER =
"uniform mat4 uMVPMatrix;\n" +
"uniform mat4 uSTMatrix;\n" +
"attribute vec4 aPosition;\n" +
"attribute vec4 aTextureCoord;\n" +
"varying vec2 vTextureCoord;\n" +
"void main() {\n" +
" gl_Position = uMVPMatrix * aPosition;\n" +
" vTextureCoord = (uSTMatrix * aTextureCoord).xy;\n" +
"}\n";
private float[] mMVPMatrix = new float[16];
private float[] mSTMatrix = new float[16];
public TextureManager() {
mTriangleVertices = ByteBuffer.allocateDirect(
mTriangleVerticesData.length * FLOAT_SIZE_BYTES)
.order(ByteOrder.nativeOrder()).asFloatBuffer();
mTriangleVertices.put(mTriangleVerticesData).position(0);
mTriangleHalfVertices = ByteBuffer.allocateDirect(
mTriangleVerticesHalfData.length * FLOAT_SIZE_BYTES)
.order(ByteOrder.nativeOrder()).asFloatBuffer();
mTriangleHalfVertices.put(mTriangleVerticesHalfData).position(0);
Matrix.setIdentityM(mSTMatrix, 0);
}
public void onDrawFrame() {
mSurfaceTexture.getTransformMatrix(mSTMatrix);
GLES20.glUseProgram(mProgram);
checkGlError("glUseProgram");
GLES20.glActiveTexture(GLES20.GL_TEXTURE0);
GLES20.glBindTexture(GLES11Ext.GL_TEXTURE_EXTERNAL_OES, 0);
GLES20.glBindTexture(GLES11Ext.GL_TEXTURE_EXTERNAL_OES, mTextureID);
mTriangleVertices.position(TRIANGLE_VERTICES_DATA_POS_OFFSET);
GLES20.glVertexAttribPointer(maPositionHandle, 3, GLES20.GL_FLOAT, false,
TRIANGLE_VERTICES_DATA_STRIDE_BYTES, mTriangleVertices);
checkGlError("glVertexAttribPointer maPosition");
GLES20.glEnableVertexAttribArray(maPositionHandle);
checkGlError("glEnableVertexAttribArray maPositionHandle");
mTriangleVertices.position(TRIANGLE_VERTICES_DATA_UV_OFFSET);
GLES20.glVertexAttribPointer(maTextureHandle, 2, GLES20.GL_FLOAT, false,
TRIANGLE_VERTICES_DATA_STRIDE_BYTES, mTriangleVertices);
checkGlError("glVertexAttribPointer maTextureHandle");
GLES20.glEnableVertexAttribArray(maTextureHandle);
checkGlError("glEnableVertexAttribArray maTextureHandle");
Matrix.setIdentityM(mMVPMatrix, 0);
GLES20.glUniformMatrix4fv(muMVPMatrixHandle, 1, false, mMVPMatrix, 0);
GLES20.glUniformMatrix4fv(muSTMatrixHandle, 1, false, mSTMatrix, 0);
GLES20.glDrawArrays(GLES20.GL_TRIANGLE_STRIP, 0, 4);
checkGlError("glDrawArrays");
GLES20.glFinish();
}
Things I've tried that haven't quite worked: scaling mMVPMatrix or mSTMatrix to zoom in. I can zoom in, so I can get the "center slice" of the landscape video to display without distortion, but this cuts off a huge 40% portion of the video, so it isn't a great solution. Zooming out by scaling these matrices causes the texture to repeat the pixels on the edge because of the clamp-to-edge behavior.
Halving the x,y,z parts of mTriangleVerticesData gives some of the desired behavior as seen in the screenshot below, exact aspect ratio aside. The center part of the picture is halved and centered, as expected. However, the texture is repeated to the left, right, and bottom, and there is distortion to the top left. What I want is the center to be as it is, with black/nothing surrounding it.
I could scale out then translate mMVPMatrix or mSTMatrix and then change my shader to display black for anything outside (0,1) but eventually I want to overlay multiple textures on top of one another, like a full size background and partial size foreground texture. To do this I must eventually figure out how to only display a texture in a portion of the available space, not just manipulate the texture so it looks like that's what's happening.
Thanks for reading all that. Any help, suggestions, or wild guesses are appreciated.
The repeated image chunks look like GPU tiling artifacts, not texture repeating. Add a glClear() call to erase the background.
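A minimal sketch of that, at the top of onDrawFrame:
GLES20.glClearColor(0f, 0f, 0f, 1f); // opaque black for the letterbox bars
GLES20.glClear(GLES20.GL_COLOR_BUFFER_BIT | GLES20.GL_DEPTH_BUFFER_BIT);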
It would seem what you are looking for is a "fit" system to get the correct frame for your element. It would mean, for instance, you have a 100x200 image and you want to display it in a frame of 50x50. The result should then be seeing the whole image in the rectangle (12.5, 0, 25, 50), given as {x, y, width, height}. So the resulting frame must respect the image ratio (25/50 = 100/200) and the original frame boundaries must be respected.
To achieve this you generally need to compare the image ratio and the target frame ratio: imageRatio = imageWidth/imageHeight and frameRatio = frameWidth/frameHeight. Then if the image ratio is larger than the frame ratio, you need black borders on top and bottom, while if the frame ratio is larger, you will see black borders on the left and right sides.
So to compute the target frame:
imageRatio = imageWidth/imageHeight
frameRatio = frameWidth/frameHeight
if(imageRatio > frameRatio) {
targetFrame = {0, (frameHeight-frameWidth/imageRatio)*.5, frameWidth, frameWidth/imageRatio} // frame as: {x, y, width, height}
}
else {
targetFrame = {(frameWidth-frameHeight*imageRatio)*.5, 0, frameHeight*imageRatio, frameHeight} // frame as: {x, y, width, height}
}
In your case, the image width and height are the ones received from the stream; the frame width and height come from your target frame, which seems to be a result of your matrices, but for the full-screen case these would simply be the values from glOrtho, if you use it. The target frame should then be used to construct the vertex positions so you get exactly the correct vertex data to display the full texture.
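As a sketch, the same computation in Java (the method name and the {x, y, width, height} return convention are just illustrative):
/** Fits an image inside a frame, preserving aspect ratio; returns {x, y, width, height}. */
static float[] fitFrame(float imageWidth, float imageHeight, float frameWidth, float frameHeight) {
    float imageRatio = imageWidth / imageHeight;
    float frameRatio = frameWidth / frameHeight;
    if (imageRatio > frameRatio) {
        // Image is relatively wider: black bars on top and bottom.
        float h = frameWidth / imageRatio;
        return new float[] { 0f, (frameHeight - h) * 0.5f, frameWidth, h };
    } else {
        // Frame is relatively wider: black bars on left and right.
        float w = frameHeight * imageRatio;
        return new float[] { (frameWidth - w) * 0.5f, 0f, w, frameHeight };
    }
}
For the 100x200 image in a 50x50 frame from the example above, this returns {12.5, 0, 25, 50}.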
I see you use matrices to do all the computation, and the same algorithm could be converted to matrix form, but I discourage you from doing so. You seem to be overusing matrices, which makes your code hard to maintain. I suggest you keep an "ortho" projection matrix, use frames to draw textures, and only use matrix scales and translations where it makes sense to do so.
I'm rendering some super-resolution NEXRAD radar data with a GLSurfaceView. There are 1377156 vertices in my test file: XY with type GL_FLOAT for the position, RGBA with type GL_UNSIGNED_BYTE for the color, 12 bytes total per vertex.
On the new Moto X, everything looks exactly as intended. But on a Nexus 7, it only renders about 3/4 of the triangles in the correct place. There are also some triangles that are showing up where they aren't supposed to.
Nexus 7 : http://i.imgur.com/cRvpa5I.png
Moto X 2014: http://i.imgur.com/YhX5AgU.png
GLES20.glGetError() returns GLES20.GL_NO_ERROR. I have an iOS version sharing approximately the same rendering code and vertex/fragment shader program, using the exact same test file, and it works too (on an iPad 2).
Draw code
if(renderVertices) {
int numVertices = 1377156;
if(loadVertices){
GLES20.glGenBuffers(1, radarVBO, 0);
GLES20.glGenBuffers(1, colorVBO, 0);
myBuffer.position(0);
GLES20.glBindBuffer(GLES20.GL_ARRAY_BUFFER, radarVBO[0]);
GLES20.glBufferData(GLES20.GL_ARRAY_BUFFER, numVertices * 8, myBuffer, GLES20.GL_STATIC_DRAW); // 2 floats * 4 bytes per position
colorBuffer.position(0);
GLES20.glBindBuffer(GLES20.GL_ARRAY_BUFFER, colorVBO[0]);
GLES20.glBufferData(GLES20.GL_ARRAY_BUFFER, numVertices * 4, colorBuffer, GLES20.GL_STATIC_DRAW); // 4 unsigned bytes per color
loadVertices = false;
}
GLES20.glBindBuffer(GLES20.GL_ARRAY_BUFFER, radarVBO[0]);
GLES20.glVertexAttribPointer(positionHandle, VERTEX_POS_SIZE, GLES20.GL_FLOAT, false, 0, 0);
GLES20.glBindBuffer(GLES20.GL_ARRAY_BUFFER, colorVBO[0]);
GLES20.glVertexAttribPointer(colorHandle, VERTEX_COLOR_SIZE, GLES20.GL_UNSIGNED_BYTE, true, 0,0);
GLES20.glDrawArrays(GLES20.GL_TRIANGLES, 0,numVertices);
}
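Since glGetError reports nothing, one diagnostic worth trying (purely a guess, not a confirmed fix) is to split the single 1.3M-vertex glDrawArrays call into smaller batches, in case the affected driver mishandles very large draw calls; the batch size must stay a multiple of 3 so no triangle is split:
final int batchSize = 65532; // multiple of 3
for (int first = 0; first < numVertices; first += batchSize) {
    int count = Math.min(batchSize, numVertices - first);
    GLES20.glDrawArrays(GLES20.GL_TRIANGLES, first, count);
}
If the Nexus 7 renders correctly with batching, the problem lies in how the driver handles the large draw rather than in the vertex data.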
Fragment program
varying lowp vec4 colorVarying;
void main()
{
gl_FragColor = colorVarying.rgba;
}
Vertex program
attribute vec2 position;
attribute vec4 color;
varying lowp vec4 colorVarying;
uniform mat4 modelViewProjectionMatrix;
void main()
{
colorVarying = color;
gl_Position = modelViewProjectionMatrix * vec4(position, 0.0, 1.0);
}
The problem is that the result of the FBO copy is filled with whatever pixel is at texture coordinate 0,0 of the source texture.
If I edit the shader to render a gradient based on texture coordinate position, the fragment shader fills the whole result as if it had texture coordinate 0, 0 fed into it.
If I edit the triangle strip vertices, things behave as expected, so I think the camera and geometry are set up right. It's just that the 2-tri quad is all one color when it should reflect either my input texture or at least my position-gradient shaders!
I've ported this code nearly line for line from a working iOS example.
This is running alongside Unity3D, so don't assume any GL settings are default, as the engine is likely fiddling with them before my code starts.
Here's the FBO copy operation
GLES20.glBindFramebuffer(GLES20.GL_FRAMEBUFFER, mFrameBuffer);
checkGlError("glBindFramebuffer");
GLES20.glViewport(0, 0, TEXTURE_WIDTH*4, TEXTURE_HEIGHT*4);
checkGlError("glViewport");
GLES20.glDisable(GLES20.GL_BLEND);
GLES20.glDisable(GLES20.GL_DEPTH_TEST);
GLES20.glDepthMask(false);
GLES20.glDisable(GLES20.GL_CULL_FACE);
GLES20.glBindBuffer(GLES20.GL_ARRAY_BUFFER, 0);
GLES20.glBindBuffer(GLES20.GL_ELEMENT_ARRAY_BUFFER, 0);
GLES20.glPolygonOffset(0.0f, 0.0f);
GLES20.glDisable(GLES20.GL_POLYGON_OFFSET_FILL);
checkGlError("fbo setup");
// Load the shaders if we have not done so
if (mProgram <= 0) {
createProgram();
Log.i(TAG, "InitializeTexture created program with ID: " + mProgram);
if (mProgram <= 0)
Log.e(TAG, "Failed to initialize shaders!");
}
// Set up the program
GLES20.glUseProgram(mProgram);
checkGlError("glUseProgram");
GLES20.glUniform1i(mUniforms[UNIFORM_TEXTURE], 0);
checkGlError("glUniform1i");
// clear the scene
GLES20.glClearColor(0.0f,0.0f, 0.1f, 1.0f);
checkGlError("glClearColor");
GLES20.glClear(GLES20.GL_COLOR_BUFFER_BIT);
// Bind our source texture
GLES20.glActiveTexture(GLES20.GL_TEXTURE0);
checkGlError("glActiveTexture");
GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, mSourceTexture);
checkGlError("glBindTexture");
GLES20.glFrontFace( GLES20.GL_CW );
// Our object to render
ByteBuffer imageVerticesBB = ByteBuffer.allocateDirect(8 * 4);
imageVerticesBB.order(ByteOrder.nativeOrder());
FloatBuffer imageVertices = imageVerticesBB.asFloatBuffer();
imageVertices.put(new float[]{
-1.0f, -1.0f,
1.0f, -1.0f,
-1.0f, 1.0f,
1.0f, 1.0f}
);
imageVertices.position(0);
// The object's texture coordinates
ByteBuffer textureCoordinatesBB = ByteBuffer.allocateDirect(8 * 4);
textureCoordinatesBB.order(ByteOrder.nativeOrder());
FloatBuffer textureCoordinates = textureCoordinatesBB.asFloatBuffer();
textureCoordinates.put(new float[]{
0.0f, 1.0f,
1.0f, 1.0f,
0.0f, 0.0f,
1.0f, 0.0f}
);
textureCoordinates.position(0);
// Update attribute values.
GLES20.glEnableVertexAttribArray(ATTRIB_VERTEX);
GLES20.glVertexAttribPointer(ATTRIB_VERTEX, 2, GLES20.GL_FLOAT, false, 0, imageVertices);
GLES20.glEnableVertexAttribArray(ATTRIB_TEXTUREPOSITON);
GLES20.glVertexAttribPointer(ATTRIB_TEXTUREPOSITON, 2, GLES20.GL_FLOAT, false, 0, textureCoordinates);
// Draw the quad
GLES20.glDrawArrays(GLES20.GL_TRIANGLE_STRIP, 0, 4);
If you want to dive in, I've put up a nice gist with the update loop, setup and shaders here: https://gist.github.com/acgourley/7783624
I'm doing this as an Android port of UnityFBO (MIT license), so all help is both appreciated and will be shared more broadly.
The declarations of your vertex shader output and fragment shader input do not match for the texture coordinate varying (different precision qualifiers). Ordinarily this would not be an issue, but for reasons I will discuss below, using highp in your fragment shader may come back to bite you in the butt.
Vertex shader:
attribute vec4 position;
attribute mediump vec4 textureCoordinate;
varying mediump vec2 coordinate;
void main()
{
gl_Position = position;
coordinate = textureCoordinate.xy;
}
Fragment shader:
varying highp vec2 coordinate;
uniform sampler2D texture;
void main()
{
gl_FragColor = texture2D(texture, coordinate);
}
In OpenGL ES 2.0 highp is an optional feature in fragment shaders. You should not declare anything highp in a fragment shader unless GL_FRAGMENT_PRECISION_HIGH is defined by the pre-processor.
GLSL ES 1.0 Specification - 4.5.4: Available Precision Qualifiers - p. 36
The built-in macro GL_FRAGMENT_PRECISION_HIGH is defined to one on systems supporting highp precision in the fragment language
#define GL_FRAGMENT_PRECISION_HIGH 1
and is not defined on systems not supporting highp precision in the fragment language. When defined, this macro is available in both the vertex and fragment languages. The highp qualifier is an optional feature in the fragment language and is not enabled by #extension.
The bottom line is that you need to check whether the fragment shader supports highp precision before declaring something highp, or rewrite your declaration in the fragment shader to use mediump. I cannot see much reason for arbitrarily increasing the precision of the vertex shader coordinates in the fragment shader; I would honestly expect to see it written as highp in both the vertex shader and the fragment shader, or kept mediump in both.
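A portable way to express this is to key the fragment shader declaration off that macro, falling back to mediump (a sketch of the same fragment shader with the guard added):
#ifdef GL_FRAGMENT_PRECISION_HIGH
varying highp vec2 coordinate;
#else
varying mediump vec2 coordinate;
#endif
uniform sampler2D texture;
void main()
{
    gl_FragColor = texture2D(texture, coordinate);
}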
EDIT: Right, fixed it :D Issue was that I was trying to set the Projection matrix before calling glUseProgram()
I'm starting out with GL ES 2.0 on Android, and am trying to migrate some of my code over from 1.1. I've defined vertex and fragment shaders as per the official docs, and after some googling I understand how the model/projection matrices work together, yet I can't seem to get anything but a blank screen.
I'm passing in a model view matrix to my vert shader, and am multiplying it with the ortho projection before multiplying the resulting mvp matrix with the vertex position. Here are my shaders to clarify:
Vertex Shader
attribute vec3 Position;
uniform mat4 Projection;
uniform mat4 ModelView;
void main() {
mat4 mvp = Projection * ModelView;
gl_Position = mvp * vec4(Position.xyz, 1);
}
Fragment Shader
precision mediump float;
uniform vec4 Color;
void main() {
gl_FragColor = Color;
}
I'm building the projection matrix in my renderer's onSurfaceChanged() function:
int projectionHandle = GLES20.glGetUniformLocation(shaderProg, "Projection");
Matrix.orthoM(projection, 0, -width / 2, width / 2, -height / 2, height / 2, -10, 10);
GLES20.glUniformMatrix4fv(projectionHandle, 1, false, projection, 0);
Then in my onDrawFrame(), I call each actor's draw routine, which looks like
int positionHandle = GLES20.glGetAttribLocation(Renderer.getShaderProg(), "Position");
int colorHandle = GLES20.glGetAttribLocation(Renderer.getShaderProg(), "Color");
int modelHandle = GLES20.glGetUniformLocation(Renderer.getShaderProg(), "ModelView");
float[] modelView = new float[16];
Matrix.setIdentityM(modelView, 0);
Matrix.rotateM(modelView, 0, rotation, 0, 0, 1.0f);
Matrix.translateM(modelView, 0, position.x, position.y, 1.0f);
GLES20.glUniformMatrix4fv(modelHandle, 1, false, modelView, 0);
GLES20.glUniform4fv(colorHandle, 1, color.toFloatArray(), 0);
GLES20.glVertexAttribPointer(positionHandle, 3, GLES20.GL_FLOAT, false, 0, vertBuffer);
GLES20.glEnableVertexAttribArray(positionHandle);
GLES20.glDrawArrays(GLES20.GL_TRIANGLE_STRIP, 0, vertices.length / 3);
GLES20.glDisableVertexAttribArray(positionHandle);
I realize that I can optimize this a bit, but I just want to get it to work first. The vertices are in a FloatBuffer, and centered around the origin. Any thoughts on what I am doing wrong? I've been checking my code against various tutorials and SO questions/answers, and can't see what I'm doing wrong.
Right, fixed it :D Issue was that I was trying to set the Projection matrix before calling glUseProgram()
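In other words, glUniform* calls write to the currently bound program, so the program must be bound first. A corrected sketch of the onSurfaceChanged() code above:
GLES20.glUseProgram(shaderProg); // bind the program before setting its uniforms
int projectionHandle = GLES20.glGetUniformLocation(shaderProg, "Projection");
Matrix.orthoM(projection, 0, -width / 2, width / 2, -height / 2, height / 2, -10, 10);
GLES20.glUniformMatrix4fv(projectionHandle, 1, false, projection, 0);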
I'm trying to draw multiple hexagons on the screen that have an alpha channel. The image is this:
So, I load the texture into the program and that's OK. When it runs, the alpha channel is blended with the background color and that's OK, but when two hexagons overlap, the overlapping part becomes the color of the background! Below is the picture:
Of course, this is not the effect that I expected. I want them to overlap without this background being drawn over the other texture. Here is my code for drawing:
GLES20.glUseProgram(Program);
hVertex = GLES20.glGetAttribLocation(Program,"vPosition");
hColor = GLES20.glGetUniformLocation(Program, "vColor");
uTexture = GLES20.glGetUniformLocation(Program, "u_Texture");
hTexture = GLES20.glGetAttribLocation(Program, "a_TexCoordinate");
hMatrix = GLES20.glGetUniformLocation(Program, "uMVPMatrix");
GLES20.glVertexAttribPointer(hVertex, 3, GLES20.GL_FLOAT, false, 0, bVertex);
GLES20.glEnableVertexAttribArray(hVertex);
GLES20.glUniform4fv(hColor, 1, Color, 0);
GLES20.glActiveTexture(GLES20.GL_TEXTURE0);
GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, hTexture);
GLES20.glUniform1i(uTexture, 0);
GLES20.glVertexAttribPointer(hTexture, 2, GLES20.GL_FLOAT, false, 0, bTexture);
GLES20.glEnableVertexAttribArray(hTexture);
GLES20.glBlendFunc(GLES20.GL_ONE, GLES20.GL_ONE_MINUS_SRC_ALPHA);
GLES20.glEnable(GLES20.GL_BLEND);
x=-1;y=0;z=0;
for (int i=0;i<10;i++) {
Matrix.setIdentityM(ModelMatrix, 0);
Matrix.translateM(ModelMatrix, 0, x, y, z);
x+=0.6f;
Matrix.multiplyMM(ModelMatrix, 0, ModelMatrix, 0, ProjectionMatrix, 0);
GLES20.glUniformMatrix4fv(hMatrix, 1, false, ModelMatrix, 0);
GLES20.glDrawElements(GLES20.GL_TRIANGLES, DrawOrder.length, GLES20.GL_UNSIGNED_SHORT, bDrawOrder);
}
GLES20.glDisable(GLES20.GL_BLEND);
GLES20.glDisableVertexAttribArray(hVertex);
}
And My fragment shader:
public final String fragmentShaderCode =
"precision mediump float;" +
"uniform vec4 vColor;" +
"uniform sampler2D u_Texture;" +
"varying vec2 v_TexCoordinate;" +
"void main() {" +
" gl_FragColor = vColor * texture2D(u_Texture, v_TexCoordinate);" +
"}";
and my renderer code:
super(context);
setEGLContextClientVersion(2);
getHolder().setFormat(PixelFormat.TRANSLUCENT);
setEGLConfigChooser(8, 8, 8, 8, 8, 8);
renderer = new GLRenderer(context);
setRenderer(renderer);
I already tried different functions with glBlendFunc but nothing seems to work. Does anyone know what the problem is? I'm really lost. If you need any more code, just ask!
Thank you!
My guess is that you need to disable the depth test when drawing these. Since they all appear at the same depth, when you draw your leftmost ring, it writes into the depth buffer for every pixel in the quad, even the transparent ones.
Then when you draw the next quad to the right, the pixels which overlap don't get drawn because they fail the depth test, so you just get a blank area where it intersects with the first quad.
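A minimal sketch of that fix around the existing blend setup (either disable the depth test entirely for this 2D drawing, or keep the test but disable depth writes during the transparent pass):
GLES20.glDisable(GLES20.GL_DEPTH_TEST); // simplest option for flat, overlapping sprites
// or, if other geometry still needs depth testing:
GLES20.glDepthMask(false); // test against existing depth, but don't write it
GLES20.glEnable(GLES20.GL_BLEND);
GLES20.glBlendFunc(GLES20.GL_ONE, GLES20.GL_ONE_MINUS_SRC_ALPHA);
// ... draw the hexagons ...
GLES20.glDepthMask(true);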