I am using PBOs with OpenGL ES 3.0 on Android, but since switching to them the performance of data unpacking has degraded. Please help me find the fault in the code.
Background: My app consists of a module that generates images at a very fast rate. These images are uploaded to textures, which are finally drawn on a TextureView. The bottleneck in my app is the time taken to upload images to textures.
I decided to try PBOs to improve this performance. Below is my code:
One-time initialization:
CheckGLErr( glGenTextures( 1, &texName ) );
if( usePBO )
{
CheckGLErr( glGenBuffers( 1, &pboId ) );
CheckGLErr( glBindBuffer( GL_PIXEL_UNPACK_BUFFER, pboId ) );
CheckGLErr( glBufferData( GL_PIXEL_UNPACK_BUFFER, textureSize, 0, GL_STREAM_DRAW ) );
CheckGLErr( glBindBuffer( GL_PIXEL_UNPACK_BUFFER, 0 ) );
}
glBindTexture( GL_TEXTURE_2D, texName );
glTexImage2D( GL_TEXTURE_2D, 0, GL_RGBA, windowWidth, windowHeight, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0 );
glBindTexture( GL_TEXTURE_2D, 0 );
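As an aside, since this is ES 3.0, I understand the same one-time allocation could also be done with immutable storage (glTexStorage2D), which some drivers reportedly handle more efficiently; a minimal sketch, replacing the glTexImage2D call above:
// Alternative one-time allocation with immutable storage (ES 3.0).
// GL_RGBA8 is the sized equivalent of the GL_RGBA + GL_UNSIGNED_BYTE pair.
glBindTexture( GL_TEXTURE_2D, texName );
CheckGLErr( glTexStorage2D( GL_TEXTURE_2D, 1, GL_RGBA8, windowWidth, windowHeight ) );
glBindTexture( GL_TEXTURE_2D, 0 );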
Render loop:
CheckGLErr(glClearColor(0, 1.0f, 0, 1));
CheckGLErr(glClear(GL_COLOR_BUFFER_BIT));
glBindTexture( GL_TEXTURE_2D, texName );
if( usePBO )
{
CheckGLErr(glBindBuffer( GL_PIXEL_UNPACK_BUFFER, pboId ));
void* pBuff = ::glMapBufferRange( GL_PIXEL_UNPACK_BUFFER, 0, textureSize , GL_MAP_WRITE_BIT );
memcpy( pBuff, buffer.get(), textureSize );
CheckGLErr(glUnmapBuffer( GL_PIXEL_UNPACK_BUFFER ));
}
//std::this_thread::sleep_for( std::chrono::milliseconds( 100 ) );
glTexSubImage2D( GL_TEXTURE_2D, 0, 0, 0, windowWidth, windowHeight, GL_RGBA, GL_UNSIGNED_BYTE, usePBO ? 0 : buffer.get() );
// Setup texture properties.
CheckGLErr(glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST));
CheckGLErr(glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST));
CheckGLErr(glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE));
CheckGLErr(glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE));
CheckGLErr( glUseProgram( programObject ) );
// Enable position vertex attribute. Its size is 3 floats corresponding for co-ordinates in three directions.
CheckGLErr(glEnableVertexAttribArray(0));
CheckGLErr(glVertexAttribPointer(0, 3, GL_FLOAT, false, 5 * sizeof(float)/*vertexStride*/, (const GLvoid *)0));
// Enable UV vertex attribute which starts after position. Hence offset = 3 floats.
CheckGLErr(glEnableVertexAttribArray(1));
CheckGLErr(glVertexAttribPointer(1, 2, GL_FLOAT, false, 5 * sizeof(float)/*vertexStride*/, (const GLvoid *)(3*sizeof(float))/*offset worth position*/));
// Setup ViewPort
CheckGLErr(glViewport(0, 0, windowWidth, windowHeight));
if(usePBO )
CheckGLErr( glBindBuffer( GL_PIXEL_UNPACK_BUFFER, 0 ) );
CheckGLErr(glDrawElements(GL_TRIANGLE_STRIP, _countof(indices), GL_UNSIGNED_SHORT, (const GLvoid *)0x00000000));
eglSwapBuffers(eglDisplay, eglWindowSurface);
Measurements Done:
On Nexus 7 (4.4.4) Adreno GPU
Screen Size = 1624 * 1200 pixels
Without PBO : 17 ms
With PBO : 18 ms (~5 ms for buffer population, the rest in the glTexSubImage2D call)
On Nexus 6 (5.0)
Screen Size = 2042 * 1440 pixels
Without PBO : 6 ms
With PBO : 20 ms
My assumption was that with PBOs glTexSubImage2D should return almost instantaneously (or at least in < 5 ms), because the image data is already in VRAM, but in fact it is taking much longer, especially on the Nexus 6.
As an experiment I put std::this_thread::sleep_for( std::chrono::milliseconds( 100 ) ); between glUnmapBuffer and glTexSubImage2D to see whether it changed the time taken by glTexSubImage2D alone, but it remained the same.
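For completeness, one pattern I have seen recommended for streaming uploads is to orphan the PBO and map it with invalidation, so the driver does not have to synchronize on the previous frame's contents; a minimal sketch of the mapping step under that assumption:
CheckGLErr( glBindBuffer( GL_PIXEL_UNPACK_BUFFER, pboId ) );
// Orphan the old storage so the driver can hand back fresh memory
// instead of stalling until the previous upload has completed.
CheckGLErr( glBufferData( GL_PIXEL_UNPACK_BUFFER, textureSize, 0, GL_STREAM_DRAW ) );
void* pBuff = glMapBufferRange( GL_PIXEL_UNPACK_BUFFER, 0, textureSize,
                                GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_BUFFER_BIT );
if( pBuff )
{
    memcpy( pBuff, buffer.get(), textureSize );
    CheckGLErr( glUnmapBuffer( GL_PIXEL_UNPACK_BUFFER ) );
}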
I am trying to "software render" a pixel array to the screen in Android 12, using C and OpenGLES.
Relevant code - where I am expecting a 0xff0000ff (red) or 0xff00ffff (violet) rectangle:
GLbitfield *rectangle;
GLbitfield *picture;
void Renderer::render() {
for (int i = 0; i<1024*768; i++)
*(picture + i) = 0xff00ffff;
glTexSubImage2D( GL_TEXTURE_2D, 0, 0, 0, 1024, 768, GL_RGB_INTEGER, GL_UNSIGNED_INT, picture);
// necessary IDK
glBlitFramebuffer( 0, 0, 1024, 768,0, 0, 1024, 768, GL_COLOR_BUFFER_BIT, GL_LINEAR);
eglSwapBuffers( display_, surface_ );
}
GLuint texid;
void Renderer::initRenderer() {
rectangle = (GLbitfield*)malloc(1024 * 768 * sizeof(GLbitfield));
picture = (GLbitfield*)malloc(1024 * 768 * sizeof(GLbitfield));
for( int i=0; i<1024*768; i++ )
*(rectangle + i ) = 0xff0000ff;
glGenTextures(1, &texid);
glBindTexture( GL_TEXTURE_2D, texid);
glTexImage2D( GL_TEXTURE_2D, 0, GL_RGBA, 1024, 768, 0, GL_RGBA, GL_UNSIGNED_INT, rectangle);
glFramebufferTexture2D( GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, texid, 0 );
[...]
}
So yeah, this compiles and runs with a black screen. I just want GL to "let me through" with my red/purple pixels, so that I can later change the *picture array in my render loop to something cool.
Or is this approach totally wrong, or even unfeasible? Performance-wise, the ARM in the phone should be more than capable of some software trickery, IMHO.
By the way, I really "thank" Khronos for removing glWritePixels() from GLES, as that would have seemed the ideal solution to me. No need for this whole texture workaround!
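For reference, a format/type combination that ES 3.0 does guarantee for RGBA textures is GL_RGBA + GL_UNSIGNED_BYTE, and blitting needs the source FBO on the read binding; a minimal sketch under those assumptions (fbo is a hypothetical framebuffer id, since the snippet above never creates one, and the texture is assumed created with GL_RGBA / GL_UNSIGNED_BYTE):
// Fill a byte-wise RGBA buffer; per-byte writes avoid endianness issues.
static uint8_t pixels[1024 * 768 * 4];
for (int i = 0; i < 1024 * 768; i++) {
    pixels[i * 4 + 0] = 0xff; // R
    pixels[i * 4 + 1] = 0x00; // G
    pixels[i * 4 + 2] = 0x00; // B
    pixels[i * 4 + 3] = 0xff; // A
}
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, 1024, 768,
                GL_RGBA, GL_UNSIGNED_BYTE, pixels);
// Blit from the FBO holding the texture to the default framebuffer.
glBindFramebuffer(GL_READ_FRAMEBUFFER, fbo);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
glBlitFramebuffer(0, 0, 1024, 768, 0, 0, 1024, 768,
                  GL_COLOR_BUFFER_BIT, GL_NEAREST);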
Thanks in advance for your expertise.
I'm trying to capture frames with the camera and record them as an MP4 file on Android Oreo.
I implemented the capture process using the NDK.
After that, I created a preview surface (GLSurfaceView) and got the input surface from the encoder (MediaCodec).
However, I don't know how to render the captured frames to the encoder's input surface.
/* This is onDrawFrame of Renderer in GLSurfaceView instance
* The native function onDrawFrame(texMatrix) is called */
@Override
public void onDrawFrame(GL10 gl) {
synchronized (lock) {
if ( frameAvailable ) {
Log.d("yoo", "Frame availablee...updating");
surfaceTexture.updateTexImage();
surfaceTexture.getTransformMatrix(texMatrix);
frameAvailable = false;
}
}
onDrawFrame(texMatrix);
}
/* This is the drawing function in the NDK part, executed when onDrawFrame(texMatrix) is called */
static void drawFrame ( JNIEnv* env, jfloatArray texMatArray )
{
LOGD("DrawFrame called");
glClearColor ( 0, 0, 0, 1 ); // set the clear color before clearing
glClear ( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT );
glUseProgram ( prog );
// Configure main transformations
float mvp[] = {
1.0f, 0, 0, 0,
0, 1.0f, 0, 0,
0, 0, 1.0f, 0,
0, 0, 0, 1.0f
};
// give mvp data to shader
glUniformMatrix4fv ( mvpMatrix, 1, false, mvp );
// Prepare texture for drawing
glActiveTexture ( GL_TEXTURE0 );
glBindTexture ( GL_TEXTURE_EXTERNAL_OES, textureId );
glTexParameteri ( GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE ); // target must match the bound external texture
glTexParameteri ( GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE );
// Pass SurfaceTexture transformations to shader
float* tm = env->GetFloatArrayElements ( texMatArray, 0 );
glUniformMatrix4fv ( texMatrix, 1, false, tm );
env->ReleaseFloatArrayElements ( texMatArray, tm, 0 );
// Set the SurfaceTexture sampler
glUniform1i ( texSampler, 0 );
// specify color to mix with camera frames
float c[] = { 1, 1, 1, 1 };
glUniform4fv ( color, 1, (GLfloat*)c );
// Size of the window is used in fragment shader
// to split the window
float sz[2] = {0};
sz[0] = width/2;
sz[1] = height/2;
glUniform2fv ( size, 1, (GLfloat*)sz );
// Set up vertices/indices and draw
glBindBuffer ( GL_ARRAY_BUFFER, buf[0] );
glBindBuffer ( GL_ELEMENT_ARRAY_BUFFER, buf[1] );
glEnableVertexAttribArray ( vtxPosAttrib ) ;
glVertexAttribPointer ( vtxPosAttrib, 3, GL_FLOAT, GL_FALSE, sizeof(float) * 5, (void*)0 );
glEnableVertexAttribArray ( uvsAttrib );
glVertexAttribPointer ( uvsAttrib, 2, GL_FLOAT, GL_FALSE, sizeof(float) * 5, (void*)(3 * sizeof(float) ) );
glViewport ( 0, 0, width, height );
glTexParameteri ( GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE );
glTexParameteri ( GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE );
glDrawElements ( GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0 );
}
Actually, I tried to create the MediaCodec encoder instance, get its input surface, and render the captured frames to it. (I referred to Grafika's examples.)
However, I don't know how to render the captured frames into the encoder's surface after rendering them into the GLSurfaceView.
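Based on Grafika, what I imagine is needed is roughly the following (a sketch only; encoderSurfaceObj stands for the Surface returned by MediaCodec.createInputSurface(), and display/config/context are the existing EGL objects):
// Create a second EGL window surface on the encoder's input surface.
ANativeWindow* encoderWindow = ANativeWindow_fromSurface( env, encoderSurfaceObj );
EGLSurface encoderEglSurface = eglCreateWindowSurface( display, config,
                                                       encoderWindow, nullptr );
// Per frame: after drawing the preview, switch to the encoder surface,
// issue the same draw with the same context and textures, then swap.
eglMakeCurrent( display, encoderEglSurface, encoderEglSurface, context );
drawFrame( env, texMatArray );
eglSwapBuffers( display, encoderEglSurface );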
-EDIT-
I tried to create an EGLSurface from the encoder's input surface, but the error message below is shown:
android.opengl.EGL14.eglCreateWindowSurface(mEGLCore.getDisplay(), mEGLCore.getConfig(), mEncoderSurface, surfaceAttribs, 0);
/* error message */
E/BufferQueueProducer: [GraphicBufferSource] connect: BufferQueue has been abandoned
E/libEGL: eglCreateWindowSurface: native_window_api_connect (win=0x92b7f808) failed (0xffffffed) (already connected to another API?)
E/libEGL: eglCreateWindowSurface:693 error 3003 (EGL_BAD_ALLOC)
Any reply will be appreciated.
I'm building an Android app to render a 3D (Wavefront .obj) model. Using tinyobjloader, I can successfully load the model.
Code:
std::vector<glm::vec3> vertices;
std::vector<glm::vec2> uvs;
std::vector<glm::vec3> normals;
tinyobj::attrib_t attrib;
std::vector<tinyobj::shape_t> shapes;
std::vector<tinyobj::material_t> materials;
for(size_t s =0; s < shapes.size(); s++)
{
size_t index_offset = 0;
for(size_t f = 0; f < shapes[s].mesh.num_face_vertices.size(); f++)
{
int fv = shapes[s].mesh.num_face_vertices[f];
for(size_t v = 0; v < fv; v++)
{
tinyobj::index_t idx = shapes[s].mesh.indices[index_offset + v];
tinyobj::real_t vx = attrib.vertices[3*idx.vertex_index+0];
tinyobj::real_t vy = attrib.vertices[3*idx.vertex_index+1];
tinyobj::real_t vz = attrib.vertices[3*idx.vertex_index+2];
tinyobj::real_t nx = attrib.normals[3*idx.normal_index+0];
tinyobj::real_t ny = attrib.normals[3*idx.normal_index+1];
tinyobj::real_t nz = attrib.normals[3*idx.normal_index+2];
tinyobj::real_t ux = attrib.texcoords[2*idx.texcoord_index+0];
tinyobj::real_t uy = attrib.texcoords[2*idx.texcoord_index+1];
vertices.push_back(glm::vec3(vx,vy,vz));
normals.push_back(glm::vec3(nx,ny,nz));
uvs.push_back(glm::vec2(ux,uy));
}
index_offset += fv;
}
}
Because the original .obj file mixes face formats, e.g.:
f 1/2/3 3/2/1 3/2/3
f 1/2/3 1/3/4 1/4/5 6/7/2
I used Blender's Triangulate with the 'Beauty' option to convert quads to triangles, but the rendered result is weird.
I have built two functions: initOpenGL (run once) and render().
initOpenGL code:
glGenBuffers(1, &VBO);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, vertices.size() * sizeof(glm::vec3), &vertices[0], GL_STATIC_DRAW);
glGenBuffers(1, &UVBO);
glBindBuffer(GL_ARRAY_BUFFER, UVBO);
glBufferData(GL_ARRAY_BUFFER, uvs.size() * sizeof(glm::vec2), &uvs[0], GL_STATIC_DRAW);
//Linking Vertex Attribute
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, (void*)0);
//bind texture
glEnableVertexAttribArray(1);
glBindBuffer(GL_ARRAY_BUFFER, UVBO);
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 0, (void*)0);
//load texture
//texture1
glGenTextures(1, &texture1);
glBindTexture(GL_TEXTURE_2D, texture1);
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE );
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
if(patternSrc1)
{
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, patWidth1, patHeight1, 0, GL_RGBA, GL_UNSIGNED_BYTE,
patternSrc1);
//glGenerateMipmap(GL_TEXTURE_2D);
}
render() code:
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glUseProgram(shaderProgram);
camera.ProcessOneFinger(moveStpX, moveStpY);
camera.ProcessTwoFinger(move2X, move2Y);
projection = glm::perspective(camera.GetZoom(), (GLfloat)600/(GLfloat)1024, nearPlane, farPlane);
view = camera.GetViewMatrix();
glUniformMatrix4fv(glGetUniformLocation(shaderProgram, "projection"), 1, GL_FALSE, glm::value_ptr(projection));
glUniformMatrix4fv(glGetUniformLocation(shaderProgram, "view"), 1, GL_FALSE, glm::value_ptr(view));
glm::mat4 model;
model = glm::translate(model, glm::vec3(0.0f, 0.0f, 0.0f));
GLfloat angle = 20.0f;
model = glm::rotate(model, angle, glm::vec3( 1.0f, 0.3f, 0.5f));
glUniformMatrix4fv(glGetUniformLocation(shaderProgram, "model"), 1, GL_FALSE, glm::value_ptr( model ) );
glDrawArrays(GL_TRIANGLES, 0, vertices.size());
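One thing worth double-checking in the rotation above: recent GLM versions take the angle in radians, so a literal 20.0f is roughly three full turns; a sketch of the degrees-based intent, assuming that is what was meant:
// glm::rotate expects radians in modern GLM; convert from degrees.
model = glm::rotate(model, glm::radians(angle), glm::vec3(1.0f, 0.3f, 0.5f));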
Model detail
Texture:
Model.obj file after triangulation:
https://pastebin.com/vUjHv8Fr
Thank you!!!
This looks to me like your texture is upside down. There are basically two potential errors that could each lead to this happening. Most likely, the texture image itself is upside down, e.g., because the image data passed to glTexImage2D() is in the wrong order. Unlike most other APIs, OpenGL (by default) expects pixel data in row-wise order starting from the bottom row. Check the way you load the texture data to make sure it's in the right order.
If this is not the problem, then it might be that your texture coordinates are for a left-handed texture coordinate system. OpenGL, however, uses right-handed texture coordinates where the origin is the lower-left corner of the texture image rather than the upper left corner. I'm not a Blender guy, but there's probably an export setting for this…
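For example, if the image data is in top-to-bottom order, a common fix is to flip the V coordinate while filling the UV array in the tinyobjloader loop above; a minimal sketch:
// Flip V at load time so top-to-bottom image rows and OpenGL's
// bottom-left texture origin agree; ux/uy as in the loop above.
uvs.push_back(glm::vec2(ux, 1.0f - uy));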
I am attempting to create a framebuffer object and then blit it to the primary display. The purpose of this is to cache a screenshot that I can blit to the display whenever I want without re-rendering the object. I am using OpenGL ES 3.0 with the Android NDK.
I created a frame buffer in the normal way.
GLuint framebuffer = 0;
glGenFramebuffers( 1, &framebuffer );
glBindFramebuffer( GL_FRAMEBUFFER, framebuffer );
GLuint colorRenderbuffer = 0;
glGenRenderbuffers( 1, &colorRenderbuffer );
glBindRenderbuffer( GL_RENDERBUFFER, colorRenderbuffer );
glRenderbufferStorage( GL_RENDERBUFFER, GL_RGBA8, engine->width, engine->height );
glFramebufferRenderbuffer( GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, colorRenderbuffer );
GLuint depthRenderbuffer = 0;
glGenRenderbuffers( 1, &depthRenderbuffer );
glBindRenderbuffer( GL_RENDERBUFFER, depthRenderbuffer );
glRenderbufferStorage( GL_RENDERBUFFER, GL_DEPTH_COMPONENT16, engine->width, engine->height );
glFramebufferRenderbuffer( GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, depthRenderbuffer );
GLenum status = glCheckFramebufferStatus( GL_FRAMEBUFFER );
if (status != GL_FRAMEBUFFER_COMPLETE) {
LOGI( "failed to make complete framebuffer object %d", status );
}
else {
LOGI( "Frame buffer is complete." );
}
This works without error. I then am able to edit the frame buffer, successfully.
glBindFramebuffer( GL_DRAW_FRAMEBUFFER, framebuffer );
glBindFramebuffer( GL_READ_FRAMEBUFFER, framebuffer );
// Just fill the screen with a color.
glClearColor( red, green, blue, 255 );
glClear( GL_COLOR_BUFFER_BIT );
read_pixels( ); //wrapper for glReadPixels. Shows that all pixels in the frame buffer object have the desired color.
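(read_pixels is described as a wrapper around glReadPixels; its definition is not shown, but a minimal sketch of such a wrapper, assuming an RGBA8 color buffer and the engine struct used above, might look like this:)
static void read_pixels( struct engine* engine ) {
    // Read back the currently bound read framebuffer and log one pixel.
    std::vector<GLubyte> pixels( engine->width * engine->height * 4 );
    glReadPixels( 0, 0, engine->width, engine->height,
                  GL_RGBA, GL_UNSIGNED_BYTE, pixels.data() );
    LOGI( "pixel(0,0) = %02x %02x %02x %02x",
          pixels[0], pixels[1], pixels[2], pixels[3] );
}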
However, attempting to blit this to the main draw buffer fails.
glBindFramebuffer( GL_DRAW_FRAMEBUFFER, 0 );
glBindFramebuffer( GL_READ_FRAMEBUFFER, framebuffer );
glReadBuffer( GL_COLOR_ATTACHMENT0 );
glBlitFramebuffer( 0, 0, engine->width, engine->height, 0, 0, engine->width, engine->height, GL_COLOR_BUFFER_BIT, GL_LINEAR );
LOGI("GL error after blit: %d", glGetError()); //no error reported
glBindFramebuffer( GL_DRAW_FRAMEBUFFER, 0 );
glBindFramebuffer( GL_READ_FRAMEBUFFER, 0 );
read_pixels( engine ); //gives zero'd buffer (wrong result)
eglSwapBuffers( engine->display, engine->surface ); //shows a black screen, unless I use glClearColor directly on the primary display buffer.
I was able to get around the problem by using a texture-backed (instead of renderbuffer-backed) framebuffer object, drawing to that framebuffer, and then drawing the framebuffer's texture to the default buffer. This isn't really an answer but a workaround, so I'd still appreciate it if anyone has an answer to the original question.
For reference, I include the key code below:
GLuint LoadFrameBufferTexture( ESContext *context, int width, int height ) {
glGenFramebuffers( 1, &(context->framebuffer) );
glBindFramebuffer( GL_FRAMEBUFFER, context->framebuffer );
// create the texture
GLuint texture;
glGenTextures( 1, &texture );
glBindTexture( GL_TEXTURE_2D, texture );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR );
glTexImage2D( GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL );
glFramebufferTexture2D( GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, texture, 0 );
GLuint depthRenderbuffer;
glGenRenderbuffers( 1, &depthRenderbuffer );
glBindRenderbuffer( GL_RENDERBUFFER, depthRenderbuffer );
glRenderbufferStorage( GL_RENDERBUFFER, GL_DEPTH_COMPONENT16, width, height );
glFramebufferRenderbuffer( GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, depthRenderbuffer );
GLenum status = glCheckFramebufferStatus( GL_FRAMEBUFFER );
if (status != GL_FRAMEBUFFER_COMPLETE) {
LOGI("failed to make complete framebuffer object ");
} else {
LOGI("Frame buffer is complete.");
}
glBindFramebuffer( GL_FRAMEBUFFER, 0 );
return texture;
}
void draw_frame(int red, int green, int blue) {
glBindFramebuffer( GL_FRAMEBUFFER, esContext->framebuffer );
glClearColor( red, green, blue, 255 );
glClear( GL_COLOR_BUFFER_BIT );
glBindFramebuffer(GL_FRAMEBUFFER, 0 );
DrawFrame( esContext );
eglSwapBuffers( engine->display, engine->surface );
}
void DrawFrame( ESContext *esContext ) {
UserData *userData = (UserData *)esContext->userData;
GLfloat vVertices[] = {-1.0f, 1.0f, 0.0f, 0.0f, 1.0f, -1.0f, -1.0f, 0.0f, 0.0f, 0.0f, 1.0f, -1.0f, 0.0f, 1.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f, 1.0f };
GLushort indices[] = { 0, 1, 2, 0, 2, 3 };
glViewport( 0, 0, esContext->width, esContext->height );
glUseProgram( userData->programObject );
glVertexAttribPointer( 0, 3, GL_FLOAT, GL_FALSE, 5 * sizeof( GLfloat ), vVertices );
glVertexAttribPointer( 1, 2, GL_FLOAT, GL_FALSE, 5 * sizeof( GLfloat ), &vVertices[3] );
glEnableVertexAttribArray( 0 );
glEnableVertexAttribArray( 1 );
glActiveTexture( GL_TEXTURE0 );
glBindTexture( GL_TEXTURE_2D, userData->frameTexId );
glUniform1i( userData->baseMapLoc, 0 );
glDrawElements( GL_TRIANGLES, 6, GL_UNSIGNED_SHORT, indices );
}
When rendering two different surfaces on the same thread, I need to call eglMakeCurrent every time to unbind the current surface from the thread and bind the other surface before rendering to it, but the operation of unbinding a surface from a thread consumes too much time!
SourceCode:
int DisplayPicture(...)
{
if (!eglMakeCurrent(m_glesDisplay ,m_glesSurface, m_glesSurface, m_glesContext))
{
return 0;
}
glUseProgram(programObject);
glActiveTexture( GL_TEXTURE0 );
glUniform1i(m_nUniformLocation[0],0);
glBindTexture( GL_TEXTURE_2D, 0);
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexImage2D( GL_TEXTURE_2D, 0, GL_LUMINANCE, nWidth, nHeight, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, pData );
....
// Update attribute values.
glVertexAttribPointer(ATTRIB_VERTEX, 2, GL_FLOAT, 0, 0, squareVertices);
glEnableVertexAttribArray(ATTRIB_VERTEX);
glVertexAttribPointer(ATTRIB_TEXCOORD, 2, GL_FLOAT, 0, 0, texCoords);
glEnableVertexAttribArray(ATTRIB_TEXCOORD);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
eglSwapBuffers(m_glesDisplay, m_glesSurface);
glUseProgram(0);
return 0;
}
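For what it's worth, EGL does not require an intermediate unbind: a single eglMakeCurrent call can switch from one surface to another on the same context. A minimal sketch, where m_glesSurfaceB is an assumed second surface:
// Switch straight from one surface to the other on the same context;
// no eglMakeCurrent(display, NULL, NULL, NULL) step is needed in between.
if( !eglMakeCurrent( m_glesDisplay, m_glesSurfaceB, m_glesSurfaceB, m_glesContext ) )
{
    return 0;
}
// ...draw as above, then eglSwapBuffers( m_glesDisplay, m_glesSurfaceB );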
PS:
1. When using textures in OpenGL ES, eglMakeCurrent(display, NULL, NULL, NULL) takes a long time. Why?
2. On some devices eglMakeCurrent returns EGL_BAD_ALLOC, while it works normally on other devices. Why?