OpenGL ES ANDROID C++ ERROR - android

I created a "Native Android App" project in Visual Studio 2017.
It compiles and runs fine, but when I add simple triangle-drawing code, the triangle does not show up.
My code :
/*
* Copyright (C) 2010 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#define LOGI(...) ((void) __android_log_print(ANDROID_LOG_INFO, "AndroidProject1.NativeActivity", __VA_ARGS__))
#define LOGW(...) ((void) __android_log_print(ANDROID_LOG_WARN, "AndroidProject1.NativeActivity", __VA_ARGS__))
#define LOG_TAG "libgl2jni"
#define LOGI(...) __android_log_print(ANDROID_LOG_INFO, LOG_TAG, __VA_ARGS__)
#define LOGE(...) __android_log_print(ANDROID_LOG_ERROR, LOG_TAG, __VA_ARGS__)
/* Log the GL string associated with the symbolic name (e.g. GL_VENDOR). */
static void printGLString(const char * name, GLenum s) {
    const char *value = (const char *) glGetString(s);
    LOGI("GL %s = %s\n", name, value);
}
/* Drain and log every GL error recorded since the last check. */
static void checkGlError(const char * op) {
    GLint err;
    while ((err = glGetError()) != 0) {
        LOGI("after %s() glError (0x%x)\n", op, err);
    }
}
/* GLSL ES 2.0 vertex shader: forwards the vPosition attribute unchanged. */
static const char gVertexShader[] =
"attribute vec4 vPosition;\n"
"void main() {\n"
" gl_Position = vPosition;\n"
"}\n";
/* GLSL ES 2.0 fragment shader: paints every fragment opaque green. */
static const char gFragmentShader[] =
"precision mediump float;\n"
"void main() {\n"
" gl_FragColor = vec4(0.0, 1.0, 0.0, 1.0);\n"
"}\n";
/*
 * Compile a single shader stage from source.
 * Returns the shader handle, or 0 on failure. Any compiler log is printed
 * via LOGE, and a failed shader object is always deleted.
 */
GLuint loadShader(GLenum shaderType,
const char * pSource) {
    GLuint shader = glCreateShader(shaderType);
    if (shader) {
        glShaderSource(shader, 1, &pSource, NULL);
        glCompileShader(shader);
        GLint compiled = 0;
        glGetShaderiv(shader, GL_COMPILE_STATUS, &compiled);
        if (!compiled) {
            GLint infoLen = 0;
            glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &infoLen);
            if (infoLen) {
                char * buf = (char * ) malloc(infoLen);
                if (buf) {
                    glGetShaderInfoLog(shader, infoLen, NULL, buf);
                    LOGE("Could not compile shader %d:\n%s\n",
                        shaderType, buf);
                    free(buf);
                }
            }
            /* BUG FIX: delete the shader on ANY compile failure, not only when
             * an info log is present. Previously this cleanup sat inside
             * `if (infoLen)`, so a failed compile with an empty log returned a
             * broken non-zero handle as if compilation had succeeded. */
            glDeleteShader(shader);
            shader = 0;
        }
    }
    return shader;
}
/*
 * Build and link a GL program from vertex + fragment source.
 * Returns the program handle, or 0 on failure (the link log is printed).
 * The intermediate shader objects are always released before returning.
 */
GLuint createProgram(const char * pVertexSource,
const char * pFragmentSource) {
    GLuint vertexShader = loadShader(GL_VERTEX_SHADER, pVertexSource);
    if (!vertexShader) {
        return 0;
    }
    GLuint pixelShader = loadShader(GL_FRAGMENT_SHADER, pFragmentSource);
    if (!pixelShader) {
        /* FIX: the vertex shader was leaked on this path. */
        glDeleteShader(vertexShader);
        return 0;
    }
    GLuint program = glCreateProgram();
    if (program) {
        glAttachShader(program, vertexShader);
        checkGlError("glAttachShader");
        glAttachShader(program, pixelShader);
        checkGlError("glAttachShader");
        glLinkProgram(program);
        GLint linkStatus = GL_FALSE;
        glGetProgramiv(program, GL_LINK_STATUS, &linkStatus);
        if (linkStatus != GL_TRUE) {
            GLint bufLength = 0;
            glGetProgramiv(program, GL_INFO_LOG_LENGTH, &bufLength);
            if (bufLength) {
                char * buf = (char * ) malloc(bufLength);
                if (buf) {
                    glGetProgramInfoLog(program, bufLength, NULL, buf);
                    LOGE("Could not link program:\n%s\n", buf);
                    free(buf);
                }
            }
            glDeleteProgram(program);
            program = 0;
        }
    }
    /* FIX: shader objects are no longer needed once the program is linked
     * (or creation failed); deleting them avoids a per-call GL object leak.
     * Attached shaders are only flagged and freed by GL when detached. */
    glDeleteShader(vertexShader);
    glDeleteShader(pixelShader);
    return program;
}
/* Linked GL program and the location of its "vPosition" attribute,
 * both filled in by engine_init_display(). */
GLuint gProgram;
GLuint gvPositionHandle;
/**
* Our saved state data.
*/
struct saved_state {
float angle; /* set to 0 at init; not read by this listing's draw path */
int32_t x;   /* last touch x, from AMotionEvent_getX() */
int32_t y;   /* last touch y, from AMotionEvent_getY() */
};
/**
* Shared state for our app.
*/
struct engine {
struct android_app * app;             /* glue-provided app object */
ASensorManager * sensorManager;
const ASensor * accelerometerSensor;  /* NULL when the device has none */
ASensorEventQueue * sensorEventQueue;
int animating;                        /* nonzero -> redraw continuously */
EGLDisplay display;                   /* EGL_NO_DISPLAY after teardown */
EGLSurface surface;
EGLContext context;
int32_t width;                        /* surface size from eglQuerySurface */
int32_t height;
struct saved_state state;             /* state persisted across restarts */
};
/**
* Initialize an EGL context for the current display.
*/
static int engine_init_display(struct engine * engine) {
// initialize OpenGL ES and EGL
/*
* Here specify the attributes of the desired configuration.
* Below, we select an EGLConfig with at least 8 bits per color
* component compatible with on-screen windows
*/
const EGLint attribs[] = {
EGL_SURFACE_TYPE,
EGL_WINDOW_BIT,
EGL_BLUE_SIZE,
8,
EGL_GREEN_SIZE,
8,
EGL_RED_SIZE,
8,
EGL_NONE
};
EGLint w, h, format;
EGLint numConfigs;
EGLConfig config;
EGLSurface surface;
EGLContext context;
EGLDisplay display = eglGetDisplay(EGL_DEFAULT_DISPLAY);
eglInitialize(display, 0, 0);
/* Here, the application chooses the configuration it desires. In this
* sample, we have a very simplified selection process, where we pick
* the first EGLConfig that matches our criteria */
eglChooseConfig(display, attribs, & config, 1, & numConfigs);
/* EGL_NATIVE_VISUAL_ID is an attribute of the EGLConfig that is
* guaranteed to be accepted by ANativeWindow_setBuffersGeometry().
* As soon as we picked a EGLConfig, we can safely reconfigure the
* ANativeWindow buffers to match, using EGL_NATIVE_VISUAL_ID. */
eglGetConfigAttrib(display, config, EGL_NATIVE_VISUAL_ID, & format);
ANativeWindow_setBuffersGeometry(engine->app->window, 0, 0, format);
surface = eglCreateWindowSurface(display, config, engine->app-> window, NULL);
context = eglCreateContext(display, config, NULL, NULL);
if (eglMakeCurrent(display, surface, surface, context) == EGL_FALSE) {
LOGW("Unable to eglMakeCurrent");
return -1;
}
eglQuerySurface(display, surface, EGL_WIDTH, & w);
eglQuerySurface(display, surface, EGL_HEIGHT, & h);
engine->display = display;
engine->context = context;
engine->surface = surface;
engine->width = w;
engine->height = h;
engine->state.angle = 0;
// Initialize GL state.
//glEnable(GL_CULL_FACE);
glDisable(GL_DEPTH_TEST);
gProgram = createProgram(gVertexShader, gFragmentShader);
gvPositionHandle = glGetAttribLocation(gProgram, "vPosition");
glViewport(0, 0, w, h);
return 0;
}
/* Two-component (x, y) clip-space positions for one triangle.
 * FIX: the float literals were garbled ("0.0 f" is not valid C); restored
 * to properly suffixed constants. */
const GLfloat gTriangleVertices[] = {
    0.0f,  0.5f,   /* top */
   -0.5f, -0.5f,   /* bottom left */
    0.5f, -0.5f    /* bottom right */
};
/**
* Just the current frame in the display.
*/
/**
 * Draw one frame: clear to black, then draw the green triangle.
 * FIX: the clear-color float literals were garbled ("0.0 f" is not valid
 * C); restored to 0.0f / 1.0f suffixed constants.
 */
static void engine_draw_frame(struct engine * engine) {
    if (engine->display == NULL) {
        // No display.
        return;
    }
    glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
    glClear(GL_COLOR_BUFFER_BIT);
    glUseProgram(gProgram);
    checkGlError("glUseProgram");
    /* Stream the triangle's 2D positions straight from client memory. */
    glVertexAttribPointer(gvPositionHandle, 2, GL_FLOAT, GL_FALSE, 0, gTriangleVertices);
    checkGlError("glVertexAttribPointer");
    glEnableVertexAttribArray(gvPositionHandle);
    checkGlError("glEnableVertexAttribArray");
    glDrawArrays(GL_TRIANGLES, 0, 3);
    checkGlError("glDrawArrays");
    eglSwapBuffers(engine->display, engine->surface);
}
/**
* Tear down the EGL context currently associated with the display.
*/
static void engine_term_display(struct engine * engine) {
if (engine->display != EGL_NO_DISPLAY) {
eglMakeCurrent(engine->display, EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT);
if (engine-> context != EGL_NO_CONTEXT) {
eglDestroyContext(engine->display, engine->context);
}
if (engine->surface != EGL_NO_SURFACE) {
eglDestroySurface(engine->display, engine->surface);
}
eglTerminate(engine->display);
}
engine->animating = 0;
engine->display = EGL_NO_DISPLAY;
engine->context = EGL_NO_CONTEXT;
engine->surface = EGL_NO_SURFACE;
}
/**
* Process the next input event.
*/
/* Input callback: record touch coordinates. Returns 1 when the event was
 * consumed (motion events only), 0 otherwise. */
static int32_t engine_handle_input(struct android_app * app, AInputEvent * event) {
    struct engine *eng = (struct engine *) app->userData;
    if (AInputEvent_getType(event) != AINPUT_EVENT_TYPE_MOTION) {
        return 0;
    }
    eng->state.x = AMotionEvent_getX(event, 0);
    eng->state.y = AMotionEvent_getY(event, 0);
    return 1;
}
/**
* Process the next main command.
*/
/*
 * Process the next main command from android_native_app_glue.
 * FIXES: the APP_CMD_SAVE_STATE branch had two statements fused onto one
 * line, and the sensor-rate expression used a garbled "1000 L" literal.
 */
static void engine_handle_cmd(struct android_app * app, int32_t cmd) {
    struct engine * engine = (struct engine *) app->userData;
    switch (cmd) {
    case APP_CMD_SAVE_STATE:
        /* The system has asked us to save our current state. Do so. */
        engine->app->savedState = malloc(sizeof(struct saved_state));
        *((struct saved_state *) engine->app->savedState) = engine->state;
        engine->app->savedStateSize = sizeof(struct saved_state);
        break;
    case APP_CMD_INIT_WINDOW:
        /* The window is being shown, get it ready. */
        if (engine->app->window != NULL) {
            engine_init_display(engine);
            engine_draw_frame(engine);
        }
        break;
    case APP_CMD_TERM_WINDOW:
        /* The window is being hidden or closed, clean it up. */
        engine_term_display(engine);
        break;
    case APP_CMD_GAINED_FOCUS:
        /* When our app gains focus, we start monitoring the accelerometer. */
        if (engine->accelerometerSensor != NULL) {
            ASensorEventQueue_enableSensor(engine->sensorEventQueue,
                engine->accelerometerSensor);
            /* We'd like 60 events per second (rate is in microseconds). */
            ASensorEventQueue_setEventRate(engine->sensorEventQueue,
                engine->accelerometerSensor, (1000L / 60) * 1000);
        }
        break;
    case APP_CMD_LOST_FOCUS:
        /* Stop monitoring the accelerometer while unfocused to save
         * battery, stop animating, and draw one final frame. */
        if (engine->accelerometerSensor != NULL) {
            ASensorEventQueue_disableSensor(engine->sensorEventQueue,
                engine->accelerometerSensor);
        }
        engine->animating = 0;
        engine_draw_frame(engine);
        break;
    default:
        break;
    }
}
/**
* This is the main entry point of a native application that is using
* android_native_app_glue. It runs in its own thread, with its own
* event loop for receiving input events and doing other things.
*/
/* Entry point run by android_native_app_glue on its own thread: wires up
 * callbacks, restores saved state, then loops polling events and drawing. */
void android_main(struct android_app * state) {
struct engine engine;
memset( & engine, 0, sizeof(engine));
/* Let the glue's callbacks reach our engine through userData. */
state->userData = & engine;
state->onAppCmd = engine_handle_cmd;
state->onInputEvent = engine_handle_input;
engine.app = state;
// Prepare to monitor accelerometer
engine.sensorManager = ASensorManager_getInstance();
engine.accelerometerSensor = ASensorManager_getDefaultSensor(engine.sensorManager,
ASENSOR_TYPE_ACCELEROMETER);
engine.sensorEventQueue = ASensorManager_createEventQueue(engine.sensorManager,
state->looper, LOOPER_ID_USER, NULL, NULL);
if (state->savedState != NULL) {
// We are starting with a previous saved state; restore from it.
engine.state = * (struct saved_state * ) state->savedState;
}
engine.animating = 1;
// loop waiting for stuff to do.
while (1) {
// Read all pending events.
int ident;
int events;
struct android_poll_source * source;
// If not animating, we will block forever waiting for events.
// If animating, we loop until all events are read, then continue
// to draw the next frame of animation.
/* NOTE(review): `ident` is polled but never compared to LOOPER_ID_USER
 * here — the stock sample's sensor-drain branch was removed, so queued
 * accelerometer events are never consumed. Confirm this is intended. */
while ((ident = ALooper_pollAll(engine.animating ? 0 : -1, NULL, & events,
(void * * ) & source)) >= 0) {
// Process this event.
if (source != NULL) {
source->process(state, source);
}
// Check if we are exiting.
if (state->destroyRequested != 0) {
engine_term_display( & engine);
return;
}
}
if (engine.animating) {
// Drawing is throttled to the screen update rate, so there
// is no need to do timing here.
engine_draw_frame(&engine);
}
}
}
I also updated pch.h to import OpenGL ES 2.0

I fixed it myself.
If any of you run into this problem, this is what you should do:
add the following code to the static int engine_init_display(struct engine* engine) function
const EGLint contextAttribs[] = {
EGL_CONTEXT_CLIENT_VERSION, 2,
EGL_NONE
};
and replace
context = eglCreateContext(display, config, NULL, NULL);
with
context = eglCreateContext(display, config, EGL_NO_CONTEXT, contextAttribs);

Related

android app development with ndk and tcc possible?

Is it possible to create Android apps with the help of tcc (the Tiny C Compiler)?
A normal NDK APK looks like a simple zip to me.
It contained only some signatures under the /META-INF folder, dynamic *.so libraries under /lib, AndroidManifest.xml, and resources.arsc.
├───lib
│ └───x86
│ └───gdbserver
│ └───libAndroid1.so
├───META-INF
│ └───CERT.RSA
│ └───CERT.SF
│ └───MANIFEST.MF
├───AndroidManifest.xml
├───resources.arsc
Also, it seems the gcc/clang compilers can be replaced by tcc. I managed to compile a generic *.so file with these commands:
tcc -r dllmain.c -o dllmain.o
tcc -shared dllmain.o -o dll.so
But now I'm not sure how I would compile a full NDK project by replacing gcc/clang with tcc.
Here's my failed attempt:
C:\Users\gray\source\repos\Android1\Android1\Android1.NativeActivity>tcc -shared -r main.c -o libAndroid1.so -IC:\Microsoft\AndroidNDK\android-ndk-r15c\platforms\android-19\arch-x86\usr\include\
tcc: warning: -r: overriding compiler action already specified
In file included from main.c:24:
In file included from C:/Microsoft/AndroidNDK/android-ndk-r15c/platforms/android-19/arch-x86/usr/include//android/sensor.h:43:
In file included from C:/Microsoft/AndroidNDK/android-ndk-r15c/platforms/android-19/arch-x86/usr/include//sys/types.h:35:
C:/Microsoft/AndroidNDK/android-ndk-r15c/platforms/android-19/arch-x86/usr/include//sys/cdefs.h:283: error: #error "No function renaming possible"
And also, I renamed main.cpp to main.c to make it work with tcc.
main.c:
/*
* Copyright (C) 2010 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#define LOGI(...) ((void)__android_log_print(ANDROID_LOG_INFO, "AndroidProject1.NativeActivity", __VA_ARGS__))
#define LOGW(...) ((void)__android_log_print(ANDROID_LOG_WARN, "AndroidProject1.NativeActivity", __VA_ARGS__))
/**
* Our saved state data.
*/
#include <android/sensor.h>
/* State preserved across activity teardown (see APP_CMD_SAVE_STATE). */
struct saved_state {
float angle; /* clear-color phase: stepped by .01f, wrapped at 1 in android_main */
int x;       /* last touch x, from AMotionEvent_getX() */
int y;       /* last touch y, from AMotionEvent_getY() */
};
/**
* Shared state for our app.
*/
struct engine {
struct android_app* app;             /* glue-provided app object */
ASensorManager* sensorManager;
const ASensor* accelerometerSensor;  /* NULL when the device has none */
ASensorEventQueue* sensorEventQueue;
int animating;                       /* nonzero -> redraw continuously */
EGLDisplay display;                  /* EGL_NO_DISPLAY after teardown */
EGLSurface surface;
EGLContext context;
int width;                           /* surface size from eglQuerySurface */
int height;
struct saved_state state;            /* state persisted across restarts */
};
/**
* Initialize an EGL context for the current display.
*/
static int engine_init_display(struct engine* engine) {
// initialize OpenGL ES and EGL
/*
* Here specify the attributes of the desired configuration.
* Below, we select an EGLConfig with at least 8 bits per color
* component compatible with on-screen windows
*/
const EGLint attribs[] = {
EGL_SURFACE_TYPE, EGL_WINDOW_BIT,
EGL_BLUE_SIZE, 8,
EGL_GREEN_SIZE, 8,
EGL_RED_SIZE, 8,
EGL_NONE
};
EGLint w, h, format;
EGLint numConfigs;
EGLConfig config;
EGLSurface surface;
EGLContext context;
EGLDisplay display = eglGetDisplay(EGL_DEFAULT_DISPLAY);
eglInitialize(display, 0, 0);
/* Here, the application chooses the configuration it desires. In this
* sample, we have a very simplified selection process, where we pick
* the first EGLConfig that matches our criteria */
eglChooseConfig(display, attribs, &config, 1, &numConfigs);
/* EGL_NATIVE_VISUAL_ID is an attribute of the EGLConfig that is
* guaranteed to be accepted by ANativeWindow_setBuffersGeometry().
* As soon as we picked a EGLConfig, we can safely reconfigure the
* ANativeWindow buffers to match, using EGL_NATIVE_VISUAL_ID. */
eglGetConfigAttrib(display, config, EGL_NATIVE_VISUAL_ID, &format);
ANativeWindow_setBuffersGeometry(engine->app->window, 0, 0, format);
surface = eglCreateWindowSurface(display, config, engine->app->window, NULL);
context = eglCreateContext(display, config, NULL, NULL);
if (eglMakeCurrent(display, surface, surface, context) == EGL_FALSE) {
LOGW("Unable to eglMakeCurrent");
return -1;
}
eglQuerySurface(display, surface, EGL_WIDTH, &w);
eglQuerySurface(display, surface, EGL_HEIGHT, &h);
engine->display = display;
engine->context = context;
engine->surface = surface;
engine->width = w;
engine->height = h;
engine->state.angle = 0;
// Initialize GL state.
glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_FASTEST);
glEnable(GL_CULL_FACE);
glShadeModel(GL_SMOOTH);
glDisable(GL_DEPTH_TEST);
return 0;
}
/**
* Just the current frame in the display.
*/
/* Render one frame: fill the screen with a color derived from the last
 * touch position and the animation phase, then present it. */
static void engine_draw_frame(struct engine* engine) {
    if (engine->display == NULL) {
        return; // No display yet.
    }
    float red = ((float) engine->state.x) / engine->width;
    float green = engine->state.angle;
    float blue = ((float) engine->state.y) / engine->height;
    glClearColor(red, green, blue, 1);
    glClear(GL_COLOR_BUFFER_BIT);
    eglSwapBuffers(engine->display, engine->surface);
}
/**
* Tear down the EGL context currently associated with the display.
*/
static void engine_term_display(struct engine* engine) {
if (engine->display != EGL_NO_DISPLAY) {
/* Unbind the context/surface before destroying them. */
eglMakeCurrent(engine->display, EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT);
if (engine->context != EGL_NO_CONTEXT) {
eglDestroyContext(engine->display, engine->context);
}
if (engine->surface != EGL_NO_SURFACE) {
eglDestroySurface(engine->display, engine->surface);
}
eglTerminate(engine->display);
}
/* Reset to sentinels so a later call (or draw) is a harmless no-op. */
engine->animating = 0;
engine->display = EGL_NO_DISPLAY;
engine->context = EGL_NO_CONTEXT;
engine->surface = EGL_NO_SURFACE;
}
/**
* Process the next input event.
*/
/* Input callback: record touch coordinates. Returns 1 when the event was
 * consumed (motion events only), 0 otherwise. */
static int engine_handle_input(struct android_app* app, AInputEvent* event) {
    struct engine* eng = (struct engine*) app->userData;
    if (AInputEvent_getType(event) != AINPUT_EVENT_TYPE_MOTION) {
        return 0;
    }
    eng->state.x = AMotionEvent_getX(event, 0);
    eng->state.y = AMotionEvent_getY(event, 0);
    return 1;
}
/**
* Process the next main command.
*/
/* Main-command callback: reacts to lifecycle events from the glue. */
static void engine_handle_cmd(struct android_app* app, int cmd) {
struct engine* engine = (struct engine*)app->userData;
switch (cmd) {
case APP_CMD_SAVE_STATE:
// The system has asked us to save our current state. Do so.
// NOTE(review): ownership of savedState presumably passes to
// android_native_app_glue, which frees it — confirm against the glue.
engine->app->savedState = malloc(sizeof(struct saved_state));
*((struct saved_state*)engine->app->savedState) = engine->state;
engine->app->savedStateSize = sizeof(struct saved_state);
break;
case APP_CMD_INIT_WINDOW:
// The window is being shown, get it ready.
if (engine->app->window != NULL) {
engine_init_display(engine);
engine_draw_frame(engine);
}
break;
case APP_CMD_TERM_WINDOW:
// The window is being hidden or closed, clean it up.
engine_term_display(engine);
break;
case APP_CMD_GAINED_FOCUS:
// When our app gains focus, we start monitoring the accelerometer.
if (engine->accelerometerSensor != NULL) {
ASensorEventQueue_enableSensor(engine->sensorEventQueue,
engine->accelerometerSensor);
// We'd like to get 60 events per second (in us).
ASensorEventQueue_setEventRate(engine->sensorEventQueue,
engine->accelerometerSensor, (1000L / 60) * 1000);
}
break;
case APP_CMD_LOST_FOCUS:
// When our app loses focus, we stop monitoring the accelerometer.
// This is to avoid consuming battery while not being used.
if (engine->accelerometerSensor != NULL) {
ASensorEventQueue_disableSensor(engine->sensorEventQueue,
engine->accelerometerSensor);
}
// Also stop animating.
engine->animating = 0;
engine_draw_frame(engine);
break;
}
}
/**
* This is the main entry point of a native application that is using
* android_native_app_glue. It runs in its own thread, with its own
* event loop for receiving input events and doing other things.
*/
/* Entry point run by android_native_app_glue on its own thread: wires up
 * callbacks, restores saved state, then loops polling events and drawing. */
void android_main(struct android_app* state) {
struct engine engine;
memset(&engine, 0, sizeof(engine));
/* Let the glue's callbacks reach our engine through userData. */
state->userData = &engine;
state->onAppCmd = engine_handle_cmd;
state->onInputEvent = engine_handle_input;
engine.app = state;
// Prepare to monitor accelerometer
engine.sensorManager = ASensorManager_getInstance();
engine.accelerometerSensor = ASensorManager_getDefaultSensor(engine.sensorManager,
ASENSOR_TYPE_ACCELEROMETER);
engine.sensorEventQueue = ASensorManager_createEventQueue(engine.sensorManager,
state->looper, LOOPER_ID_USER, NULL, NULL);
if (state->savedState != NULL) {
// We are starting with a previous saved state; restore from it.
engine.state = *(struct saved_state*)state->savedState;
}
engine.animating = 1;
// loop waiting for stuff to do.
while (1) {
// Read all pending events.
int ident;
int events;
struct android_poll_source* source;
// If not animating, we will block forever waiting for events.
// If animating, we loop until all events are read, then continue
// to draw the next frame of animation.
while ((ident = ALooper_pollAll(engine.animating ? 0 : -1, NULL, &events,
(void**)&source)) >= 0) {
// Process this event.
if (source != NULL) {
source->process(state, source);
}
// If a sensor has data, process it now.
if (ident == LOOPER_ID_USER) {
if (engine.accelerometerSensor != NULL) {
ASensorEvent event;
/* Drain the whole queue so stale readings don't pile up. */
while (ASensorEventQueue_getEvents(engine.sensorEventQueue,
&event, 1) > 0) {
LOGI("accelerometer: x=%f y=%f z=%f",
event.acceleration.x, event.acceleration.y,
event.acceleration.z);
}
}
}
// Check if we are exiting.
if (state->destroyRequested != 0) {
engine_term_display(&engine);
return;
}
}
if (engine.animating) {
// Done with events; draw next animation frame.
engine.state.angle += .01f;
if (engine.state.angle > 1) {
engine.state.angle = 0;
}
// Drawing is throttled to the screen update rate, so there
// is no need to do timing here.
engine_draw_frame(&engine);
}
}
}

Read from GL_TEXTURE_EXTERNAL_OES to GL_TEXTURE_2D have perfomance issues and glitches

I need to send data from a GL_TEXTURE_EXTERNAL_OES texture to a plain GL_TEXTURE_2D (rendering an image from an Android player into a Unity texture). Currently I do it by reading pixels from a framebuffer with the source texture attached. This works correctly on my OnePlus 5 phone, but shows glitches on phones like the Xiaomi Note 4, Mi A2, etc. (the image looks very green), and there are also performance issues because this process runs every frame — the more pixels there are to read, the worse the performance (even my phone gets a low frame rate at 4K resolution). Any idea how to optimize this process, or a different way to do it?
Thanks and best regards!
// Temporary FBO used to make the external (OES) texture readable.
GLuint FramebufferName;
glGenFramebuffers(1, &FramebufferName);
glBindFramebuffer(GL_FRAMEBUFFER, FramebufferName);
// Attach the decoder's external texture as the color source.
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_EXTERNAL_OES, g_ExtTexturePointer, 0);
if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE)
{
LOGD("%s", "Error: Could not setup frame buffer.");
}
// NOTE(review): this per-frame heap allocation plus the GPU->CPU
// glReadPixels round-trip is the performance cost discussed below.
unsigned char* data = new unsigned char[g_SourceWidth * g_SourceHeight * 4];
glReadPixels(0, 0, g_SourceWidth, g_SourceHeight, GL_RGBA, GL_UNSIGNED_BYTE, data);
// Re-upload the pixels into the destination 2D texture Unity samples.
glBindTexture(GL_TEXTURE_2D, g_TexturePointer);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, g_SourceWidth, g_SourceHeight, 0, GL_RGBA, GL_UNSIGNED_BYTE, data);
glDeleteFramebuffers(1, &FramebufferName);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glBindTexture(GL_TEXTURE_2D, 0);
delete[] data;
UPDATE.
Function which contain this code and function which calls it from Unity side
static void UNITY_INTERFACE_API OnRenderEvent(int eventID) { ... }
extern "C" UnityRenderingEvent UNITY_INTERFACE_EXPORT UNITY_INTERFACE_API UMDGetRenderEventFunc()
{
return OnRenderEvent;
}
Which called from Unity Update function like this:
[DllImport("RenderingPlugin")]
static extern IntPtr UMDGetRenderEventFunc();
IEnumerator UpdateVideoTexture()
{
while (true)
{
...
androidPlugin.UpdateSurfaceTexture();
GL.IssuePluginEvent(UMDGetRenderEventFunc, 1);
}
}
And Android plugin do this on its side (surfaceTexture its texture which contain this external texture on which ExoPlayer render video)
public void exportUpdateSurfaceTexture() {
synchronized (this) {
if (this.mIsStopped) {
return;
}
surfaceTexture.updateTexImage();
}
}
On the C++ side:
You're creating and destroying pixel data every frame when you do new unsigned char[g_SourceWidth * g_SourceHeight * 4]; and delete[] data and that's expensive depending on the Texture size. Create the texture data once then re-use it.
One way to do this is to have static variables on the C++ side hold the texture information then a function to initialize those variables::
// Destination buffer (pinned on the C# side) and the capture rectangle,
// registered once via initPixelData() and reused every frame.
static void* pixelData = nullptr;
static int _x;
static int _y;
static int _width;
static int _height;

// Record the pinned managed buffer plus the region to read each frame.
void initPixelData(void* buffer, int x, int y, int width, int height) {
    _width = width;
    _height = height;
    _x = x;
    _y = y;
    pixelData = buffer;
}
Then your capture function should be re-written to remove new unsigned char[g_SourceWidth * g_SourceHeight * 4]; and delete[] data but use the static variables.
// Per-frame capture: attach the external (OES) texture to a temporary FBO,
// read its pixels into the pinned managed buffer, and upload them to the
// destination GL_TEXTURE_2D. Invoked on the render thread via
// GL.IssuePluginEvent.
static void UNITY_INTERFACE_API OnRenderEvent(int eventID)
{
    if (pixelData == nullptr) {
        // Buffer not registered yet (initPixelData was never called).
        return;
    }
    GLuint fbo;
    glGenFramebuffers(1, &fbo);
    glBindFramebuffer(GL_FRAMEBUFFER, fbo);
    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_EXTERNAL_OES, g_ExtTexturePointer, 0);
    if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE)
    {
        LOGD("%s", "Error: Could not setup frame buffer.");
    }
    glReadPixels(_x, _y, _width, _height, GL_RGBA, GL_UNSIGNED_BYTE, pixelData);
    glBindTexture(GL_TEXTURE_2D, g_TexturePointer);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, _width, _height, 0, GL_RGBA, GL_UNSIGNED_BYTE, pixelData);
    glDeleteFramebuffers(1, &fbo);
    glBindFramebuffer(GL_FRAMEBUFFER, 0);
    glBindTexture(GL_TEXTURE_2D, 0);
}
/* Exported entry point handed to Unity's GL.IssuePluginEvent so that
 * OnRenderEvent runs on Unity's render thread. */
extern "C" UnityRenderingEvent UNITY_INTERFACE_EXPORT UNITY_INTERFACE_API
UMDGetRenderEventFunc()
{
return OnRenderEvent;
}
On the C# side:
[DllImport("RenderingPlugin", CallingConvention = CallingConvention.Cdecl)]
public static extern void initPixelData(IntPtr buffer, int x, int y, int width, int height);
[DllImport("RenderingPlugin", CallingConvention = CallingConvention.StdCall)]
private static extern IntPtr UMDGetRenderEventFunc();
Create the Texture information, pin it and send the pointer to C++:
// Capture size; overwritten with the actual screen resolution in Awake().
int width = 500;
int height = 500;
//Where Pixel data will be saved (RGBA, 4 bytes per pixel)
byte[] screenData;
//Where handle that pins the Pixel data will stay
GCHandle pinHandler;
//Used to test the color
public RawImage rawImageColor;
private Texture2D texture;
// Use this for initialization
void Awake()
{
Resolution res = Screen.currentResolution;
width = res.width;
height = res.height;
//Allocate array to be used
screenData = new byte[width * height * 4];
texture = new Texture2D(width, height, TextureFormat.RGBA32, false, false);
//Pin the Array so that it doesn't move around
pinHandler = GCHandle.Alloc(screenData, GCHandleType.Pinned);
//Register the screenshot and pass the array that will receive the pixels
IntPtr arrayPtr = pinHandler.AddrOfPinnedObject();
initPixelData(arrayPtr, 0, 0, width, height);
StartCoroutine(UpdateVideoTexture());
}
Then to update the texture, see the sample below. Note that there are two methods to update the texture as shown on the code below. If you run into issues with Method1, comment out the two lines which uses texture.LoadRawTextureData and texture.Apply and un-comment the Method2 code which uses the ByteArrayToColor, texture.SetPixels and texture.Apply function:
// Runs once per rendered frame: triggers the native capture, then uploads
// the pinned byte buffer into the Texture2D.
IEnumerator UpdateVideoTexture()
{
while (true)
{
//Take screenshot of the screen
GL.IssuePluginEvent(UMDGetRenderEventFunc(), 1);
//Update Texture Method1
// NOTE(review): LoadRawTextureData requires the buffer to exactly match
// the texture's size and format (RGBA32 here) — confirm if resized.
texture.LoadRawTextureData(screenData);
texture.Apply();
//Update Texture Method2. Use this if the Method1 above crashes
/*
ByteArrayToColor();
texture.SetPixels(colors);
texture.Apply();
*/
//Test it by assigning the texture to a raw image
rawImageColor.texture = texture;
//Wait for a frame
yield return null;
}
}
// Reusable conversion buffer for Method2 (SetPixels path).
Color[] colors = null;
// Converts the raw RGBA byte buffer into Unity Color structs.
// FIX: Color stores normalized floats (0..1); raw bytes (0..255) must be
// scaled by 1/255, otherwise every non-zero channel saturates and the
// resulting colors are wrong.
void ByteArrayToColor()
{
    if (colors == null)
    {
        colors = new Color[screenData.Length / 4];
    }
    const float inv = 1f / 255f;
    for (int i = 0; i < screenData.Length; i += 4)
    {
        colors[i / 4] = new Color(screenData[i] * inv,
                                  screenData[i + 1] * inv,
                                  screenData[i + 2] * inv,
                                  screenData[i + 3] * inv);
    }
}
Unpin the array when done or when the script is about to be destroyed:
// Called by Unity when the component is disabled; releases the GC pin so
// the managed byte[] can be moved/collected again.
void OnDisable()
{
//Unpin the array when disabled
pinHandler.Free();
}
Calling glReadPixels is always going to be slow; CPUs are not good at bulk data transfer.
Ideally you'd manage to convince Unity to accept an external image handle and do the whole process zero-copy; failing that, I would use a GPU render-to-texture and a shader to copy from the external image to the RGB surface.

Android native activity, GLES 3.0 shader compilations fails

I wanted to experiment with NativeActivity and GLES 3.0 for a personal project, until I hit this roadblock: the shaders won't load and/or compile — I'm not really sure which, as OpenGL's logs are empty.
Here are both of my shaders:
/* GLSL ES 3.00 vertex shader ("in" replaces the ES2 "attribute" keyword);
 * passes the position through unchanged. */
static const char glVertexShader[] =
"#version 300 es\n"
"in vec4 vPosition;\n"
"void main()\n"
"{\n"
" gl_Position = vPosition;\n"
"}\n\0";
/* GLSL ES 3.00 fragment shader: paints every fragment opaque red.
 * FIX: `gl_FragColor` does not exist in `#version 300 es` — GLSL ES 3.00
 * requires an explicitly declared `out` variable, so the original shader
 * failed to compile (the root cause of the reported failure). */
static const char glFragmentShader[] =
"#version 300 es\n"
"precision mediump float;\n"
"out vec4 fragColor;\n"
"void main()\n"
"{\n"
" fragColor = vec4(1.0, 0.0, 0.0, 1.0);\n"
"}\n\0";
And here are my shader loading and program creation functions:
// Compile one shader stage. Returns the shader handle, or 0 on failure;
// a failed shader object is always deleted before returning.
GLuint loadShader(GLenum shaderType, const char* shaderSource)
{
    GLuint shader = glCreateShader(shaderType);
    if (shader)
    {
        glShaderSource(shader, 1, &shaderSource, NULL);
        glCompileShader(shader);
        GLint compiled = 0;
        glGetShaderiv(shader, GL_COMPILE_STATUS, &compiled);
        if (!compiled)
        {
            GLint infoLen = 0;
            glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &infoLen);
            if (infoLen)
            {
                char * buf = new char[infoLen];
                if (buf)
                {
                    glGetShaderInfoLog(shader, infoLen, NULL, buf);
                    log.log_error("Could not Compile Shader %d:\n%s\n", shaderType, buf);
                    delete[] buf;
                }
            }
            // BUG FIX: delete the shader and return 0 on ANY compile failure.
            // Previously this cleanup sat inside `if (infoLen)`, so a failed
            // compile with an empty info log (as reported: "logs are
            // non-existent") returned a broken, non-zero handle as success.
            glDeleteShader(shader);
            shader = 0;
        }
    }
    return shader;
}
// Build and link a GL program from vertex + fragment source.
// Returns the program handle, or 0 on failure (logs the link error).
// The intermediate shader objects are always released before returning.
GLuint createProgram(const char* vertexSource, const char * fragmentSource)
{
    log.log_info("Loading vertex shader");
    GLuint vertexShader = loadShader(GL_VERTEX_SHADER, vertexSource);
    if (!vertexShader)
    {
        log.log_info("Vertex shader load failure!");
        return 0;
    }
    log.log_info("Loading fragment shader");
    GLuint fragmentShader = loadShader(GL_FRAGMENT_SHADER, fragmentSource);
    if (!fragmentShader)
    {
        log.log_info("Fragment shader load failure!");
        // FIX: the vertex shader was leaked on this path.
        glDeleteShader(vertexShader);
        return 0;
    }
    GLuint program = glCreateProgram();
    if (program)
    {
        glAttachShader(program, vertexShader);
        glAttachShader(program, fragmentShader);
        glLinkProgram(program);
        GLint linkStatus = GL_FALSE;
        glGetProgramiv(program, GL_LINK_STATUS, &linkStatus);
        if (linkStatus != GL_TRUE)
        {
            GLint bufLength = 0;
            glGetProgramiv(program, GL_INFO_LOG_LENGTH, &bufLength);
            if (bufLength)
            {
                char* buf = new char[bufLength];
                if (buf)
                {
                    glGetProgramInfoLog(program, bufLength, NULL, buf);
                    log.log_error("Could not link program:\n%s\n", buf);
                    delete[] buf;
                }
            }
            glDeleteProgram(program);
            program = 0;
        }
    }
    // FIX: shader objects are no longer needed once the program is linked
    // (or creation failed); deleting them avoids a per-call GL object leak.
    glDeleteShader(vertexShader);
    glDeleteShader(fragmentShader);
    return program;
}
I'll also include my setup code and drawing code below:
Setup code:
/* The linked triangle program and its "vPosition" attribute location,
 * filled in by setupGraphics(). */
GLuint simpleTriangleProgram;
GLuint vPosition;
// Build the shader program and set the viewport; returns false on failure.
bool setupGraphics(int w, int h)
{
simpleTriangleProgram = createProgram(glVertexShader, glFragmentShader);
if (!simpleTriangleProgram)
{
log.log_error("Could not create program");
return false;
}
// NOTE(review): glGetAttribLocation returns GLint (-1 on failure); storing
// it in a GLuint silently hides lookup failures — confirm intended.
vPosition = glGetAttribLocation(simpleTriangleProgram, "vPosition");
glViewport(0, 0, w, h);
return true;
}
Drawing code:
//-------------------------------
/* Two-component (x, y) clip-space positions for one full-viewport triangle:
 * top center, bottom left, bottom right. */
const GLfloat triangleVertices[] = {
0.0f, 1.0f,
-1.0f, -1.0f,
1.0f, -1.0f
};
//-------------------------------
/**
* Just the current frame in the display.
*/
/* Render one frame: clear to a touch/phase-derived color, then draw the
 * triangle with the GLES3 program, and present. */
static void engine_draw_frame(struct engine* engine) {
if (engine->display == NULL) {
// No display.
return;
}
// Just fill the screen with a color.
glClearColor(((float)engine->state.x) / engine->width, engine->state.angle,
((float)engine->state.y) / engine->height, 1);
glClear(GL_COLOR_BUFFER_BIT);
//---------------------------------------------------
/* Stream the triangle's 2D positions straight from client memory. */
glUseProgram(simpleTriangleProgram);
glVertexAttribPointer(vPosition, 2, GL_FLOAT, GL_FALSE, 0, triangleVertices);
glEnableVertexAttribArray(vPosition);
glDrawArrays(GL_TRIANGLES, 0, 3);
//---------------------------------------------------
eglSwapBuffers(engine->display, engine->surface);
}
And yes, the GLES context is properly created. But let me know if I should post that code too.
Here's my context creation code:
/**
* Initialize an EGL context for the current display.
*/
static int engine_init_display(struct engine* engine) {
// initialize OpenGL ES and EGL
/*
* Here specify the attributes of the desired configuration.
* Below, we select an EGLConfig with at least 8 bits per color
* component compatible with on-screen windows
*/
const EGLint attribs[] = {
EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT,
EGL_SURFACE_TYPE, EGL_WINDOW_BIT,
EGL_BLUE_SIZE, 8,
EGL_GREEN_SIZE, 8,
EGL_RED_SIZE, 8,
EGL_NONE
};
EGLint w, h, format;
EGLint numConfigs;
EGLConfig config;
EGLSurface surface;
EGLContext context;
EGLDisplay display = eglGetDisplay(EGL_DEFAULT_DISPLAY);
eglInitialize(display, 0, 0);
/* Here, the application chooses the configuration it desires.
* find the best match if possible, otherwise use the very first one
*/
eglChooseConfig(display, attribs, NULL, 0, &numConfigs);
std::unique_ptr<EGLConfig[]> supportedConfigs(new EGLConfig[numConfigs]);
assert(supportedConfigs);
eglChooseConfig(display, attribs, supportedConfigs.get(), numConfigs, &numConfigs);
assert(numConfigs);
auto i = 0;
for (; i < numConfigs; i++) {
auto& cfg = supportedConfigs[i];
EGLint r, g, b, d;
if (eglGetConfigAttrib(display, cfg, EGL_RED_SIZE, &r) &&
eglGetConfigAttrib(display, cfg, EGL_GREEN_SIZE, &g) &&
eglGetConfigAttrib(display, cfg, EGL_BLUE_SIZE, &b) &&
eglGetConfigAttrib(display, cfg, EGL_DEPTH_SIZE, &d) &&
r == 8 && g == 8 && b == 8 && d == 0) {
config = supportedConfigs[i];
break;
}
}
if (i == numConfigs) {
config = supportedConfigs[0];
}
EGLint AttribList[] =
{
EGL_CONTEXT_CLIENT_VERSION, 3,
EGL_NONE
};
/* EGL_NATIVE_VISUAL_ID is an attribute of the EGLConfig that is
* guaranteed to be accepted by ANativeWindow_setBuffersGeometry().
* As soon as we picked a EGLConfig, we can safely reconfigure the
* ANativeWindow buffers to match, using EGL_NATIVE_VISUAL_ID. */
eglGetConfigAttrib(display, config, EGL_NATIVE_VISUAL_ID, &format);
surface = eglCreateWindowSurface(display, config, engine->app->window, NULL);
context = eglCreateContext(display, config, NULL, AttribList);
if (eglMakeCurrent(display, surface, surface, context) == EGL_FALSE) {
log.log_warning("Unable to eglMakeCurrent");
return -1;
}
eglQuerySurface(display, surface, EGL_WIDTH, &w);
eglQuerySurface(display, surface, EGL_HEIGHT, &h);
engine->display = display;
engine->context = context;
engine->surface = surface;
engine->width = w;
engine->height = h;
engine->state.angle = 0;
// Check openGL on the system
auto opengl_info = { GL_VENDOR, GL_RENDERER, GL_VERSION, GL_EXTENSIONS };
for (auto name : opengl_info) {
auto info = glGetString(name);
log.log_info("OpenGL Info: %s", info);
}
// Initialize GL state.
// glEnable(GL_CULL_FACE);
// glEnable(GL_DEPTH_TEST);
return 0;
}
Full log:
Log pic
Note: my logger class is just a wrapper for __android_log_vprint(). I didn't include it because it isn't relevant in the slightest.
This:
#version 300 es
attribute vec4 vPosition;
void main()
{
gl_Position = vPosition;
}
... isn't a legal ESSL version 300 shader, so you should be getting a compile error log returned by the compiler on your platform. Do you not get anything from your log_info log channel?
To make the shader legal replace attribute with in.
Based on the log posted, glCreateShader is returning 0 which only happens when the context is not valid.
Looking at the time stamps in the log, you appear to be loading your shaders before you are creating the OpenGLES context.

Android: OpenGL drawing only on one quarter of screen on Exynos devices

Problem.
I have a problem with specific Android devices. I'm using a native C++ library to draw stuff in my app. For a long time the solution I use worked well on different devices, until I received negative feedback from a Samsung Galaxy S4 user (GT-I9500, Android 4.4.2, Exynos 5410). The result of my OpenGL drawing was corrupted. The texture that usually draws fullscreen was in this case shrunk to a quarter of the screen and aligned to the upper-right corner. The background drawn with glClearColor was filling the full screen, though. I was able to check on three other S4s — out of the four phones in total, my app's drawing is corrupted only on the Exynos devices. The other two had Snapdragon chips and there were no problems on them.
Code.
I've simplified the code a bit so I can show it here. The task is basic: draw a red background and a black fullscreen rectangle on top of it.
Below you can see my drawing method. The data I pass to the shaders doesn't affect anything in this simplified case.
// Use program
glUseProgram(_shaderProgram);
ERROR_STATUS
//bind quad mesh buffer
glBindBuffer(GL_ARRAY_BUFFER, _vbo);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, _ibo);
ERROR_STATUS
//set attribs
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, sizeof(Vertex), 0);
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, sizeof(Vertex), (void *) (2 * sizeof(float)));
glEnableVertexAttribArray(0);
glEnableVertexAttribArray(1);
ERROR_STATUS
// Clear background to red
glClear(GL_COLOR_BUFFER_BIT);
glClearColor(1, 0, 0, 1);
glUniform2fv(_scaleUniform, 1, _eyes[0].scale.data);
glUniform1f(_offsetUniform, _eyes[0].offset);
glUniform1f(_offsetYUniform, _eyes[0].offsetY);
BindTextures(TEX);
// Draw
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_SHORT, NULL);
ERROR_STATUS
glDisableVertexAttribArray(0);
glDisableVertexAttribArray(1);
ERROR_STATUS
And here is my ConfigChooser(grafika was great help here).
// Selects an EGLConfig for ES2 rendering: exact match on the requested
// R/G/B/A bit sizes, at-least match on depth/stencil (based on grafika).
private static class ConfigChooser implements GLSurfaceView.EGLConfigChooser {
// Requested channel sizes; matching happens in chooseConfig() below.
public ConfigChooser(int r, int g, int b, int a, int depth, int stencil) {
mRedSize = r;
mGreenSize = g;
mBlueSize = b;
mAlphaSize = a;
mDepthSize = depth;
mStencilSize = stencil;
}
/* This EGL config specification is used to specify 2.0 rendering.
* We use a minimum size of 4 bits for red/green/blue, but will
* perform actual matching in chooseConfig() below.
*/
private static int EGL_OPENGL_ES2_BIT = 4;
private static int[] s_configAttribs2 =
{
EGL10.EGL_RED_SIZE, 4,
EGL10.EGL_GREEN_SIZE, 4,
EGL10.EGL_BLUE_SIZE, 4,
EGL10.EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT,
EGL10.EGL_NONE
};
// Entry point invoked by GLSurfaceView: enumerates the minimally
// matching configs, then delegates to the exact-match overload below.
public EGLConfig chooseConfig(EGL10 egl, EGLDisplay display) {
/* Get the number of minimally matching EGL configurations
*/
int[] num_config = new int[1];
egl.eglChooseConfig(display, s_configAttribs2, null, 0, num_config);
int numConfigs = num_config[0];
if (numConfigs <= 0) {
throw new IllegalArgumentException("No configs match configSpec");
}
/* Allocate then read the array of minimally matching EGL configs
*/
EGLConfig[] configs = new EGLConfig[numConfigs];
egl.eglChooseConfig(display, s_configAttribs2, configs, numConfigs, num_config);
if (DEBUG) {
printConfigs(egl, display, configs);
}
/* Now return the "best" one
*/
return chooseConfig(egl, display, configs);
}
// Exact-match pass over the candidates.
// NOTE(review): returns null when no candidate matches exactly, which
// GLSurfaceView does not tolerate — confirm whether falling back to the
// first candidate would be preferable to a downstream failure.
public EGLConfig chooseConfig(EGL10 egl, EGLDisplay display,
EGLConfig[] configs) {
for(EGLConfig config : configs) {
int d = findConfigAttrib(egl, display, config,
EGL10.EGL_DEPTH_SIZE, 0);
int s = findConfigAttrib(egl, display, config,
EGL10.EGL_STENCIL_SIZE, 0);
// We need at least mDepthSize and mStencilSize bits
if (d < mDepthSize || s < mStencilSize)
continue;
// We want an *exact* match for red/green/blue/alpha
int r = findConfigAttrib(egl, display, config,
EGL10.EGL_RED_SIZE, 0);
int g = findConfigAttrib(egl, display, config,
EGL10.EGL_GREEN_SIZE, 0);
int b = findConfigAttrib(egl, display, config,
EGL10.EGL_BLUE_SIZE, 0);
int a = findConfigAttrib(egl, display, config,
EGL10.EGL_ALPHA_SIZE, 0);
if (r == mRedSize && g == mGreenSize && b == mBlueSize && a == mAlphaSize)
return config;
}
return null;
}
// Reads a single attribute of a config; defaultValue on query failure.
private int findConfigAttrib(EGL10 egl, EGLDisplay display,
EGLConfig config, int attribute, int defaultValue) {
if (egl.eglGetConfigAttrib(display, config, attribute, mValue)) {
return mValue[0];
}
return defaultValue;
}
// Debug helper: dumps every candidate config to the log.
private void printConfigs(EGL10 egl, EGLDisplay display,
EGLConfig[] configs) {
int numConfigs = configs.length;
Log.w(TAG, String.format("%d configurations", numConfigs));
for (int i = 0; i < numConfigs; i++) {
Log.w(TAG, String.format("Configuration %d:\n", i));
printConfig(egl, display, configs[i]);
}
}
// Debug helper: logs the interesting EGL attributes of one config.
// Raw hex literals stand in for constants that EGL10 does not define.
private void printConfig(EGL10 egl, EGLDisplay display,
EGLConfig config) {
int[] attributes = {
EGL10.EGL_BUFFER_SIZE,
EGL10.EGL_ALPHA_SIZE,
EGL10.EGL_BLUE_SIZE,
EGL10.EGL_GREEN_SIZE,
EGL10.EGL_RED_SIZE,
EGL10.EGL_DEPTH_SIZE,
EGL10.EGL_STENCIL_SIZE,
EGL10.EGL_CONFIG_CAVEAT,
EGL10.EGL_CONFIG_ID,
EGL10.EGL_LEVEL,
EGL10.EGL_MAX_PBUFFER_HEIGHT,
EGL10.EGL_MAX_PBUFFER_PIXELS,
EGL10.EGL_MAX_PBUFFER_WIDTH,
EGL10.EGL_NATIVE_RENDERABLE,
EGL10.EGL_NATIVE_VISUAL_ID,
EGL10.EGL_NATIVE_VISUAL_TYPE,
0x3030, // EGL10.EGL_PRESERVED_RESOURCES,
EGL10.EGL_SAMPLES,
EGL10.EGL_SAMPLE_BUFFERS,
EGL10.EGL_SURFACE_TYPE,
EGL10.EGL_TRANSPARENT_TYPE,
EGL10.EGL_TRANSPARENT_RED_VALUE,
EGL10.EGL_TRANSPARENT_GREEN_VALUE,
EGL10.EGL_TRANSPARENT_BLUE_VALUE,
0x3039, // EGL10.EGL_BIND_TO_TEXTURE_RGB,
0x303A, // EGL10.EGL_BIND_TO_TEXTURE_RGBA,
0x303B, // EGL10.EGL_MIN_SWAP_INTERVAL,
0x303C, // EGL10.EGL_MAX_SWAP_INTERVAL,
EGL10.EGL_LUMINANCE_SIZE,
EGL10.EGL_ALPHA_MASK_SIZE,
EGL10.EGL_COLOR_BUFFER_TYPE,
EGL10.EGL_RENDERABLE_TYPE,
0x3042 // EGL10.EGL_CONFORMANT
};
String[] names = {
"EGL_BUFFER_SIZE",
"EGL_ALPHA_SIZE",
"EGL_BLUE_SIZE",
"EGL_GREEN_SIZE",
"EGL_RED_SIZE",
"EGL_DEPTH_SIZE",
"EGL_STENCIL_SIZE",
"EGL_CONFIG_CAVEAT",
"EGL_CONFIG_ID",
"EGL_LEVEL",
"EGL_MAX_PBUFFER_HEIGHT",
"EGL_MAX_PBUFFER_PIXELS",
"EGL_MAX_PBUFFER_WIDTH",
"EGL_NATIVE_RENDERABLE",
"EGL_NATIVE_VISUAL_ID",
"EGL_NATIVE_VISUAL_TYPE",
"EGL_PRESERVED_RESOURCES",
"EGL_SAMPLES",
"EGL_SAMPLE_BUFFERS",
"EGL_SURFACE_TYPE",
"EGL_TRANSPARENT_TYPE",
"EGL_TRANSPARENT_RED_VALUE",
"EGL_TRANSPARENT_GREEN_VALUE",
"EGL_TRANSPARENT_BLUE_VALUE",
"EGL_BIND_TO_TEXTURE_RGB",
"EGL_BIND_TO_TEXTURE_RGBA",
"EGL_MIN_SWAP_INTERVAL",
"EGL_MAX_SWAP_INTERVAL",
"EGL_LUMINANCE_SIZE",
"EGL_ALPHA_MASK_SIZE",
"EGL_COLOR_BUFFER_TYPE",
"EGL_RENDERABLE_TYPE",
"EGL_CONFORMANT"
};
int[] value = new int[1];
for (int i = 0; i < attributes.length; i++) {
int attribute = attributes[i];
String name = names[i];
if ( egl.eglGetConfigAttrib(display, config, attribute, value)) {
Log.w(TAG, String.format(" %s: %d\n", name, value[0]));
} else {
// Log.w(TAG, String.format(" %s: failed\n", name));
while (egl.eglGetError() != EGL10.EGL_SUCCESS);
}
}
}
// Subclasses can adjust these values:
protected int mRedSize;
protected int mGreenSize;
protected int mBlueSize;
protected int mAlphaSize;
protected int mDepthSize;
protected int mStencilSize;
private int[] mValue = new int[1];
}
I attach it with setEGLConfigChooser(new ConfigChooser(5, 6, 5, 0, 0, 0)). Context factory:
// Creates and destroys the GLES 2.0 context for the GLSurfaceView,
// logging EGL errors around context creation.
private static class ContextFactory implements GLSurfaceView.EGLContextFactory {
    // EGL_CONTEXT_CLIENT_VERSION is absent from EGL10, so define it here.
    private static int EGL_CONTEXT_CLIENT_VERSION = 0x3098;

    public EGLContext createContext(EGL10 egl, EGLDisplay display, EGLConfig eglConfig) {
        Log.w(TAG, "creating OpenGL ES 2.0 context");
        checkEglError("Before eglCreateContext", egl);
        int[] attribs = { EGL_CONTEXT_CLIENT_VERSION, 2, EGL10.EGL_NONE };
        EGLContext ctx = egl.eglCreateContext(display, eglConfig, EGL10.EGL_NO_CONTEXT, attribs);
        checkEglError("After eglCreateContext", egl);
        return ctx;
    }

    public void destroyContext(EGL10 egl, EGLDisplay display, EGLContext context) {
        egl.eglDestroyContext(display, context);
    }
}
I can provide other parts of my code if necessary.
Results.
While the usual, valid result is just black screen (the black, empty texture is overlapping the red background), on S4 Exynos the screen looks like this (the arrow on the right is just system button):
So here is the question: how can I fix the problem so that the app displays the same thing on different devices?
I was able to fix my code thanks to RetoKoradi's suggestion. The problem was that I had used fixed locations for the vertex attributes in my shaders (0 and 1). I'm not sure why this worked on most devices but not on others, though.
So instead of calls like:
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, sizeof(Vertex), 0);
glEnableVertexAttribArray(0);
I'm now using:
_posAttribLocation = glGetAttribLocation(_shaderProgram, "a_position");
to store the location of the attribute after the shader program is linked, and then using it instead of the fixed one:
glVertexAttribPointer(_posAttribLocation, 2, GL_FLOAT, GL_FALSE, sizeof(Vertex), 0);
glEnableVertexAttribArray(_posAttribLocation);

Rendering sprites on a Mali-400 MP device

I started developing a small cross platform game engine using OpenGL, I am working actually on a sprite batch renderer.
Everything is working fine on OS X, iOS, Win32, and some Android Devices. Here is the Results :
The image on the left show the correct result, and was tested on Samsung galaxy S1, Galaxy J5, Galaxy S3, Galaxy Tab 3.
The device that give these weird results on the right is a Samsung Galaxy Core 2, With a Mali-400 MP renderer.
I start getting that weird result when I use more than one texture.
Here is my Sprite Batch class :
// Builds one sprite quad: caches the texture id and z order, then fills in
// the four corner vertices (position, UV, packed color) from the sprite's
// position, dimensions, and uvRect.
Glyph::Glyph(const maths::vec2 &position, const maths::vec2 &dimensions, const maths::vec4 &uvRect, GLuint texture, unsigned int color, float zOrder) :
textureID(texture) {
a_zOrder = zOrder;
// uvRect is treated as (u, v, width, height); corners add uvRect.z
// horizontally and uvRect.w vertically. Presumably the origin is the
// bottom-left of both the quad and the UV rect — TODO confirm.
topLeft.m_color = color;
topLeft.setPosition(position.x, position.y + dimensions.y);
topLeft.setUV(uvRect.x, uvRect.y + uvRect.w);
bottomLeft.m_color = color;
bottomLeft.setPosition(position.x, position.y);
bottomLeft.setUV(uvRect.x, uvRect.y);
bottomRight.m_color = color;
bottomRight.setPosition(position.x + dimensions.x, position.y);
bottomRight.setUV(uvRect.x + uvRect.z, uvRect.y);
topRight.m_color = color;
topRight.setPosition(position.x + dimensions.x, position.y + dimensions.y);
topRight.setUV(uvRect.x + uvRect.z, uvRect.y + uvRect.w);
}
//SpriteBatch
// Constructs the batch renderer with no GL objects yet, then immediately
// creates its VAO/VBO via Init().
ORendererSpriteBatch::ORendererSpriteBatch()
    : m_vboID(0),
      m_vaoID(0)
{
    Init();
}
// Releases the vertex buffer and vertex array objects, if created.
ORendererSpriteBatch::~ORendererSpriteBatch()
{
    if (m_vboID != 0)
    {
        glDeleteBuffers(1, &m_vboID);
    }
    if (m_vaoID != 0)
    {
        glDeleteVertexArrays(1, &m_vaoID);
    }
}
// One-time setup: creates and configures the VAO/VBO used for batching.
void ORendererSpriteBatch::Init()
{
createVertexArray();
}
// Starts a new frame: discards last frame's glyphs and batches.
// clear() keeps the vectors' capacity, so the emplace_back calls made
// later this frame avoid fresh heap allocation.
void ORendererSpriteBatch::Begin()
{
    m_glyphs.clear();
    m_renderBatches.clear();
}
// Queues one sprite for this frame by constructing its Glyph in place
// from the sprite's position, size, UV rect, texture, color, and z order.
void ORendererSpriteBatch::Submit(const OSprite* renderable)
{
m_glyphs.emplace_back(renderable->GetPosition(), renderable->GetSize(), renderable->GetUV(), renderable->GetTID(), renderable->GetColor(), renderable->GetZOrder());
}
void ORendererSpriteBatch::End()
{
// Set up all pointers for fast sorting
m_glyphPointers.resize(m_glyphs.size());
for (size_t i = 0; i < m_glyphs.size(); i++) {
m_glyphPointers[i] = &m_glyphs[i];
}
sortGlyphs();
createRenderBatches();
}
// Draws every batch built by createRenderBatches(): binds the shared
// shader, camera matrix, and VAO, then issues one glDrawArrays per
// texture run. Alpha blending is enabled for the whole pass.
void ORendererSpriteBatch::Flush(OLayer2D *layer)
{
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
ORendererFactory::OShader_Simple2D->bind();
// Sampler uses texture unit 0; each batch rebinds its own texture below.
glActiveTexture(GL_TEXTURE0);
ORendererFactory::OShader_Simple2D->setUniform1i("u_diffuse", 0);
ORendererFactory::OShader_Simple2D->setUniformMat4("u_MVP", layer->getCamera()->getCameraMatrix());
glBindVertexArray(m_vaoID);
for (size_t i = 0; i < m_renderBatches.size(); i++) {
glBindTexture(GL_TEXTURE_2D, m_renderBatches[i].texture);
glDrawArrays(GL_TRIANGLES, m_renderBatches[i].offset, m_renderBatches[i].numVertices);
}
ORendererFactory::OShader_Simple2D->unbind();
glBindVertexArray(0);
}
// Converts the sorted glyph list into (a) a flat vertex array uploaded to
// the VBO and (b) m_renderBatches entries, one per run of consecutive
// glyphs sharing a texture. Each glyph expands to 6 vertices (2 triangles).
void ORendererSpriteBatch::createRenderBatches() {
// This will store all the vertices that we need to upload
std::vector <VertexData2D> vertices;
// Resize the buffer to the exact size we need so we can treat
// it like an array
vertices.resize(m_glyphPointers.size() * 6);
if (m_glyphPointers.empty()) {
return;
}
int offset = 0; // current offset
int cv = 0; // current vertex
//Add the first batch
m_renderBatches.emplace_back(offset, 6, m_glyphPointers[0]->textureID);
// Two CCW triangles per quad: TL-BL-BR and BR-TR-TL.
vertices[cv++] = m_glyphPointers[0]->topLeft;
vertices[cv++] = m_glyphPointers[0]->bottomLeft;
vertices[cv++] = m_glyphPointers[0]->bottomRight;
vertices[cv++] = m_glyphPointers[0]->bottomRight;
vertices[cv++] = m_glyphPointers[0]->topRight;
vertices[cv++] = m_glyphPointers[0]->topLeft;
offset += 6;
//Add all the rest of the glyphs
for (size_t cg = 1; cg < m_glyphPointers.size(); cg++) {
// Check if this glyph can be part of the current batch
if (m_glyphPointers[cg]->textureID != m_glyphPointers[cg - 1]->textureID) {
// Make a new batch
m_renderBatches.emplace_back(offset, 6, m_glyphPointers[cg]->textureID);
} else {
// If its part of the current batch, just increase numVertices
m_renderBatches.back().numVertices += 6;
}
vertices[cv++] = m_glyphPointers[cg]->topLeft;
vertices[cv++] = m_glyphPointers[cg]->bottomLeft;
vertices[cv++] = m_glyphPointers[cg]->bottomRight;
vertices[cv++] = m_glyphPointers[cg]->bottomRight;
vertices[cv++] = m_glyphPointers[cg]->topRight;
vertices[cv++] = m_glyphPointers[cg]->topLeft;
offset += 6;
}
glBindVertexArray(m_vaoID);
// Bind our VBO
glBindBuffer(GL_ARRAY_BUFFER, m_vboID);
// Orphan the buffer (for speed)
glBufferData(GL_ARRAY_BUFFER, vertices.size() * sizeof(VertexData2D), NULL, GL_DYNAMIC_DRAW);
// Upload the data
glBufferSubData(GL_ARRAY_BUFFER, 0, vertices.size() * sizeof(VertexData2D), vertices.data());
glBindVertexArray(0);
// Unbind the VBO
// glBindBuffer(GL_ARRAY_BUFFER, 0);
}
// Lazily creates the VAO/VBO and records the vertex layout
// (vec2 position, normalized ubyte4 color, vec2 uv) in the VAO.
// NOTE(review): attribute locations 0/1/2 are hard-coded here. Unless the
// shader binds them with glBindAttribLocation before linking, the driver
// may assign different locations (this is the corruption reported on
// Mali-400) — prefer querying them with glGetAttribLocation.
void ORendererSpriteBatch::createVertexArray() {
// Generate the VAO if it isn't already generated
if (m_vaoID == 0) {
glGenVertexArrays(1, &m_vaoID);
}
// Bind the VAO. All subsequent opengl calls will modify it's state.
glBindVertexArray(m_vaoID);
//G enerate the VBO if it isn't already generated
if (m_vboID == 0) {
glGenBuffers(1, &m_vboID);
}
glBindBuffer(GL_ARRAY_BUFFER, m_vboID);
//Tell opengl what attribute arrays we need
glEnableVertexAttribArray(0);
glEnableVertexAttribArray(1);
glEnableVertexAttribArray(2);
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, sizeof(VertexData2D), (void *)offsetof(VertexData2D, m_vertex));
glVertexAttribPointer(1, 4, GL_UNSIGNED_BYTE, GL_TRUE , sizeof(VertexData2D), (void *)offsetof(VertexData2D, m_color));
glVertexAttribPointer(2, 2, GL_FLOAT , GL_FALSE, sizeof(VertexData2D), (void *)offsetof(VertexData2D, m_uv));
glBindVertexArray(0);
}
// Orders glyph pointers by z order then texture id (see compareFunction);
// stable_sort preserves submission order among equal glyphs.
void ORendererSpriteBatch::sortGlyphs() {
std::stable_sort(m_glyphPointers.begin(), m_glyphPointers.end(), compareFunction);
}
// Sorting predicate: primarily by z order; ties broken by texture id so
// glyphs sharing a texture land adjacent and merge into one batch.
bool ORendererSpriteBatch::compareFunction(Glyph* a, Glyph* b) {
    return (a->a_zOrder == b->a_zOrder)
        ? (a->textureID < b->textureID)
        : (a->a_zOrder < b->a_zOrder);
}
And here is how I call that class :
m_CurrentRenderer->Begin();
for (const OSprite* renderable : m_Renderables)
if(m_Camera->isBoxInView(renderable->GetPosition(), renderable->GetSize())){
renderable->Submit(m_CurrentRenderer);
}
m_CurrentRenderer->End();
m_CurrentRenderer->Flush(this);
Any suggestion about what might cause that bug is welcome.
Your code:
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, sizeof(VertexData2D), (void *)offsetof(VertexData2D, m_vertex));
glVertexAttribPointer(1, 4, GL_UNSIGNED_BYTE, GL_TRUE , sizeof(VertexData2D), (void *)offsetof(VertexData2D, m_color));
glVertexAttribPointer(2, 2, GL_FLOAT , GL_FALSE, sizeof(VertexData2D), (void *)offsetof(VertexData2D, m_uv));
Assumes that the vertex position is attribute #0, colour is attribute #1 and uv is attribute #2.
Do you enforce that in any way? Declaring the attributes in that order in your vertex shader doesn't guarantee that the attributes will take on that order.
You can specify the attribute locations using glBindAttribLocation prior to linking the program, or you can use glGetAttribLocation to query the location instead.

Categories

Resources