I am trying to use a point light animation in my game. It runs fine in the Editor with the Diffuse, Bumped Specular and VertexLit shaders; however, it doesn't work with any of the Mobile shaders provided by default.
Is there a way to use point lights on Android? Or is there a shader that works on mobile and supports point lights too?
Finally found the answer: this post on Unity Answers helped me. I am reposting the custom shader here:
// Specular, Normal Maps with Main Texture
// Fragment based
Shader "SpecTest/SpecTest5"
{
    Properties
    {
        _Shininess ("Shininess", Range (0, 1.5)) = 0.078125
        _Color ("Main Color", Color) = (1,1,1,1)
        _SpecColor ("Specular Color", Color) = (0, 0, 0, 0)
        _MainTex ("Texture", 2D) = "white" {}
        _BumpMap ("Bump Map", 2D) = "bump" {}
        _NormalStrength ("Normal Strength", Range (0, 1.5)) = 1
    } // eo Properties

    SubShader
    {
        // pass for 4 vertex lights, ambient light & first pixel light
        Tags { "RenderType" = "Opaque" }
        LOD 200

        CGPROGRAM
        // halfasview makes Unity pass the per-vertex half-vector as the third
        // parameter of the lighting function, which the specular term below relies on
        #pragma surface surf MobileBlinnPhong halfasview

        fixed4 LightingMobileBlinnPhong (SurfaceOutput s, fixed3 lightDir, fixed3 halfDir, fixed atten)
        {
            fixed diff = saturate(dot(s.Normal, lightDir));
            // Instead of computing normalize(light + view) here, we use the
            // per-vertex half-vector provided by the halfasview surface shader option
            fixed nh = saturate(dot(s.Normal, halfDir));
            fixed spec = pow(nh, s.Specular * 128) * s.Gloss;

            fixed4 c;
            c.rgb = (s.Albedo * _LightColor0.rgb * diff + _SpecColor.rgb * spec) * (atten * 2);
            c.a = 0.0;
            return c;
        }

        struct Input {
            float2 uv_MainTex;
            float2 uv_BumpMap;
        };

        // User-specified properties
        uniform sampler2D _MainTex;
        uniform sampler2D _BumpMap;
        uniform float _Shininess;
        uniform float _NormalStrength;
        uniform fixed4 _Color;

        float3 expand(float3 v) { return (v - 0.5) * 2; } // eo expand

        void surf (Input IN, inout SurfaceOutput o) {
            half4 tex = tex2D(_MainTex, IN.uv_MainTex) * _Color;
            o.Albedo = tex.rgb;
            o.Gloss = tex.a;
            o.Alpha = tex.a;
            o.Specular = _Shininess;

            // fetch and expand the range-compressed normal
            float3 normalTex = UnpackNormal(tex2D(_BumpMap, IN.uv_BumpMap));
            float3 normal = normalTex * _NormalStrength;
            o.Normal = normal;
        } // eo surf
        ENDCG
    }
    //Fallback "Specular"
} // eo Shader
Remember to increase the normal strength, though. And it is obviously costly on frame rate; I needed it for just an animation, so the cost was acceptable for me.
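One practical footnote for anyone copying this: the custom lighting function only ever sees the point light if Unity renders it as a per-pixel light. A minimal C# sketch of that setup (standard Unity API calls; attaching it to the light object is just my assumption about the scene):

using UnityEngine;

// Attach to the animated point light. Ensures at least one per-pixel light
// is allowed and forces this light to be rendered per pixel.
public class PointLightSetup : MonoBehaviour
{
    void Start()
    {
        QualitySettings.pixelLightCount = Mathf.Max(QualitySettings.pixelLightCount, 1);
        GetComponent<Light>().renderMode = LightRenderMode.ForcePixel;
    }
}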
I applied a cel-shading effect to an object. This works well, but there are many conditional checks ("if" statements) in the fragment shader:
#version 300 es
precision lowp float;

in float v_CosViewAngle;
in float v_LightIntensity;

out vec4 outColor; // missing in the original snippet, required for #version 300 es

const lowp vec3 defaultColor = vec3(0.1, 0.7, 0.9);

void main() {
    lowp float intensity = 0.0;
    if (v_CosViewAngle > 0.33) {
        intensity = 0.33;
        if (v_LightIntensity > 0.76) {
            intensity = 1.0;
        } else if (v_LightIntensity > 0.51) {
            intensity = 0.84;
        } else if (v_LightIntensity > 0.26) {
            intensity = 0.67;
        } else if (v_LightIntensity > 0.1) {
            intensity = 0.50;
        }
    }
    outColor = vec4(defaultColor * intensity, 1.0);
}
I suspect that so many checks in the fragment shader can ultimately hurt performance. In addition, the shader size keeps growing, especially if there will be even more cel-shading levels.
Is there any other way to get this effect? Maybe some GLSL function can be used here?
Thanks in advance!
Store your color bands in an N×1 texture and do a texture lookup using v_LightIntensity as your texture coordinate. If you want a different shading level count, just change the texture.
EDIT: Store an N×M texture instead, do the lookup using v_LightIntensity and v_CosViewAngle as a 2D coordinate, and you can kill the branches completely.
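For example, a minimal sketch of the 1D variant (assuming an Android/Java host with android.opengl.GLES30 and java.nio.ByteBuffer imported; bandTex and u_BandTex are placeholder names). With NEAREST filtering the band boundaries fall at texel edges rather than at the exact thresholds 0.1/0.26/0.51/0.76, so adjust the texture width or contents to taste:

// Build a 5x1 single-channel band texture holding the intensity steps
// 0.0, 0.50, 0.67, 0.84, 1.0 (scaled to bytes).
byte[] bands = { 0, (byte) 128, (byte) 171, (byte) 214, (byte) 255 };
ByteBuffer pixels = ByteBuffer.allocateDirect(bands.length);
pixels.put(bands).position(0);

int[] bandTex = new int[1];
GLES30.glGenTextures(1, bandTex, 0);
GLES30.glBindTexture(GLES30.GL_TEXTURE_2D, bandTex[0]);
GLES30.glPixelStorei(GLES30.GL_UNPACK_ALIGNMENT, 1);
GLES30.glTexImage2D(GLES30.GL_TEXTURE_2D, 0, GLES30.GL_R8, bands.length, 1, 0,
        GLES30.GL_RED, GLES30.GL_UNSIGNED_BYTE, pixels);
// NEAREST + CLAMP_TO_EDGE give hard band edges instead of gradients.
GLES30.glTexParameteri(GLES30.GL_TEXTURE_2D, GLES30.GL_TEXTURE_MIN_FILTER, GLES30.GL_NEAREST);
GLES30.glTexParameteri(GLES30.GL_TEXTURE_2D, GLES30.GL_TEXTURE_MAG_FILTER, GLES30.GL_NEAREST);
GLES30.glTexParameteri(GLES30.GL_TEXTURE_2D, GLES30.GL_TEXTURE_WRAP_S, GLES30.GL_CLAMP_TO_EDGE);

// The whole if/else ladder in the fragment shader then collapses to:
//   uniform sampler2D u_BandTex;
//   float intensity = texture(u_BandTex, vec2(v_LightIntensity, 0.5)).r;
//   outColor = vec4(defaultColor * intensity, 1.0);

The outer v_CosViewAngle check is what the N×M variant folds in as the second texture coordinate.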
The main texture of my surface shader is a Google Maps image tile.
I want to replace pixels that are close to a specified color with the corresponding pixel from a separate texture. What is working now is the following:
Shader "MyShader"
{
Properties
{
_MainTex("Base (RGB) Trans (A)", 2D) = "white" {}
_GrassTexture("Grass Texture", 2D) = "white" {}
_RoadTexture("Road Texture", 2D) = "white" {}
_WaterTexture("Water Texture", 2D) = "white" {}
}
SubShader
{
Tags{ "Queue" = "Transparent-1" "IgnoreProjector" = "True" "ForceNoShadowCasting" = "True" "RenderType" = "Opaque" }
LOD 200
CGPROGRAM
#pragma surface surf Lambert alpha approxview halfasview noforwardadd nometa
uniform sampler2D _MainTex;
uniform sampler2D _GrassTexture;
uniform sampler2D _RoadTexture;
uniform sampler2D _WaterTexture;
struct Input
{
float2 uv_MainTex;
};
void surf(Input IN, inout SurfaceOutput o)
{
fixed4 ct = tex2D(_MainTex, IN.uv_MainTex);
// if the red (or blue) channel of the pixel is within a
// specific range, get either a 1 or a 0 (true/false).
int grassCond = int(ct.r >= 0.45) * int(0.46 >= ct.r);
int waterCond = int(ct.r >= 0.14) * int(0.15 >= ct.r);
int roadCond = int(ct.b >= 0.23) * int(0.24 >= ct.b);
// if none of the above conditions is a 1, then we want to keep our
// current pixel's color:
half defaultCond = 1 - grassCond - waterCond - roadCond;
// get the pixel from each texture, multiple by their check condition
// to get:
// fixed4(0,0,0,0) if this isn't the right texture for this pixel
// or fixed4(r,g,b,1) from the texture if it is the right pixel
fixed4 grass = grassCond * tex2D(_GrassTexture, IN.uv_MainTex);
fixed4 water = waterCond * tex2D(_WaterTexture, IN.uv_MainTex);
fixed4 road = roadCond * tex2D(_RoadTexture, IN.uv_MainTex);
fixed4 def = defaultCond * ct; // just used the MainTex pixel
// then use the found pixels as the Albedo
o.Albedo = (grass + road + water + def).rgb;
o.Alpha = 1;
}
ENDCG
}
Fallback "None"
}
This is the first shader I've ever written, and it probably isn't very performant. It seems counterintuitive to me to call tex2D on each texture for every pixel just to throw that data away, but I couldn't think of a better way to do this without if/else (which I've read is bad for GPUs).
This is a Unity Surface Shader, and not a fragment/vertex shader. I know there is a step that happens behind the scenes that will generate the fragment/vertex shader for me (adding in the scene's lighting, fog, etc.). This shader is applied to 100 256x256px map tiles (2560x2560 pixels in total). The grass/road/water textures are all 256x256 pixels as well.
My question is: is there a better, more performant way of accomplishing what I'm doing here? The game runs on Android and iOS.
I'm not a specialist in shader performance, but assuming you have a relatively small number of source tiles that you wish to render in the same frame, it might make more sense to store the result of the pixel replacement and reuse it.
As you state, the resulting image is the same size as your source tile, so just render the source tile using your surface shader into a RenderTexture once (without any lighting; you may want to consider using a simple, flat pixel shader for this), and then use that RenderTexture as the source for your world rendering. That way you do the expensive work only once per source tile, and it no longer even matters whether your shader is well optimized.
If all textures are static, you might even consider not doing this at runtime, but just translate them once in the Editor.
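A rough sketch of the bake-once idea at runtime (Unity C#; TileBaker, bakeMaterial and tileTexture are placeholder names I made up, and as suggested above you may want an unlit variant of the replacement shader for the blit so no lighting passes run):

using UnityEngine;

public class TileBaker : MonoBehaviour
{
    public Material bakeMaterial;  // material using the replacement shader
    public Texture2D tileTexture;  // the raw Google Maps tile

    void Start()
    {
        var baked = new RenderTexture(tileTexture.width, tileTexture.height, 0);
        // Run the expensive replacement shader exactly once per tile.
        Graphics.Blit(tileTexture, baked, bakeMaterial);
        // From now on, render the tile with a cheap standard shader.
        GetComponent<Renderer>().material.mainTexture = baked;
    }
}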
I'm using OpenGL ES 2.0 on Android to add effects to a video. Everything works fine until I add another texture.
What I've tried:
Using a Texture2D instead of an external (OES) texture -> Impossible for an external source (a video file)
Changing the texture to a power of two, after using the method that checks whether the GPU supports non-power-of-two textures -> No change
Playing with the chronological order of the GLES20 calls -> No visible change in execution behaviour
Binding the textures using different index logic -> No change
Not tried yet:
Using another bitmap format, although the GLUtils method that loads the bitmap is supposed to handle the format
Using the 'legacy' method to load a texture from a bitmap (GLES instead of GLUtils)
Using an array of pixels instead of a bitmap.
But at this point I would rather give up on the bitmap entirely and send a uniform buffer containing my pixels to the shader...
When my surface is created I do this:
var vertexShader = _videoView.Filtering.Shaders.Item1;
var fragmentShader = _videoView.Filtering.Shaders.Item2;

var progExists = glPrograms.ContainsKey (vertexShader + fragmentShader);
if (!progExists) {
    var prog = createProgram (vertexShader, fragmentShader);
    glPrograms.Add (vertexShader + fragmentShader, prog);
}
_glProgram = glPrograms [vertexShader + fragmentShader];

if (_glProgram == 0) {
    // Should compile with default shaders from GlFilter.
    throw new System.Exception ("Can't create GL Program. There is something wrong with EGL on this device.");
}
I also look up my shader handles and create the video texture:
_aPositionHandle = GLES20.GlGetAttribLocation(_glProgram, "aPosition");
checkGlError("glGetAttribLocation aPosition");
if (_aPositionHandle == -1) {
    throw new RuntimeException("Could not get attrib location for aPosition");
}

_aTextureCoord = GLES20.GlGetAttribLocation(_glProgram, "aTextureCoord");
checkGlError("glGetAttribLocation aTextureCoord");
if (_aTextureCoord == -1) {
    throw new RuntimeException("Could not get attrib location for aTextureCoord");
}

_uMVPMatrixHandle = GLES20.GlGetUniformLocation(_glProgram, "uMVPMatrix");
checkGlError("glGetUniformLocation uMVPMatrix");
if (_uMVPMatrixHandle == -1) {
    throw new RuntimeException("Could not get uniform location for uMVPMatrix");
}

_uSTMatrixHandle = GLES20.GlGetUniformLocation(_glProgram, "uSTMatrix");
checkGlError("glGetUniformLocation uSTMatrix");
if (_uSTMatrixHandle == -1) {
    throw new RuntimeException("Could not get uniform location for uSTMatrix");
}

_texelWidthOffsetUniform = GLES20.GlGetUniformLocation(_glProgram, "texelWidthOffset");
checkGlError("glGetUniformLocation texelWidthOffset");
if (_texelWidthOffsetUniform == -1) {
    throw new RuntimeException("Could not get uniform location for texelWidthOffset");
}

_texelHeightOffsetUniform = GLES20.GlGetUniformLocation(_glProgram, "texelHeightOffset");
checkGlError("glGetUniformLocation texelHeightOffset");
if (_texelHeightOffsetUniform == -1) {
    throw new RuntimeException("Could not get uniform location for texelHeightOffset");
}

_sTexture2Uniform = GLES20.GlGetUniformLocation(_glProgram, "sTexture2");
checkGlError("glGetUniformLocation sTexture2Uniform");
if (_sTexture2Uniform == -1) {
    throw new RuntimeException("Could not get uniform location for sTexture2");
}

int[] textures = new int[1];
GLES20.GlGenTextures(1, textures, 0);
_textureID = textures[0];
GLES20.GlBindTexture(GL_TEXTURE_EXTERNAL_OES, _textureID);
checkGlError("glBindTexture _textureID");
GLES20.GlTexParameterf(GL_TEXTURE_EXTERNAL_OES, GLES20.GlTextureMinFilter, GLES20.GlNearest);
GLES20.GlTexParameterf(GL_TEXTURE_EXTERNAL_OES, GLES20.GlTextureMagFilter, GLES20.GlLinear);
Still in the surface-created event, I add a texture by uploading a Bitmap:
source = Bitmap.CreateBitmap(256, 256, Bitmap.Config.Argb8888);
source.EraseColor(Color.Pink.ToArgb());
for (int x = 0; x < 256; x++) {
    for (int y = 0; y < 256; y++) {
        var cof = (int)(System.Math.Cos ((double)x / (double)256) * 255);
        var sif = (int)(System.Math.Sin ((double)y / (double)256) * 255);
        var p = (double)x / (double)256 * 360d;
        var q = (double)y / (double)256 * 360d;
        var c = (int)(((System.Math.Sin (p * q) / 2d) + 0.5d) * 255d);
        source.SetPixel (x, y, Color.Argb (255, c, (c + cof) / 2, (c + sif) / 2));
    }
}

int[] textures = new int[1];
GLES20.GlGenTextures(1, textures, 0);
_texture2Id = textures[0];
GLUtils.TexImage2D(GLES20.GlTexture2d, 0, source, 0);
source.Recycle ();
GLES20.GlBindTexture(GLES20.GlTexture2d, _texture2Id);
GLES20.GlTexParameterf(GLES20.GlTexture2d, GLES20.GlTextureWrapS, GLES20.GlClampToEdge);
GLES20.GlTexParameterf(GLES20.GlTexture2d, GLES20.GlTextureWrapT, GLES20.GlClampToEdge);
GLES20.GlTexParameterf(GLES20.GlTexture2d, GLES20.GlTextureMagFilter, GLES20.GlLinear);
GLES20.GlTexParameterf(GLES20.GlTexture2d, GLES20.GlTextureMinFilter, GLES20.GlLinear);

_surfaceTexture = new SurfaceTexture(_textureID);
_surfaceTexture.FrameAvailable += _surfaceTexture_FrameAvailable;
_mediaPlayer = new MediaPlayer();
Then in OnDraw:
var fragment = _videoView.Filtering.Shaders.Item2;
var vertex = _videoView.Filtering.Shaders.Item1;

var progExists = glPrograms.ContainsKey (vertex + fragment);
if (!progExists) {
    var prog = createProgram (vertex, fragment);
    glPrograms.Add (vertex + fragment, prog);
}
var progToUse = glPrograms [vertex + fragment];

GLES20.GlClearColor(0.0f, 0.0f, 0.0f, 0.0f);
GLES20.GlClear(GLES20.GlColorBufferBit);

lock (syncLock) {
    _surfaceTexture.UpdateTexImage ();
    _surfaceTexture.GetTransformMatrix (_STMatrix);
    updateSurface = false;
}

GLES20.GlViewport(0, 0, _surfaceWidth, _surfaceHeight);

if (progToUse != _glProgram) {
    GLES20.GlUseProgram (progToUse);
}
_glProgram = progToUse;

GLES20.GlUniform1i(_sTexture2Uniform, 1);

GLES20.GlActiveTexture (GLES20.GlTexture0);
GLES20.GlBindTexture (GL_TEXTURE_EXTERNAL_OES, _textureID);
GLES20.GlActiveTexture (GLES20.GlTexture1);
GLES20.GlBindTexture (GLES20.GlTexture2d, _texture2Id);

_triangleVertices.Position (TRIANGLE_VERTICES_DATA_POS_OFFSET);
GLES20.GlVertexAttribPointer (_aPositionHandle, 3, GLES20.GlFloat, false, TRIANGLE_VERTICES_DATA_STRIDE_BYTES, _triangleVertices);
GLES20.GlEnableVertexAttribArray (_aPositionHandle);

_textureVertices.Position (TRIANGLE_VERTICES_DATA_UV_OFFSET);
GLES20.GlVertexAttribPointer (_aTextureCoord, 2, GLES20.GlFloat, false, TEXTURE_VERTICES_DATA_STRIDE_BYTES, _textureVertices);
GLES20.GlEnableVertexAttribArray (_aTextureCoord);

Android.Opengl.Matrix.SetIdentityM (_MVPMatrix, 0);
GLES20.GlUniformMatrix4fv (_uMVPMatrixHandle, 1, false, _MVPMatrix, 0);
GLES20.GlUniformMatrix4fv (_uSTMatrixHandle, 1, false, _STMatrix, 0);
GLES20.GlDrawArrays(GLES20.GlTriangleStrip, 0, 4);
The problem is when I hit this line: `GLES20.GlUniform1i(_sTexture2Uniform, 1);`
I keep getting this error when setting my bitmap texture uniform in OnDraw.
[Adreno200-ES20] <__load_uniform_float:539>: GL_INVALID_OPERATION
The two errors I get are:
[SurfaceTexture] [unnamed-22096-0] updateTexImage: clearing GL error: 0x502
[Adreno200-ES20] <__load_uniform_int:305>: GL_INVALID_OPERATION
The vertex shader looks like this:
uniform mat4 uMVPMatrix;
uniform mat4 uSTMatrix;
uniform highp float texelWidthOffset;
uniform highp float texelHeightOffset;

attribute vec4 aPosition;
attribute vec4 aTextureCoord;

varying vec2 vTextureCoord;

void main() {
    highp float useTX = texelWidthOffset;
    highp float useTY = texelHeightOffset;
    gl_Position = uMVPMatrix * aPosition;
    vTextureCoord = (uSTMatrix * aTextureCoord).xy;
}
The fragment shader looks like this:
#version 150
#extension GL_OES_EGL_image_external : require
precision mediump float;

uniform sampler2D sTexture2;
uniform samplerExternalOES sTexture;

varying vec2 vTextureCoord;
varying float texelWidthOffset;
varying float texelHeightOffset;

void main() {
    highp float useTX = texelWidthOffset;
    highp float useTY = texelHeightOffset;
    lowp vec4 inputColor0 = texture2D(sTexture, vTextureCoord);
    lowp vec4 someColor = mix(inputColor0, texture2D(sTexture2, vTextureCoord), useTY);
    lowp vec4 outputColor0 = mix(inputColor0, texture2D(sTexture, vTextureCoord), useTX);
    gl_FragColor = outputColor0;
}
I don't know what to say except "Help!", since I've tried to follow maybe 100+ tutorials to fix this GL_INVALID_OPERATION error.
None of my attempts has ever resulted in a single colored pixel when mixing the OES external source (a movie file) with a bitmap texture.
[EDIT]
OK. In the code above I was setting _glProgram to the id of the program I had just created, and then, before using the program, checking "is _glProgram equal to that id? If yes, skip the GlUseProgram call." So I was never actually using the program. Now there is not a single OpenGL error, but the screen is completely black. This is good news. I know the pixels from my bitmap texture are correct. (I coded a trig algorithm at random; it happens to form cute polka-dot four-hole buttons.) So I'm revising my OpenGL ES code.
[EDIT 2]
I feel like I'm stuck with this problem.
There's one very clear problem in your code. In this call sequence:
GLES20.GlGenTextures(1, textures, 0);
_texture2Id = textures[0];
GLUtils.TexImage2D(GLES20.GlTexture2d, 0, source, 0);
source.Recycle ();
GLES20.GlBindTexture(GLES20.GlTexture2d, _texture2Id);
You need to bind the texture before calling GLUtils.texImage2D(). That method will load the texture data into the currently bound texture. The correct order is:
GLES20.GlGenTextures(1, textures, 0);
_texture2Id = textures[0];
GLES20.GlBindTexture(GLES20.GlTexture2d, _texture2Id);
GLUtils.TexImage2D(GLES20.GlTexture2d, 0, source, 0);
source.Recycle ();
This doesn't explain the errors from the glUniform1i() call, though. It can give a GL_INVALID_OPERATION error for the following reasons:
No program is bound.
The location is not valid for the currently bound program.
The type/size used for the call does not match the type/size of the uniform in the shader.
We can probably exclude option 1; at the very least, you should be able to check quickly that the program you just bound is valid.
That leaves the options that the location is invalid, or that it is the location of the wrong uniform. Unless I missed something in the relatively lengthy code you posted, the only logical explanation is that the program used to retrieve the uniform locations is not the same as the one you bind for setting the uniforms in OnDraw().
I notice that you're using some kind of shader cache to reuse shaders. While that looks fairly straightforward, I would double- (and triple-, and quadruple-) check that it really works as expected. Compare the program id used during setup, where you get the uniform locations, with the one used when preparing to draw. Make sure that no shaders were destroyed and recreated, and that everything happened on the same thread/context.
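A quick way to verify this in OnDraw, just before setting the uniform (a sketch in the question's Xamarin C# style; GlGetIntegerv with GlCurrentProgram is standard ES 2.0 state querying):

// Confirm the program queried at setup time is the one bound right now,
// and that the uniform location still belongs to it.
int[] current = new int[1];
GLES20.GlGetIntegerv(GLES20.GlCurrentProgram, current, 0);
Android.Util.Log.Debug("GLCheck", "setup program=" + _glProgram + " bound program=" + current[0]);
int locNow = GLES20.GlGetUniformLocation(current[0], "sTexture2");
Android.Util.Log.Debug("GLCheck", "sTexture2 location at setup=" + _sTexture2Uniform + " now=" + locNow);

If the two program ids or the two locations differ, you have found the bug.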
I am developing an Android application using OpenGL ES 2.0. In this application I draw multiple lines and circles via touch events on a GLSurfaceView.
Since OpenGL rendering depends on the GPU, results vary by device. Currently it works fine on the Google Nexus 7 (ULP GeForce).
On the Samsung Galaxy Note 2 (Mali-400 MP), when I try to draw more than one line, it clears the previous line and draws the current line as a new one.
On the Sony Xperia Neo V (Adreno 205), when I try to draw a new line, the surface becomes corrupted (screenshot omitted).
Is it possible to make it work on all devices, or do I need to write code per GPU?
Source code
MainActivity.java
// In the onCreate method of my activity, I set up the GLSurfaceView and renderer
final ActivityManager activityManager =
    ( ActivityManager ) getSystemService( Context.ACTIVITY_SERVICE );
final ConfigurationInfo configurationInfo =
    activityManager.getDeviceConfigurationInfo( );
final boolean supportsEs2 = ( configurationInfo.reqGlEsVersion >= 0x20000
    || Build.FINGERPRINT.startsWith( "generic" ) );

if( supportsEs2 ) {
    Log.i( "JO", "configurationInfo.reqGlEsVersion:"
        + configurationInfo.reqGlEsVersion + " supportsEs2:"
        + supportsEs2 );

    // Request an OpenGL ES 2.0 compatible context.
    myGlsurfaceView.setEGLContextClientVersion( 2 );

    final DisplayMetrics displayMetrics = new DisplayMetrics( );
    getWindowManager( ).getDefaultDisplay( ).getMetrics( displayMetrics );

    // Set the renderer to our demo renderer, defined below.
    myRenderer = new MyRenderer( this, myGlsurfaceView );
    myGlsurfaceView.setRenderer( myRenderer, displayMetrics.density );
    myGlsurfaceView.setRenderMode( GLSurfaceView.RENDERMODE_CONTINUOUSLY );
MyGLSurfaceView.java
// In this class I get the coordinates of my touch on the GLSurfaceView to draw the line,
// and pass those points to the renderer class
public MyGLsurfaceview( Context context ) {
    super( context );
    Log.i( "JO", "MyGLsurfaceview1" );
}

public MyGLsurfaceview( Context context, AttributeSet attrs ) {
    super( context, attrs );
    con = context;
    mActivity = new MainActivity( );
    mActivity.myGlsurfaceView = this;
    Log.i( "JO", "MyGLsurfaceview2" );
}

public void setRenderer( MyRenderer renderer, float density ) {
    Log.i( "JO", "setRenderer" );
    myRenderer = renderer;
    myDensity = density;
    mGestureDetector = new GestureDetector( con, mGestureListener );
    super.setRenderer( renderer );
    setRenderMode( GLSurfaceView.RENDERMODE_CONTINUOUSLY );
}
@Override
public boolean onTouchEvent( MotionEvent ev ) {
    boolean retVal = mGestureDetector.onTouchEvent( ev );
    if( myline ) {
        switch ( ev.getAction( ) ) {
        case MotionEvent.ACTION_DOWN:
            isLUp = false;
            if( count == 1 ) {
                dx = ev.getX( );
                dy = ev.getY( );
                dx = ( dx / ( getWidth( ) / 2 ) ) - 1;
                dy = 1 - ( dy / ( getHeight( ) / 2 ) );
                firstX = dx;
                firstY = dy;
            } else if( count == 2 ) {
                ux = ev.getX( );
                uy = ev.getY( );
                ux = ( ux / ( getWidth( ) / 2 ) ) - 1;
                uy = 1 - ( uy / ( getHeight( ) / 2 ) );
                secondX = ux;
                secondY = uy;
                myRenderer.dx = firstX;
                myRenderer.dy = firstY;
                myRenderer.ux = secondX;
                myRenderer.uy = secondY;
                midX = ( firstX + secondX ) / 2;
                midY = ( firstY + secondY ) / 2;
                Log.e( "JO", "Line:firstX" + firstX + "firstY" + firstY );
                lp = new LinePoints( firstX, firstY, secondX, secondY, midX, midY );
                lineArray.add( lp );
                myRenderer.isNewClick = false;
                myRenderer.isEnteredAngle = false;
                myRenderer.myline = true;
                myRenderer.mycircle = false;
                myRenderer.mydashedline = false;
                myRenderer.eraseCircle = false;
                myRenderer.eraseLine = false;
                myRenderer.eraseSelCir = false;
                myRenderer.angle = angle;
                myRenderer.length = length;
                requestRender( );
                count = 0;
            }
            count++;
            break;
        case MotionEvent.ACTION_MOVE:
            isLUp = true;
            break;
        case MotionEvent.ACTION_UP:
            if( isLUp ) {
                ux = ev.getX( );
                uy = ev.getY( );
                ux = ( ux / ( getWidth( ) / 2 ) ) - 1;
                uy = 1 - ( uy / ( getHeight( ) / 2 ) );
                Log.i( "JO", "line2:" + ux + "," + uy );
                secondX = ux;
                secondY = uy;
                myRenderer.dx = firstX;
                myRenderer.dy = firstY;
                myRenderer.ux = secondX;
                myRenderer.uy = secondY;
                midX = ( firstX + secondX ) / 2;
                midY = ( firstY + secondY ) / 2;
                Log.e( "JO", "Line:firstX" + firstX + "firstY" + firstY );
                lp = new LinePoints( firstX, firstY, secondX, secondY, midX, midY );
                lineArray.add( lp );
                myRenderer.isNewClick = false;
                myRenderer.isEnteredAngle = false;
                myRenderer.myline = true;
                myRenderer.mycircle = false;
                myRenderer.mydashedline = false;
                myRenderer.mysnaptoedge = false;
                myRenderer.mysnaptoMiddle = false;
                myRenderer.eraseCircle = false;
                myRenderer.eraseLine = false;
                myRenderer.eraseSelCir = false;
                count = 1;
                requestRender( );
            }
            break;
        }
    }
    return retVal; // added: the method must return a value
}
MyRenderer.java
// Renderer class that renders the line to the GLSurfaceView
Lines line;

public MyRenderer( MainActivity mainActivity, MyGLsurfaceview myGlsurfaceView ) {
    Log.i( "JO", "MyRenderer" );
    this.main = mainActivity;
    myGlsurface = myGlsurfaceView;
}

public void onDrawFrame( GL10 gl ) {
    line.draw( dx, dy, ux, uy );
}
@Override
public void onSurfaceCreated( GL10 gl, EGLConfig config ) {
    Log.i( "JO", "onSurfaceCreated" );
    // Set the background frame color
    GLES20.glClearColor( 0.0f, 0.0f, 0.0f, 1.0f );

    // Create the GLText
    glText = new GLText( main.getAssets( ) );
    // Load the font from file (set size + padding), creates the texture
    // NOTE: after a successful call to this the font is ready for rendering!
    glText.load( "Roboto-Regular.ttf", 14, 2, 2 ); // Create Font (Height: 14 Pixels / X+Y Padding 2 Pixels)

    // enable texture + alpha blending
    GLES20.glEnable( GLES20.GL_BLEND );
    GLES20.glBlendFunc( GLES20.GL_ONE, GLES20.GL_ONE_MINUS_SRC_ALPHA );
}
@Override
public void onSurfaceChanged( GL10 gl, int width, int height ) {
    // Adjust the viewport based on geometry changes,
    // such as screen rotation
    GLES20.glViewport( 0, 0, width, height );
    ratio = ( float ) width / height;
    width_surface = width;
    height_surface = height;

    /*
     * // this projection matrix is applied to object coordinates
     * // in the onDrawFrame() method
     * Matrix.frustumM(mProjMatrix, 0, -ratio, ratio, -1, 1, 3, 7);
     */

    // Take into account device orientation
    if( width > height ) {
        Matrix.frustumM( mProjMatrix, 0, -ratio, ratio, -1, 1, 1, 10 );
    } else {
        Matrix.frustumM( mProjMatrix, 0, -1, 1, -1 / ratio, 1 / ratio, 1, 10 );
    }

    // Save width and height
    this.width = width;   // Save Current Width
    this.height = height; // Save Current Height

    int useForOrtho = Math.min( width, height );

    // TODO: Is this wrong?
    Matrix.orthoM( mVMatrix, 0, -useForOrtho / 2, useForOrtho / 2,
        -useForOrtho / 2, useForOrtho / 2, 0.1f, 100f );
}
Line.java
// Line class that draws a line
public class Lines {

    final String vertexShaderCode =
        "attribute vec4 vPosition;" +
        "void main() {" +
        "  gl_Position = vPosition;" +
        "}";

    final String fragmentShaderCode =
        "precision mediump float;" +
        "uniform vec4 vColor;" +
        "void main() {" +
        "  gl_FragColor = vColor;" +
        "}";

    final FloatBuffer vertexBuffer;
    final int mProgram;
    int mPositionHandle;
    int mColorHandle;

    // number of coordinates per vertex in this array
    final int COORDS_PER_VERTEX = 3;
    float lineCoords[] = new float[6];
    final int vertexCount = lineCoords.length / COORDS_PER_VERTEX;
    final int vertexStride = COORDS_PER_VERTEX * 4; // bytes per vertex

    // Set color with red, green, blue and alpha (opacity) values
    float lcolor[] = { 1.0f, 1.0f, 1.0f, 1.0f };

    public Lines( ) {
        // initialize vertex byte buffer for shape coordinates
        // (number of coordinate values * 4 bytes per float)
        ByteBuffer bb = ByteBuffer.allocateDirect( lineCoords.length * 4 );
        // use the device hardware's native byte order
        bb.order( ByteOrder.nativeOrder( ) );
        // create a floating point buffer from the ByteBuffer
        vertexBuffer = bb.asFloatBuffer( );

        // prepare shaders and OpenGL program
        int vertexShader = MyRenderer.loadShader( GLES20.GL_VERTEX_SHADER, vertexShaderCode );
        int fragmentShader = MyRenderer.loadShader( GLES20.GL_FRAGMENT_SHADER, fragmentShaderCode );
        mProgram = GLES20.glCreateProgram( );              // create empty OpenGL Program
        GLES20.glAttachShader( mProgram, vertexShader );   // add the vertex shader to program
        GLES20.glAttachShader( mProgram, fragmentShader ); // add the fragment shader to program
        GLES20.glLinkProgram( mProgram );                  // create OpenGL program executables
    }

    public void draw( float dX, float dY, float uX, float uY ) {
        lineCoords[0] = dX;
        lineCoords[1] = dY;
        lineCoords[2] = 0.0f;
        lineCoords[3] = uX;
        lineCoords[4] = uY;
        lineCoords[5] = 0.0f;
        Log.i( "JO", "lineCoords:" + lineCoords[0] + "," + lineCoords[1] +
            "," + lineCoords[3] + "," + lineCoords[4] );
        vertexBuffer.put( lineCoords );
        vertexBuffer.position( 0 );

        // Add program to OpenGL environment
        GLES20.glUseProgram( mProgram );

        // get handle to vertex shader's vPosition member
        mPositionHandle = GLES20.glGetAttribLocation( mProgram, "vPosition" );
        // Enable a handle to the line vertices
        GLES20.glEnableVertexAttribArray( mPositionHandle );
        // Prepare the line coordinate data
        GLES20.glVertexAttribPointer( mPositionHandle, COORDS_PER_VERTEX,
            GLES20.GL_FLOAT, false, vertexStride, vertexBuffer );

        // get handle to fragment shader's vColor member
        mColorHandle = GLES20.glGetUniformLocation( mProgram, "vColor" );
        // Set color for drawing the line
        GLES20.glUniform4fv( mColorHandle, 1, lcolor, 0 );

        GLES20.glLineWidth( 3 );
        // Draw the line
        GLES20.glDrawArrays( GLES20.GL_LINES, 0, vertexCount );
        // Disable vertex array
        GLES20.glDisableVertexAttribArray( mPositionHandle );
    }
}
Okay, here it goes again: [1]
OpenGL is not a scene graph. OpenGL does not maintain a scene, know about objects or keep track of geometry. OpenGL is a drawing API. You give it a canvas (in the form of a window or a PBuffer) and order it to draw points, lines or triangles, and OpenGL does exactly that. Once a primitive (= point, line, triangle) has been drawn, OpenGL has no recollection of it whatsoever. If something changes, you have to redraw the whole thing.
The proper steps to redraw a scene are:
Disable the stencil test, so that the following step operates on the whole window.
Clear the framebuffer using glClear(bits), where bits is a bitmask specifying which parts of the canvas to clear. When rendering a new frame you want to clear everything so bits = GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT;
Set the viewport and build an appropriate projection matrix.
For each object in the scene: load the right modelview matrix, set the uniforms, select the vertex arrays and make the drawing call.
Finish the rendering by flushing the pipeline. If using a single-buffered window call glFinish(); if using a double-buffered window call SwapBuffers. In the case of higher-level frameworks this may be performed by the framework itself.
Important: once the drawing has been finished on a double-buffered window, you must not continue to send drawing operations, as after the buffer swap the contents of the back buffer you're drawing to are undefined. Hence you must start the drawing anew, beginning with clearing the framebuffer (steps 1 and 2).
What your code misses are exactly those two steps. Also, I have the impression that you're performing OpenGL drawing calls in direct reaction to input events, possibly in the input event handlers themselves. Don't do this! Instead, use the input events to add to a list of primitives (lines, in your case) to draw, then post a redraw event, which makes the framework call the drawing function. In the drawing function, iterate over that list to draw the desired lines, as in the sketch below.
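A minimal sketch of that pattern, reusing names from your own code where possible (LinePoints, lineArray, line.draw); makeLineFromTouch and the LinePoints field accessors are placeholders I made up:

// In the view: touch events only record data and request a redraw.
// CopyOnWriteArrayList, because touches arrive on the UI thread while
// onDrawFrame runs on the GL thread.
private final List<LinePoints> lineArray = new CopyOnWriteArrayList<LinePoints>();

@Override
public boolean onTouchEvent( MotionEvent ev ) {
    if( ev.getAction( ) == MotionEvent.ACTION_UP ) {
        lineArray.add( makeLineFromTouch( ev ) ); // no GL calls here!
        requestRender( );                         // ask the GL thread to redraw
    }
    return true;
}

// In the renderer: every frame starts from a cleared framebuffer and
// replays every primitive recorded so far.
@Override
public void onDrawFrame( GL10 gl ) {
    GLES20.glClear( GLES20.GL_COLOR_BUFFER_BIT | GLES20.GL_DEPTH_BUFFER_BIT );
    for( LinePoints lp : lineArray ) {
        line.draw( lp.firstX, lp.firstY, lp.secondX, lp.secondY );
    }
}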
Redrawing the whole scene is canonical in OpenGL!
[1] (geesh, I'm getting tired of having to write this every 3rd question or so…)
Taking a punt here, but are you ever actually clearing the screen? The kinds of behaviour you are seeing suggest that you are not, and that in different scenarios you are seeing different artifacts: uninitialised memory, reuse of an old buffer, implicit clearing, etc.
GL requires you to be specific about what you want, so you need to explicitly clear.
OpenGL is just a standard; the actual implementation of the API is up to the graphics card manufacturer. So yes, OpenGL development can be GPU-dependent sometimes. However, all implementations should provide the same result (what happens behind the scenes can be really different). If your code gives a different result on different GPUs, there is probably a version difference in the OpenGL implementation.
You can use these functions to get the supported OpenGL version:
glGetIntegerv(GL_MAJOR_VERSION, *); //version 3.0+
glGetIntegerv(GL_MINOR_VERSION, *); //version 3.0+
glGetString(GL_VERSION); //all versions
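On Android, for instance, you can log this once a GL context is current (a small sketch; glGetString is available through GLES20):

// e.g. inside onSurfaceCreated, when the GL context is current
String version  = GLES20.glGetString( GLES20.GL_VERSION );
String renderer = GLES20.glGetString( GLES20.GL_RENDERER );
Log.i( "GL", "Version: " + version + " Renderer: " + renderer );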
Why don't you provide a working example, so people could actually help?
From your code:
I can't see where you create your line. Something like:
@Override
public void onSurfaceCreated( GL10 gl, EGLConfig config ) {
    ...
    mLine = new Lines();
    ...
}
As others have already mentioned, always clear the buffer in onDrawFrame:
public void onDrawFrame( GL10 gl ) {
    // Clear the color buffer
    GLES20.glClear(GLES20.GL_COLOR_BUFFER_BIT);
Set the camera:
    // Set the camera position (View matrix)
    Matrix.setLookAtM(mViewMatrix, 0, 0, 0, 3, 0f, 0f, 0f, 0f, 1.0f, 0.0f);

    // Calculate the projection and view transformation
    Matrix.multiplyMM(mMVPMatrix, 0, mProjMatrix, 0, mViewMatrix, 0);
Draw:
line.draw( dx, dy, ux, uy );
Crossposted from my answer to a similar question Why my opengl output differs for various devices?:
"Should we take into account the GPU while coding? No way, the OpenGL API is a layer between your application and the hardware."
This is largely correct for desktop graphics, as all GPUs there are immediate-mode renderers; however, this is NOT the case in mobile graphics.
The Mali GPUs use tile-based immediate-mode rendering. For this type of rendering, the framebuffer is divided into tiles of size 16 by 16 pixels. The Polygon List Builder (PLB) organizes input data from the application into polygon lists. There is a polygon list for each tile. When a primitive covers part of a tile, an entry, called a polygon list command, is added to the polygon list for the tile. The pixel processor takes the polygon list for one tile and computes values for all pixels in that tile before starting work on the next tile. Because this tile-based approach uses a fast, on-chip tile buffer, the GPU only writes the tile buffer contents to the framebuffer in main memory at the end of each tile. Non-tiled-based, immediate-mode renderers generally require many more framebuffer accesses. The tile-based method therefore consumes less memory bandwidth, and supports operations such as depth testing, blending and anti-aliasing efficiently.
Another difference is the treatment of rendered buffers. Immediate-mode renderers will "save" the content of your buffer, effectively allowing you to draw only the differences in the rendered scene on top of what previously existed. This IS available on Mali; however, it is not enabled by default, as it can cause undesired effects if used incorrectly.
There is a Mali GLES2 SDK example of how to use "EGL Preserve" correctly, available in the GLES2 SDK.
The reason the GeForce-ULP-based Nexus 7 works as intended is that, as an immediate-mode renderer, it defaults to preserving the buffers, whereas Mali does not.
From the Khronos EGL specification:
EGL_SWAP_BEHAVIOR
Specifies the effect on the color buffer of posting a surface with eglSwapBuffers. A value of EGL_BUFFER_PRESERVED indicates that color buffer contents are unaffected, while EGL_BUFFER_DESTROYED indicates that color buffer contents may be destroyed or changed by the operation.
The initial value of EGL_SWAP_BEHAVIOR is chosen by the implementation.
The default value of EGL_SWAP_BEHAVIOUR on the Mali platform is EGL_BUFFER_DESTROYED. This is due to the performance hit associated with having to fetch the previous buffer from memory before rendering the new frame and storing it at the end, as well as the bandwidth consumption (which is also incredibly bad for battery life on mobile devices). I am unable to comment with certainty on the default behavior of the Tegra SoCs; however, it is apparent to me that their default is EGL_BUFFER_PRESERVED.
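If you really do want the preserved-buffer behaviour, you can request it explicitly. A hedged sketch using the Android EGL14 API (API 17+; it only takes effect if the surface's EGLConfig was created with EGL_SWAP_BEHAVIOR_PRESERVED_BIT in EGL_SURFACE_TYPE, and it costs bandwidth on tile-based GPUs for exactly the reasons described above):

// Ask EGL to keep the color buffer across eglSwapBuffers.
EGLDisplay display = EGL14.eglGetCurrentDisplay( );
EGLSurface surface = EGL14.eglGetCurrentSurface( EGL14.EGL_DRAW );
boolean preserved = EGL14.eglSurfaceAttrib( display, surface,
        EGL14.EGL_SWAP_BEHAVIOR, EGL14.EGL_BUFFER_PRESERVED );
// If 'preserved' is false, fall back to the portable approach:
// clear and redraw the whole scene every frame.

The portable fix, though, remains clearing and redrawing everything each frame.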
To clarify Mali's position with regards to the Khronos GLES specifications - Mali is fully compliant.
I've just switched my code over to using a separate shader instead of passing a boolean uniform to decide which algorithm to use. Unfortunately, after vigorous testing, I've discovered that one of the attributes (halo) is not being passed through to the new shader. The other attribute it uses (position) is passed through fine.
Abridged code follows:
Java code:
// Attributes
protected static int position = 0;
protected static int colour = 1;
protected static int texture = 2;
protected static int halo = 3;
protected static int normal = 4;

protected static int program1;
protected static int program2;
...
// Linking shader1
GLES20.glBindAttribLocation(program1, position, "position");
GLES20.glBindAttribLocation(program1, colour, "colour");
GLES20.glBindAttribLocation(program1, texture, "texCoord");
GLES20.glBindAttribLocation(program1, normal, "normal");
GLES20.glLinkProgram(program1);
...
// Linking shader2
GLES20.glBindAttribLocation(program2, position, "position");
GLES20.glBindAttribLocation(program2, halo, "halo");
GLES20.glLinkProgram(program2);
...
GLES20.glUseProgram(program1);
GLES20.glVertexAttribPointer(position, 3, GLES20.GL_FLOAT, false, 0, buffer);
...
// Render with program1
...
GLES20.glUseProgram(program2);
GLES20.glVertexAttribPointer(halo, 1, GLES20.GL_FLOAT, false, 0, doHaloBuffer);
GLES20.glEnable(GLES20.GL_BLEND);
GLES20.glDisable(GLES20.GL_DEPTH_TEST);
...
// Using lines for testing purposes
GLES20.glDrawElements(GLES20.GL_LINE_LOOP, haloIndexCount, GLES20.GL_UNSIGNED_SHORT, haloIndexBuffer);
...
Fragment shaders are just simple "Render the texture and colour you get" shaders
shader1.vsh:
attribute vec3 position;
attribute vec4 colour;
attribute vec2 texCoord;
attribute vec3 normal;
...
varying vec2 fragTexCoord;
varying vec4 fragColour;
...
// All attributes used at some point
shader2.vsh:
attribute vec3 position;
attribute float halo;

varying vec4 fragColour;
...
vec4 colour = vec4(1.0, 1.0, 0.0, 1.0);
if (halo > 0.5) {
    colour.g = 0.0;
    ...
}
fragColour = colour;
...
If I change halo > 0.5 to halo == 0.0, or swap the green values in the statements above, red is rendered; otherwise yellow is rendered.
I tried altering the input buffer to be all 1.0 for testing, but it made no difference. It seems that halo is not being passed through.
Previously I had the two shaders merged, with a boolean uniform to decide which code path to run, and it worked fine. Nothing else has changed; the input buffers are the same, the counts are the same. The only difference is that I'm now using separate shaders.
Any thoughts?
Check whether the halo attribute array is enabled just before rendering with glDrawElements.
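For example, something like this right before the draw call (a sketch; glGetVertexAttribiv with GL_VERTEX_ATTRIB_ARRAY_ENABLED is standard ES 2.0):

// Query whether the halo attribute array is enabled, and enable it if not.
int[] enabled = new int[1];
GLES20.glGetVertexAttribiv(halo, GLES20.GL_VERTEX_ATTRIB_ARRAY_ENABLED, enabled, 0);
if (enabled[0] == 0) {
    GLES20.glEnableVertexAttribArray(halo);
}
GLES20.glDrawElements(GLES20.GL_LINE_LOOP, haloIndexCount,
        GLES20.GL_UNSIGNED_SHORT, haloIndexBuffer);

Attribute-enable state is context state, not program state (at least without VAOs), so switching from program1 to program2 does not enable the arrays program2 needs; glEnableVertexAttribArray has to be called for every index the currently bound program actually uses.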