I am trying to implement anisotropic lighting.
Vertex shader:
#version 300 es
uniform mat4 u_mvMatrix;
uniform mat4 u_vMatrix;
in vec4 a_position;
in vec3 a_normal;
...
out lowp float v_DiffuseIntensity;
out lowp float v_SpecularIntensity;
const vec3 lightPosition = vec3(-1.0, 0.0, 5.0);
const lowp vec3 grainDirection = vec3(15.0, 2.8, -1.0);
const vec3 eye_position = vec3(0.0, 0.0, 0.0);
void main() {
// transform normal orientation into eye space
vec3 modelViewNormal = mat3(u_mvMatrix) * a_normal;
vec3 modelViewVertex = vec3(u_mvMatrix * a_position);
vec3 lightVector = normalize(lightPosition - modelViewVertex);
lightVector = mat3(u_vMatrix) * lightVector;
vec3 normalGrain = cross(modelViewNormal, grainDirection);
vec3 tangent = normalize(cross(normalGrain, modelViewNormal));
float LdotT = dot(tangent, normalize(lightVector));
float VdotT = dot(tangent, normalize(mat3(u_mvMatrix) * eye_position));
float NdotL = sqrt(1.0 - pow(LdotT, 2.0));
float VdotR = NdotL * sqrt(1.0 - pow(VdotT, 2.0)) - VdotT * LdotT;
v_DiffuseIntensity = max(NdotL * 0.4 + 0.6, 0.0);
v_SpecularIntensity = max(pow(VdotR, 2.0) * 0.9, 0.0);
...
}
Fragment shader:
...
in lowp float v_DiffuseIntensity;
in lowp float v_SpecularIntensity;
const lowp vec3 default_color = vec3(0.1, 0.7, 0.9);
void main() {
...
lowp vec3 resultColor = (default_color * v_DiffuseIntensity)
+ v_SpecularIntensity;
outColor = vec4(resultColor, 1.0);
}
Overall, the lighting works well on different devices, but an artifact appears on a Samsung tablet, as shown in the figure:
It seems that the darkest areas become completely black. Can anyone suggest why this is happening? Thanks for any answer/comment!
You've got a couple of expressions that risk undefined behaviour:
sqrt(1.0 - pow(LdotT, 2.0))
sqrt(1.0 - pow(VdotT, 2.0))
The pow function is undefined if x is negative. I suspect you're getting away with this because y is 2.0 so they're probably optimised to just be x * x.
The sqrt function is undefined if x is negative. Mathematically it never should be since the magnitude of the dot product of two normalized vectors should never be more than 1, but computations always have error. I think this is causing your rendering artifacts.
I'd change those two expressions to:
sqrt(max(0.0, 1.0 - pow(max(0.0, LdotT), 2.0)))
sqrt(max(0.0, 1.0 - pow(max(0.0, VdotT), 2.0)))
The code looks a lot uglier, but it's safer and max(0.0, x) is a pretty cheap operation.
Edit: Just noticed pow(VdotR, 2.0), I'd change that too.
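For illustration, the three affected lines in the vertex shader would then become (a sketch reusing the question's variable names):
// clamp before pow/sqrt so rounding error cannot push the arguments
// outside the functions' defined domains
float NdotL = sqrt(max(0.0, 1.0 - pow(max(0.0, LdotT), 2.0)));
float VdotR = NdotL * sqrt(max(0.0, 1.0 - pow(max(0.0, VdotT), 2.0))) - VdotT * LdotT;
v_SpecularIntensity = max(pow(max(0.0, VdotR), 2.0) * 0.9, 0.0);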
I have an OpenGL app with a simple shader that runs well on an Android Studio emulator (API 30), but on my own hardware device (also API 30) it doesn't.
The problem is in the fragment shader. This is the code:
#version 100
precision highp float;
struct DirLight {
int on;
vec3 direction;
vec3 ambientColor;
vec3 diffuseColor;
vec3 specularColor;
float specularExponent;
sampler2D shadowMap;
mat4 shadowVPMatrix;
int shadowEnabled;
};
struct PointLight {
int on;
vec3 position;
float constant;
float linear;
float quadratic;
vec3 ambientColor;
vec3 diffuseColor;
vec3 specularColor;
float specularExponent;
sampler2D shadowMap;
mat4 shadowVPMatrix;
int shadowEnabled;
};
#define MAX_NUM_POINT_LIGHTS 8
uniform DirLight uDirLight;
uniform PointLight uPointLights[MAX_NUM_POINT_LIGHTS];
uniform int uNumPointLights;
uniform vec3 uViewPos;
uniform sampler2D uTexture;
uniform int uIsTextured;
varying vec4 vColor;
varying vec4 vPosition;
varying vec3 vNormal;
varying vec2 vTexCoords;
const vec4 bitShifts = vec4(1.0 / (256.0*256.0*256.0), 1.0 / (256.0*256.0), 1.0 / 256.0, 1.0);
vec4 getColor(){
if (uIsTextured != 0){
return texture2D(uTexture,vTexCoords);
}
return vColor;
}
float unpack(vec4 color){
return dot(color, bitShifts);
}
// return 0.0 if in shadow.
// return 1.0 if not in shadow.
float calcShadow(sampler2D shadowMap, vec4 positionFromLight, int shadowEnabled){
if (shadowEnabled == 0){
return 1.0;
}
vec3 positionFromLight3 = positionFromLight.xyz / positionFromLight.w;
positionFromLight3 = (positionFromLight3 + 1.0) / 2.0;
float closestFragmentZ = unpack(texture2D(shadowMap, positionFromLight3.xy));
float currentFragmentZ = positionFromLight3.z;
return float(closestFragmentZ > currentFragmentZ);
}
float diffuseLighting(vec3 normal, vec3 lightDir){
return max(dot(normal, lightDir), 0.0);
}
float specularLighting(vec3 normal, vec3 lightDir, vec3 viewDir, float specularExponent){
vec3 reflectDir = reflect(-lightDir, normal);
return pow(max(dot(viewDir, reflectDir), 0.0), specularExponent);
}
vec4 calcDirLight(vec3 normal, vec3 viewDir){
vec3 lightDir = normalize(-uDirLight.direction);
float diff = diffuseLighting(normal, lightDir);
float spec = specularLighting(normal, lightDir, viewDir, uDirLight.specularExponent);
vec4 color = getColor();
vec4 ambient = vec4(uDirLight.ambientColor, 1.0) * color;
vec4 diffuse = vec4(uDirLight.diffuseColor * diff, 1.0) * color;
vec4 specular = vec4(uDirLight.specularColor * spec, 1.0) * vec4(0.5,0.5,0.5,1.0);
return ambient + (diffuse + specular) * calcShadow(uDirLight.shadowMap, uDirLight.shadowVPMatrix * vPosition, uDirLight.shadowEnabled);
}
float calcAttenuation(PointLight pointLight, float distance){
return 1.0 / (pointLight.constant + pointLight.linear * distance + pointLight.quadratic * (distance * distance));
}
vec4 calcPointLight(PointLight pointLight, vec3 normal, vec3 viewDir){
vec3 d = pointLight.position - vec3(vPosition);
vec3 lightDir = normalize(d);
float diff = diffuseLighting(normal, lightDir);
float spec = specularLighting(normal, lightDir, viewDir, pointLight.specularExponent);
float distance = length(d);
float attenuation = calcAttenuation(pointLight,distance);
vec4 color = getColor();
vec4 ambient = vec4(pointLight.ambientColor, 1.0) * color;
vec4 diffuse = vec4(pointLight.diffuseColor * diff, 1.0) * color;
vec4 specular = vec4(pointLight.specularColor * spec, 1.0) * vec4(0.5,0.5,0.5,1.0);
ambient *= attenuation;
diffuse *= attenuation;
specular *= attenuation;
return ambient + (diffuse + specular) * calcShadow(pointLight.shadowMap, pointLight.shadowVPMatrix * vPosition, pointLight.shadowEnabled);
}
void main() {
vec3 normal = normalize(vNormal);
vec3 viewDir = normalize(uViewPos - vec3(vPosition));
vec4 result = vec4(0.0);
if (uDirLight.on == 1){
result = calcDirLight(normal, viewDir);
}
for (int i = 0; i < uNumPointLights; i++){
if (uPointLights[i].on == 1){
result += calcPointLight(uPointLights[i], normal, viewDir);
}
}
gl_FragColor = result;
}
When I run the app on my device, logcat shows the following lines:
2021-06-24 17:49:14.032 2061-2096/com.outofbound.rhinoengine I/AdrenoGLES-0: Build Config : S P 10.0.7 AArch64
2021-06-24 17:49:14.032 2061-2096/com.outofbound.rhinoengine I/AdrenoGLES-0: Driver Path : /vendor/lib64/egl/libGLESv2_adreno.so
2021-06-24 17:49:14.036 2061-2096/com.outofbound.rhinoengine I/AdrenoGLES-0: PFP: 0x016ee190, ME: 0x00000000
2021-06-24 17:49:14.040 2061-2061/com.outofbound.rhinoengine D/SurfaceView: UPDATE null, mIsCastMode = false
2021-06-24 17:49:14.074 2061-2102/com.outofbound.rhinoengine I/AdrenoGLES-0: ERROR: 0:101: 'viewDir' : undeclared identifier
ERROR: 0:101: 'specularLighting' : no matching overloaded function found
ERROR: 2 compilation errors. No code generated.
2021-06-24 17:49:14.075 2061-2102/com.outofbound.rhinoengine I/AdrenoGLES-0: ERROR: 0:101: 'viewDir' : undeclared identifier
ERROR: 0:101: 'specularLighting' : no matching overloaded function found
ERROR: 2 compilation errors. No code generated.
2021-06-24 17:49:15.316 2061-2085/com.outofbound.rhinoengine W/System: A resource failed to call close.
But if I simply rename viewDir to v in the main() function,
void main() {
vec3 normal = normalize(vNormal);
vec3 v = normalize(uViewPos - vec3(vPosition));
vec4 result = vec4(0.0);
if (uDirLight.on == 1){
result = calcDirLight(normal, v);
}
for (int i = 0; i < uNumPointLights; i++){
if (uPointLights[i].on == 1){
result += calcPointLight(uPointLights[i], normal, v);
}
}
gl_FragColor = result;
}
the error above disappears, but the app still doesn't work and only shows a black screen.
Any tips?
It looks to me like the viewDir issue is a driver bug where the compiler messes up while trying to inline your code.
However, you should be aware that this is not a simple shader by OpenGL ES 2 standards. As Dpk implied, you cannot assume high precision is available in OpenGL ES 2.
Additionally, you cannot assume that there's anywhere near enough uniform space for your shader. Try using glGetIntegerv(GL_MAX_FRAGMENT_UNIFORM_VECTORS, &maxFragmentUniforms); to see how many uniforms are supported. Devices are allowed to go as low as 16 vec4s, but your shader uses hundreds.
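On Android, a quick way to check this might look something like the following (a sketch using android.opengl.GLES20; maxFragmentUniforms is just a local name):
val maxFragmentUniforms = IntArray(1)
GLES20.glGetIntegerv(GLES20.GL_MAX_FRAGMENT_UNIFORM_VECTORS, maxFragmentUniforms, 0)
// each PointLight in the shader above consumes several of these vec4 slots
Log.i("GL", "GL_MAX_FRAGMENT_UNIFORM_VECTORS = ${maxFragmentUniforms[0]}")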
I'd suggest you consider switching to OpenGL ES 3 or 3.1 if you don't want to worry about some of the tight limits of GLES2. If you persist with OpenGL ES 2, then maybe cut the shader right back to literally nothing (just return a colour) and gradually build up the functionality.
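A bare-bones starting point might be as small as this (GLSL ES 1.00):
precision mediump float;
void main() {
    // solid colour only; add the lighting back one feature at a time
    gl_FragColor = vec4(1.0, 0.0, 1.0, 1.0);
}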
Also, make sure you are checking for errors on shader compilation, linking, and all other OpenGL ES calls; it can save a lot of time.
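A compile-status check on Android might look something like this (a sketch in Kotlin using android.opengl.GLES20; how you report the failure is up to you):
fun compileShader(type: Int, source: String): Int {
    val shader = GLES20.glCreateShader(type)
    GLES20.glShaderSource(shader, source)
    GLES20.glCompileShader(shader)
    val status = IntArray(1)
    GLES20.glGetShaderiv(shader, GLES20.GL_COMPILE_STATUS, status, 0)
    if (status[0] == 0) {
        // the info log contains exactly the kind of errors shown in the question's logcat
        val log = GLES20.glGetShaderInfoLog(shader)
        GLES20.glDeleteShader(shader)
        throw RuntimeException("Shader compile failed: $log")
    }
    return shader
}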
Try:
//#version 100
//precision highp float;
precision mediump float;
And try this (OpenGL ES 2.0 may not support int in this parameter; see the documentation):
float on;
//if (uDirLight.on == 1){
if (uDirLight.on == 1.0){
I think the error is related to the uniform array uniform PointLight uPointLights[MAX_NUM_POINT_LIGHTS];, so I solved it by using a single point light:
uniform PointLight uPointLight;
Anyway, I'll check whether defining multiple uniforms uniform PointLight uPointLightN; with 0 <= N < MAX_NUM_POINT_LIGHTS still works.
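For reference, a minimal sketch of that unrolled variant might look like this (only two lights shown; the uPointLight0/uPointLight1 names are illustrative):
uniform PointLight uPointLight0;
uniform PointLight uPointLight1;
uniform int uNumPointLights;
...
// in main(), index the lights by hand instead of looping over a uniform array
if (uNumPointLights > 0 && uPointLight0.on == 1) {
    result += calcPointLight(uPointLight0, normal, v);
}
if (uNumPointLights > 1 && uPointLight1.on == 1) {
    result += calcPointLight(uPointLight1, normal, v);
}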
There is a problem that I just can't seem to get a handle on.
I have a fragment shader:
precision mediump float;
uniform vec3 u_AmbientColor;
uniform vec3 u_LightPos;
uniform float u_Attenuation_Constant;
uniform float u_Attenuation_Linear;
uniform float u_Attenuation_Quadradic;
uniform vec3 u_LightColor;
varying vec3 v_Normal;
varying vec3 v_fragPos;
vec4 fix(vec3 v);
void main() {
vec3 color = vec3(1.0,1.0,1.0);
vec3 vectorToLight = u_LightPos - v_fragPos;
float distance = length(vectorToLight);
vec3 direction = vectorToLight / distance;
float attenuation = 1.0/(u_Attenuation_Constant +
u_Attenuation_Linear * distance + u_Attenuation_Quadradic * distance * distance);
vec3 diffuse = u_LightColor * attenuation * max(normalize(v_Normal) * direction,0.0);
vec3 d = u_AmbientColor + diffuse;
gl_FragColor = fix(color * d);
}
vec4 fix(vec3 v){
float r = min(1.0,max(0.0,v.r));
float g = min(1.0,max(0.0,v.g));
float b = min(1.0,max(0.0,v.b));
return vec4(r,g,b,1.0);
}
I've been following a tutorial I found on the web. The u_AmbientColor and u_LightColor uniforms are (0.2, 0.2, 0.2) and (1.0, 1.0, 1.0) respectively. v_Normal is calculated in the vertex shader using the inverse transpose of the model-view matrix, and v_fragPos is the result of multiplying the position by the regular model-view matrix.
Now, I expect that when I move the light position closer to the cube I render, it will just appear brighter, but the resulting image is very different:
(The little square is an indicator for the light position.)
I just don't understand how this can happen. I multiply each of the color components by the same value, so how can the result vary so much?
EDIT: I noticed that if I move the camera in front of the cube, the light is just shades of blue. It's the same problem, but maybe it's a clue.
The Lambertian reflectance is computed with the dot product of the normal vector and the vector to the light source, not with the component-wise product.
See How does the calculation of the light model work in a shader program?
Use the dot function instead of the * (multiplication) operator:
vec3 diffuse = u_LightColor * attenuation * max(normalize(v_Normal) * direction,0.0);
vec3 diffuse = u_LightColor * attenuation * max(dot(normalize(v_Normal), direction), 0.0);
You can simplify the code in the fix function: min and max can be substituted with clamp. These functions work component-wise, so they do not have to be called separately for each component:
vec4 fix(vec3 v)
{
return vec4(clamp(v, 0.0, 1.0), 1.0);
}
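Putting both changes together, the main function from the question might look like this (a sketch; the surrounding declarations stay the same):
void main() {
    vec3 color = vec3(1.0, 1.0, 1.0);
    vec3 vectorToLight = u_LightPos - v_fragPos;
    float distance = length(vectorToLight);
    vec3 direction = vectorToLight / distance;
    float attenuation = 1.0 / (u_Attenuation_Constant +
        u_Attenuation_Linear * distance + u_Attenuation_Quadradic * distance * distance);
    // dot() yields the scalar Lambert factor instead of a per-component product
    vec3 diffuse = u_LightColor * attenuation * max(dot(normalize(v_Normal), direction), 0.0);
    gl_FragColor = vec4(clamp(color * (u_AmbientColor + diffuse), 0.0, 1.0), 1.0);
}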
Problem: The direction of the directional light changes when the position of the object changes.
I looked at posts with a similar problem:
Directional light in worldSpace is dependent on viewMatrix
OpenGL directional light shader
Diffuse lighting for a moving object
Based on these posts, I tried to apply this:
#version 300 es
uniform mat4 u_mvMatrix;
uniform mat4 u_vMatrix;
in vec4 a_position;
in vec3 a_normal;
const vec3 lightDirection = vec3(-1.9, 0.0, -5.0);
...
void main() {
vec3 modelViewNormal = vec3(u_mvMatrix * vec4(a_normal, 0.0));
vec3 lightVector = lightDirection * mat3(u_vMatrix);
float diffuseFactor = max(dot(modelViewNormal, -lightVector), 0.0);
...
}
But the result looks like this:
I also tried:
vec3 modelViewVertex = vec3(u_mvMatrix * a_position);
vec3 lightVector = normalize(lightDirection - modelViewVertex);
float diffuseFactor = max(dot(modelViewNormal, lightVector), 0.0);
And:
vec3 lightVector = normalize(lightDirection - modelViewVertex);
lightVector = lightVector * mat3(u_vMatrix);
But the result:
What changes need to be made to the code so that all objects are lit identically?
Thanks in advance!
Solution:
In practice, creating directional lighting was not such an easy task for me. On Rabbid76's advice, I changed the order of multiplication. Following another piece of Rabbid76's advice (post), I also created a custom point of view:
Matrix.setLookAtM(pointViewMatrix, 0,
    3.8f, 0.0f, 2.8f,  // eye
    0.0f, 0.0f, 0.0f,  // center
    0.0f, 1.0f, 0.0f)  // up
I also calculated the eye coordinates and the light vector, even though the camera is set at [0, 0, 0]:
#version 300 es
uniform mat4 u_mvMatrix;
uniform mat4 u_pointViewMatrix;
in vec4 a_position;
in vec3 a_normal;
const vec3 lightPosition = vec3(-5.0, 0.0, 1.0);
...
void main() {
// transform normal orientation into eye space
vec3 modelViewNormal = vec3(u_mvMatrix * vec4(a_normal, 0.0));
vec3 modelViewVertex = vec3(u_mvMatrix * a_position); // eye coordinates
vec3 lightVector = normalize(lightPosition - modelViewVertex);
lightVector = mat3(u_pointViewMatrix) * lightVector;
float diffuseFactor = max(dot(modelViewNormal, lightVector), 0.0);
...
}
Only after these steps did the picture look right:
The small remaining differences are probably caused by the strong perspective.
The vector has to be multiplied by the matrix from the right (matrix * vector). See GLSL Programming/Vector and Matrix Operations.
vec3 lightVector = lightDirection * mat3(u_vMatrix);
vec3 lightVector = mat3(u_vMatrix) * lightDirection;
If you want to do the lighting calculations in view space, then the normal vector has to be transformed from object (model) space to view space by the model-view matrix, and the light direction has to be transformed from world space to view space by the view matrix. For instance:
void main() {
vec3 modelViewNormal = mat3(u_mvMatrix) * a_normal;
vec3 lightVector = mat3(u_vMatrix) * lightDirection;
float diffuseFactor = max(dot(modelViewNormal, -lightVector), 0.0);
// [...]
}
The following is the GLSL Fragment Shader code I'm concerned about:
#extension GL_OES_EGL_image_external : require
precision lowp float;
varying highp vec2 v_TexCoordinate;
uniform samplerExternalOES u_Texture;
uniform float uParamValue1; // hue
uniform float uParamValue2; // hue of replacing color
const float delta = 0.1;
vec3 rgb2hsv(vec3 c)
{
vec4 K = vec4(0.0, -1.0 / 3.0, 2.0 / 3.0, -1.0);
vec4 p = c.g < c.b ? vec4(c.bg, K.wz) : vec4(c.gb, K.xy);
vec4 q = c.r < p.x ? vec4(p.xyw, c.r) : vec4(c.r, p.yzx);
float d = q.x - min(q.w, q.y);
float e = 1.0e-10;
return vec3(abs(q.z + (q.w - q.y) / (6.0 * d + e)), d / (q.x + e), q.x);
}
vec3 hsv2rgb(vec3 c)
{
vec4 K = vec4(1.0, 2.0 / 3.0, 1.0 / 3.0, 3.0);
vec3 p = abs(fract(c.xxx + K.xyz) * 6.0 - K.www);
return c.z * mix(K.xxx, clamp(p - K.xxx, 0.0, 1.0), c.y);
}
void main()
{
vec3 texel = texture2D(u_Texture, v_TexCoordinate).rgb;
vec3 texelHsv = rgb2hsv(texel);
if(!(abs(texelHsv.x - uParamValue1) < delta))
{
texel = vec3(dot(vec3(0.299, 0.587, 0.114), texel));
//texel = vec3(texture2D(inputImageTexture2, vec2(texel.r, .16666)).r);
}
else
{
texelHsv.x = uParamValue2;
texel = hsv2rgb(texelHsv);
}
gl_FragColor = vec4(texel, 1.0);
}
The values of uParamValue1 and uParamValue2 are changed via two SeekBars.
When I checked the uniform locations of uParamValue1 and uParamValue2, they were valid on both my Galaxy S6 and my Xiaomi Mi 3W (1 and 2, respectively).
However, when I move the slider that corresponds to uParamValue1, the shader doesn't seem to respond to the changes on the Xiaomi Mi 3W, whereas on the Galaxy S6 it works fine.
Why is this, and how can I prevent it from happening?
I am using OpenGL ES 3.0, and my GLSL version is #version 300 es.
I am trying to calculate a luminance histogram on the GPU.
I have identified that my device supports vertex texture fetch, and I am trying to read color information in the vertex shader using the texelFetch function. I am using texelFetch because I pass in every texture coordinate, so that I can read the color at every pixel.
Attaching the code:
#version 300 es
uniform sampler2D imageTexture;
in vec2 texturePosition;
const float SCREEN_WIDTH = 1024.0;
in vec4 position;
vec2 texturePos;
out vec3 colorFactor;
const vec3 W = vec3(0.299, 0.587, 0.114);
void main() {
texturePos = texturePosition / SCREEN_WIDTH;
vec3 color = texelFetch(imageTexture, texturePos.xy,0).rgb;
float luminance = dot(color.xyz,W);
colorFactor = vec3(1.0, 1.0, 1.0);
gl_Position = vec4(-1.0 + (luminance * 0.00784313725), 0.0, 0.0, 1.0);
gl_PointSize = 1.0;
};
Now I am getting the error 'texelFetch' : no matching overloaded function found.
Could somebody help with this error and suggest how to calculate a luminance histogram on the GPU?
texturePos needs to be an ivec2 with integer texture coordinates (i.e. the pixel position in the texture) rather than normalized [0.0, 1.0] floating-point coordinates.
The code below should work:
#version 300 es
uniform sampler2D imageTexture;
in ivec2 texturePosition;
in vec4 position;
out vec3 colorFactor;
const vec3 W = vec3(0.299, 0.587, 0.114);
void main() {
vec3 color = texelFetch(imageTexture, texturePosition, 0).rgb;
float luminance = dot(color, W);
colorFactor = vec3(1.0, 1.0, 1.0);
gl_Position = vec4(-1.0 + (luminance * 0.00784313725), 0.0, 0.0, 1.0);
gl_PointSize = 1.0;
};
But you need to feed the 'texturePosition' input with integer (x, y) pixel coordinates for every pixel you want to include in your histogram (e.g. with VBOs).
As pointed out in the comments, you'll need to use glVertexAttribIPointer() to feed integer types.
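On the Android side, that attribute setup might look roughly like this (a sketch in Kotlin using android.opengl.GLES30; pixelCoordVbo, texturePositionHandle and pixelCount are placeholder names):
// the bound VBO is assumed to hold two GL_INT values (x, y) per point
GLES30.glBindBuffer(GLES30.GL_ARRAY_BUFFER, pixelCoordVbo)
GLES30.glEnableVertexAttribArray(texturePositionHandle)
// glVertexAttribIPointer keeps the values as integers, matching the ivec2 attribute
GLES30.glVertexAttribIPointer(texturePositionHandle, 2, GLES30.GL_INT, 0, 0)
GLES30.glDrawArrays(GLES30.GL_POINTS, 0, pixelCount)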