Is OpenGL Development GPU Dependent? - Android

I am developing an Android application in OpenGL ES 2.0. In this application I draw multiple lines and circles in response to touch events on a GLSurfaceView.
Since OpenGL depends on the GPU, it currently works fine on the Google Nexus 7 (ULP GeForce).
On the Samsung Galaxy Note 2 (Mali-400 MP), when I try to draw more than one line, it clears the previous line and draws the current line as if it were new.
On the Sony Xperia Neo V (Adreno 205), when I try to draw a new line, the surface crashes as shown in the image below.
Is it possible to make it work on all devices, or do I need to write code for each individual GPU?
Source code
MainActivity.java
// In the onCreate method of my activity, I set up the GLSurfaceView and renderer
final ActivityManager activityManager =
( ActivityManager ) getSystemService( Context.ACTIVITY_SERVICE );
final ConfigurationInfo configurationInfo =
activityManager.getDeviceConfigurationInfo( );
final boolean supportsEs2 = ( configurationInfo.reqGlEsVersion >= 0x20000
|| Build.FINGERPRINT.startsWith( "generic" ) );
if( supportsEs2 ) {
Log.i( "JO", "configurationInfo.reqGlEsVersion:"
+ configurationInfo.reqGlEsVersion + "supportsEs2:"
+ supportsEs2 );
// Request an OpenGL ES 2.0 compatible context.
myGlsurfaceView.setEGLContextClientVersion( 2 );
final DisplayMetrics displayMetrics = new DisplayMetrics( );
getWindowManager( ).getDefaultDisplay( ).getMetrics( displayMetrics );
// Set the renderer to our demo renderer, defined below.
myRenderer = new MyRenderer( this, myGlsurfaceView );
myGlsurfaceView.setRenderer( myRenderer, displayMetrics.density );
myGlsurfaceView.setRenderMode( GLSurfaceView.RENDERMODE_CONTINUOUSLY );
MyGLSurfaceView.java
// In this class I get the coordinates of the touch on the GLSurfaceView to draw the line
// and pass those points to the renderer class
public MyGLsurfaceview( Context context ) {
super( context );
Log.i( "JO", "MyGLsurfaceview1" );
}
public MyGLsurfaceview(
Context context,
AttributeSet attrs )
{
super( context, attrs );
con = context;
mActivity = new MainActivity( );
mActivity.myGlsurfaceView = this;
Log.i( "JO", "MyGLsurfaceview2" );
}
public void setRenderer(
MyRenderer renderer,
float density )
{
Log.i( "JO", "setRenderer" );
myRenderer = renderer;
myDensity = density;
mGestureDetector = new GestureDetector( con, mGestureListener );
super.setRenderer( renderer );
setRenderMode( GLSurfaceView.RENDERMODE_CONTINUOUSLY );
}
@Override public boolean onTouchEvent( MotionEvent ev ) {
boolean retVal = mGestureDetector.onTouchEvent( ev );
if( myline ) {
switch ( ev.getAction( ) ) {
case MotionEvent.ACTION_DOWN:
isLUp = false;
if( count == 1 ) {
dx = ev.getX( );
dy = ev.getY( );
dx = ( dx / ( getWidth( ) / 2 ) ) - 1;
dy = 1 - ( dy / ( getHeight( ) / 2 ) );
firstX = dx;
firstY = dy;
} else if( count == 2 ) {
ux = ev.getX( );
uy = ev.getY( );
ux = ( ux / ( getWidth( ) / 2 ) ) - 1;
uy = 1 - ( uy / ( getHeight( ) / 2 ) );
secondX = ux;
secondY = uy;
myRenderer.dx = firstX;
myRenderer.dy = firstY;
myRenderer.ux = secondX;
myRenderer.uy = secondY;
midX = ( firstX + secondX ) / 2;
midY = ( firstY + secondY ) / 2;
Log.e( "JO",
"Line:firstX" + firstX +
"firstY" + firstY );
lp = new LinePoints( firstX, firstY,
secondX, secondY,
midX, midY );
lineArray.add( lp );
myRenderer.isNewClick = false;
myRenderer.isEnteredAngle = false;
myRenderer.myline = true;
myRenderer.mycircle = false;
myRenderer.mydashedline = false;
myRenderer.eraseCircle = false;
myRenderer.eraseLine = false;
myRenderer.eraseSelCir = false;
myRenderer.angle = angle;
myRenderer.length = length;
requestRender( );
count = 0;
}
count++;
break;
case MotionEvent.ACTION_MOVE:
isLUp = true;
break;
case MotionEvent.ACTION_UP:
if( isLUp ) {
ux = ev.getX( );
uy = ev.getY( );
ux = ( ux / ( getWidth( ) / 2 ) ) - 1;
uy = 1 - ( uy / ( getHeight( ) / 2 ) );
Log.i( "JO", "line2:" + ux + "," + uy );
secondX = ux;
secondY = uy;
myRenderer.dx = firstX;
myRenderer.dy = firstY;
myRenderer.ux = secondX;
myRenderer.uy = secondY;
midX = ( firstX + secondX ) / 2;
midY = ( firstY + secondY ) / 2;
Log.e( "JO",
"Line:firstX" + firstX +
"firstY" + firstY );
lp = new LinePoints( firstX, firstY,
secondX, secondY,
midX, midY );
lineArray.add( lp );
myRenderer.isNewClick = false;
myRenderer.isEnteredAngle = false;
myRenderer.myline = true;
myRenderer.mycircle = false;
myRenderer.mydashedline = false;
myRenderer.mysnaptoedge = false;
myRenderer.mysnaptoMiddle = false;
myRenderer.eraseCircle = false;
myRenderer.eraseLine = false;
myRenderer.eraseSelCir = false;
count = 1;
requestRender( );
}
break;
}
}
return retVal;
}
}
MyRenderer.java
//renderer class to render the line to the glsurfaceview
Lines line;
public MyRenderer(
MainActivity mainActivity,
MyGLsurfaceview myGlsurfaceView )
{
Log.i( "JO", "MyRenderer" );
this.main = mainActivity;
myGlsurface = myGlsurfaceView;
}
public void onDrawFrame(
GL10 gl )
{
line.draw( dx, dy, ux, uy );
}
@Override public void onSurfaceCreated(
GL10 gl,
EGLConfig config )
{
Log.i( "JO", "onSurfaceCreated" );
// Set the background frame color
GLES20.glClearColor( 0.0f, 0.0f, 0.0f, 1.0f );
// Create the GLText
glText = new GLText( main.getAssets( ) );
// Load the font from file (set size + padding), creates the texture
// NOTE: after a successful call to this the font is ready for
// rendering!
glText.load( "Roboto-Regular.ttf", 14, 2, 2 ); // Create Font (Height: 14
// Pixels / X+Y Padding
// 2 Pixels)
// enable texture + alpha blending
GLES20.glEnable( GLES20.GL_BLEND );
GLES20.glBlendFunc( GLES20.GL_ONE, GLES20.GL_ONE_MINUS_SRC_ALPHA );
}
@Override public void onSurfaceChanged(
GL10 gl,
int width,
int height )
{
// Adjust the viewport based on geometry changes,
// such as screen rotation
GLES20.glViewport( 0, 0, width, height );
ratio = ( float ) width / height;
width_surface = width;
height_surface = height;
/*
* // this projection matrix is applied to object coordinates // in the
* onDrawFrame() method Matrix.frustumM(mProjMatrix, 0, -ratio, ratio,
* -1, 1, 3, 7);
*/
// Take into account device orientation
if( width > height ) {
Matrix.frustumM( mProjMatrix, 0, -ratio, ratio, -1, 1, 1, 10 );
} else {
Matrix.frustumM( mProjMatrix, 0, -1, 1, -1 / ratio, 1 / ratio,
1, 10 );
}
// Save width and height
this.width = width; // Save Current Width
this.height = height; // Save Current Height
int useForOrtho = Math.min( width, height );
// TODO: Is this wrong?
Matrix.orthoM( mVMatrix, 0, -useForOrtho / 2, useForOrtho / 2,
-useForOrtho / 2, useForOrtho / 2, 0.1f, 100f );
}
Line.java
//Line class to draw line
public class Lines
{
final String vertexShaderCode = "attribute vec4 vPosition;"
+ "void main() {" + " gl_Position = vPosition;" + "}";
final String fragmentShaderCode = "precision mediump float;"
+ "uniform vec4 vColor;" + "void main() {"
+ " gl_FragColor = vColor;" + "}";
final FloatBuffer vertexBuffer;
final int mProgram;
int mPositionHandle;
int mColorHandle;
// number of coordinates per vertex in this array
final int COORDS_PER_VERTEX = 3;
float lineCoords[] = new float[6];
final int vertexCount = lineCoords.length / COORDS_PER_VERTEX;
final int vertexStride = COORDS_PER_VERTEX * 4; // bytes per vertex
// Set color with red, green, blue and alpha (opacity) values
float lcolor[] = { 1.0f, 1.0f, 1.0f, 1.0f };
public Lines(
)
{
// initialize vertex byte buffer for shape coordinates
ByteBuffer bb = ByteBuffer.allocateDirect(
// (number of coordinate values * 4 bytes per float)
lineCoords.length * 4 );
// use the device hardware's native byte order
bb.order( ByteOrder.nativeOrder( ) );
// create a floating point buffer from the ByteBuffer
vertexBuffer = bb.asFloatBuffer( );
// prepare shaders and OpenGL program
int vertexShader =
MyRenderer.loadShader( GLES20.GL_VERTEX_SHADER,
vertexShaderCode );
int fragmentShader =
MyRenderer.loadShader( GLES20.GL_FRAGMENT_SHADER,
fragmentShaderCode );
mProgram = GLES20.glCreateProgram( ); // create empty OpenGL Program
GLES20.glAttachShader( mProgram, vertexShader ); // add the vertex shader
// to program
GLES20.glAttachShader( mProgram, fragmentShader ); // add the fragment
// shader to program
GLES20.glLinkProgram( mProgram ); // create OpenGL program executables
}
public void draw(
float dX,
float dY,
float uX,
float uY )
{
lineCoords[0] = dX;
lineCoords[1] = dY;
lineCoords[2] = 0.0f;
lineCoords[3] = uX;
lineCoords[4] = uY;
lineCoords[5] = 0.0f;
Log.i( "JO",
"lineCoords:" + lineCoords[0] + "," + lineCoords[1] +
"," + lineCoords[3] + "," + lineCoords[4] );
vertexBuffer.put( lineCoords );
vertexBuffer.position( 0 );
// Add program to OpenGL environment
GLES20.glUseProgram( mProgram );
// get handle to vertex shader's vPosition member
mPositionHandle =
GLES20.glGetAttribLocation( mProgram, "vPosition" );
// Enable a handle to the triangle vertices
GLES20.glEnableVertexAttribArray( mPositionHandle );
// Prepare the triangle coordinate data
GLES20.glVertexAttribPointer( mPositionHandle,
COORDS_PER_VERTEX,
GLES20.GL_FLOAT, false,
vertexStride, vertexBuffer );
// get handle to fragment shader's vColor member
mColorHandle =
GLES20.glGetUniformLocation( mProgram, "vColor" );
// Set color for drawing the triangle
GLES20.glUniform4fv( mColorHandle, 1, lcolor, 0 );
GLES20.glLineWidth( 3 );
// Draw the triangle
GLES20.glDrawArrays( GLES20.GL_LINES, 0, vertexCount );
// Disable vertex array
GLES20.glDisableVertexAttribArray( mPositionHandle );
}
}

Okay, here it goes again: [1]
OpenGL is not a scene graph. OpenGL does not maintain a scene, know about objects, or keep track of geometry. OpenGL is a drawing API. You give it a canvas (in the form of a Window or a PBuffer) and order it to draw points, lines or triangles, and OpenGL does exactly that. Once a primitive (= point, line, triangle) has been drawn, OpenGL has no recollection of it whatsoever. If something changes, you have to redraw the whole thing.
The proper steps to redraw a scene are:
Disable the stencil test, so that the following step operates on the whole window.
Clear the framebuffer using glClear(bits), where bits is a bitmask specifying which parts of the canvas to clear. When rendering a new frame you want to clear everything so bits = GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT;
Set the viewport and build an appropriate projection matrix.
For each object in the scene, load the right modelview matrix, set the uniforms, select the vertex arrays and make the drawing call.
Finish the rendering by flushing the pipeline: call glFinish() if using a single-buffered window, or SwapBuffers if using a double-buffered window. With higher-level frameworks this may be performed by the framework itself.
Important: Once drawing has finished on a double-buffered window, you must not continue to send drawing operations, because after the buffer swap the contents of the back buffer you are drawing to are undefined. Hence you must start the drawing anew, beginning with clearing the framebuffer (steps 1 and 2).
What your code misses are exactly those two steps. I also have the impression that you're performing OpenGL drawing calls in direct reaction to input events, possibly in the input event handlers themselves. Don't do this! Instead, use the input events to add to a list of primitives (lines in your case) to draw, then send a redraw event, which makes the framework call the drawing function. In the drawing function, iterate over that list to draw the desired lines.
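For illustration, a minimal sketch of that pattern (this is not your exact renderer; the LinePoints field names firstX/firstY/secondX/secondY are assumed from the question's code, and it needs java.util.concurrent.CopyOnWriteArrayList plus android.opengl.GLES20):
// Keep the lines in a list that the touch handler appends to,
// and redraw the whole list every frame.
private final CopyOnWriteArrayList<LinePoints> lineArray = new CopyOnWriteArrayList<LinePoints>();

@Override
public void onDrawFrame(GL10 gl) {
    // Steps 1-2: start every frame from a cleared canvas.
    GLES20.glClear(GLES20.GL_COLOR_BUFFER_BIT | GLES20.GL_DEPTH_BUFFER_BIT);
    // Step 4: iterate over the recorded primitives and draw each one.
    for (LinePoints lp : lineArray) {
        line.draw(lp.firstX, lp.firstY, lp.secondX, lp.secondY);
    }
    // Step 5: GLSurfaceView swaps the buffers after this method returns.
}

// In the touch handler, instead of drawing directly:
// lineArray.add(new LinePoints(firstX, firstY, secondX, secondY, midX, midY));
// requestRender();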
Redrawing the whole scene is canonical in OpenGL!
[1] (geesh, I'm getting tired of having to write this every 3rd question or so…)

Taking a punt here, but are you ever actually clearing the screen? The kinds of behaviour you are seeing suggest that you are not, and that in different scenarios you are seeing different errors: uninitialised memory, reusing an old buffer, implicit clearing, etc.
GL requires you to be specific about what you want, so you need to explicitly clear.

OpenGL is just a standard. The actual implementation of the API is up to the graphics card manufacturer. So yes, OpenGL development can sometimes be GPU dependent. However, all implementations should provide the same result (what happens behind the scenes can be really different). If your code gives a different result on different GPUs, there is probably a version difference in the OpenGL implementation.
You can use these functions to get the supported OpenGL version:
glGetIntegerv(GL_MAJOR_VERSION, *); // version 3.0+
glGetIntegerv(GL_MINOR_VERSION, *); // version 3.0+
glGetString(GL_VERSION); // all versions
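On Android with an ES 2.0 context, the equivalent query is a one-liner (a sketch; it must run on the GL thread with the context current, e.g. in onSurfaceCreated):
// Logs the version plus renderer/vendor strings, which helps when comparing GPUs.
String version  = GLES20.glGetString(GLES20.GL_VERSION);
String renderer = GLES20.glGetString(GLES20.GL_RENDERER);
String vendor   = GLES20.glGetString(GLES20.GL_VENDOR);
Log.i("GLInfo", "version=" + version + ", renderer=" + renderer + ", vendor=" + vendor);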

Why don't you provide a working example, so people could actually help?
From your code:
I can't see where you create your line. Something like:
@Override public void onSurfaceCreated(GL10 gl, EGLConfig config){
...
mLine = new Lines();
...
}
As others already mentioned, in onDrawFrame always clear the buffer:
public void onDrawFrame(GL10 gl )
{
// Clear the color buffer
GLES20.glClear(GLES20.GL_COLOR_BUFFER_BIT);
Set the camera:
// Set the camera position (View matrix)
Matrix.setLookAtM(mViewMatrix, 0, 0, 0, 3, 0f, 0f, 0f, 0f, 1.0f, 0.0f);
//
// Calculate the projection and view transformation
Matrix.multiplyMM(mMVPMatrix, 0, mProjMatrix, 0, mViewMatrix, 0);
Draw:
line.draw( dx, dy, ux, uy );
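Put together, a minimal onDrawFrame might look like this (a sketch only; mViewMatrix, mProjMatrix and mMVPMatrix are assumed to be float[16] fields of the renderer, and the question's Lines shader would also need an MVP uniform to actually use the matrix):
@Override
public void onDrawFrame(GL10 gl) {
    // Always start the frame by clearing the colour buffer.
    GLES20.glClear(GLES20.GL_COLOR_BUFFER_BIT);
    // Set the camera position (view matrix).
    Matrix.setLookAtM(mViewMatrix, 0, 0f, 0f, 3f, 0f, 0f, 0f, 0f, 1.0f, 0.0f);
    // Calculate the combined projection and view transformation.
    Matrix.multiplyMM(mMVPMatrix, 0, mProjMatrix, 0, mViewMatrix, 0);
    // Draw the geometry (every stored line, if a list is kept).
    line.draw(dx, dy, ux, uy);
}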

Cross-posted from my answer to a similar question, Why my opengl output differs for various devices?:
Should we take the GPU into account while coding? No way: the OpenGL API is a layer between your application and the hardware.
This is largely correct for desktop graphics, as all GPUs are immediate renderers; however, this is NOT the case in mobile graphics.
The Mali GPUs use tile-based immediate-mode rendering. For this type of rendering, the framebuffer is divided into tiles of size 16 by 16 pixels. The Polygon List Builder (PLB) organizes input data from the application into polygon lists. There is a polygon list for each tile. When a primitive covers part of a tile, an entry, called a polygon list command, is added to the polygon list for the tile. The pixel processor takes the polygon list for one tile and computes values for all pixels in that tile before starting work on the next tile. Because this tile-based approach uses a fast, on-chip tile buffer, the GPU only writes the tile buffer contents to the framebuffer in main memory at the end of each tile. Non-tiled-based, immediate-mode renderers generally require many more framebuffer accesses. The tile-based method therefore consumes less memory bandwidth, and supports operations such as depth testing, blending and anti-aliasing efficiently.
Another difference is the treatment of rendered buffers. Immediate renderers will "save" the content of your buffer, effectively allowing you to only draw differences in the rendered scene on top of what previously existed. This IS available in Mali, however, is not enabled by default as it can cause undesired effects if used incorrectly.
There is a Mali GLES2 SDK example on how to use "EGL Preserve" correctly, available in the GLES2 SDK here.
The reason the GeForce ULP based Nexus 7 works as intended is that, as an immediate-mode renderer, it defaults to preserving the buffers, whereas Mali does not.
From the Khronos EGL specification:
EGL_SWAP_BEHAVIOR
Specifies the effect on the color buffer of posting a surface with eglSwapBuffers. A value of EGL_BUFFER_PRESERVED indicates that color buffer contents are unaffected, while EGL_BUFFER_DESTROYED indicates that color buffer contents may be destroyed or changed by the operation.
The initial value of EGL_SWAP_BEHAVIOR is chosen by the implementation.
The default value of EGL_SWAP_BEHAVIOR on the Mali platform is EGL_BUFFER_DESTROYED. This is due to the performance hit associated with having to fetch the previous buffer from memory before rendering the new frame and storing it at the end, as well as the consumption of bandwidth (which is also incredibly bad for battery life on mobile devices). I am unable to comment with certainty on the default behavior of the Tegra SoCs; however, it is apparent to me that their default is EGL_BUFFER_PRESERVED.
To clarify Mali's position with regards to the Khronos GLES specifications - Mali is fully compliant.
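As an aside (my addition, not part of the SDK example): if an application really does depend on the previous frame surviving the swap, it can ask EGL for that explicitly on API 17+. Whether the request is honoured depends on the surface having been created with EGL_SWAP_BEHAVIOR_PRESERVED_BIT, and preserving the buffer costs the extra bandwidth described above, so clearing and redrawing the whole scene remains the portable fix:
// Sketch: run on the GL thread once the surface exists (e.g. in onSurfaceCreated).
// Needs android.opengl.EGL14 (API 17+).
android.opengl.EGLDisplay display = EGL14.eglGetCurrentDisplay();
android.opengl.EGLSurface surface = EGL14.eglGetCurrentSurface(EGL14.EGL_DRAW);
boolean ok = EGL14.eglSurfaceAttrib(display, surface,
        EGL14.EGL_SWAP_BEHAVIOR, EGL14.EGL_BUFFER_PRESERVED);
Log.i("JO", "EGL_BUFFER_PRESERVED requested, success=" + ok);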

Related

Vuforia 6.0.117 0x501 error when rendering texture

I'm trying to get Vuforia 6.0.117 working in my Android app. I'm using this specific version since it's the last version supporting FrameMarkers. The detection of FrameMarkers is working fine, but when I try to render a texture over the FrameMarker on my phone I get an error stating:
After operation FrameMarkers render frame got glError 0x501
My renderFrame method:
// Clear color and depth buffer
GLES20.glClear(GLES20.GL_COLOR_BUFFER_BIT | GLES20.GL_DEPTH_BUFFER_BIT);
// Get the state from Vuforia and mark the beginning of a rendering
// section
State state = Renderer.getInstance().begin();
// Explicitly render the Video Background
Renderer.getInstance().drawVideoBackground();
GLES20.glEnable(GLES20.GL_DEPTH_TEST);
GLES20.glEnable(GLES20.GL_BLEND);
GLES20.glBlendEquation(GLES20.GL_FUNC_ADD);
// GLES20.glBlendFunc(GLES20.GL_SRC_ALPHA, GLES20.GL_ONE_MINUS_SRC_ALPHA);
GLES20.glBlendFunc(GLES20.GL_ONE, GLES20.GL_ONE_MINUS_SRC_ALPHA);
// We must detect if background reflection is active and adjust the
// culling direction.
// If the reflection is active, this means the post matrix has been
// reflected as well,
// therefore standard counter clockwise face culling will result in
// "inside out" models.
GLES20.glEnable(GLES20.GL_CULL_FACE);
GLES20.glCullFace(GLES20.GL_BACK);
if (Renderer.getInstance().getVideoBackgroundConfig().getReflection() == VIDEO_BACKGROUND_REFLECTION.VIDEO_BACKGROUND_REFLECTION_ON) {
GLES20.glFrontFace(GLES20.GL_CW); // Front camera
} else {
GLES20.glFrontFace(GLES20.GL_CCW); // Back camera
}
// Did we find any trackables this frame?
if (mActivity.isHelpVisible() || state.getNumTrackableResults() == 0) {
// no marker scanned
mActivity.hideInfoButton();
} else {
// Get the trackable:
TrackableResult trackableResult = state.getTrackableResult(0);
float[] modelViewMatrix = Tool.convertPose2GLMatrix(trackableResult.getPose()).getData();
// Check the type of the trackable:
MarkerResult markerResult = (MarkerResult) trackableResult;
Marker marker = (Marker) markerResult.getTrackable();
if (markerId != marker.getMarkerId()) {
markerId = marker.getMarkerId();
tag = DataManager.getInstance().getTagByMarkerId(markerId);
if (tag != null) {
texture = Texture.loadTexture(tag.getTexture());
setupTexture(texture);
tag.addToDB();
}
}
if (tag != null) {
String poiReference = tag.getPoiReference();
if (!poiReference.isEmpty()) {
mActivity.showInfoButton(poiReference);
}
// Select which model to draw:
Buffer vertices = planeObject.getVertices();
Buffer normals = planeObject.getNormals();
Buffer indices = planeObject.getIndices();
Buffer texCoords = planeObject.getTexCoords();
int numIndices = planeObject.getNumObjectIndex();
float[] modelViewProjection = new float[16];
float scale = (float) tag.getScale();
Matrix.scaleM(modelViewMatrix, 0, scale, scale, scale);
Matrix.multiplyMM(modelViewProjection, 0, vuforiaAppSession.getProjectionMatrix().getData(), 0, modelViewMatrix, 0);
GLES20.glUseProgram(shaderProgramID);
GLES20.glVertexAttribPointer(vertexHandle, 3, GLES20.GL_FLOAT, false, 0, vertices);
GLES20.glVertexAttribPointer(normalHandle, 3, GLES20.GL_FLOAT, false, 0, normals);
GLES20.glVertexAttribPointer(textureCoordHandle, 2, GLES20.GL_FLOAT, false, 0, texCoords);
GLES20.glEnableVertexAttribArray(vertexHandle);
GLES20.glEnableVertexAttribArray(normalHandle);
GLES20.glEnableVertexAttribArray(textureCoordHandle);
GLES20.glActiveTexture(GLES20.GL_TEXTURE0);
GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, texture.mTextureID[0]);
GLES20.glUniformMatrix4fv(mvpMatrixHandle, 1, false, modelViewProjection, 0);
GLES20.glUniform1i(texSampler2DHandle, 0);
GLES20.glDrawElements(GLES20.GL_TRIANGLES, numIndices, GLES20.GL_UNSIGNED_SHORT, indices);
GLES20.glDisableVertexAttribArray(vertexHandle);
GLES20.glDisableVertexAttribArray(normalHandle);
GLES20.glDisableVertexAttribArray(textureCoordHandle);
SampleUtils.checkGLError("FrameMarkers render frame");
}
}
GLES20.glDisable(GLES20.GL_DEPTH_TEST);
Renderer.getInstance().end();
}
I'm loading a texture of size 640x482, which is loaded as follows:
public class Texture {
public int mWidth; // The width of the texture.
public int mHeight; // The height of the texture.
public int mChannels; // The number of channels.
public ByteBuffer mData; // The pixel data.
public int[] mTextureID = new int[1];
public boolean mSuccess = false;
public static Texture loadTexture(String fileName) {
try {
InputStream inputStream = new FileInputStream(fileName);
BufferedInputStream bufferedStream = new BufferedInputStream(inputStream);
Bitmap bitMap = BitmapFactory.decodeStream(bufferedStream);
bufferedStream.close();
inputStream.close();
int[] data = new int[bitMap.getWidth() * bitMap.getHeight()];
bitMap.getPixels(data, 0, bitMap.getWidth(), 0, 0, bitMap.getWidth(), bitMap.getHeight());
return loadTextureFromIntBuffer(data, bitMap.getWidth(), bitMap.getHeight());
} catch (IOException e) {
Log.e(Constants.DEBUG, "Failed to load texture '" + fileName + "' from APK");
Log.i(Constants.DEBUG, e.getMessage());
return null;
}
}
public static Texture loadTextureFromIntBuffer(int[] data, int width, int height) {
// Convert:
int numPixels = width * height;
byte[] dataBytes = new byte[numPixels * 4];
for (int p = 0; p < numPixels; ++p) {
int colour = data[p];
dataBytes[p * 4] = (byte) (colour >>> 16); // R
dataBytes[p * 4 + 1] = (byte) (colour >>> 8); // G
dataBytes[p * 4 + 2] = (byte) colour; // B
dataBytes[p * 4 + 3] = (byte) (colour >>> 24); // A
}
Texture texture = new Texture();
texture.mWidth = width;
texture.mHeight = height;
texture.mChannels = 4;
texture.mData = ByteBuffer.allocateDirect(dataBytes.length).order(ByteOrder.nativeOrder());
int rowSize = texture.mWidth * texture.mChannels;
for (int r = 0; r < texture.mHeight; r++) {
texture.mData.put(dataBytes, rowSize * (texture.mHeight - 1 - r), rowSize);
}
texture.mData.rewind();
texture.mSuccess = true;
return texture;
}
}
Does anybody have an idea why I'm getting this error and how to fix it?
I cannot go over your entire code right now, and even if I could, I'm not sure it would help. You first need to narrow down the problem, so I will give you a method to do that, which I hope will serve you in other cases as well.
You managed to find out that there was an error, but you are checking for it only at the end of the rendering function. What you need to do is place the checkGLError call in several places inside the rendering code (each printing a different text message), until you can pinpoint the exact line after which the error first appears. Then, if you cannot understand the problem, comment here with the problematic line and I will try to help.
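For example (a sketch reusing the SampleUtils.checkGLError helper that is already in your code), interleave checks like this until the first one that fires identifies the call raising 0x501 (GL_INVALID_VALUE):
GLES20.glUseProgram(shaderProgramID);
SampleUtils.checkGLError("FrameMarkers after glUseProgram");
GLES20.glVertexAttribPointer(vertexHandle, 3, GLES20.GL_FLOAT, false, 0, vertices);
SampleUtils.checkGLError("FrameMarkers after vertex attribs");
GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, texture.mTextureID[0]);
SampleUtils.checkGLError("FrameMarkers after glBindTexture");
GLES20.glDrawElements(GLES20.GL_TRIANGLES, numIndices, GLES20.GL_UNSIGNED_SHORT, indices);
SampleUtils.checkGLError("FrameMarkers after glDrawElements");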
UPDATE:
After looking at the shader code, and following your report that normalHandle is -1, I came to the following conclusions:
The error, which indicates that the variable vertexNormal cannot be found in the shader, is probably because this variable is optimized out during shader compilation, since it is not really required.
Explanation: in the vertex shader (CUBE_MESH_VERTEX_SHADER), vertexNormal is assigned to a varying called normal (variable that is passed to the fragment shader). In the fragment shader, this varying is declared but not used.
Therefore, you can actually delete the variables vertexNormal and normal from the shader, and you can delete all usages of 'normalHandle' in your code.
This should eliminate the error.

Cocos2d-X: CCDrawNode draw circle/custom shape

I am using CCDrawNode to create a mask-type effect (not exactly a mask). Everything works well, but there is one problem: CCDrawNode only draws squares, and I want to draw with a custom texture/sprite. Is there any solution to this?
Below is my code using CCDrawNode:
// on "init" you need to initialize your instance
bool HelloWorld::init()
{
//////////////////////////////
// 1. super init first
if ( !CCLayer::init() )
{
return false;
}
CCSize visibleSize = CCDirector::sharedDirector()->getVisibleSize();
CCPoint origin = CCDirector::sharedDirector()->getVisibleOrigin();
CCLayer *layer = CCLayer::create();
CCSprite* pSprite = CCSprite::create("HelloWorld.png");
pSprite->setPosition(ccp(visibleSize.width/2 + origin.x, visibleSize.height/2 + origin.y));
layer->addChild(pSprite, 0);
addChild(layer);
//this is the layer that we want to "cut"
CCLayer* layer1 = CCLayerColor::create(ccc4(122, 144, 0, 255), visibleSize.width, visibleSize.height);
this->setTouchEnabled(true);
//we need to create a CCNode which will be a stencil for the CCClippingNode; a draw node is a good choice for that
stencil = CCDrawNode::create();
//CCClippingNode shows the intersection of the stencil and its children
CCClippingNode *cliper = CCClippingNode::create(stencil);
cliper->setInverted(true);
cliper->addChild(layer1);
addChild(cliper);
return true;
}
void HelloWorld::ccTouchesMoved(CCSet* touches, CCEvent* event)
{
CCTouch* touch = (CCTouch*)touches->anyObject();
// get start & end location
CCPoint start = touch->getLocationInView();
CCPoint end = touch->getPreviousLocationInView();
// get corrected location
start = CCDirector::sharedDirector()->convertToGL(start);
end = CCDirector::sharedDirector()->convertToGL(end);
//stencil->drawDot(start, 25, ccc4f(0, 0, 0, 255));
stencil->drawSegment(start, end, 25, ccc4f(0, 0, 0, 255));
}
If you want to draw a custom texture you should use CCRenderTexture. In order to draw something, do something like this:
myRenderTexture->begin();
mySpriteLoadedWithTexture->visit();
myRenderTexture->end();
Also, if you want the drawn lines to be smooth, you should draw the brush in a loop so that the stamps are placed at equal distances:
float distance = ccpDistance(start, end);
for (int i = 0; i < distance; i++)
{
float difx = end.x - start.x;
float dify = end.y - start.y;
float delta = (float)i / distance;
m_brush->setPosition(CCPoint(start.x + (difx * delta), start.y + (dify * delta)));
m_brush->visit();
}
Hope it helps

Using Point lights in Unity3D Android

I am trying to use a point light animation for my game. It runs fine in the Editor with the Diffuse, Bumped Specular and VertexLit shaders. However, it doesn't work with any of the Mobile shaders provided by default.
Is there a way to use point lights on Android? Or is there any shader that works on mobile and supports point lights?
Finally found the answer; this post on Unity Answers helped me. I am reposting the custom shader here:
// Specular, Normal Maps with Main Texture
// Fragment based
Shader "SpecTest/SpecTest5"
{
Properties
{
_Shininess ("Shininess", Range (0, 1.5)) = 0.078125
_Color ("Main Color", Color) = (1,1,1,1)
_SpecColor ("Specular Color", Color) = (0, 0, 0, 0)
_MainTex ("Texture", 2D) = "white" {}
_BumpMap ("Bump Map", 2D) = "bump" {}
_NormalStrength ("Normal Strength", Range (0, 1.5)) = 1
} // eo Properties
SubShader
{
// pass for 4 vertex lights, ambient light & first pixel light
Tags { "RenderType"="Opaque" }
LOD 200
CGPROGRAM
#pragma surface surf MobileBlinnPhong
fixed4 LightingMobileBlinnPhong (SurfaceOutput s, fixed3 lightDir, fixed3 halfDir, fixed atten)
{
fixed diff = saturate(dot (s.Normal, lightDir));
fixed nh = saturate(dot (s.Normal, halfDir)); //Instead of injecting the normalized light+view, we just inject view, which is provided as halfasview in the initial surface shader CG parameters
fixed spec = pow (nh, s.Specular*128) * s.Gloss;
fixed4 c;
c.rgb = (s.Albedo * _LightColor0.rgb * diff + _SpecColor.rgb * spec) * (atten*2);
c.a = 0.0;
return c;
}
struct Input {
float2 uv_MainTex;
float2 uv_BumpMap;
};
// User-specified properties
uniform sampler2D _MainTex;
uniform sampler2D _BumpMap;
uniform float _Shininess;
uniform float _NormalStrength;
uniform fixed4 _Color;
float3 expand(float3 v) { return (v - 0.5) * 2; } // eo expand
void surf (Input IN, inout SurfaceOutput o) {
half4 tex = tex2D (_MainTex, IN.uv_MainTex) * _Color;
o.Albedo = tex.rgb;
o.Gloss = tex.a;
o.Alpha = tex.a;
o.Specular = _Shininess;
// fetch and expand range-compressed normal
float3 normalTex = UnpackNormal (tex2D (_BumpMap, IN.uv_BumpMap));
float3 normal = normalTex * _NormalStrength;
o.Normal = normal;
} // eo surf
ENDCG
}
//Fallback "Specular"
} // eo Shader
Remember to increase the strength, though. And obviously it's costly on the frame rate; I needed it for just an animation, so I used it.

How can I to render a 2D graph/figure using OpenGL on Android?

I'm making a simple fractal viewing app for Android, just for fun. I'm also using it as an opportunity to learn OpenGL, since I've never worked with it before. Using the Android port of the NeHe tutorials as a starting point, my approach is to have one class (FractalModel) which does all the math to create the fractal, and FractalView which does all the rendering.
The difficulty I'm having is in getting the rendering to work. Since I'm essentially plotting a graph of points of different colors where each point should correspond to 1 pixel, I thought I'd handle this by rendering 1x1 rectangles over the entire screen, using the dimensions to calculate the offsets so that there's a 1:1 correspondence between the rectangles and the physical pixels. Since the color of each pixel will be calculated independently, I can re-use the same rendering code to render different parts of the fractal (I want to add panning and zooming later on).
Here is the view class I wrote:
public class FractalView extends GLSurfaceView implements Renderer {
private float[] mVertices;
private FloatBuffer[][] mVBuffer;
private ByteBuffer[][] mBuffer;
private int mScreenWidth;
private int mScreenHeight;
private float mXOffset;
private float mYOffset;
private int mNumPixels;
//references to current vertex coordinates
private float xTL;
private float yTL;
private float xBL;
private float yBL;
private float xBR;
private float yBR;
private float xTR;
private float yTR;
public FractalView(Context context, int w, int h){
super(context);
setEGLContextClientVersion(1);
mScreenWidth = w;
mScreenHeight = h;
mNumPixels = mScreenWidth * mScreenHeight;
mXOffset = (float)1.0/mScreenWidth;
mYOffset = (float)1.0/mScreenHeight;
mVertices = new float[12];
mVBuffer = new FloatBuffer[mScreenHeight][mScreenWidth];
mBuffer = new ByteBuffer[mScreenHeight][mScreenWidth];
}
public void onDrawFrame(GL10 gl){
int i,j;
gl.glClear(GL10.GL_COLOR_BUFFER_BIT | GL10.GL_DEPTH_BUFFER_BIT);
gl.glLoadIdentity();
mapVertices();
gl.glColor4f(0.0f,1.0f, 0.0f,.5f);
for(i = 0; i < mScreenHeight; i++){
for(j = 0; j < mScreenWidth; j++){
gl.glFrontFace(GL10.GL_CW);
gl.glVertexPointer(3, GL10.GL_FLOAT, 0, mVBuffer[i][j]);
gl.glEnableClientState(GL10.GL_VERTEX_ARRAY);
gl.glDrawArrays(GL10.GL_TRIANGLE_STRIP, 0, mVertices.length / 3);
gl.glDisableClientState(GL10.GL_VERTEX_ARRAY);
}
}
}
public void onSurfaceChanged(GL10 gl, int w, int h){
if(h == 0) { //Prevent A Divide By Zero By
h = 1; //Making Height Equal One
}
gl.glViewport(0, 0, w, h); //Reset The Current Viewport
gl.glMatrixMode(GL10.GL_PROJECTION); //Select The Projection Matrix
gl.glLoadIdentity(); //Reset The Projection Matrix
//Calculate The Aspect Ratio Of The Window
GLU.gluPerspective(gl, 45.0f, (float)w / (float)h, 0.1f, 100.0f);
gl.glMatrixMode(GL10.GL_MODELVIEW); //Select The Modelview Matrix
gl.glLoadIdentity();
}
public void onSurfaceCreated(GL10 gl, EGLConfig config){
gl.glShadeModel(GL10.GL_SMOOTH); //Enable Smooth Shading
gl.glClearColor(0.0f, 0.0f, 0.0f, 0.5f); //Black Background
gl.glClearDepthf(1.0f); //Depth Buffer Setup
gl.glEnable(GL10.GL_DEPTH_TEST); //Enables Depth Testing
gl.glDepthFunc(GL10.GL_LEQUAL); //The Type Of Depth Testing To Do
//Really Nice Perspective Calculations
gl.glHint(GL10.GL_PERSPECTIVE_CORRECTION_HINT, GL10.GL_NICEST);
}
private void mapVertices(){
int i,j;
xTL = -1;
yTL = 1;
xTR = -1 + mXOffset;
yTR = 1;
xBL = -1;
yBL = 1 - mYOffset;
xBR = -1 + mXOffset;
yBR = 1 - mYOffset;
for(i = 0; i < mScreenHeight; i++){
for (j = 0; j < mScreenWidth; j++){
//assign coords to vertex array
mVertices[0] = xBL;
mVertices[1] = yBL;
mVertices[2] = 0f;
mVertices[3] = xBR;
mVertices[4] = xBR;
mVertices[5] = 0f;
mVertices[6] = xTL;
mVertices[7] = yTL;
mVertices[8] = 0f;
mVertices[9] = xTR;
mVertices[10] = yTR;
mVertices[11] = 0f;
//add doubleBuffer
mBuffer[i][j] = ByteBuffer.allocateDirect(mVertices.length * 4);
mBuffer[i][j].order(ByteOrder.nativeOrder());
mVBuffer[i][j] = mBuffer[i][j].asFloatBuffer();
mVBuffer[i][j].put(mVertices);
mVBuffer[i][j].position(0);
//transform right
transformRight();
}
//transform down
transformDown();
//reset x
xTL = -1;
xTR = -1 + mXOffset;
xBL = -1;
xBR = -1 + mXOffset;
}
}
//transform all the coordinates 1 "pixel" to the right
private void transformRight(){
xTL = xTL + mXOffset; //TL
xBL = xBL + mXOffset; //BL
xBR = xBR + mXOffset; //BR
xTR = xTR + mXOffset; //TR;
}
//transform all of the coordinates 1 pixel down;
private void transformDown(){
yTL = yTL - mYOffset;
yBL = yBL - mYOffset;
yBR = yBR - mYOffset;
yTR = yTR - mYOffset;
}
}
Basically I'm trying to do it the same way as this (the square in lesson 2) but with far more objects. I'm assuming 1 and -1 roughly correspond to screen edges, (I know this isn't totally true, but I don't really understand how to use projection matrices and want to keep this as simple as possible unless there's a good resource out there I can learn from) but I understand that OpenGL's coordinates are separate from real screen coordinates. When I run my code I just get a black screen (it should be green) but LogCat shows the garbage collector working away so I know something is happening. I'm not sure if it's just a bug caused by my just not doing something right, or if it's just REALLY slow. In either case, what should I do differently? I feel like I may be going about this all wrong. I've looked around and most of the tutorials and examples are based on the link above.
Edit: I know I could go about this by generating a texture that fills up the entire screen and just drawing that, though the link I read which mentioned it said it would be slower since you're not supposed to redraw a texture every frame. That said, I only really need to redraw the texture when the perspective changes, so I could write my code to take this into account. The main difficulty I'm having currently is drawing the bitmap, and getting it to display correctly.
I would imagine that the blank screen is due to the fact that you are swapping buffers so many times, and also the fact that you are generating all your vertex buffers every frame. Thousands of buffer swaps AND thousands of buffer creations in a single frame would be INCREDIBLY slow.
One thing to mention is that Android devices have limited memory, so the garbage collector working away is probably an indication that your buffer creation code is eating up a lot of the available memory and the device is trying to free up some for the creation of new buffers.
I would suggest creating a texture that you fill with your pixel data each frame and then render to a single square that fills the screen. This will increase your speed by a huge amount, and also make your program more flexible.
Edit:
Look at the tutorial here: http://www.nullterminator.net/gltexture.html to get an idea of how to create and load textures. You will basically need to fill BYTE* data with your own data.
If you are changing the data dynamically, you will need to update the texture data. Use the information here: http://www.opengl.org/wiki/Texture, in the section about texture image modification.
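In the GL10 setting of your code, the texture approach could look roughly like this (a sketch only; mTextureId, texWidth, texHeight and mPixelBuffer are assumed names, with mPixelBuffer holding the fractal's RGBA pixels):
// One-time setup in onSurfaceCreated: create the texture object.
int[] ids = new int[1];
gl.glGenTextures(1, ids, 0);
mTextureId = ids[0];
gl.glBindTexture(GL10.GL_TEXTURE_2D, mTextureId);
gl.glTexParameterf(GL10.GL_TEXTURE_2D, GL10.GL_TEXTURE_MIN_FILTER, GL10.GL_NEAREST);
gl.glTexParameterf(GL10.GL_TEXTURE_2D, GL10.GL_TEXTURE_MAG_FILTER, GL10.GL_NEAREST);

// Whenever the fractal changes (pan/zoom), upload the pixel data; power-of-two
// dimensions are the safe choice on GLES 1.x hardware.
gl.glBindTexture(GL10.GL_TEXTURE_2D, mTextureId);
gl.glTexImage2D(GL10.GL_TEXTURE_2D, 0, GL10.GL_RGBA, texWidth, texHeight, 0,
        GL10.GL_RGBA, GL10.GL_UNSIGNED_BYTE, mPixelBuffer);
// (After the first upload, glTexSubImage2D can replace just the changed region.)

// In onDrawFrame, draw one textured quad covering the screen instead of one
// tiny quad per pixel.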

Android Opengl ES tiling engine, smooth scrolling

Following this: Best approach for oldschool 2D zelda-like game
I got a simple 2D tile generator working. I'm reading an int map[100][100] filled with either 1's or 0's and drawing tiles according to their tile ID: 0 is water, 1 is grass.
I'm using a basic numpad control handler; using a camIncr (32.0f), I set the camera position according to the movement:
case KeyEvent.KEYCODE_DPAD_RIGHT:
cameraPosX = (float)(cameraPosX + camIncr);
break;
In my draw loop, I'm just drawing enough tiles to fit on my screen, and I track the top-left tile using cameraOffsetX and cameraOffsetY (it's the camera position / tile size).
I'm using GLU.gluOrtho2D for my projection.
Here is the draw loop inside my custom renderer:
gl.glClear(GL10.GL_COLOR_BUFFER_BIT | GL10.GL_DEPTH_BUFFER_BIT);
gl.glMatrixMode( GL10.GL_PROJECTION );
gl.glLoadIdentity( );
GLU.gluOrtho2D(gl, 0, scrWidth, scrHeight, 0);
repere.draw(gl, 100.0f); // this is just a helper, draw 2 lines at the origin
//Call the drawing methods
gl.glMatrixMode(GL10.GL_MODELVIEW);
gl.glLoadIdentity();
tiledBackground.draw(gl, filtering);
My tiledBackground draw function:
int cols = (569 / 32) + 2; // how many columns can fit on the screen
int rows = (320 / 32) + 1; // how many rows can fit on the screen
int cameraPosX = (int) Open2DRenderer.getCameraPosX();
int cameraPosY = (int) Open2DRenderer.getCameraPosY();
tileOffsetX = (int) (cameraPosX / 32);
tileOffsetY = (int) (cameraPosY / -32);
gl.glPushMatrix();
for (int y = 0; y < rows; y++) {
for (int x = 0; x < cols; x++) {
try {
tile = map[y + tileOffsetY][x + tileOffsetX];
} catch (Exception e) {
e.printStackTrace(); //when out of array
tile = 0;
}
gl.glPushMatrix();
if (tile==0){
waterTile.draw(gl, filter);
}
if (tile==4) {
grassTile.draw(gl, filter);
}
gl.glTranslatef(32.0f, 0.0f, 0.0f);
}//
gl.glPopMatrix();
gl.glTranslatef(0.0f, 32.0f, 0.0f);
}
gl.glPopMatrix();
}
The waterTile and grassTile draw functions draw a 32x32 textured tile; I can post the code if relevant.
Everything is fine: I can move using the numpad arrows and my map 'moves' with me. Since I'm only drawing what I can see, it's fast (see android OpenGL ES simple Tile generator performance problem, where Aleks pointed me to a simple 'culling' idea).
I would like my engine to 'smooth scroll' now. I've tried tweaking the camIncr variable, the GLU.gluOrtho2D call, etc., but nothing worked.
Any ideas? :)
I finally found it out.
I added a glTranslatef call right before entering the loop:
gl.glPushMatrix();
gl.glTranslatef(-cameraPosX%32, -cameraPosY%32, 0);
for (int y = 0; y < rows; y++) {
...
First, I was unsuccessfully trying to translate the scene using a brute cameraPosX / TILE_HEIGHT division; it didn't work.
We have to translate by the offset by which the tile extends beyond the screen, not by the total cameraPosX offset, so we use the modulo (%) operator instead of division.
Sorry for my bad English ^^
