Null Pointer while generating sphere - android

I'm trying to create a sphere in OpenGL ES 2.0, but I'm getting a NullPointerException. I've been stuck on this for hours. Looking at the logcat, it finds a couple of vertices, but the index buffer always hits a null pointer straight after.
private void generateSphereCoords(float radius, int stacks, int slices) {
    for (int stackNumber = 0; stackNumber <= stacks; stackNumber++) {
        for (int sliceNumber = 0; sliceNumber <= slices; sliceNumber++) {
            float theta = (float) (stackNumber * Math.PI / stacks);
            float phi = (float) (sliceNumber * 2 * Math.PI / slices);
            Log.i("theta", String.valueOf(theta));
            Log.i("phi", String.valueOf(phi));
            float sinTheta = FloatMath.sin(theta);
            float sinPhi = FloatMath.sin(phi);
            float cosTheta = FloatMath.cos(theta);
            float cosPhi = FloatMath.cos(phi);
            vertices = new float[]{radius * cosPhi * cosTheta, radius * sinPhi * cosTheta, radius * sinTheta};
            Log.i("vertexX", String.valueOf(vertices[0]));
            Log.i("vertexY", String.valueOf(vertices[1]));
            Log.i("vertexZ", String.valueOf(vertices[2]));
            // a float is 4 bytes, therefore I multiply the number of vertices by 4.
            ByteBuffer vbb = ByteBuffer.allocateDirect(vertices.length * 4); // (number of coordinate values * 4 bytes per float)
            vbb.order(ByteOrder.nativeOrder()); // use the device hardware's native byte order
            vertexBuffer = vbb.asFloatBuffer(); // create a floating point buffer from the ByteBuffer
            vertexBuffer.put(vertices);
            vertexBuffer.position(0); // set the buffer to read the first coordinate
        }
    }
    for (int stackNumber = 0; stackNumber <= stacks; stackNumber++) {
        for (int sliceNumber = 0; sliceNumber <= slices; sliceNumber++) {
            ByteBuffer ibb = ByteBuffer.allocateDirect(vertexCount * 2); // (number of coordinate values * 4 bytes per float)
            ibb.order(ByteOrder.nativeOrder()); // use the device hardware's native byte order
            indexBuffer = ibb.asShortBuffer(); // create a floating point buffer from the ByteBuffer
            indexBuffer.put((short) ((stackNumber * slices) + (sliceNumber % slices)));
            indexBuffer.put((short) (((stackNumber + 1) * slices) + (sliceNumber % slices)));
            indexBuffer.position(0); // set the buffer to read the first coordinate
        }
    }
}

Create your vertex buffer and index buffer outside of your loops. As written, you are recreating the vertex buffer for every vertex and the index buffer for every pair of indices.
private static final int VERTICES_PER_COORD = 3;

private void generateSphereCoords(float radius, int stacks, int slices) {
    // Create the vertex buffer here, once:
    ByteBuffer vbb = ByteBuffer.allocateDirect((stacks + 1) * (slices + 1) * VERTICES_PER_COORD * 4); // (number of coordinate values * 4 bytes per float)
    vbb.order(ByteOrder.nativeOrder()); // use the device hardware's native byte order
    vertexBuffer = vbb.asFloatBuffer(); // create a floating point buffer from the ByteBuffer
    for (int stackNumber = 0; stackNumber <= stacks; stackNumber++) {
        for (int sliceNumber = 0; sliceNumber <= slices; sliceNumber++) {
            float theta = (float) (stackNumber * Math.PI / stacks);
            float phi = (float) (sliceNumber * 2 * Math.PI / slices);
            Log.i("theta", String.valueOf(theta));
            Log.i("phi", String.valueOf(phi));
            float sinTheta = FloatMath.sin(theta);
            float sinPhi = FloatMath.sin(phi);
            float cosTheta = FloatMath.cos(theta);
            float cosPhi = FloatMath.cos(phi);
            vertices = new float[]{radius * cosPhi * cosTheta, radius * sinPhi * cosTheta, radius * sinTheta};
            Log.i("vertexX", String.valueOf(vertices[0]));
            Log.i("vertexY", String.valueOf(vertices[1]));
            Log.i("vertexZ", String.valueOf(vertices[2]));
            // add this vertex to the buffer:
            vertexBuffer.put(vertices);
        }
    }
    vertexBuffer.position(0); // set the buffer to read the first coordinate
    // Create the index buffer here, once:
    ByteBuffer ibb = ByteBuffer.allocateDirect((stacks + 1) * (slices + 1) * 2 * 2); // (number of index values * 2 bytes per short)
    ibb.order(ByteOrder.nativeOrder()); // use the device hardware's native byte order
    indexBuffer = ibb.asShortBuffer(); // create a short buffer from the ByteBuffer
    for (int stackNumber = 0; stackNumber <= stacks; stackNumber++) {
        for (int sliceNumber = 0; sliceNumber <= slices; sliceNumber++) {
            indexBuffer.put((short) ((stackNumber * slices) + (sliceNumber % slices)));
            indexBuffer.put((short) (((stackNumber + 1) * slices) + (sliceNumber % slices)));
        }
    }
    indexBuffer.position(0); // set the buffer to read the first index
}
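For reference, once the two buffers are filled they would typically be fed to GLES20 in the renderer's draw method. A minimal sketch, assuming aPositionHandle is the attribute location returned by glGetAttribLocation and indexCount is the number of shorts you put into indexBuffer (both are assumptions, not shown in the code above):
// Minimal draw sketch; aPositionHandle and indexCount are assumed (see above).
vertexBuffer.position(0);
GLES20.glEnableVertexAttribArray(aPositionHandle);
GLES20.glVertexAttribPointer(aPositionHandle, 3, GLES20.GL_FLOAT, false, 0, vertexBuffer);
indexBuffer.position(0);
GLES20.glDrawElements(GLES20.GL_TRIANGLE_STRIP, indexCount, GLES20.GL_UNSIGNED_SHORT, indexBuffer);
GLES20.glDisableVertexAttribArray(aPositionHandle);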

Related

Android OpenGL ES textured half sphere

I have to develop an equirectangular image viewer, like the one in the Ricoh Theta app.
I'm doing it on Android, with OpenGL ES (1.0, but I can change to 2.0 if needed).
For now, I have managed to create the half sphere (based on this answer), with this code:
public class HalfSphere {

    // ---------------------------------------------------------------------------------------------
    // region Attributes

    private final int[] mTextures = new int[1];

    float[][] mVertices;
    int mNbStrips;
    int mNbVerticesPerStrips;

    private final List<FloatBuffer> mVerticesBuffer = new ArrayList<>();
    private final List<ByteBuffer> mIndicesBuffer = new ArrayList<>();
    private final List<FloatBuffer> mTextureBuffer = new ArrayList<>();

    // endregion
    // ---------------------------------------------------------------------------------------------

    // ---------------------------------------------------------------------------------------------
    // region Constructor

    public HalfSphere(int nbStrips, int nbVerticesPerStrips, float radius) {
        // Generate the vertices:
        mNbStrips = nbStrips;
        mNbVerticesPerStrips = nbVerticesPerStrips;
        mVertices = new float[mNbStrips * mNbVerticesPerStrips][3];
        for (int i = 0; i < mNbStrips; i++) {
            for (int j = 0; j < mNbVerticesPerStrips; j++) {
                mVertices[i * mNbVerticesPerStrips + j][0] = (float) (radius * Math.cos(j * 2 * Math.PI / mNbVerticesPerStrips) * Math.cos(i * Math.PI / mNbStrips));
                mVertices[i * mNbVerticesPerStrips + j][1] = (float) (radius * Math.sin(i * Math.PI / mNbStrips));
                mVertices[i * mNbVerticesPerStrips + j][2] = (float) (radius * Math.sin(j * 2 * Math.PI / mNbVerticesPerStrips) * Math.cos(i * Math.PI / mNbStrips));
            }
        }

        // Populate the buffers:
        for (int i = 0; i < mNbStrips - 1; i++) {
            for (int j = 0; j < mNbVerticesPerStrips; j++) {
                byte[] indices = {
                        0, 1, 2, // first triangle (bottom left - top left - top right)
                        0, 2, 3  // second triangle (bottom left - top right - bottom right)
                };
                float[] p1 = mVertices[i * mNbVerticesPerStrips + j];
                float[] p2 = mVertices[i * mNbVerticesPerStrips + (j + 1) % mNbVerticesPerStrips];
                float[] p3 = mVertices[(i + 1) * mNbVerticesPerStrips + (j + 1) % mNbVerticesPerStrips];
                float[] p4 = mVertices[(i + 1) * mNbVerticesPerStrips + j];
                float[] quad = {
                        p1[0], p1[1], p1[2],
                        p2[0], p2[1], p2[2],
                        p3[0], p3[1], p3[2],
                        p4[0], p4[1], p4[2]
                };
                mVerticesBuffer.add(floatArrayToFloatBuffer(quad));
                mTextureBuffer.add(floatArrayToFloatBuffer(quad));
                mIndicesBuffer.add(byteArrayToByteBuffer(indices));
            }
        }
    }

    // endregion
    // ---------------------------------------------------------------------------------------------

    // ---------------------------------------------------------------------------------------------
    // region Draw

    public void draw(final GL10 gl) {
        // Bind the previously generated texture.
        gl.glBindTexture(GL10.GL_TEXTURE_2D, this.mTextures[0]);

        // Point to our buffers.
        gl.glEnableClientState(GL10.GL_VERTEX_ARRAY);
        gl.glEnableClientState(GL10.GL_TEXTURE_COORD_ARRAY);

        // Set the face rotation, clockwise in this case.
        gl.glFrontFace(GL10.GL_CW);

        for (int i = 0; i < mVerticesBuffer.size(); i++) {
            gl.glVertexPointer(3, GL10.GL_FLOAT, 0, mVerticesBuffer.get(i));
            gl.glTexCoordPointer(3, GL10.GL_FLOAT, 0, mTextureBuffer.get(i));
            gl.glDrawElements(GL10.GL_TRIANGLE_STRIP, 6, GL10.GL_UNSIGNED_BYTE, mIndicesBuffer.get(i)); // GL_TRIANGLE_STRIP / GL_LINE_LOOP
        }

        // Disable the client state before leaving.
        gl.glDisableClientState(GL10.GL_VERTEX_ARRAY);
        gl.glDisableClientState(GL10.GL_TEXTURE_COORD_ARRAY);
    }

    // endregion
    // ---------------------------------------------------------------------------------------------

    // ---------------------------------------------------------------------------------------------
    // region Utils

    public void loadGLTexture(GL10 gl, Bitmap texture) {
        // Generate one texture pointer, and bind it to the texture array.
        gl.glGenTextures(1, this.mTextures, 0);
        gl.glBindTexture(GL10.GL_TEXTURE_2D, this.mTextures[0]);

        // Create nearest filtered texture.
        gl.glTexParameterf(GL10.GL_TEXTURE_2D, GL10.GL_TEXTURE_MIN_FILTER, GL10.GL_NEAREST);
        gl.glTexParameterf(GL10.GL_TEXTURE_2D, GL10.GL_TEXTURE_MAG_FILTER, GL10.GL_LINEAR);

        // Use Android GLUtils to specify a two-dimensional texture image from our bitmap.
        GLUtils.texImage2D(GL10.GL_TEXTURE_2D, 0, texture, 0);
        texture.recycle();
    }

    public FloatBuffer floatArrayToFloatBuffer(float[] array) {
        ByteBuffer vbb = ByteBuffer.allocateDirect(array.length * 4);
        vbb.order(ByteOrder.nativeOrder()); // use the device hardware's native byte order
        FloatBuffer fb = vbb.asFloatBuffer(); // create a floating point buffer from the ByteBuffer
        fb.put(array); // add the coordinates to the FloatBuffer
        fb.position(0); // set the buffer to read the first coordinate
        return fb;
    }

    public ByteBuffer byteArrayToByteBuffer(byte[] array) {
        ByteBuffer vbb = ByteBuffer.allocateDirect(array.length * 4);
        vbb.order(ByteOrder.nativeOrder()); // use the device hardware's native byte order
        vbb.put(array); // add the indices to the ByteBuffer
        vbb.position(0); // set the buffer to read the first index
        return vbb;
    }

    // endregion
    // ---------------------------------------------------------------------------------------------
}
Of course, the texture is not applied correctly, as I'm using the coordinates of my vertices as texture coordinates. Does anyone see how to do this correctly? I'll also need to be able to "move" the texture when the user pans.
EDIT: as suggested by codetiger, using lat/180 and lon/360, and then normalizing to [0..1], worked. Now I'm trying to add the panning. It works when panning on longitude (horizontally):
But not when panning on latitude (vertically):
I'm simply adding values between 0..1 when the user pans. I tried to use the formula given here with no success. Any idea?
If it helps, that's what I want (obtained with the Ricoh Theta app):
In order to make the sphere a full 360-degree sphere, you can replace the vertex-generation lines with the ones below.
mVertices[i * mNbVerticesPerStrips + j][0] = (float) (radius * Math.cos(j * 2 * Math.PI / mNbVerticesPerStrips) * Math.cos(2 * i * Math.PI / mNbStrips));
mVertices[i * mNbVerticesPerStrips + j][1] = (float) (radius * Math.sin(2 * i * Math.PI / mNbStrips));
mVertices[i * mNbVerticesPerStrips + j][2] = (float) (radius * Math.sin(j * 2 * Math.PI / mNbVerticesPerStrips) * Math.cos(2 * i * Math.PI / mNbStrips));
The only change is using 2 * Math.PI / mNbStrips for the second angle instead of Math.PI / mNbStrips.
And to rotate the image, you can rotate the sphere using:
gl.glRotatef(angle, 1.0f, 0.0f, 0.0f);
Update:
To get correct texture coordinates for the sphere with a standard equirectangular (distortion) texture, you can use (lat/180, lon/360) and normalize the result to [0..1], as mentioned here: https://stackoverflow.com/a/10395141/409315
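A minimal sketch of generating such texture coordinates alongside the vertices, assuming the same i/j loops as in the HalfSphere constructor above and a hypothetical mTextureCoords array holding two floats per vertex:
// Sketch only: mTextureCoords is a hypothetical float[2 * mNbStrips * mNbVerticesPerStrips] field.
for (int i = 0; i < mNbStrips; i++) {
    for (int j = 0; j < mNbVerticesPerStrips; j++) {
        float u = (float) j / mNbVerticesPerStrips; // longitude fraction, in [0..1]
        float v = (float) i / mNbStrips;            // latitude fraction, in [0..1]
        mTextureCoords[2 * (i * mNbVerticesPerStrips + j)] = u;
        mTextureCoords[2 * (i * mNbVerticesPerStrips + j) + 1] = v;
    }
}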

Crop YUV420/NV21 byte array directly in android

I need to crop every frame from the camera's onPreviewFrame. I want to crop it by operating directly on the byte array, without converting it to a bitmap. The reason I want to work directly on the array is that it can't be too slow or too expensive. After the crop operation I will use the output byte array directly, so I don't need any bitmap conversion.
int frameWidth;
int frameHeight;

@Override
public void onPreviewFrame(byte[] data, Camera camera) {
    // Crop the data array directly.
    cropData(data, frameWidth, frameHeight, startCropX, startCropY, outputWidth, outputHeight);
}

private static byte[] cropNV21(byte[] img, int imgWidth, @NonNull Rect cropRect) {
    // 1.5 means 1.0 for Y and 0.25 each for U and V
    int croppedImgSize = (int) Math.floor(cropRect.width() * cropRect.height() * 1.5);
    byte[] croppedImg = new byte[croppedImgSize];

    // Start points of the UV plane
    int imgYPlaneSize = (int) Math.ceil(img.length / 1.5);
    int croppedImgYPlaneSize = cropRect.width() * cropRect.height();

    // Y plane copy
    for (int w = 0; w < cropRect.height(); w++) {
        int imgPos = (cropRect.top + w) * imgWidth + cropRect.left;
        int croppedImgPos = w * cropRect.width();
        System.arraycopy(img, imgPos, croppedImg, croppedImgPos, cropRect.width());
    }

    // UV plane copy
    // U and V are subsampled by 2 * 2, so each interleaved UV row is the same size as a Y row
    // (half U and half V data), and there are Y_rows/2 of UV_rows
    for (int w = 0; w < (int) Math.floor(cropRect.height() / 2.0); w++) {
        int imgPos = imgYPlaneSize + (cropRect.top / 2 + w) * imgWidth + cropRect.left;
        int croppedImgPos = croppedImgYPlaneSize + (w * cropRect.width());
        System.arraycopy(img, imgPos, croppedImg, croppedImgPos, cropRect.width());
    }

    return croppedImg;
}
NV21 structure:
P.S.: Also, if the starting position of your crop rectangle is odd, the U and V channels will be swapped. To handle this case, I use:
if (cropRect.left % 2 == 1) {
    cropRect.left -= 1;
}
if (cropRect.top % 2 == 1) {
    cropRect.top -= 1;
}
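For completeness, a sketch of how the crop might be wired into onPreviewFrame. It assumes previewWidth/previewHeight were read from camera.getParameters().getPreviewSize() when the preview was configured, and that startCropX/startCropY/outputWidth/outputHeight are the values from the question; these names are assumptions, not part of the code above.
@Override
public void onPreviewFrame(byte[] data, Camera camera) {
    // Assumed: previewWidth was read once from camera.getParameters().getPreviewSize().
    Rect cropRect = new Rect(startCropX, startCropY,
            startCropX + outputWidth, startCropY + outputHeight);
    // Keep left/top even so U and V do not get swapped (see the note above).
    cropRect.left &= ~1;
    cropRect.top &= ~1;
    byte[] cropped = cropNV21(data, previewWidth, cropRect);
    // use the cropped NV21 buffer directly here
}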

Android OpenGL Circle Texture Doubled

I am trying to draw a circle with a texture on it that should be stretched across all vertices.
The problem is that the result I get looks like this:
http://s14.postimg.org/3wyb74469/image.png
I have tried to draw the triangle fan as it needs to be, with the first coordinate at 0,0,0
and the rest as needed:
http://escience.anu.edu.au/lecture/cg/surfaceModeling/image/surfaceModeling015.png
Here is a similar question, but I couldn't get an answer to my problem from it:
OpenGL ES, add texture to circle
Function that loads the circle vertex coordinates:
private final int mVerticesDataSize = 3;
private final int mNumberOfVertices = 180;
private final int mBytesPerFloat = 4;

private float[] vertices;
private FloatBuffer mVerticesBuff;

public void loadCircleVerticesBuff(Context mActivityContext) {
    mVerticesBuff = ByteBuffer.allocateDirect(mNumberOfVertices * mVerticesDataSize * mBytesPerFloat).order(ByteOrder.nativeOrder()).asFloatBuffer();
    vertices = new float[mNumberOfVertices * mVerticesDataSize];
    float theta = 0;
    for (int i = 0; i < (mNumberOfVertices * mVerticesDataSize); i += 3) {
        vertices[i] = (float) (5 * Math.cos(theta));
        vertices[i + 1] = (float) (5 * Math.sin(theta));
        vertices[i + 2] = 0;
        theta += Math.PI / 90;
    }
    mVerticesBuff.put(vertices);
    mVerticesBuff.position(0);
}
Function that loads the circle texture coordinates:
private final int mTextureCoordinateDataSize = 3;

public void loadCircleTextureBuff() {
    mCircleTextureCoordinatesBuff = ByteBuffer.allocateDirect(mNumberOfVertices * mTextureCoordinateDataSize * mBytesPerFloat).order(ByteOrder.nativeOrder()).asFloatBuffer();
    mCircleTextureCoordinatesBuff.put(vertices);
    mCircleTextureCoordinatesBuff.position(0);
}
The OpenGL call used to draw is:
GLES20.glDrawArrays(GLES20.GL_TRIANGLE_FAN, 0, mNumberOfVertices);
So, solution found (: There is one very important thing I missed (or that OpenGL requires):
texture coordinates can only be in the range 0 to 1.
So here is the trick/solution:
// Build vertices (offset by radius so all coordinates are positive):
vertices[i] = (float) (radius * Math.cos(theta)) + radius;
vertices[i + 1] = (float) (radius * Math.sin(theta)) + radius;

// Build texture coordinates (normalize to the [0..1] range):
for (int i = 0; i < (mNumberOfVertices * mVerticesDataSize); i += 3) {
    vertices[i] = vertices[i] / (radius * 2);
    vertices[i + 1] = vertices[i + 1] / (radius * 2);
    vertices[i + 2] = 0;
}
And here is the result:
http://s2.postimg.org/tno4jr4y1/image.png
Don't forget to flip the texture coordinates vertically, as I forgot to (:
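A minimal sketch of that flip, assuming the texture-coordinate values built in the loop above (where every second value of each 3-float group is the V component), applied before the coordinates are put into the buffer:
// Flip the V texture coordinate so the image is not upside down.
for (int i = 0; i < (mNumberOfVertices * mVerticesDataSize); i += 3) {
    vertices[i + 1] = 1.0f - vertices[i + 1];
}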

How to fix a memory leak in Android regarding array of arrays?

I've created an application that takes an image captured by the Android device and displays it in an ImageView. The user can then press a button to either blur or deblur the image. When I run the application on my Android device, I can take an image with the camera and display it without any problems. The problem occurs when I press the blur button, which runs some code to blur the image. The application freezes and I get an OutOfMemoryError on a line of my code that creates a new object and stores it in an array, inside a nested for loop.
This is the code for the nested for loop:
for (int y = 0; y < height; y++) {
    for (int x = 0; x < width; x++) {
        int xTranslated = (x + width / 2) % width;
        int yTranslated = (y + height / 2) % height;
        double real = temp[2 * (xTranslated + yTranslated * width)];
        double imaginary = temp[2 * (xTranslated + yTranslated * width) + 1];
        degradation[2 * (x + y * width)] = real;
        degradation[2 * (x + y * width) + 1] = imaginary;
        Complex c = new Complex(real, imaginary);
        complex[y * width + x] = c;
    }
}
This nested for loop deals with data extracted from the input image, which is stored as a Bitmap.
Here is the full method that applies the motion blur:
public Complex[] motionBlur(double[] degradation, int width, int height, double alpha, double gamma, double sigma) {
    Complex[] complex = new Complex[width * height];
    double[] temp = new double[2 * width * height];

    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++) {
            double teta = Math.PI * ((((x - width / 2) % width) * gamma) + (((y - height / 2) % height) * sigma));
            Sinc sinc = new Sinc();
            double real = (Math.cos(teta) * sinc.value(teta)) * alpha;
            double imaginary = (Math.sin(teta) * sinc.value(teta)) * alpha;
            Complex cConj = new Complex(real, imaginary).conjugate();
            temp[2 * (x + y * width)] = cConj.getReal();
            temp[2 * (x + y * width) + 1] = cConj.getImaginary();
        }
    }

    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++) {
            int xTranslated = (x + width / 2) % width;
            int yTranslated = (y + height / 2) % height;
            double real = temp[2 * (xTranslated + yTranslated * width)];
            double imaginary = temp[2 * (xTranslated + yTranslated * width) + 1];
            degradation[2 * (x + y * width)] = real;
            degradation[2 * (x + y * width) + 1] = imaginary;
            Complex c = new Complex(real, imaginary);
            complex[y * width + x] = c;
        }
    }

    return complex;
}
Here is a link to what is being output by the logcat when I try to run the application: http://pastebin.com/ysbN9A3s
MainActivity.java:373 corresponds to the line,
Complex c = new Complex(real, imaginary);
Here is a nice talk about Android crashes.
Pierre-Yves talks about OOM (OutOfMemory errors) at 11:39. With an OOM, the problem is usually not the place where the OOM happens; you should profile your app to find the place where most memory is consumed. I must admit that OOM is one of the hardest errors to resolve.
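If it helps to narrow things down, here is a minimal sketch of logging heap usage around the suspect call (plain Runtime calls, nothing app-specific), for example right before and after motionBlur:
Runtime rt = Runtime.getRuntime();
long usedMb = (rt.totalMemory() - rt.freeMemory()) / (1024 * 1024);
long maxMb = rt.maxMemory() / (1024 * 1024);
Log.i("MEM", "used=" + usedMb + "MB of max=" + maxMb + "MB");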
Good luck!

Android Histogram equalization algorithm gives me really bright or red image

I am doing histogram equalization on an image. I first get the RGB image and convert it to YUV. I run the histogram equalization algorithm on the Y channel and then convert back to RGB. Is it me, or does the image look weird? Am I doing this correctly? This image is pretty bright; other images come out a little red.
Here are the before/after images:
The algorithm (the commented-out values are ones I used previously for the conversion; both yield pretty much the same results):
public static void createContrast(Bitmap src) {
    int width = src.getWidth();
    int height = src.getHeight();
    Bitmap processedImage = Bitmap.createBitmap(width, height, src.getConfig());

    int A = 0, R, G, B;
    int pixel;

    float[][] Y = new float[width][height];
    float[][] U = new float[width][height];
    float[][] V = new float[width][height];

    int[] histogram = new int[256];
    Arrays.fill(histogram, 0);
    int[] cdf = new int[256];
    Arrays.fill(cdf, 0);

    float min = 257;
    float max = 0;

    for (int x = 0; x < width; ++x) {
        for (int y = 0; y < height; ++y) {
            pixel = src.getPixel(x, y);
            //Log.i("TEST","("+x+","+y+")");
            A = Color.alpha(pixel);
            R = Color.red(pixel);
            G = Color.green(pixel);
            B = Color.blue(pixel);
            /*Log.i("TESTEST","R: "+R);
            Log.i("TESTEST","G: "+G);
            Log.i("TESTEST","B: "+B);*/

            // convert to YUV
            /*Y[x][y] = 0.299f * R + 0.587f * G + 0.114f * B;
            U[x][y] = 0.492f * (B - Y[x][y]);
            V[x][y] = 0.877f * (R - Y[x][y]);*/
            Y[x][y] = 0.299f * R + 0.587f * G + 0.114f * B;
            U[x][y] = 0.565f * (B - Y[x][y]);
            V[x][y] = 0.713f * (R - Y[x][y]);

            // create a histogram
            histogram[(int) Y[x][y]] += 1;

            // get min and max values
            if (Y[x][y] < min) {
                min = Y[x][y];
            }
            if (Y[x][y] > max) {
                max = Y[x][y];
            }
        }
    }

    cdf[0] = histogram[0];
    for (int i = 1; i <= 255; i++) {
        cdf[i] = cdf[i - 1] + histogram[i];
        //Log.i("TESTEST","cdf of: "+i+" = "+cdf[i]);
    }

    float minCDF = cdf[(int) min];
    float denominator = width * height - minCDF;

    //Log.i("TEST","Histeq Histeq Histeq Histeq Histeq Histeq");
    for (int x = 0; x < width; ++x) {
        for (int y = 0; y < height; ++y) {
            //Log.i("TEST","("+x+","+y+")");
            pixel = src.getPixel(x, y);
            A = Color.alpha(pixel);
            Y[x][y] = ((cdf[(int) Y[x][y]] - minCDF) / denominator) * 255;

            /*R = minMaxCalc(Y[x][y] + 1.140f * V[x][y]);
            G = minMaxCalc(Y[x][y] - 0.395f * U[x][y] - 0.581f * V[x][y]);
            B = minMaxCalc(Y[x][y] + 2.032f * U[x][y]);*/
            R = minMaxCalc(Y[x][y] + 1.140f * V[x][y]);
            G = minMaxCalc(Y[x][y] - 0.344f * U[x][y] - 0.714f * V[x][y]);
            B = minMaxCalc(Y[x][y] + 1.77f * U[x][y]);
            //Log.i("TESTEST","A: "+A);
            /*Log.i("TESTEST","R: "+R);
            Log.i("TESTEST","G: "+G);
            Log.i("TESTEST","B: "+B);*/

            processedImage.setPixel(x, y, Color.argb(A, R, G, B));
        }
    }
}
My next step is to graph the histograms before and after. I just want to get an opinion here.
The question is a little old, but let me answer.
The reason is the way histogram equalization works: the algorithm tries to use the whole 0-255 range instead of the given image's range.
So if you give it a dark image, it will map the relatively brighter pixels toward white and the relatively darker ones toward black.
If you give it a bright image, it will be darkened for the same reason.
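A tiny worked example of that stretching, using the same cdf formula as the question's code on a hypothetical 8-pixel dark image whose Y values only span 10..16:
// Hypothetical dark image: Y values squeezed into [10..16].
int[] y = {10, 10, 12, 12, 14, 14, 16, 16};
int[] histogram = new int[256];
for (int v : y) histogram[v]++;
int[] cdf = new int[256];
cdf[0] = histogram[0];
for (int i = 1; i < 256; i++) cdf[i] = cdf[i - 1] + histogram[i];
float minCDF = cdf[10];                 // cdf at the minimum Y value = 2
float denominator = y.length - minCDF;  // 8 - 2 = 6
for (int v : y) {
    int mapped = (int) (((cdf[v] - minCDF) / denominator) * 255);
    System.out.println(v + " -> " + mapped); // 10 -> 0, 12 -> 85, 14 -> 170, 16 -> 255
}
The narrow dark range [10..16] gets stretched over the full [0..255] range, which is why a dark input comes out much brighter.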
