I'm attempting to call a method that lives outside the class I'm working in from an OpenGL thread, but I'm getting a "Can't create handler inside thread that has not called Looper.prepare()" runtime exception. Does anyone know of a way around this without moving the method into the same class?
The program breaks at the line Extract cam = new Extract(); below:
public void onDrawFrame(GL10 gl) {
onDrawFrameCounter++;
gl.glEnable(GL10.GL_TEXTURE_2D);
gl.glClear(GL10.GL_COLOR_BUFFER_BIT | GL10.GL_DEPTH_BUFFER_BIT);
bindCameraTexture(gl);
System.out.println(Global.bearing);
float bear = Global.bearing;
gl.glLoadIdentity();
gl.glNormal3f(0,0,1);
System.out.println("ARRAY!: " + GLCamTest.array.length);
p = 0;
gl.glRotatef(bear, 1, 0, 0);
int q = 0;
int e = 0;
if(q < Global.cubes){
Extract cam = new Extract();
Bitmap image = cam.extractimage(q);
final int TILE_WIDTH = 512;
final int TILE_HEIGHT = 512;
final int TILE_SIZE = TILE_WIDTH * TILE_HEIGHT;
int[] pixels = new int[TILE_WIDTH];
short[] rgb_565 = new short[TILE_SIZE];
// Convert 8888 to 565, and swap red and green channel position:
int i = 0;
for (int y = 0; y < TILE_HEIGHT; y++) {
image.getPixels(pixels, 0, TILE_WIDTH, 0, y, TILE_WIDTH, 1);
for (int x = 0; x < TILE_WIDTH; x++) {
int argb = pixels[x];
int r = 0x1f & (argb >> 19); // Take 5 bits from 23..19
int g = 0x3f & (argb >> 10); // Take 6 bits from 15..10
int b = 0x1f & (argb >> 3); // Take 5 bits from 7.. 3
int rgb = (r << 11) | (g << 5) | b;
rgb_565[i] = (short) rgb;
++i;
}
}
ShortBuffer textureBuffer = ShortBuffer.wrap(rgb_565, 0, TILE_SIZE);
gl.glTexImage2D(GL10.GL_TEXTURE_2D, 0, GL10.GL_RGB, 48, 48, 0,
GL10.GL_RGB, GL10.GL_UNSIGNED_SHORT_5_6_5,
textureBuffer);
gl.glDrawArrays(GL10.GL_TRIANGLE_STRIP, e, 4);
e = e + 4;
gl.glDrawArrays(GL10.GL_TRIANGLE_STRIP, e, 4);
e = e + 4;
gl.glDrawArrays(GL10.GL_TRIANGLE_STRIP, e, 4);
e = e + 4;
gl.glDrawArrays(GL10.GL_TRIANGLE_STRIP, e, 4);
e = e + 4;
gl.glDrawArrays(GL10.GL_TRIANGLE_STRIP, e, 4);
e = e + 4;
gl.glDrawArrays(GL10.GL_TRIANGLE_STRIP, e, 4);
e = e + 4;
q++;
}
}
The error indicates that you are calling code that expects to run on a thread with a Looper (typically the UI thread), but onDrawFrame is not called on the UI thread, hence the error.
From the code, this looks to be happening in the Extract class's constructor. What is the class's full name?
Assuming the Extract method loads an image from the camera or similar, I would move the code doing the "extracting" off the GL thread; you likely don't want to limit your FPS by doing this conversion on the GL thread anyway :-) It's better to do this "as fast as possible" on a separate thread and have the GL thread draw the latest result.
If you do need to make this work here, use a Handler.
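A minimal sketch of the Handler approach (only Extract and extractimage come from the code above; the field and method names here, and where the result is consumed, are hypothetical): create the Handler on a thread that already has a Looper, post the extraction work to it, and let onDrawFrame pick up the latest result.
// imports assumed: android.os.Handler, android.os.Looper, android.graphics.Bitmap
private final Handler mainHandler = new Handler(Looper.getMainLooper());
private volatile Bitmap latestImage; // written by the handler, read in onDrawFrame

private void requestImage(final int index) {
    mainHandler.post(new Runnable() {
        @Override
        public void run() {
            Extract cam = new Extract();           // constructed on a Looper thread, so no exception
            latestImage = cam.extractimage(index); // onDrawFrame can upload this once it is non-null
        }
    });
}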
I need to use WebRTC for Android to send specific cropped (face) video to the video channel. I was able to manipulate the Camera1Session class of WebRTC to get the face cropped. Right now I am setting it on an ImageView.
listenForBytebufferFrames() of Camera1Session.java
private void listenForBytebufferFrames() {
this.camera.setPreviewCallbackWithBuffer(new PreviewCallback() {
public void onPreviewFrame(byte[] data, Camera callbackCamera) {
Camera1Session.this.checkIsOnCameraThread();
if(callbackCamera != Camera1Session.this.camera) {
Logging.e("Camera1Session", "Callback from a different camera. This should never happen.");
} else if(Camera1Session.this.state != Camera1Session.SessionState.RUNNING) {
Logging.d("Camera1Session", "Bytebuffer frame captured but camera is no longer running.");
} else {
mFrameProcessor.setNextFrame(data, callbackCamera);
long captureTimeNs = TimeUnit.MILLISECONDS.toNanos(SystemClock.elapsedRealtime());
if(!Camera1Session.this.firstFrameReported) {
int startTimeMs = (int)TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - Camera1Session.this.constructionTimeNs);
Camera1Session.camera1StartTimeMsHistogram.addSample(startTimeMs);
Camera1Session.this.firstFrameReported = true;
}
ByteBuffer byteBuffer1 = ByteBuffer.wrap(data);
Frame outputFrame = new Frame.Builder()
.setImageData(byteBuffer1,
Camera1Session.this.captureFormat.width,
Camera1Session.this.captureFormat.height,
ImageFormat.NV21)
.setTimestampMillis(mFrameProcessor.mPendingTimeMillis)
.setId(mFrameProcessor.mPendingFrameId)
.setRotation(3)
.build();
int w = outputFrame.getMetadata().getWidth();
int h = outputFrame.getMetadata().getHeight();
SparseArray<Face> detectedFaces = mDetector.detect(outputFrame);
if (detectedFaces.size() > 0) {
Face face = detectedFaces.valueAt(0);
ByteBuffer byteBufferRaw = outputFrame.getGrayscaleImageData();
byte[] byteBuffer = byteBufferRaw.array();
YuvImage yuvimage = new YuvImage(byteBuffer, ImageFormat.NV21, w, h, null);
ByteArrayOutputStream baos = new ByteArrayOutputStream();
//My crop logic to get face co-ordinates
yuvimage.compressToJpeg(new Rect(left, top, right, bottom), 80, baos);
final byte[] jpegArray = baos.toByteArray();
Bitmap bitmap = BitmapFactory.decodeByteArray(jpegArray, 0, jpegArray.length);
Activity currentActivity = getActivity();
if (currentActivity instanceof CallActivity) {
((CallActivity) currentActivity).setBitmapToImageView(bitmap); //face on ImageView is set just fine
}
Camera1Session.this.events.onByteBufferFrameCaptured(Camera1Session.this, data, Camera1Session.this.captureFormat.width, Camera1Session.this.captureFormat.height, Camera1Session.this.getFrameOrientation(), captureTimeNs);
Camera1Session.this.camera.addCallbackBuffer(data);
} else {
Camera1Session.this.events.onByteBufferFrameCaptured(Camera1Session.this, data, Camera1Session.this.captureFormat.width, Camera1Session.this.captureFormat.height, Camera1Session.this.getFrameOrientation(), captureTimeNs);
Camera1Session.this.camera.addCallbackBuffer(data);
}
}
}
});
}
jpegArray is the final byteArray that I need to stream via WebRTC, which I tried with something like this:
Camera1Session.this.events.onByteBufferFrameCaptured(Camera1Session.this, jpegArray, (int) face.getWidth(), (int) face.getHeight(), Camera1Session.this.getFrameOrientation(), captureTimeNs);
Camera1Session.this.camera.addCallbackBuffer(jpegArray);
Setting them up like this gives me the following error:
../../webrtc/sdk/android/src/jni/androidvideotracksource.cc line 82
Check failed: length >= width * height + 2 * uv_width * ((height + 1) / 2) (2630 vs. 460800)
I assume this is because androidvideotracksource does not get the byteArray length it expects, since the frame is now cropped.
Could someone point me in the direction of how to achieve it? Is this the correct way/place to manipulate the data and feed into the videoTrack?
Edit: a bitmap created from the byteArray data does not give me a camera preview on the ImageView, unlike one created from the byteArray jpegArray. Maybe because they are packed differently?
Could you use WebRTC's DataChannel to exchange custom data, i.e. the cropped face "image" in your case, and do the respective processing at the receiving end using a third-party library, e.g. OpenGL? The reason I am suggesting this is that the WebRTC video feed received from the channel is a real-time stream, not a byte array, and WebRTC video by its inherent architecture isn't meant for cropping. If we want to crop or augment the video, we have to use an AR library for that job.
We can always leverage WebRTC's data channel to exchange customized data. Using the video channel for this is not recommended because it is a real-time stream, not a byte array. Please follow up in case of any concern.
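If you go that route, sending the cropped JPEG over a data channel takes only a few lines. This is a sketch only: it assumes a DataChannel has already been created on your PeerConnection (here named dataChannel), and jpegArray is the cropped frame from the question.
// imports assumed: java.nio.ByteBuffer, org.webrtc.DataChannel
ByteBuffer payload = ByteBuffer.wrap(jpegArray);
dataChannel.send(new DataChannel.Buffer(payload, true)); // true = binary message
On the receiving side you would register a DataChannel.Observer, collect the bytes from onMessage, and decode them back into a Bitmap.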
WebRTC in particular, and video streaming in general, presumes that the video has fixed dimensions. If you want to crop the detected face, your options are either to pad the cropped image with e.g. black pixels (WebRTC does not use transparency) and crop the video on the receiver side, or, if you don't have control over the receiver, to resize the cropped region to fill the expected width * height frame (you should also keep the expected aspect ratio).
Note that the JPEG compress/decompress you use to crop the original is far from efficient. Some other options can be found in Image crop and resize in Android.
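For the second option, a rough sketch of resizing the crop back to the fixed frame size (bitmap and the capture dimensions come from the question; letterboxing onto a black canvas is one way to keep the aspect ratio, and the result still has to be converted to NV21 before it is handed to onByteBufferFrameCaptured):
// imports assumed: android.graphics.Bitmap, android.graphics.Canvas, android.graphics.Color
int frameW = Camera1Session.this.captureFormat.width;
int frameH = Camera1Session.this.captureFormat.height;
Bitmap padded = Bitmap.createBitmap(frameW, frameH, Bitmap.Config.ARGB_8888);
Canvas canvas = new Canvas(padded);
canvas.drawColor(Color.BLACK); // black bars around the face crop
float scale = Math.min((float) frameW / bitmap.getWidth(), (float) frameH / bitmap.getHeight());
int w = Math.round(bitmap.getWidth() * scale);
int h = Math.round(bitmap.getHeight() * scale);
Bitmap scaled = Bitmap.createScaledBitmap(bitmap, w, h, true);
canvas.drawBitmap(scaled, (frameW - w) / 2f, (frameH - h) / 2f, null);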
Okay, this was definitely a problem of how the original byte[] data was packed versus the way byte[] jpegArray was packed. Changing the packing and scaling it as AlexCohn suggested worked for me. I found help from another post on Stack Overflow on how to pack it. This is the code for it:
private byte[] getNV21(int left, int top, int inputWidth, int inputHeight, Bitmap scaled) {
int [] argb = new int[inputWidth * inputHeight];
scaled.getPixels(argb, 0, inputWidth, left, top, inputWidth, inputHeight);
byte [] yuv = new byte[inputWidth*inputHeight*3/2];
encodeYUV420SP(yuv, argb, inputWidth, inputHeight);
scaled.recycle();
return yuv;
}
private void encodeYUV420SP(byte[] yuv420sp, int[] argb, int width, int height) {
final int frameSize = width * height;
int yIndex = 0;
int uvIndex = frameSize;
int a, R, G, B, Y, U, V;
int index = 0;
for (int j = 0; j < height; j++) {
for (int i = 0; i < width; i++) {
a = (argb[index] & 0xff000000) >> 24; // a is not used obviously
R = (argb[index] & 0xff0000) >> 16;
G = (argb[index] & 0xff00) >> 8;
B = (argb[index] & 0xff) >> 0;
// well known RGB to YUV algorithm
Y = ( ( 66 * R + 129 * G + 25 * B + 128) >> 8) + 16;
U = ( ( -38 * R - 74 * G + 112 * B + 128) >> 8) + 128;
V = ( ( 112 * R - 94 * G - 18 * B + 128) >> 8) + 128;
// NV21 has a plane of Y and interleaved planes of VU each sampled by a factor of 2
// meaning for every 4 Y pixels there are 1 V and 1 U. Note the sampling is every other
// pixel AND every other scanline.
yuv420sp[yIndex++] = (byte) ((Y < 0) ? 0 : ((Y > 255) ? 255 : Y));
if (j % 2 == 0 && index % 2 == 0) {
yuv420sp[uvIndex++] = (byte)((V<0) ? 0 : ((V > 255) ? 255 : V));
yuv420sp[uvIndex++] = (byte)((U<0) ? 0 : ((U > 255) ? 255 : U));
}
index ++;
}
}
}
I pass this byte[] data to onByteBufferFrameCaptured and to the callback buffer:
Camera1Session.this.events.onByteBufferFrameCaptured(
Camera1Session.this,
data,
w,
h,
Camera1Session.this.getFrameOrientation(),
captureTimeNs);
Camera1Session.this.camera.addCallbackBuffer(data);
Prior to this, I had to scale the bitmap, which is pretty straightforward:
int width = bitmapToScale.getWidth();
int height = bitmapToScale.getHeight();
Matrix matrix = new Matrix();
matrix.postScale((float) newWidth / width, (float) newHeight / height); // cast to float to avoid integer division
Bitmap scaledBitmap = Bitmap.createBitmap(bitmapToScale, 0, 0, bitmapToScale.getWidth(), bitmapToScale.getHeight(), matrix, true);
I'm trying to get Vuforia 6.0.117 working in my Android app. I'm using this specific version since it's the last version supporting FrameMarkers. The detection of FrameMarkers is working fine, but when I'm trying to render a texture over the FrameMarker on my phone I get an error stating:
After operation FrameMarkers render frame got glError 0x501
My renderFrame method:
// Clear color and depth buffer
GLES20.glClear(GLES20.GL_COLOR_BUFFER_BIT | GLES20.GL_DEPTH_BUFFER_BIT);
// Get the state from Vuforia and mark the beginning of a rendering
// section
State state = Renderer.getInstance().begin();
// Explicitly render the Video Background
Renderer.getInstance().drawVideoBackground();
GLES20.glEnable(GLES20.GL_DEPTH_TEST);
GLES20.glEnable(GLES20.GL_BLEND);
GLES20.glBlendEquation(GLES20.GL_FUNC_ADD);
// GLES20.glBlendFunc(GLES20.GL_SRC_ALPHA, GLES20.GL_ONE_MINUS_SRC_ALPHA);
GLES20.glBlendFunc(GLES20.GL_ONE, GLES20.GL_ONE_MINUS_SRC_ALPHA);
// We must detect if background reflection is active and adjust the
// culling direction.
// If the reflection is active, this means the post matrix has been
// reflected as well,
// therefore standard counter clockwise face culling will result in
// "inside out" models.
GLES20.glEnable(GLES20.GL_CULL_FACE);
GLES20.glCullFace(GLES20.GL_BACK);
if (Renderer.getInstance().getVideoBackgroundConfig().getReflection() == VIDEO_BACKGROUND_REFLECTION.VIDEO_BACKGROUND_REFLECTION_ON) {
GLES20.glFrontFace(GLES20.GL_CW); // Front camera
} else {
GLES20.glFrontFace(GLES20.GL_CCW); // Back camera
}
// Did we find any trackables this frame?
if (mActivity.isHelpVisible() || state.getNumTrackableResults() == 0) {
// no marker scanned
mActivity.hideInfoButton();
} else {
// Get the trackable:
TrackableResult trackableResult = state.getTrackableResult(0);
float[] modelViewMatrix = Tool.convertPose2GLMatrix(trackableResult.getPose()).getData();
// Check the type of the trackable:
MarkerResult markerResult = (MarkerResult) trackableResult;
Marker marker = (Marker) markerResult.getTrackable();
if (markerId != marker.getMarkerId()) {
markerId = marker.getMarkerId();
tag = DataManager.getInstance().getTagByMarkerId(markerId);
if (tag != null) {
texture = Texture.loadTexture(tag.getTexture());
setupTexture(texture);
tag.addToDB();
}
}
if (tag != null) {
String poiReference = tag.getPoiReference();
if (!poiReference.isEmpty()) {
mActivity.showInfoButton(poiReference);
}
// Select which model to draw:
Buffer vertices = planeObject.getVertices();
Buffer normals = planeObject.getNormals();
Buffer indices = planeObject.getIndices();
Buffer texCoords = planeObject.getTexCoords();
int numIndices = planeObject.getNumObjectIndex();
float[] modelViewProjection = new float[16];
float scale = (float) tag.getScale();
Matrix.scaleM(modelViewMatrix, 0, scale, scale, scale);
Matrix.multiplyMM(modelViewProjection, 0, vuforiaAppSession.getProjectionMatrix().getData(), 0, modelViewMatrix, 0);
GLES20.glUseProgram(shaderProgramID);
GLES20.glVertexAttribPointer(vertexHandle, 3, GLES20.GL_FLOAT, false, 0, vertices);
GLES20.glVertexAttribPointer(normalHandle, 3, GLES20.GL_FLOAT, false, 0, normals);
GLES20.glVertexAttribPointer(textureCoordHandle, 2, GLES20.GL_FLOAT, false, 0, texCoords);
GLES20.glEnableVertexAttribArray(vertexHandle);
GLES20.glEnableVertexAttribArray(normalHandle);
GLES20.glEnableVertexAttribArray(textureCoordHandle);
GLES20.glActiveTexture(GLES20.GL_TEXTURE0);
GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, texture.mTextureID[0]);
GLES20.glUniformMatrix4fv(mvpMatrixHandle, 1, false, modelViewProjection, 0);
GLES20.glUniform1i(texSampler2DHandle, 0);
GLES20.glDrawElements(GLES20.GL_TRIANGLES, numIndices, GLES20.GL_UNSIGNED_SHORT, indices);
GLES20.glDisableVertexAttribArray(vertexHandle);
GLES20.glDisableVertexAttribArray(normalHandle);
GLES20.glDisableVertexAttribArray(textureCoordHandle);
SampleUtils.checkGLError("FrameMarkers render frame");
}
}
GLES20.glDisable(GLES20.GL_DEPTH_TEST);
Renderer.getInstance().end();
}
I'm loading a texture of size 640x482, which is loaded as follows:
public class Texture {
public int mWidth; // The width of the texture.
public int mHeight; // The height of the texture.
public int mChannels; // The number of channels.
public ByteBuffer mData; // The pixel data.
public int[] mTextureID = new int[1];
public boolean mSuccess = false;
public static Texture loadTexture(String fileName) {
try {
InputStream inputStream = new FileInputStream(fileName);
BufferedInputStream bufferedStream = new BufferedInputStream(inputStream);
Bitmap bitMap = BitmapFactory.decodeStream(bufferedStream);
bufferedStream.close();
inputStream.close();
int[] data = new int[bitMap.getWidth() * bitMap.getHeight()];
bitMap.getPixels(data, 0, bitMap.getWidth(), 0, 0, bitMap.getWidth(), bitMap.getHeight());
return loadTextureFromIntBuffer(data, bitMap.getWidth(), bitMap.getHeight());
} catch (IOException e) {
Log.e(Constants.DEBUG, "Failed to load texture '" + fileName + "' from APK");
Log.i(Constants.DEBUG, e.getMessage());
return null;
}
}
public static Texture loadTextureFromIntBuffer(int[] data, int width, int height) {
// Convert:
int numPixels = width * height;
byte[] dataBytes = new byte[numPixels * 4];
for (int p = 0; p < numPixels; ++p) {
int colour = data[p];
dataBytes[p * 4] = (byte) (colour >>> 16); // R
dataBytes[p * 4 + 1] = (byte) (colour >>> 8); // G
dataBytes[p * 4 + 2] = (byte) colour; // B
dataBytes[p * 4 + 3] = (byte) (colour >>> 24); // A
}
Texture texture = new Texture();
texture.mWidth = width;
texture.mHeight = height;
texture.mChannels = 4;
texture.mData = ByteBuffer.allocateDirect(dataBytes.length).order(ByteOrder.nativeOrder());
int rowSize = texture.mWidth * texture.mChannels;
for (int r = 0; r < texture.mHeight; r++) {
texture.mData.put(dataBytes, rowSize * (texture.mHeight - 1 - r), rowSize);
}
texture.mData.rewind();
texture.mSuccess = true;
return texture;
}
}
Anybody got an idea why I'm getting this error and how to fix it?
I cannot go over your entire code right now, and even if I could I'm not sure it would help. You first need to narrow down the problem, so I will first give you the method to do that, and I hope it will serve you in other cases as well.
You managed to find out that there was an error, but you are checking for it only at the end of the rendering function. What you need to do is place the checkGLError call in several places inside the rendering code (printing a different text message each time), until you can pinpoint the exact line after which the error first appears. Then, if you cannot understand the problem, comment here with the problematic line and I will try to help.
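For example, interleave checks like these in renderFrame (SampleUtils.checkGLError is the same helper already called at the end of the method; the exact placement shown here is only illustrative):
GLES20.glUseProgram(shaderProgramID);
SampleUtils.checkGLError("after glUseProgram");
GLES20.glVertexAttribPointer(vertexHandle, 3, GLES20.GL_FLOAT, false, 0, vertices);
GLES20.glVertexAttribPointer(normalHandle, 3, GLES20.GL_FLOAT, false, 0, normals);
GLES20.glVertexAttribPointer(textureCoordHandle, 2, GLES20.GL_FLOAT, false, 0, texCoords);
SampleUtils.checkGLError("after glVertexAttribPointer");
GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, texture.mTextureID[0]);
SampleUtils.checkGLError("after glBindTexture");
GLES20.glDrawElements(GLES20.GL_TRIANGLES, numIndices, GLES20.GL_UNSIGNED_SHORT, indices);
SampleUtils.checkGLError("after glDrawElements");
If, say, the "after glVertexAttribPointer" check is the first one that fires, you know the problem is in one of those three calls (for instance a handle that is -1).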
UPDATE:
After looking at the shader code, and following your report that normalHandle is -1, I came to the following conclusions:
The error, which indicates that the variable vertexNormal cannot be found in the shader, is probably due to this variable being optimized out during shader compilation, since it is not really required.
Explanation: in the vertex shader (CUBE_MESH_VERTEX_SHADER), vertexNormal is assigned to a varying called normal (variable that is passed to the fragment shader). In the fragment shader, this varying is declared but not used.
Therefore, you can actually delete the variables vertexNormal and normal from the shader, and you can delete all usages of 'normalHandle' in your code.
This should eliminate the error.
I use the following procedure for transferring an array of float numbers to a RenderScript Kernel and it works fine.
float[] w = new float[10];
Allocation w_rs = Allocation.createSized(rs, Element.F32(rs), 10);
w_rs.copy1DRangeFrom(0, 10, w);
I want to use a similar procedure for transferring Float4 values as follows
Float4[] w = new Float4[10];
for (int i = 0; i < 10; i++) {
w[i] = new Float4(i, 2*i, 3*i, 4*i);
}
Allocation w_rs = Allocation.createSized(rs, Element.F32_4(rs), 10);
w_rs.copy1DRangeFromUnchecked(0, 10, w);
Which results in the following error
Object passed is not an Array of primitives
Apparently, w should be an array of primitives, but I want w to be an array of Float4.
You can simply use:
float[] w = new float[4 * 10];
for (int i = 0; i < 10; i++) {
w[i * 4 + 0] = i;
w[i * 4 + 1] = i*2;
w[i * 4 + 2] = i*3;
w[i * 4 + 3] = i*4;
}
Allocation w_rs = Allocation.createSized(rs, Element.F32_4(rs), 10);
w_rs.copyFrom(w);
// Or
w_rs.copy1DRangeFrom(0, 10, w); // the count is in allocation elements (10 float4 values), not floats
Painless :)
Reference: RenderScript: parallel computing on Android, the easy way
Deeper explanation
Inside RenderScript Java source code, you'll see this middleware function:
public void copy1DRangeFromUnchecked(int off, int count, Object array) {
copy1DRangeFromUnchecked(off, count, array,
validateObjectIsPrimitiveArray(array, false),
java.lang.reflect.Array.getLength(array));
}
The validateObjectIsPrimitiveArray check is performed on every copy-method invocation; you can only pass arrays of primitives.
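The same restriction applies when reading results back: use a flat primitive array of matching size (a small sketch, reusing w_rs from above).
float[] result = new float[4 * 10]; // 10 float4 elements, 4 floats each
w_rs.copyTo(result);                // result[4*i .. 4*i+3] hold the x, y, z, w of element i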
I have implemented a color picking method and it also works; sometimes. The problem is that when I call my method in the onSurfaceChanged method, it reads back the correct pixels, but when I call my method in the onTouchEvent of the GLSurfaceView, it reads back only zeros. In my method I create a framebuffer object and attach two renderbuffer objects to it. My renderer is set to RENDERMODE_WHEN_DIRTY and glGetError() returns 0. Where exactly is the problem? I'm not sure whether it is my method, or whether I simply cannot do this in an onTouchEvent. Here is the source code of my picking method in my renderer:
public int pick(int x, int y) {
int result = -2;
int[] view = new int[4];
GLES20.glGetIntegerv(GLES20.GL_VIEWPORT, view, 0);
y = view[3] - y;
int[] fbo = new int[1];
GLES20.glGenFramebuffers(1, fbo, 0);
GLES20.glBindFramebuffer(GLES20.GL_FRAMEBUFFER, fbo[0]);
int[] rbo = new int[2];
GLES20.glGenRenderbuffers(2, rbo, 0);
GLES20.glBindRenderbuffer(GLES20.GL_RENDERBUFFER, rbo[0]);
GLES20.glRenderbufferStorage(
GLES20.GL_RENDERBUFFER,
GLES20.GL_RGBA4, view[2], view[3]);
GLES20.glFramebufferRenderbuffer(
GLES20.GL_FRAMEBUFFER, GLES20.GL_COLOR_ATTACHMENT0,
GLES20.GL_RENDERBUFFER, rbo[0]);
GLES20.glBindRenderbuffer(GLES20.GL_RENDERBUFFER, rbo[1]);
GLES20.glRenderbufferStorage(
GLES20.GL_RENDERBUFFER,
GLES20.GL_DEPTH_COMPONENT16,
view[2], view[3]);
GLES20.glFramebufferRenderbuffer(
GLES20.GL_FRAMEBUFFER, GLES20.GL_DEPTH_ATTACHMENT,
GLES20.GL_RENDERBUFFER, rbo[1]);
int status = GLES20.glCheckFramebufferStatus(GLES20.GL_FRAMEBUFFER);
if (status == GLES20.GL_FRAMEBUFFER_COMPLETE) {
result = -1;
GLES20.glClear(
GLES20.GL_COLOR_BUFFER_BIT |
GLES20.GL_DEPTH_BUFFER_BIT);
for (int i = 0; i < mObjects.size(); i++)
mObjects.get(i).render(mProgram, mMatrixMVP, true);
ByteBuffer pixels = ByteBuffer.allocate(view[2] * view[3] * 4);
pixels.order(ByteOrder.nativeOrder());
GLES20.glReadPixels( // I read every pixel just for debugging
0, 0, view[2], view[3], GLES20.GL_RGBA,
GLES20.GL_UNSIGNED_BYTE, pixels);
int e = GLES20.glGetError(); // always returns 0
byte[] tmp = pixels.array(); // only zeros when called in onTouch
int idx = (y * view[2] + x) * 4; // pixel under the touch point
int r = tmp[idx] & 0xFF;
int g = tmp[idx + 1] & 0xFF;
int b = tmp[idx + 2] & 0xFF;
for (int i = 0; i < mObjects.size(); i++)
if ((Math.abs(r - mObjects.get(i).CODE_R) < 8) &&
(Math.abs(g - mObjects.get(i).CODE_G) < 8) &&
(Math.abs(b - mObjects.get(i).CODE_B) < 8)) {
result = i;
break;
}
}
GLES20.glDeleteRenderbuffers(2, rbo, 0);
GLES20.glDeleteFramebuffers(1, fbo, 0);
GLES20.glBindFramebuffer(GLES20.GL_FRAMEBUFFER, 0);
return result;
}
And since it might be relevant, here is my onDrawFrame method:
@Override
public void onDrawFrame(GL10 gl) {
GLES20.glClear(
GLES20.GL_COLOR_BUFFER_BIT |
GLES20.GL_DEPTH_BUFFER_BIT);
for (int i = 0; i < mObjects.size(); i++)
mObjects.get(i).render(mProgram, mMatrixMVP, false);
}
I have read that the onDrawFrame method runs in a separate thread, but I don't think that this is the problem, since I am in RENDERMODE_WHEN_DIRTY...
Why don't you check out this library? I think it is quite good; you could use it instead of creating a new color picker.
https://github.com/LarsWerkman/HoloColorPicker
Even if you use RENDERMODE_WHEN_DIRTY, your renderer runs on a different thread; it just doesn't render until you call requestRender(). You can try to use the queueEvent() method to run the code on your render thread instead and see if that helps.
You could also change your pick() method so that it internally uses a Runnable and the queueEvent() method (or a Handler). You would of course also need some kind of callback to be able to "return" a result.
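Roughly like this (a sketch only, assuming the touch handling lives in a GLSurfaceView subclass; mRenderer holds the renderer with the pick() method from the question, and onObjectPicked is a made-up callback):
@Override
public boolean onTouchEvent(MotionEvent event) {
    final int x = (int) event.getX();
    final int y = (int) event.getY();
    queueEvent(new Runnable() {         // runs on the GL thread, where the context is current
        @Override
        public void run() {
            final int picked = mRenderer.pick(x, y);
            post(new Runnable() {       // hand the result back to the UI thread
                @Override
                public void run() {
                    onObjectPicked(picked); // made-up callback
                }
            });
        }
    });
    return true;
}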
I am trying to render video via the NDK, to add some features that just aren't supported in the SDK. I am using FFmpeg to decode the video and can compile that via the NDK, and used this as a starting point. I have modified that example and, instead of using glDrawTexiOES to draw the texture, I have set up some vertices and am rendering the texture on top of them (the OpenGL ES way of rendering a quad).
Below is what I am doing to render, but the glTexImage2D call is slow. I want to know if there is any way to speed this up, or give the appearance of speeding it up, such as setting up some textures in the background and rendering pre-setup textures. Or is there any other way to draw the video frames to the screen more quickly on Android? Currently I can only get about 12 fps.
glClear(GL_COLOR_BUFFER_BIT);
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glBindTexture(GL_TEXTURE_2D, textureConverted);
//this is slow
glTexImage2D(GL_TEXTURE_2D, /* target */
0, /* level */
GL_RGBA, /* internal format */
textureWidth, /* width */
textureHeight, /* height */
0, /* border */
GL_RGBA, /* format */
GL_UNSIGNED_BYTE,/* type */
pFrameConverted->data[0]);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glTexCoordPointer(2, GL_FLOAT, 0, texCoords);
glVertexPointer(3, GL_FLOAT, 0, vertices);
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_BYTE, indices);
glDisableClientState(GL_VERTEX_ARRAY);
glDisableClientState(GL_TEXTURE_COORD_ARRAY);
EDIT
I changed my code to initialize the texture with glTexImage2D only once and modify it with glTexSubImage2D; it didn't make much of an improvement to the framerate.
I then modified the code to write into a native Bitmap object via the NDK. With this approach I have a background thread that processes the next frames and populates the bitmap object on the native side. I think this has potential, but I need to speed up the conversion of the AVFrame object from FFmpeg into a native bitmap. Below is what I am currently using to convert, a brute-force approach. Is there any way to increase the speed of this or otherwise optimize this conversion?
static void fill_bitmap(AndroidBitmapInfo* info, void *pixels, AVFrame *pFrame)
{
uint8_t *frameLine;
int yy;
for (yy = 0; yy < info->height; yy++) {
uint8_t* line = (uint8_t*)pixels;
frameLine = (uint8_t *)pFrame->data[0] + (yy * pFrame->linesize[0]);
int xx;
for (xx = 0; xx < info->width; xx++) {
int out_offset = xx * 4;
int in_offset = xx * 3;
line[out_offset] = frameLine[in_offset];
line[out_offset+1] = frameLine[in_offset+1];
line[out_offset+2] = frameLine[in_offset+2];
line[out_offset+3] = 0;
}
pixels = (char*)pixels + info->stride;
}
}
Yes, texture (and buffer, and shader, and framebuffer) creation is slow.
That's why you should create the texture only once. After it is created, you can modify its data by calling glTexSubImage2D.
And to make uploading texture data faster, create two textures. While you use one for display, upload texture data from FFmpeg to the second one. When you display the second one, upload data to the first one, and repeat from the beginning.
I think it will still not be very fast. You could try to use the jnigraphics library, which allows you to access Bitmap object pixels from the NDK. After that, you just display this Bitmap on screen on the Java side.
Yes, you can optimize this code:
static void fill_bitmap(AndroidBitmapInfo* info, void *pixels, AVFrame *pFrame)
{
uint8_t *frameLine;
int yy;
for (yy = 0; yy < info->height; yy++)
{
uint8_t* line = (uint8_t*)pixels;
frameLine = (uint8_t *)pFrame->data[0] + (yy * pFrame->linesize[0]);
int xx;
for (xx = 0; xx < info->width; xx++) {
int out_offset = xx * 4;
int in_offset = xx * 3;
line[out_offset] = frameLine[in_offset];
line[out_offset+1] = frameLine[in_offset+1];
line[out_offset+2] = frameLine[in_offset+2];
line[out_offset+3] = 0;
}
pixels = (char*)pixels + info->stride;
}
}
to be something like:
static void fill_bitmap(AndroidBitmapInfo* info, void *pixels, AVFrame *pFrame)
{
uint8_t *frameLine = (uint8_t *)pFrame->data[0];
int yy;
for (yy = 0; yy < info->height; yy++)
{
uint8_t* line = (uint8_t*)pixels;
int xx;
int out_offset = 0;
int in_offset = 0;
for (xx = 0; xx < info->width; xx++) {
line[out_offset] = frameLine[in_offset];
line[out_offset+1] = frameLine[in_offset+1];
line[out_offset+2] = frameLine[in_offset+2];
line[out_offset+3] = 0;
out_offset += 4; /* advance after writing the pixel */
in_offset += 3;
}
pixels = (char*)pixels + info->stride;
frameLine += pFrame->linesize[0];
}
}
That will save you some cycles.
A couple of minor additions will solve your problem: first convert your AVFrame to RGB with swscale, then apply it directly to your texture, i.e.:
AVPicture *pFrameConverted;
struct SwsContext *img_convert_ctx = NULL;
void init(){
pFrameConverted=(AVPicture *)avcodec_alloc_frame();
avpicture_alloc(pFrameConverted, AV_PIX_FMT_RGB565, videoWidth, videoHeight);
img_convert_ctx = sws_getCachedContext(img_convert_ctx,
videoWidth,
videoHeight,
pCodecCtx->pix_fmt,
videoWidth,
videoHeight,
AV_PIX_FMT_RGB565,
SWS_FAST_BILINEAR,
NULL, NULL, NULL );
}
void render(AVFrame* pFrame){
sws_scale(img_convert_ctx, (uint8_t const * const *)pFrame->data, pFrame->linesize, 0, pFrame->height, pFrameConverted->data, pFrameConverted->linesize);
glClear(GL_COLOR_BUFFER_BIT);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, videoWidth, videoHeight, GL_RGB, GL_UNSIGNED_SHORT_5_6_5, pFrameConverted->data[0]); /* type must match the RGB565 data */
glDrawTexiOES(0, 0, 0, videoWidth, videoHeight);
}
Oh, maybe you can use jnigraphics, as in https://github.com/havlenapetr/FFMpeg/commits/debug.
But if you get YUV data after decoding a frame, you have to convert it to RGB555, which is too slow. Using Android's MediaPlayer is a good idea.