In Android, I am trying to integrate jPCT-AE with Vuforia by following this tutorial:
http://www.jpct.net/wiki/index.php/Integrating_JPCT-AE_with_Vuforia
The first time the application is launched it works, but when I go back and touch "play" again, it crashes.
These are the errors in my LogCat when the application crashes:
FATAL EXCEPTION: main
java.lang.RuntimeException: [ 1362671862690 ] - ERROR: A texture with the name 'texture' has been declared twice!
at com.threed.jpct.Logger.log(Logger.java:189)
at com.threed.jpct.TextureManager.addTexture(TextureManager.java:138)
at com.qualcomm.QCARSamples.ImageTargets.ImageTargetsRenderer.<init>(ImageTargetsRenderer.java:78)
at com.qualcomm.QCARSamples.ImageTargets.ImageTargets.initApplicationAR(ImageTargets.java:807)
at com.qualcomm.QCARSamples.ImageTargets.ImageTargets.updateApplicationStatus(ImageTargets.java:649)
at com.qualcomm.QCARSamples.ImageTargets.ImageTargets.updateApplicationStatus(ImageTargets.java:641)
at com.qualcomm.QCARSamples.ImageTargets.ImageTargets.access$3(ImageTargets.java:598)
at com.qualcomm.QCARSamples.ImageTargets.ImageTargets$InitQCARTask.onPostExecute(ImageTargets.java:226)
at com.qualcomm.QCARSamples.ImageTargets.ImageTargets$InitQCARTask.onPostExecute(ImageTargets.java:1)
at android.os.AsyncTask.finish(AsyncTask.java:417)
at android.os.AsyncTask.access$300(AsyncTask.java:127)
at android.os.AsyncTask$InternalHandler.handleMessage(AsyncTask.java:429)
at android.os.Handler.dispatchMessage(Handler.java:99)
at android.os.Looper.loop(Looper.java:123)
at android.app.ActivityThread.main(ActivityThread.java:3691)
at java.lang.reflect.Method.invokeNative(Native Method)
at java.lang.reflect.Method.invoke(Method.java:507)
at com.android.internal.os.ZygoteInit$MethodAndArgsCaller.run(ZygoteInit.java:847)
at com.android.internal.os.ZygoteInit.main(ZygoteInit.java:605)
at dalvik.system.NativeStart.main(Native Method)
Here is the ImageTargetsRenderer.java code:
public class ImageTargetsRenderer implements GLSurfaceView.Renderer
{
public boolean mIsActive = false;
/** Reference to main activity **/
public ImageTargets mActivity;
/** Native function for initializing the renderer. */
public native void initRendering();
/** Native function to update the renderer. */
public native void updateRendering(int width, int height);
private World world=null;
private Light sun = null;
private Object3D cube = null;
private FrameBuffer fb = null;
private float[] modelViewMat=null;
private Camera cam=null;
private float fov=0;
private float fovy=0;
//private Camera cam=null;
private Object3D plane=null;
public ImageTargetsRenderer(ImageTargets activity){
this.mActivity = activity;
world = new World();
world.setAmbientLight(20, 20, 20);
sun = new Light(world);
sun.setIntensity(250, 250, 250);
// Create a texture out of the icon...:-)
Texture texture = new Texture(BitmapHelper.rescale(BitmapHelper.convert(mActivity.getResources().getDrawable(R.drawable.ic_launcher)), 64, 64));
TextureManager.getInstance().addTexture("texture", texture);
cube = Primitives.getCube(10);
cube.calcTextureWrapSpherical();
cube.setTexture("texture");
cube.strip();
cube.build();
world.addObject(cube);
cam = world.getCamera();
/*cam.moveCamera(Camera.CAMERA_MOVEOUT, 50);
cam.lookAt(cube.getTransformedCenter());*/
SimpleVector sv = new SimpleVector();
SimpleVector position=new SimpleVector();
position.x=0;
position.y=0;
position.z=-10;
cube.setOrigin(position);
sv.set(cube.getTransformedCenter());
sv.y -= 100;
sv.z -= 100;
sun.setPosition(sv);
MemoryHelper.compact();
}
/** Called when the surface is created or recreated. */
public void onSurfaceCreated(GL10 gl, EGLConfig config)
{
DebugLog.LOGD("GLRenderer::onSurfaceCreated");
// Call native function to initialize rendering:
initRendering();
// Call QCAR function to (re)initialize rendering after first use
// or after OpenGL ES context was lost (e.g. after onPause/onResume):
QCAR.onSurfaceCreated();
}
/** Called when the surface changed size. */
public void onSurfaceChanged(GL10 gl, int width, int height)
{
DebugLog.LOGD("GLRenderer::onSurfaceChanged");
// Call native function to update rendering when render surface
// parameters have changed:
updateRendering(width, height);
// Call QCAR function to handle render surface size changes:
QCAR.onSurfaceChanged(width, height);
if (fb != null) {
fb.dispose();
}
fb = new FrameBuffer(width, height);
}
/** The native render function. */
public native void renderFrame();
/** Called to draw the current frame. */
public void onDrawFrame(GL10 gl)
{
if (!mIsActive)
return;
// Update render view (projection matrix and viewport) if needed:
mActivity.updateRenderView();
//updateCamera();
// Call our native function to render content
renderFrame();
world.renderScene(fb);
world.draw(fb);
fb.display();
}
public void updateModelviewMatrix(float mat[]) {
modelViewMat = mat;
}
public void setFov(float fov_) {
fov = fov_;
}
public void setFovy(float fovy_) {
fovy = fovy_;
}
public void updateCamera() {
Matrix m = new Matrix();
m.setDump(modelViewMat);
cam.setBack(m);
cam.setFOV(fov);
cam.setYFOV(fovy);
}
}
Code for ImageTargets.cpp:
JNIEXPORT void JNICALL
Java_com_qualcomm_QCARSamples_ImageTargets_ImageTargetsRenderer_renderFrame(JNIEnv *env, jobject obj)
{
const QCAR::CameraCalibration& cameraCalibration = QCAR::CameraDevice::getInstance().getCameraCalibration();
QCAR::Vec2F size = cameraCalibration.getSize();
QCAR::Vec2F focalLength = cameraCalibration.getFocalLength();
float fovyRadians = 2 * atan(0.5f * size.data[1] / focalLength.data[1]);
float fovRadians = 2 * atan(0.5f * size.data[0] / focalLength.data[0]);
jclass activityClass = env->GetObjectClass(obj);
jfloatArray modelviewArray = env->NewFloatArray(16);
jmethodID updateMatrixMethod = env->GetMethodID(activityClass, "updateModelviewMatrix", "([F)V");
jmethodID fovMethod = env->GetMethodID(activityClass, "setFov", "(F)V");
jmethodID fovyMethod = env->GetMethodID(activityClass, "setFovy", "(F)V");
// test
jclass newClass = env->GetObjectClass(obj);
jmethodID updateCameraMethod = env->GetMethodID(newClass, "updateCamera", "()V");
// Clear color and depth buffer
//glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Get the state from QCAR and mark the beginning of a rendering section
QCAR::State state = QCAR::Renderer::getInstance().begin();
// Explicitly render the Video Background
QCAR::Renderer::getInstance().drawVideoBackground();
// Did we find any trackables this frame?
for(int tIdx = 0; tIdx < state.getNumTrackableResults(); tIdx++)
{
// Get the trackable:
const QCAR::TrackableResult* result = state.getTrackableResult(tIdx);
const QCAR::Trackable& trackable = result->getTrackable();
QCAR::Matrix44F modelViewMatrix = QCAR::Tool::convertPose2GLMatrix(result->getPose());
}
QCAR::Renderer::getInstance().end();
for(int tIdx = 0; tIdx < state.getNumTrackableResults(); tIdx++)
{
// Get the trackable:
const QCAR::TrackableResult* result = state.getTrackableResult(tIdx);
const QCAR::Trackable& trackable = result->getTrackable();
QCAR::Matrix44F modelViewMatrix = QCAR::Tool::convertPose2GLMatrix(result->getPose());
SampleUtils::rotatePoseMatrix(180.0f, 1.0f, 0, 0, &modelViewMatrix.data[0]);
// Passes the model view matrix to java
env->SetFloatArrayRegion(modelviewArray, 0, 16, modelViewMatrix.data);
env->CallVoidMethod(obj, updateMatrixMethod , modelviewArray);
env->CallVoidMethod(obj, updateCameraMethod);
env->CallVoidMethod(obj, fovMethod, fovRadians);
env->CallVoidMethod(obj, fovyMethod, fovyRadians);
}
env->DeleteLocalRef(modelviewArray);
}
What does that exception mean?
The beginning of your renderFrame method in ImageTargets.cpp should look like this:
jclass activityClass = env->GetObjectClass(obj);
jfloatArray modelviewArray = env->NewFloatArray(16);
jmethodID method = env->GetMethodID(activityClass, "updateModelviewMatrix", "([F)V");
This, I think, is going to solve your "activityClass was not declared in this scope" error.
Comment out this line and test again. You don't need it anymore.
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
I would also recommend commenting out the renderFrame() call in onDrawFrame() to see whether jPCT can render the cube when QCAR hasn't started the rendering earlier (just for testing purposes).
Not to mention, QCAR changes the OpenGL state by default. Therefore, you have to re-enable some states in order to render with jPCT. Check "OpenGL State Changes in Video Background Renderer" for more info.
I'm using this for OpenGL ES 1.x after I call renderFrame in onDrawFrame:
GL11 gl11 = (GL11) gl;
gl11.glEnable(GL11.GL_DEPTH_TEST);
gl11.glEnable(GL11.GL_CULL_FACE);
gl11.glTexEnvi(GL11.GL_TEXTURE_ENV, GL11.GL_TEXTURE_ENV_MODE, GL11.GL_MODULATE);
gl11.glEnable(GL11.GL_LIGHTING);
gl11.glEnable(GL11.GL_BLEND);
In order to see something before applying the matrix, you must first tell the camera to lookAt the object.
Camera cam = world.getCamera();
cam.moveCamera(Camera.CAMERA_MOVEOUT, 50);
cam.lookAt(cube.getTransformedCenter());
Be aware that you should remove these lines when you update the camera with the modelview matrix from the marker.
If you follow my tutorial you actually don't have to activate any OpenGL states to see something over the marker (although you might be interested in activating them as Sam Rad suggested, for other reasons).
Add this to the renderer to remove the loaded texture:
public void cleanup()
{
TextureManager.getInstance().removeTexture("texture");
}
Call it from the activity when closing/pausing:
@Override
protected void onPause()
{
super.onPause();
mRenderer.cleanup();
}
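Alternatively (or in addition), you can guard the addTexture() call in the renderer's constructor so the name is never registered twice when the activity is recreated. A minimal sketch, assuming jPCT's TextureManager.containsTexture() and the same "texture" name used above:
if (!TextureManager.getInstance().containsTexture("texture")) {
    // Only create and register the texture the first time it is needed
    Texture texture = new Texture(BitmapHelper.rescale(BitmapHelper.convert(mActivity.getResources().getDrawable(R.drawable.ic_launcher)), 64, 64));
    TextureManager.getInstance().addTexture("texture", texture);
}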
Related
I need to send data from a GL_TEXTURE_EXTERNAL_OES texture to a plain GL_TEXTURE_2D (rendering an image from an Android player to a Unity texture). Currently I do it by reading pixels from a framebuffer with the source texture attached. This works correctly on my OnePlus 5, but on phones like the Xiaomi Note 4, Mi A2, etc. the image has glitches (it comes out very green). There are also performance issues, because this runs every frame and the more pixels there are to read, the worse it performs (even my phone gets a low frame rate at 4K resolution). Any idea how to optimize this process or do it in some other way?
Thanks and best regards!
GLuint FramebufferName;
glGenFramebuffers(1, &FramebufferName);
glBindFramebuffer(GL_FRAMEBUFFER, FramebufferName);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_EXTERNAL_OES, g_ExtTexturePointer, 0);
if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE)
{
LOGD("%s", "Error: Could not setup frame buffer.");
}
unsigned char* data = new unsigned char[g_SourceWidth * g_SourceHeight * 4];
glReadPixels(0, 0, g_SourceWidth, g_SourceHeight, GL_RGBA, GL_UNSIGNED_BYTE, data);
glBindTexture(GL_TEXTURE_2D, g_TexturePointer);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, g_SourceWidth, g_SourceHeight, 0, GL_RGBA, GL_UNSIGNED_BYTE, data);
glDeleteFramebuffers(1, &FramebufferName);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glBindTexture(GL_TEXTURE_2D, 0);
delete[] data;
UPDATE.
The function that contains this code, and the function that exposes it to the Unity side:
static void UNITY_INTERFACE_API OnRenderEvent(int eventID) { ... }
extern "C" UnityRenderingEvent UNITY_INTERFACE_EXPORT UNITY_INTERFACE_API UMDGetRenderEventFunc()
{
return OnRenderEvent;
}
This is called from the Unity side like this:
[DllImport("RenderingPlugin")]
static extern IntPtr UMDGetRenderEventFunc();
IEnumerator UpdateVideoTexture()
{
while (true)
{
...
androidPlugin.UpdateSurfaceTexture();
GL.IssuePluginEvent(UMDGetRenderEventFunc(), 1);
}
}
And the Android plugin does this on its side (surfaceTexture is the SurfaceTexture backing the external texture that ExoPlayer renders the video to):
public void exportUpdateSurfaceTexture() {
synchronized (this) {
if (this.mIsStopped) {
return;
}
surfaceTexture.updateTexImage();
}
}
On the C++ side:
You're allocating and freeing the pixel buffer every frame with new unsigned char[g_SourceWidth * g_SourceHeight * 4] and delete[] data, and that's expensive depending on the texture size. Create the buffer once, then re-use it.
One way to do this is to have static variables on the C++ side hold the texture information, plus a function to initialize those variables:
static void* pixelData = nullptr;
static int _x;
static int _y;
static int _width;
static int _height;
void initPixelData(void* buffer, int x, int y, int width, int height) {
pixelData = buffer;
_x = x;
_y = y;
_width = width;
_height = height;
}
Then your capture function should be rewritten to drop the new unsigned char[g_SourceWidth * g_SourceHeight * 4] and delete[] data calls and use the static variables instead:
static void UNITY_INTERFACE_API OnRenderEvent(int eventID)
{
if (pixelData == nullptr) {
//Debug::Log("Pointer is null", Color::Red);
return;
}
GLuint FramebufferName;
glGenFramebuffers(1, &FramebufferName);
glBindFramebuffer(GL_FRAMEBUFFER, FramebufferName);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_EXTERNAL_OES, g_ExtTexturePointer, 0);
if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE)
{
LOGD("%s", "Error: Could not setup frame buffer.");
}
glReadPixels(_x, _y, _width, _height, GL_RGBA, GL_UNSIGNED_BYTE, pixelData);
glBindTexture(GL_TEXTURE_2D, g_TexturePointer);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, _width, _height, 0, GL_RGBA, GL_UNSIGNED_BYTE, pixelData);
glDeleteFramebuffers(1, &FramebufferName);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glBindTexture(GL_TEXTURE_2D, 0);
}
extern "C" UnityRenderingEvent UNITY_INTERFACE_EXPORT UNITY_INTERFACE_API
UMDGetRenderEventFunc()
{
return OnRenderEvent;
}
On the C# side:
[DllImport("RenderingPlugin", CallingConvention = CallingConvention.Cdecl)]
public static extern void initPixelData(IntPtr buffer, int x, int y, int width, int height);
[DllImport("RenderingPlugin", CallingConvention = CallingConvention.StdCall)]
private static extern IntPtr UMDGetRenderEventFunc();
Create the Texture information, pin it and send the pointer to C++:
int width = 500;
int height = 500;
//Where Pixel data will be saved
byte[] screenData;
//Where handle that pins the Pixel data will stay
GCHandle pinHandler;
//Used to test the color
public RawImage rawImageColor;
private Texture2D texture;
// Use this for initialization
void Awake()
{
Resolution res = Screen.currentResolution;
width = res.width;
height = res.height;
//Allocate array to be used
screenData = new byte[width * height * 4];
texture = new Texture2D(width, height, TextureFormat.RGBA32, false, false);
//Pin the Array so that it doesn't move around
pinHandler = GCHandle.Alloc(screenData, GCHandleType.Pinned);
//Register the screenshot and pass the array that will receive the pixels
IntPtr arrayPtr = pinHandler.AddrOfPinnedObject();
initPixelData(arrayPtr, 0, 0, width, height);
StartCoroutine(UpdateVideoTexture());
}
Then, to update the texture, see the sample below. Note that there are two methods to update the texture, as shown in the code. If you run into issues with Method 1, comment out the two lines that use texture.LoadRawTextureData and texture.Apply, and un-comment the Method 2 code, which uses ByteArrayToColor, texture.SetPixels and texture.Apply:
IEnumerator UpdateVideoTexture()
{
while (true)
{
//Take screenshot of the screen
GL.IssuePluginEvent(UMDGetRenderEventFunc(), 1);
//Update Texture Method1
texture.LoadRawTextureData(screenData);
texture.Apply();
//Update Texture Method2. Use this if the Method1 above crashes
/*
ByteArrayToColor();
texture.SetPixels(colors);
texture.Apply();
*/
//Test it by assigning the texture to a raw image
rawImageColor.texture = texture;
//Wait for a frame
yield return null;
}
}
Color[] colors = null;
void ByteArrayToColor()
{
if (colors == null)
{
colors = new Color[screenData.Length / 4];
}
for (int i = 0; i < screenData.Length; i += 4)
{
colors[i / 4] = new Color(screenData[i] / 255f,
screenData[i + 1] / 255f,
screenData[i + 2] / 255f,
screenData[i + 3] / 255f);
}
}
Unpin the array when done or when the script is about to be destroyed:
void OnDisable()
{
//Unpin the array when disabled
pinHandler.Free();
}
Calling glReadPixels is always going to be slow; CPUs are not good at bulk data transfer.
Ideally you'd manage to convince Unity to accept an external image handle and do the whole process zero-copy, but failing that I would use a GPU render-to-texture pass and a shader to copy from the external image to the RGB surface.
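For illustration only, here is a rough Java/GLES20 sketch of that render-to-texture idea (your plugin is C++, but the GL calls are the same; the class and method names here are made up): attach an ordinary GL_TEXTURE_2D to an FBO, then each frame draw a full-screen quad that samples the OES texture with a samplerExternalOES shader, so the pixels never leave the GPU.
import android.opengl.GLES20;

// Hypothetical helper: sets up the destination texture and FBO for an OES -> TEXTURE_2D blit.
public final class OesToTexture2d {

    // Allocate an empty RGBA GL_TEXTURE_2D that will receive the video frames.
    public static int createTargetTexture(int width, int height) {
        int[] tex = new int[1];
        GLES20.glGenTextures(1, tex, 0);
        GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, tex[0]);
        GLES20.glTexImage2D(GLES20.GL_TEXTURE_2D, 0, GLES20.GL_RGBA, width, height, 0,
                GLES20.GL_RGBA, GLES20.GL_UNSIGNED_BYTE, null); // allocate storage, no CPU data
        GLES20.glTexParameteri(GLES20.GL_TEXTURE_2D, GLES20.GL_TEXTURE_MIN_FILTER, GLES20.GL_LINEAR);
        GLES20.glTexParameteri(GLES20.GL_TEXTURE_2D, GLES20.GL_TEXTURE_MAG_FILTER, GLES20.GL_LINEAR);
        return tex[0];
    }

    // Create an FBO whose color attachment is the destination texture.
    public static int createFboFor(int destTex) {
        int[] fbo = new int[1];
        GLES20.glGenFramebuffers(1, fbo, 0);
        GLES20.glBindFramebuffer(GLES20.GL_FRAMEBUFFER, fbo[0]);
        GLES20.glFramebufferTexture2D(GLES20.GL_FRAMEBUFFER, GLES20.GL_COLOR_ATTACHMENT0,
                GLES20.GL_TEXTURE_2D, destTex, 0);
        // Per frame: bind this FBO, set the viewport, bind the source texture to
        // GL_TEXTURE_EXTERNAL_OES, and draw a full-screen quad with a fragment shader that
        // declares "#extension GL_OES_EGL_image_external : require" and samplerExternalOES.
        // No glReadPixels / glTexImage2D upload is involved.
        return fbo[0];
    }
}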
I have a live broadcasting app based off grafika's examples, where I send my video feed over RTMP to be live broadcast.
I now want to watermark my video by overlaying text or a logo on my video stream. I know this can be done with GLSL filtering, but I have no idea how to implement this based on the sample that I linked.
I tried using Alpha blending but it seems the two texture formats are somehow incompatible (one being TEXTURE_EXTERNAL_OES and the other one TEXTURE_2D) and I just get a black frame in return.
EDIT:
I based my code on Kickflip API:
class CameraSurfaceRenderer implements GLSurfaceView.Renderer {
private static final String TAG = "CameraSurfaceRenderer";
private static final boolean VERBOSE = false;
private CameraEncoder mCameraEncoder;
private FullFrameRect mFullScreenCamera;
private FullFrameRect mFullScreenOverlay; // For texture overlay
private final float[] mSTMatrix = new float[16];
private int mOverlayTextureId;
private int mCameraTextureId;
private boolean mRecordingEnabled;
private int mFrameCount;
// Keep track of selected filters + relevant state
private boolean mIncomingSizeUpdated;
private int mIncomingWidth;
private int mIncomingHeight;
private int mCurrentFilter;
private int mNewFilter;
boolean showBox = false;
/**
* Constructs CameraSurfaceRenderer.
* <p>
* @param recorder video encoder object
*/
public CameraSurfaceRenderer(CameraEncoder recorder) {
mCameraEncoder = recorder;
mCameraTextureId = -1;
mFrameCount = -1;
SessionConfig config = recorder.getConfig();
mIncomingWidth = config.getVideoWidth();
mIncomingHeight = config.getVideoHeight();
mIncomingSizeUpdated = true; // Force texture size update on next onDrawFrame
mCurrentFilter = -1;
mNewFilter = Filters.FILTER_NONE;
mRecordingEnabled = false;
}
/**
* Notifies the renderer that we want to stop or start recording.
*/
public void changeRecordingState(boolean isRecording) {
Log.d(TAG, "changeRecordingState: was " + mRecordingEnabled + " now " + isRecording);
mRecordingEnabled = isRecording;
}
@Override
public void onSurfaceCreated(GL10 unused, EGLConfig config) {
Log.d(TAG, "onSurfaceCreated");
// Set up the texture blitter that will be used for on-screen display. This
// is *not* applied to the recording, because that uses a separate shader.
mFullScreenCamera = new FullFrameRect(
new Texture2dProgram(Texture2dProgram.ProgramType.TEXTURE_EXT));
// For texture overlay:
GLES20.glEnable(GLES20.GL_BLEND);
GLES20.glBlendFunc(GLES20.GL_SRC_ALPHA, GLES20.GL_ONE_MINUS_SRC_ALPHA);
mFullScreenOverlay = new FullFrameRect(
new Texture2dProgram(Texture2dProgram.ProgramType.TEXTURE_2D));
mOverlayTextureId = GlUtil.createTextureWithTextContent("hello!");
mOverlayTextureId = GlUtil.createTextureFromImage(mCameraView.getContext(), R.drawable.red_dot);
mCameraTextureId = mFullScreenCamera.createTextureObject();
mCameraEncoder.onSurfaceCreated(mCameraTextureId);
mFrameCount = 0;
}
@Override
public void onSurfaceChanged(GL10 unused, int width, int height) {
Log.d(TAG, "onSurfaceChanged " + width + "x" + height);
}
@Override
public void onDrawFrame(GL10 unused) {
if (VERBOSE){
if(mFrameCount % 30 == 0){
Log.d(TAG, "onDrawFrame tex=" + mCameraTextureId);
mCameraEncoder.logSavedEglState();
}
}
if (mCurrentFilter != mNewFilter) {
Filters.updateFilter(mFullScreenCamera, mNewFilter);
mCurrentFilter = mNewFilter;
mIncomingSizeUpdated = true;
}
if (mIncomingSizeUpdated) {
mFullScreenCamera.getProgram().setTexSize(mIncomingWidth, mIncomingHeight);
mFullScreenOverlay.getProgram().setTexSize(mIncomingWidth, mIncomingHeight);
mIncomingSizeUpdated = false;
Log.i(TAG, "setTexSize on display Texture");
}
// Draw the video frame.
if(mCameraEncoder.isSurfaceTextureReadyForDisplay()){
mCameraEncoder.getSurfaceTextureForDisplay().updateTexImage();
mCameraEncoder.getSurfaceTextureForDisplay().getTransformMatrix(mSTMatrix);
//Drawing texture overlay:
mFullScreenOverlay.drawFrame(mOverlayTextureId, mSTMatrix);
mFullScreenCamera.drawFrame(mCameraTextureId, mSTMatrix);
}
mFrameCount++;
}
public void signalVertialVideo(FullFrameRect.SCREEN_ROTATION isVertical) {
if (mFullScreenCamera != null) mFullScreenCamera.adjustForVerticalVideo(isVertical, false);
}
/**
* Changes the filter that we're applying to the camera preview.
*/
public void changeFilterMode(int filter) {
mNewFilter = filter;
}
public void handleTouchEvent(MotionEvent ev){
mFullScreenCamera.handleTouchEvent(ev);
}
}
This is the code for rendering the image on the screen (the GLSurfaceView), but the image is not actually overlaid on the video. If I am not mistaken, that is done in CameraEncoder.
The thing is, replicating the code from CameraSurfaceRenderer into CameraEncoder (they both have similar code when it comes to filters) does not produce an overlaid text/image.
The texture object uses the GL_TEXTURE_EXTERNAL_OES texture target, which is defined by the GL_OES_EGL_image_external OpenGL ES extension. This limits how the texture may be used. Each time the texture is bound it must be bound to the GL_TEXTURE_EXTERNAL_OES target rather than the GL_TEXTURE_2D target. Additionally, any OpenGL ES 2.0 shader that samples from the texture must declare its use of this extension using, for example, an "#extension GL_OES_EGL_image_external : require" directive. Such shaders must also access the texture using the samplerExternalOES GLSL sampler type.
https://developer.android.com/reference/android/graphics/SurfaceTexture.html
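For reference, a fragment shader that meets those requirements (essentially what grafika's TEXTURE_EXT program type uses) looks like this when kept as a Java string constant:
// External-OES fragment shader: declares the extension and samples via samplerExternalOES.
private static final String FRAGMENT_SHADER_EXT =
        "#extension GL_OES_EGL_image_external : require\n" +
        "precision mediump float;\n" +
        "varying vec2 vTextureCoord;\n" +
        "uniform samplerExternalOES sTexture;\n" +
        "void main() {\n" +
        "    gl_FragColor = texture2D(sTexture, vTextureCoord);\n" +
        "}\n";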
Post your code that you used to do alpha blending and I can probably fix it.
I would probably override the Texture2dProgram and pass that to the FullFrameRect renderer. It has example code for rendering using the GL_TEXTURE_EXTERNAL_OES extension. Basically, override the draw function, call the base implementation, then bind your watermark and draw.
That should be between camera and the video encoder.
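To make that concrete, here is a hypothetical sketch of such a subclass. It assumes grafika's Texture2dProgram and its draw(...) signature (check your copy of the class; Kickflip's fork may differ slightly), and WatermarkProgram / mWatermarkTexId are made-up names. You would pass it to the FullFrameRect used on the encoder's EGL surface, e.g. new FullFrameRect(new WatermarkProgram(watermarkTexId)).
import java.nio.FloatBuffer;
import android.opengl.GLES20;
import android.opengl.Matrix;
// plus the Texture2dProgram class from your project's gles package (grafika/Kickflip)

// Hypothetical subclass: draws the camera frame via the base (external-OES) program,
// then alpha-blends a GL_TEXTURE_2D watermark on top using the same full-screen geometry.
public class WatermarkProgram extends Texture2dProgram {
    private final Texture2dProgram mOverlayProgram =
            new Texture2dProgram(Texture2dProgram.ProgramType.TEXTURE_2D); // plain 2D program for the watermark
    private final float[] mIdentity = new float[16];
    private final int mWatermarkTexId; // GL_TEXTURE_2D texture holding the logo/text

    public WatermarkProgram(int watermarkTexId) {
        super(Texture2dProgram.ProgramType.TEXTURE_EXT); // camera frames arrive as GL_TEXTURE_EXTERNAL_OES
        mWatermarkTexId = watermarkTexId;
        Matrix.setIdentityM(mIdentity, 0);
    }

    @Override
    public void draw(float[] mvpMatrix, FloatBuffer vertexBuffer, int firstVertex,
                     int vertexCount, int coordsPerVertex, int vertexStride,
                     float[] texMatrix, FloatBuffer texBuffer, int textureId, int texStride) {
        // 1. Camera frame (base implementation, samplerExternalOES shader).
        super.draw(mvpMatrix, vertexBuffer, firstVertex, vertexCount, coordsPerVertex,
                vertexStride, texMatrix, texBuffer, textureId, texStride);
        // 2. Watermark on top, alpha-blended, reusing the same quad geometry.
        GLES20.glEnable(GLES20.GL_BLEND);
        GLES20.glBlendFunc(GLES20.GL_SRC_ALPHA, GLES20.GL_ONE_MINUS_SRC_ALPHA);
        mOverlayProgram.draw(mvpMatrix, vertexBuffer, firstVertex, vertexCount, coordsPerVertex,
                vertexStride, mIdentity, texBuffer, mWatermarkTexId, texStride);
        GLES20.glDisable(GLES20.GL_BLEND);
    }
}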
On the one hand, I have a Surface class which, when instantiated, automatically initializes a new thread and starts grabbing frames from a streaming source via native code based on FFmpeg. Here are the main parts of the code for the aforementioned Surface class:
public class StreamingSurface extends Surface implements Runnable {
...
public StreamingSurface(SurfaceTexture surfaceTexture, int width, int height) {
super(surfaceTexture);
screenWidth = width;
screenHeight = height;
init();
}
public void init() {
mDrawTop = 0;
mDrawLeft = 0;
mVideoCurrentFrame = 0;
this.setVideoFile();
this.startPlay();
}
public void setVideoFile() {
// Initialise FFMPEG
naInit("");
// Get stream video res
int[] res = naGetVideoRes();
mDisplayWidth = (int)(res[0]);
mDisplayHeight = (int)(res[1]);
// Prepare Display
mBitmap = Bitmap.createBitmap(mDisplayWidth, mDisplayHeight, Bitmap.Config.ARGB_8888);
naPrepareDisplay(mBitmap, mDisplayWidth, mDisplayHeight);
}
public void startPlay() {
thread = new Thread(this);
thread.start();
}
@Override
public void run() {
while (true) {
while (2 == mStatus) {
//pause
SystemClock.sleep(100);
}
mVideoCurrentFrame = naGetVideoFrame();
if (0 < mVideoCurrentFrame) {
//success, redraw
if(isValid()){
Canvas canvas = lockCanvas(null);
if (null != mBitmap) {
canvas.drawBitmap(mBitmap, mDrawLeft, mDrawTop, prFramePaint);
}
unlockCanvasAndPost(canvas);
}
} else {
//failure, probably end of video, break
naFinish(mBitmap);
mStatus = 0;
break;
}
}
}
}
In my MainActivity class, I instantiated this class in the following way:
public void startCamera(int texture)
{
mSurface = new SurfaceTexture(texture);
mSurface.setOnFrameAvailableListener(this);
Surface surface = new StreamingSurface(mSurface, 640, 360);
surface.release();
}
I read the following line in the Android developer page, regarding the Surface class constructor:
"Images drawn to the Surface will be made available to the SurfaceTexture, which can attach them to an OpenGL ES texture via updateTexImage()."
That is exactly what I want to do, and I have everything ready for the rendering that follows. But with the above code, the frames captured in the Surface class definitely never make it to the corresponding SurfaceTexture. I know this because the debugger, for instance, never hits the onFrameAvailable listener associated with that SurfaceTexture.
Any ideas? Maybe the fact that I am using a thread to call the drawing functions is messing everything up? If so, what alternatives do I have for grabbing the frames?
Thanks in advance
I am currently working on an effect like the one in "Tiny Wings" (http://www.raywenderlich.com/3857/how-to-create-dynamic-textures-with-ccrendertexture) and found that CCRenderTexture is the solution. So I wanted to know how to achieve this effect on Android, and finally I found this link:
https://github.com/ZhouWeikuan/cocos2d/blob/master/cocos2d-android/src/org/cocos2d/opengl/CCRenderTexture.java
It shows that it uses GL11ExtensionPack:
GL11ExtensionPack egl = (GL11ExtensionPack)CCDirector.gl;
egl.glGetIntegerv(GL11ExtensionPack.GL_FRAMEBUFFER_BINDING_OES, oldFBO_, 0);
...
But in GLWrapperBase.java, it shows:
// Unsupported GL11ExtensionPack methods
public void glBindFramebufferOES (int target, int framebuffer) {
throw new UnsupportedOperationException();
}
It seems libGDX hasn't implemented this function. I want to know what the equivalent feature in libGDX is, or how to use GL11ExtensionPack on the desktop ~
Thanks
In libGDX, you want to use a FrameBuffer object to do the equivalent of a "CCRenderTexture". The FrameBuffer basically lets you use OpenGL commands to draw into an off-screen buffer, and then you can display that buffer's contents as a texture later. See http://code.google.com/p/libgdx/wiki/OpenGLFramebufferObject. Note that the FrameBuffer object is only available if your app requires OpenGL ES 2.0.
Depending on what you want to draw, you might also look at the Pixmap class in libGDX. This supports some simple run-time drawing operations (like lines, rectangles, and pixels). Again, the idea is that you draw into this texture and then render the resulting texture on-screen later. This is available in OpenGL ES 1.0, too.
Both FrameBuffer and Pixmap should work fine on Android and on the Desktop (and I believe on GWT and iOS, too..)
Be careful to understand what happens on Android when your app loses focus temporarily (OpenGL context loss causes some texture contents to disappear).
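For example, a minimal Pixmap-based dynamic texture could look like this (a sketch; the pattern and sizes are arbitrary):
import com.badlogic.gdx.graphics.Color;
import com.badlogic.gdx.graphics.Pixmap;
import com.badlogic.gdx.graphics.Texture;

// Sketch: draw into a Pixmap at run time, then upload it as a Texture.
public class DynamicTextureFactory {
    public static Texture createStripedTexture(int size) {
        Pixmap pm = new Pixmap(size, size, Pixmap.Format.RGBA8888);
        pm.setColor(Color.WHITE);
        pm.fill();                          // background
        pm.setColor(Color.BLUE);
        for (int y = 0; y < size; y += 8) {
            pm.drawLine(0, y, size, y);     // simple striped pattern
        }
        Texture tex = new Texture(pm);      // upload to the GPU
        pm.dispose();                       // the Pixmap is no longer needed after upload
        return tex;
    }
}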
Question : CCRenderTexture,GL11ExtensionPack,Libgdx How TO
interpreted as : In libgdx, how to create dynamic texture.
Answer : Use a private render function to draw into a private frame buffer, convert the frame buffer to a Pixmap, and create a Texture from it.
Example framework:
==================
package com.badlogic.gdx.tests.bullet;
/**
Question : CCRenderTexture,GL11ExtensionPack,Libgdx How TO
interpreted as : In libgdx, how to create dynamic texture?
Answer : Use a private render function to draw in a private frame buffer
convert the frame buffer to Pixmap, create Texture.
Author : Jon Goodwin
**/
import com.badlogic.gdx.graphics.Texture;
import com.badlogic.gdx.graphics.Pixmap;
...//(ctrl-shift-o) to auto-load imports in Eclipse
public class BaseBulletTest extends BulletTest
{
//class variables
//=================
public Texture texture = null;//create this
public Array<Disposable> disposables = new Array<Disposable>();
public Pixmap pm = null;
//---------------------------
@Override
public void create ()
{
init();
}
//---------------------------
public void init ()
{
if(texture == null) texture(Color.BLUE, Color.WHITE);
TextureAttribute ta_tex = TextureAttribute.createDiffuse(texture);
final Material material_box = new Material(ta_tex, ColorAttribute.createSpecular(1, 1, 1, 1),
FloatAttribute.createShininess(8f));
final long attributes1 = Usage.Position | Usage.Normal | Usage.TextureCoordinates;
final Model boxModel = modelBuilder.createBox(1f, 1f, 1f, material_box, attributes1);
...
}
//---------------------------
public Texture texture(Color fg_color, Color bg_color)
{
Pixmap pm = render( fg_color, bg_color );
texture = new Texture(pm);//***here's your new dynamic texture***
disposables.add(texture);//store the texture
return texture;
}
//---------------------------
public Pixmap render(Color fg_color, Color bg_color)
{
int width = Gdx.graphics.getWidth();
int height = Gdx.graphics.getHeight();
SpriteBatch spriteBatch = new SpriteBatch();
m_fbo = new FrameBuffer(Format.RGB565, (int)(width * m_fboScaler), (int)(height * m_fboScaler), false);
m_fbo.begin();
Gdx.gl.glClearColor(bg_color.r, bg_color.g, bg_color.b, bg_color.a);
Gdx.gl.glClear(GL20.GL_COLOR_BUFFER_BIT);
Matrix4 normalProjection = new Matrix4().setToOrtho2D(0, 0, Gdx.graphics.getWidth(), Gdx.graphics.getHeight());
spriteBatch.setProjectionMatrix(normalProjection);
spriteBatch.begin();
spriteBatch.setColor(fg_color);
//do some drawing ***here's where you draw your dynamic texture***
...
spriteBatch.end();//finish write to buffer
pm = ScreenUtils.getFrameBufferPixmap(0, 0, (int) width, (int) height);//write frame buffer to Pixmap
m_fbo.end();
// pm.dispose();
// flipped.dispose();
// tx.dispose();
m_fbo.dispose();
m_fbo = null;
spriteBatch.dispose();
// return texture;
return pm;
}
//---------------------------
}//class BaseBulletTest
//---------------------------
Hi, I'm making an app with QCAR & JPCT-AE. Can someone look at my source code and advise me? If you can, I'll send my source code to you. My email address is lyhdra99@gmail.com.
Please help me ^^
First, I send the modelViewMatrix (QCAR::Matrix44F) from JNI to Java, e.g.:
JNIEXPORT jfloatArray JNICALL Java_jp_may_com_VirtualButtonsRenderer_getNowMatrix(JNIEnv* env, jobject obj)
Then I use this modelViewMatrix like below:
public class VirtualButtonsRenderer implements GLSurfaceView.Renderer {
public VirtualButtonsRenderer(Activity act) {
Config.maxAnimationSubSequences = 999;
// TODO Auto-generated constructor stub
this.act = act;
_Object3D = Loader.loadMD2(act.getResources().openRawResource(R.raw.tris), 1.0f);
_Object3D.setName("MyTarget");
}
public native float[] getNowMatrix();
public void onSurfaceCreated(GL10 gl, EGLConfig config) {
initRendering();
QCAR.onSurfaceCreated();
world = new World();
world.setAmbientLight(20, 20, 20);
TextureManager tm = TextureManager.getInstance();
com.threed.jpct.Texture Cover = new com.threed.jpct.Texture(BitmapFactory.decodeStream(act.getResources().openRawResource(R.raw.skin)));
tm.addTexture("Cover", Cover);
_Object3D.setTexture("Cover");
world.addObject(_Object3D);
world.buildAllObjects();
sun = new Light(world);
sun.setIntensity(250, 250, 250);
Camera cam = world.getCamera();
cam.moveCamera(Camera.CAMERA_MOVEOUT, 100);
cam.lookAt(_Object3D.getTransformedCenter());
SimpleVector sv = new SimpleVector();
sv.set(_Object3D.getTransformedCenter());
sv.x -= 300;
sv.z -= 0;
sun.setPosition(sv);
MemoryHelper.compact();
}
public void onDrawFrame(GL10 gl) {
if (!mIsActive)
return;
if (renderFrame()) {
Matrix NowMatrix = new Matrix();
NowMatrix.fillDump(getNowMatrix());
world.getCamera().setBack(NowMatrix);
world.renderScene(fb);
world.draw(fb);
fb.display();
return;
} else {
mIsTouch = false;
}
}
}
Here I have a problem. I thought the Object3D would move on the marker with the modelViewMatrix, like the Teapot (the QCAR sample object) does, but it doesn't.
This is my problem ^^;;
I would like to help you with your app. Please send me your native ImageTargets.cpp code. Before that, I think you have already seen this page; refer to it once more:
http://www.jpct.net/wiki/index.php/Integrating_JPCT-AE_with_Vuforia
Its project source code:
https://github.com/sidneibjunior/vuforia-jpct
Fetch your modelViewMatrix from renderFrame and send it to Java, like this:
const QCAR::TrackableResult* result = state.getTrackableResult(tIdx);
QCAR::Matrix44F modelViewMatrix = QCAR::Tool::convertPose2GLMatrix(result->getPose());
SampleUtils::rotatePoseMatrix(90.0f, 1.0f, 0, 0, &modelViewMatrix.data[0]);
//inversing the matrix
QCAR::Matrix44F inverseMV = SampleMath::Matrix44FInverse(modelViewMatrix);
//transposing the inverted matrix
QCAR::Matrix44F invTranspMV = SampleMath::Matrix44FTranspose(inverseMV);
Send the inverse transposed matrix to the Java code. It will work fine... I hope :)
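On the Java side, applying the received float[16] would look roughly like this (a sketch based on the code above; note that jPCT's setDump() loads values into a Matrix, whereas fillDump() copies the matrix out into the given array):
// Sketch: apply the inverse-transposed modelview matrix (returned by the native
// getNowMatrix() declared above) to the jPCT camera.
public void updateCameraFromMarker() {
    float[] dump = getNowMatrix();          // 16 floats from the native side
    if (dump == null || dump.length != 16) return;
    com.threed.jpct.Matrix m = new com.threed.jpct.Matrix();
    m.setDump(dump);                        // load the values into the jPCT matrix
    world.getCamera().setBack(m);           // orient the camera to match the marker pose
}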