Dynamic focus area for Android camera

I am building an Android camera app (not using the camera2 API) to take close-range pictures of objects in outdoor conditions. The pictures need to be taken in burst mode, i.e. once activated the camera will take, say, 5 pics continuously, and all the pics need to be well focused. The user may be moving the camera while taking pics and will not be able to manually choose the point of focus. The objects are dark in color, and sometimes the camera is over-exposed by bright objects in the camera view.
I know how to set the focus area as a camera parameter, but the position of the focus area has to change automatically so that it is always focused on the dark regions in the camera view. The position of the dark objects is not fixed in the camera view, so the app will have to look for dark pixels in every frame before setting the focus area.
I am thinking of checking for dark regions in the onPreviewFrame() callback, but I am not sure if this is the correct way to do it. Has anyone done this before who can point me in the right direction? For example, is there a project that makes the Android camera always focus on a face using a face detector? I tried to look on the internet but could not find any relevant projects.

You have to implement touch focus. Something like this:
@Override
public boolean onTouchEvent(MotionEvent event) {
if (event.getAction() == MotionEvent.ACTION_DOWN) {
float x = event.getX();
float y = event.getY();
float touchMajor = event.getTouchMajor();
float touchMinor = event.getTouchMinor();
Rect touchRect = new Rect(
(int)(x - touchMajor/2),
(int)(y - touchMinor/2),
(int)(x + touchMajor/2),
(int)(y + touchMinor/2));
if (mTouchEventListener != null)
mTouchEventListener.touchFocus(touchRect, false);
}
return true;
}
where touchFocus looks like this:
@TargetApi(Build.VERSION_CODES.ICE_CREAM_SANDWICH)
public void touchFocus(Rect tfocusRect, boolean useInMid) {
if (mCamera == null) return;
try{
mCamera.cancelAutoFocus();
//Convert from View's width and height to +/- 1000
Rect targetFocusRect = (useInMid || sfv == null) ? new Rect() :
new Rect(tfocusRect.left * 2000/sfv.getWidth() - 1000,
tfocusRect.top * 2000/sfv.getHeight() - 1000,
tfocusRect.right * 2000/sfv.getWidth() - 1000,
tfocusRect.bottom * 2000/sfv.getHeight() - 1000);
final List<Camera.Area> focusList = new ArrayList<Camera.Area>();
Camera.Area focusArea = new Camera.Area(targetFocusRect, 1000);
focusList.add(focusArea);
Parameters para = mCamera.getParameters();
Log.d(TAG, para.getMaxNumFocusAreas() + ";" + para.getMaxNumMeteringAreas() + " >> " + tfocusRect.toString());
para.setFocusAreas(focusList);
para.setMeteringAreas(focusList);
try{
mCamera.setParameters(para);
}catch(RuntimeException e){
O.Log.e(TAG, "setParameters failed", e);
}
mCamera.autoFocus(myAutoFocusCallback);
}catch (Exception e){
O.Log.e(TAG, "Touch Focus Camera Error", e);
}
}
private static AutoFocusCallback myAutoFocusCallback = new AutoFocusCallback() {
@Override
public void onAutoFocus(boolean success, Camera camera) {
// focus attempt finished; react to success/failure here if needed
}
};
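The original question actually asks for automatic focus on dark regions rather than touch. The same focus-area mechanism can be driven from onPreviewFrame() by scanning the NV21 luma plane for the darkest block. Here is a minimal sketch under those assumptions; the grid size is an arbitrary tuning value, and a real implementation should throttle how often it re-triggers focus rather than doing this on every frame:

// Sketch: find the darkest block in the NV21 preview frame and focus there.
private static final int GRID = 40; // block size in preview pixels; tune as needed

@Override
public void onPreviewFrame(byte[] data, Camera camera) {
    Camera.Size s = camera.getParameters().getPreviewSize();
    long bestSum = Long.MAX_VALUE;
    int bestX = 0, bestY = 0;
    for (int by = 0; by + GRID <= s.height; by += GRID) {
        for (int bx = 0; bx + GRID <= s.width; bx += GRID) {
            long sum = 0;
            for (int y = by; y < by + GRID; y += 4) {      // sample every 4th pixel
                for (int x = bx; x < bx + GRID; x += 4) {  // of the block, for speed
                    sum += (data[y * s.width + x] & 0xFF); // NV21 starts with the luma plane
                }
            }
            if (sum < bestSum) { bestSum = sum; bestX = bx; bestY = by; }
        }
    }
    // Convert the darkest block from preview-pixel coordinates into the
    // [-1000, 1000] coordinate space that Camera.Area expects.
    Rect area = new Rect(
            bestX * 2000 / s.width - 1000,
            bestY * 2000 / s.height - 1000,
            (bestX + GRID) * 2000 / s.width - 1000,
            (bestY + GRID) * 2000 / s.height - 1000);
    List<Camera.Area> areas = new ArrayList<Camera.Area>();
    areas.add(new Camera.Area(area, 1000));
    Camera.Parameters p = camera.getParameters();
    if (p.getMaxNumFocusAreas() > 0) p.setFocusAreas(areas);
    if (p.getMaxNumMeteringAreas() > 0) p.setMeteringAreas(areas);
    camera.setParameters(p);
    camera.autoFocus(myAutoFocusCallback); // call cancelAutoFocus() first if one is in flight
}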
For the newer camera2 API, read this post: http://www.morethantechnical.com/2017/02/28/android-camera2-touch-to-focus/
//Override in your touch-enabled view (this can be different than the view you use for displaying the cam preview)
@Override
public boolean onTouch(View view, MotionEvent motionEvent) {
final int actionMasked = motionEvent.getActionMasked();
if (actionMasked != MotionEvent.ACTION_DOWN) {
return false;
}
if (mManualFocusEngaged) {
Log.d(TAG, "Manual focus already engaged");
return true;
}
final Rect sensorArraySize = mCameraInfo.get(CameraCharacteristics.SENSOR_INFO_ACTIVE_ARRAY_SIZE);
//TODO: here I just flip x,y, but this needs to correspond with the sensor orientation (via SENSOR_ORIENTATION)
final int y = (int)((motionEvent.getX() / (float)view.getWidth()) * (float)sensorArraySize.height());
final int x = (int)((motionEvent.getY() / (float)view.getHeight()) * (float)sensorArraySize.width());
final int halfTouchWidth = 150; //(int)motionEvent.getTouchMajor(); //TODO: this doesn't represent actual touch size in pixel. Values range in [3, 10]...
final int halfTouchHeight = 150; //(int)motionEvent.getTouchMinor();
MeteringRectangle focusAreaTouch = new MeteringRectangle(Math.max(x - halfTouchWidth, 0),
Math.max(y - halfTouchHeight, 0),
halfTouchWidth * 2,
halfTouchHeight * 2,
MeteringRectangle.METERING_WEIGHT_MAX - 1);
CameraCaptureSession.CaptureCallback captureCallbackHandler = new CameraCaptureSession.CaptureCallback() {
@Override
public void onCaptureCompleted(CameraCaptureSession session, CaptureRequest request, TotalCaptureResult result) {
super.onCaptureCompleted(session, request, result);
mManualFocusEngaged = false;
if ("FOCUS_TAG".equals(request.getTag())) {
//the focus trigger is complete -
//resume repeating (preview surface will get frames), clear AF trigger
mPreviewRequestBuilder.set(CaptureRequest.CONTROL_AF_TRIGGER, null);
mCameraOps.setRepeatingRequest(mPreviewRequestBuilder.build(), null, null);
}
}
@Override
public void onCaptureFailed(CameraCaptureSession session, CaptureRequest request, CaptureFailure failure) {
super.onCaptureFailed(session, request, failure);
Log.e(TAG, "Manual AF failure: " + failure);
mManualFocusEngaged = false;
}
};
//first stop the existing repeating request
mCameraOps.stopRepeating();
//cancel any existing AF trigger (repeated touches, etc.)
mPreviewRequestBuilder.set(CaptureRequest.CONTROL_AF_TRIGGER, CameraMetadata.CONTROL_AF_TRIGGER_CANCEL);
mPreviewRequestBuilder.set(CaptureRequest.CONTROL_AF_MODE, CaptureRequest.CONTROL_AF_MODE_OFF);
mCameraOps.capture(mPreviewRequestBuilder.build(), captureCallbackHandler, mBackgroundHandler);
//Now add a new AF trigger with focus region
if (isMeteringAreaAFSupported()) {
mPreviewRequestBuilder.set(CaptureRequest.CONTROL_AF_REGIONS, new MeteringRectangle[]{focusAreaTouch});
}
mPreviewRequestBuilder.set(CaptureRequest.CONTROL_MODE, CameraMetadata.CONTROL_MODE_AUTO);
mPreviewRequestBuilder.set(CaptureRequest.CONTROL_AF_MODE, CaptureRequest.CONTROL_AF_MODE_AUTO);
mPreviewRequestBuilder.set(CaptureRequest.CONTROL_AF_TRIGGER, CameraMetadata.CONTROL_AF_TRIGGER_START);
mPreviewRequestBuilder.setTag("FOCUS_TAG"); //we'll capture this later for resuming the preview
//then we ask for a single request (not repeating!)
mCameraOps.capture(mPreviewRequestBuilder.build(), captureCallbackHandler, mBackgroundHandler);
mManualFocusEngaged = true;
return true;
}
private boolean isMeteringAreaAFSupported() {
return mCameraInfo.get(CameraCharacteristics.CONTROL_MAX_REGIONS_AF) >= 1;
}


Cannot get Camera2 API touch to focus to work

I'm unable to get the touch to focus to work properly on Camera2 API. On touching I just seem to focus for a second and then it becomes extremely blurred. The phone is a Nexus 5X. Here is my code for touch to focus.
private void refocus(MotionEvent event, View view){
//Handler for autofocus callback
CameraCaptureSession.CaptureCallback captureCallbackHandler = new CameraCaptureSession.CaptureCallback() {
@Override
public void onCaptureCompleted(CameraCaptureSession session, CaptureRequest request, TotalCaptureResult result) {
super.onCaptureCompleted(session, request, result);
if ("FOCUS_TAG".equals(request.getTag())) {
//the focus trigger is complete -
//resume repeating (preview surface will get frames), clear AF trigger
previewRequest.set(CaptureRequest.CONTROL_AF_TRIGGER, null);
try {
mSession.setRepeatingRequest(previewRequest.build(), null, null);
} catch (Exception e) {
}
}
}
@Override
public void onCaptureFailed(CameraCaptureSession session, CaptureRequest request, CaptureFailure failure) {
super.onCaptureFailed(session, request, failure);
Log.e(TAG, "Manual AF failure: " + failure); }
};
try {
final Rect sensorArraySize = manager.getCameraCharacteristics(mCameraDevice.getId()).get(CameraCharacteristics.SENSOR_INFO_ACTIVE_ARRAY_SIZE);
//Find area size
int x = (int)(event.getX()/(float)view.getWidth() * (float)sensorArraySize.width());
int y = (int)(event.getY()/(float)view.getHeight() * (float)sensorArraySize.height());
final int halfTouchWidth = 150; //(int)motionEvent.getTouchMajor(); //TODO: this doesn't represent actual touch size in pixel. Values range in [3, 10]...
final int halfTouchHeight = 150; //(int)motionEvent.getTouchMinor();
MeteringRectangle rect = new MeteringRectangle(Math.max(x - halfTouchWidth, 0),
Math.max(y - halfTouchHeight, 0),
halfTouchWidth * 2,
halfTouchHeight * 2,
MeteringRectangle.METERING_WEIGHT_MAX - 1);
mSession.stopRepeating();
transparentLayer.drawFeedback(rect);
//Cancel requests
previewRequest.set(CaptureRequest.CONTROL_AF_TRIGGER, CameraMetadata.CONTROL_AF_TRIGGER_CANCEL);
previewRequest.set(CaptureRequest.CONTROL_AF_MODE, CaptureRequest.CONTROL_AF_MODE_OFF);
mSession.capture(previewRequest.build(), captureCallbackHandler, null);
//Now add a new AF trigger with focus region
if (isMeteringAreaAFSupported()) {
previewRequest.set(CaptureRequest.CONTROL_AF_REGIONS, new MeteringRectangle[]{rect});
}
previewRequest.set(CaptureRequest.CONTROL_MODE, CameraMetadata.CONTROL_MODE_AUTO);
previewRequest.set(CaptureRequest.CONTROL_AF_MODE, CaptureRequest.CONTROL_AF_MODE_AUTO);
previewRequest.set(CaptureRequest.CONTROL_AF_TRIGGER, CameraMetadata.CONTROL_AF_TRIGGER_START);
previewRequest.setTag("FOCUS_TAG"); //we'll capture this later for resuming the preview
//then we ask for a single request (not repeating!)
mSession.capture(previewRequest.build(), captureCallbackHandler, null);
}catch (Exception e){
e.printStackTrace();
}
}
Also have another helper function:
private boolean isMeteringAreaAFSupported() {
try {
return manager.getCameraCharacteristics(mCameraDevice.getId()).get(CameraCharacteristics.CONTROL_MAX_REGIONS_AF) >= 1;
}catch (Exception e){
return false;
}
}
What could be the reason for the focus working for a brief moment and then resetting, or going completely blurry? I cannot find any solution that helps.
Thanks all!
I would try setting AF_TRIGGER to IDLE in onCaptureCompleted - removing it entirely isn't totally well-specified.
Beyond that, it's not clear to me how you're converting from the screen touch coordinates to the camera active array coordinates for the metering regions. It looks like you're assuming the coordinates are identical, which isn't true. That shouldn't cause blurriness, but will cause you to focus on a different area than you think.
You need to scale x and y correctly, based on the current crop region (which defines the visible field of view when digital zoom is used) and the active array rectangle.
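A rough sketch of that mapping, assuming no handling of SENSOR_ORIENTATION or display rotation (which a real implementation also needs), where cropRegion is the Rect you set as CaptureRequest.SCALER_CROP_REGION, or the full active array rectangle if you never set one:

// Map a touch point on the preview view into sensor coordinates, going
// through the crop region instead of assuming view == active array.
private MeteringRectangle viewTouchToMeteringRect(float touchX, float touchY,
                                                  View view, Rect cropRegion) {
    float fx = touchX / view.getWidth();   // fraction of the preview touched
    float fy = touchY / view.getHeight();
    int x = cropRegion.left + (int) (fx * cropRegion.width());
    int y = cropRegion.top + (int) (fy * cropRegion.height());
    int half = 150; // same fixed half-size the question uses
    Rect r = new Rect(Math.max(x - half, cropRegion.left),
                      Math.max(y - half, cropRegion.top),
                      Math.min(x + half, cropRegion.right),
                      Math.min(y + half, cropRegion.bottom));
    return new MeteringRectangle(r, MeteringRectangle.METERING_WEIGHT_MAX - 1);
}

And for the first point, in onCaptureCompleted replace the null with the explicit IDLE value:

previewRequest.set(CaptureRequest.CONTROL_AF_TRIGGER, CameraMetadata.CONTROL_AF_TRIGGER_IDLE);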

Providing video recording functionality with ARCore

I'm working with this sample (https://github.com/google-ar/arcore-android-sdk/tree/master/samples/hello_ar_java), and I want to provide the functionality to record a video with the AR objects placed.
I tried multiple things but to no avail, is there a recommended way to do it?
Creating a video from an OpenGL surface is a little involved, but doable. I think the easiest approach to understand is to use two EGL surfaces, one for the UI and one for the media encoder. There is a good example of the EGL-level calls needed in the Grafika project on GitHub. I used that as a starting point to figure out the modifications needed to the HelloAR sample for ARCore. Since there are quite a few changes, I broke it down into steps.
Make changes to support writing to external storage
To save the video, you need to write the video file somewhere accessible, so you need to get this permission.
Declare the permission in the AndroidManifest.xml file:
<uses-permission android:name="android.permission.WRITE_EXTERNAL_STORAGE"/>
Then change CameraPermissionHelper.java to request the external storage permission as well as the camera permission. To do this, put the permissions in an array, use it when requesting the permissions, and iterate over it when checking the permission state:
private static final String REQUIRED_PERMISSIONS[] = {
Manifest.permission.CAMERA,
Manifest.permission.WRITE_EXTERNAL_STORAGE
};
public static void requestCameraPermission(Activity activity) {
ActivityCompat.requestPermissions(activity, REQUIRED_PERMISSIONS,
CAMERA_PERMISSION_CODE);
}
public static boolean hasCameraPermission(Activity activity) {
for(String p : REQUIRED_PERMISSIONS) {
if (ContextCompat.checkSelfPermission(activity, p) !=
PackageManager.PERMISSION_GRANTED) {
return false;
}
}
return true;
}
public static boolean shouldShowRequestPermissionRationale(Activity activity) {
for(String p : REQUIRED_PERMISSIONS) {
if (ActivityCompat.shouldShowRequestPermissionRationale(activity, p)) {
return true;
}
}
return false;
}
Add recording to HelloARActivity
Add a simple button and text view to the UI at the bottom of activity_main.xml:
<Button
android:id="#+id/fboRecord_button"
android:layout_width="wrap_content"
android:layout_height="wrap_content"
android:layout_alignStart="@+id/surfaceview"
android:layout_alignTop="@+id/surfaceview"
android:onClick="clickToggleRecording"
android:text="@string/toggleRecordingOn"
tools:ignore="OnClick"/>
<TextView
android:id="#+id/nowRecording_text"
android:layout_width="wrap_content"
android:layout_height="wrap_content"
android:layout_alignBaseline="@+id/fboRecord_button"
android:layout_alignBottom="@+id/fboRecord_button"
android:layout_toEndOf="@+id/fboRecord_button"
android:text="" />
In HelloARActivity add member variables for recording:
private VideoRecorder mRecorder;
private android.opengl.EGLConfig mAndroidEGLConfig;
Initialize mAndroidEGLConfig in onSurfaceCreated(). We'll use this config object to create the encoder surface.
EGL10 egl10 = (EGL10)EGLContext.getEGL();
javax.microedition.khronos.egl.EGLDisplay display = egl10.eglGetCurrentDisplay();
int v[] = new int[2];
egl10.eglGetConfigAttrib(display,config, EGL10.EGL_CONFIG_ID, v);
EGLDisplay androidDisplay = EGL14.eglGetCurrentDisplay();
int attribs[] = {EGL14.EGL_CONFIG_ID, v[0], EGL14.EGL_NONE};
android.opengl.EGLConfig myConfig[] = new android.opengl.EGLConfig[1];
EGL14.eglChooseConfig(androidDisplay, attribs, 0, myConfig, 0, 1, v, 1);
this.mAndroidEGLConfig = myConfig[0];
Refactor the onDrawFrame() method so all the non-drawing code is executed first, and the actual drawing is done in a method called draw(). This way during recording, we can update the ARCore frame, process the input, then draw to the UI, and draw again to the encoder.
@Override
public void onDrawFrame(GL10 gl) {
if (mSession == null) {
return;
}
// Notify ARCore session that the view size changed so that
// the perspective matrix and
// the video background can be properly adjusted.
mDisplayRotationHelper.updateSessionIfNeeded(mSession);
try {
// Obtain the current frame from ARSession. When the
//configuration is set to
// UpdateMode.BLOCKING (it is by default), this will
// throttle the rendering to the camera framerate.
Frame frame = mSession.update();
Camera camera = frame.getCamera();
// Handle taps. Handling only one tap per frame, as taps are
// usually low frequency compared to frame rate.
MotionEvent tap = mQueuedSingleTaps.poll();
if (tap != null && camera.getTrackingState() == TrackingState.TRACKING) {
for (HitResult hit : frame.hitTest(tap)) {
// Check if any plane was hit, and if it was hit inside the plane polygon
Trackable trackable = hit.getTrackable();
if (trackable instanceof Plane
&& ((Plane) trackable).isPoseInPolygon(hit.getHitPose())) {
// Cap the number of objects created. This avoids overloading both the
// rendering system and ARCore.
if (mAnchors.size() >= 20) {
mAnchors.get(0).detach();
mAnchors.remove(0);
}
// Adding an Anchor tells ARCore that it should track this position in
// space. This anchor is created on the Plane to place the 3d model
// in the correct position relative both to the world and to the plane.
mAnchors.add(hit.createAnchor());
// Hits are sorted by depth. Consider only closest hit on a plane.
break;
}
}
}
// Get projection matrix.
float[] projmtx = new float[16];
camera.getProjectionMatrix(projmtx, 0, 0.1f, 100.0f);
// Get camera matrix and draw.
float[] viewmtx = new float[16];
camera.getViewMatrix(viewmtx, 0);
// Compute lighting from average intensity of the image.
final float lightIntensity = frame.getLightEstimate().getPixelIntensity();
// Visualize tracked points.
PointCloud pointCloud = frame.acquirePointCloud();
mPointCloud.update(pointCloud);
draw(frame,camera.getTrackingState() == TrackingState.PAUSED,
viewmtx, projmtx, camera.getDisplayOrientedPose(),lightIntensity);
if (mRecorder != null && mRecorder.isRecording()) {
VideoRecorder.CaptureContext ctx = mRecorder.startCapture();
if (ctx != null) {
// draw again
draw(frame, camera.getTrackingState() == TrackingState.PAUSED,
viewmtx, projmtx, camera.getDisplayOrientedPose(), lightIntensity);
// restore the context
mRecorder.stopCapture(ctx, frame.getTimestamp());
}
}
// Application is responsible for releasing the point cloud resources after
// using it.
pointCloud.release();
// Check if we detected at least one plane. If so, hide the loading message.
if (mMessageSnackbar != null) {
for (Plane plane : mSession.getAllTrackables(Plane.class)) {
if (plane.getType() ==
com.google.ar.core.Plane.Type.HORIZONTAL_UPWARD_FACING
&& plane.getTrackingState() == TrackingState.TRACKING) {
hideLoadingMessage();
break;
}
}
}
} catch (Throwable t) {
// Avoid crashing the application due to unhandled exceptions.
Log.e(TAG, "Exception on the OpenGL thread", t);
}
}
private void draw(Frame frame, boolean paused,
float[] viewMatrix, float[] projectionMatrix,
Pose displayOrientedPose, float lightIntensity) {
// Clear screen to notify driver it should not load
// any pixels from previous frame.
GLES20.glClear(GLES20.GL_COLOR_BUFFER_BIT | GLES20.GL_DEPTH_BUFFER_BIT);
// Draw background.
mBackgroundRenderer.draw(frame);
// If not tracking, don't draw 3d objects.
if (paused) {
return;
}
mPointCloud.draw(viewMatrix, projectionMatrix);
// Visualize planes.
mPlaneRenderer.drawPlanes(
mSession.getAllTrackables(Plane.class),
displayOrientedPose, projectionMatrix);
// Visualize anchors created by touch.
float scaleFactor = 1.0f;
for (Anchor anchor : mAnchors) {
if (anchor.getTrackingState() != TrackingState.TRACKING) {
continue;
}
// Get the current pose of an Anchor in world space.
// The Anchor pose is
// updated during calls to session.update() as ARCore refines
// its estimate of the world.
anchor.getPose().toMatrix(mAnchorMatrix, 0);
// Update and draw the model and its shadow.
mVirtualObject.updateModelMatrix(mAnchorMatrix, scaleFactor);
mVirtualObjectShadow.updateModelMatrix(mAnchorMatrix, scaleFactor);
mVirtualObject.draw(viewMatrix, projectionMatrix, lightIntensity);
mVirtualObjectShadow.draw(viewMatrix, projectionMatrix, lightIntensity);
}
}
Handle the toggling of recording:
public void clickToggleRecording(View view) {
Log.d(TAG, "clickToggleRecording");
if (mRecorder == null) {
File outputFile = new File(Environment.getExternalStoragePublicDirectory(
Environment.DIRECTORY_PICTURES) + "/HelloAR",
"fbo-gl-" + Long.toHexString(System.currentTimeMillis()) + ".mp4");
File dir = outputFile.getParentFile();
if (!dir.exists()) {
dir.mkdirs();
}
try {
mRecorder = new VideoRecorder(mSurfaceView.getWidth(),
mSurfaceView.getHeight(),
VideoRecorder.DEFAULT_BITRATE, outputFile, this);
mRecorder.setEglConfig(mAndroidEGLConfig);
} catch (IOException e) {
Log.e(TAG,"Exception starting recording", e);
}
}
mRecorder.toggleRecording();
updateControls();
}
private void updateControls() {
Button toggleRelease = findViewById(R.id.fboRecord_button);
int id = (mRecorder != null && mRecorder.isRecording()) ?
R.string.toggleRecordingOff : R.string.toggleRecordingOn;
toggleRelease.setText(id);
TextView tv = findViewById(R.id.nowRecording_text);
if (id == R.string.toggleRecordingOff) {
tv.setText(getString(R.string.nowRecording));
} else {
tv.setText("");
}
}
Implement the listener interface to receive video recording state changes:
@Override
public void onVideoRecorderEvent(VideoRecorder.VideoEvent videoEvent) {
Log.d(TAG, "VideoEvent: " + videoEvent);
updateControls();
if (videoEvent == VideoRecorder.VideoEvent.RecordingStopped) {
mRecorder = null;
}
}
Implement the VideoRecorder class to feed images to the encoder
The VideoRecorder class feeds the images to the media encoder. It creates an off-screen EGLSurface using the input surface of the media encoder. The general approach: during recording, draw once for the UI display, then make exactly the same draw call for the media encoder surface.
The constructor takes recording parameters and a listener to push events to during the recording process.
public VideoRecorder(int width, int height, int bitrate, File outputFile,
VideoRecorderListener listener) throws IOException {
this.listener = listener;
mEncoderCore = new VideoEncoderCore(width, height, bitrate, outputFile);
mVideoRect = new Rect(0,0,width,height);
}
When recording starts, we need to create a new EGL surface for the encoder. Then notify the encoder that a new frame is available, make the encoder surface the current EGL surface, and return so the caller can make the drawing calls.
public CaptureContext startCapture() {
if (mVideoEncoder == null) {
return null;
}
if (mEncoderContext == null) {
mEncoderContext = new CaptureContext();
mEncoderContext.windowDisplay = EGL14.eglGetCurrentDisplay();
// Create a window surface, and attach it to the Surface we received.
int[] surfaceAttribs = {
EGL14.EGL_NONE
};
mEncoderContext.windowDrawSurface = EGL14.eglCreateWindowSurface(
mEncoderContext.windowDisplay,
mEGLConfig,mEncoderCore.getInputSurface(),
surfaceAttribs, 0);
mEncoderContext.windowReadSurface = mEncoderContext.windowDrawSurface;
}
CaptureContext displayContext = new CaptureContext();
displayContext.initialize();
// Draw for recording, swap.
mVideoEncoder.frameAvailableSoon();
// Make the input surface current
// mInputWindowSurface.makeCurrent();
EGL14.eglMakeCurrent(mEncoderContext.windowDisplay,
mEncoderContext.windowDrawSurface, mEncoderContext.windowReadSurface,
EGL14.eglGetCurrentContext());
// If we don't set the scissor rect, the glClear() we use to draw the
// light-grey background will draw outside the viewport and muck up our
// letterboxing. Might be better if we disabled the test immediately after
// the glClear(). Of course, if we were clearing the frame background to
// black it wouldn't matter.
//
// We do still need to clear the pixels outside the scissor rect, of course,
// or we'll get garbage at the edges of the recording. We can either clear
// the whole thing and accept that there will be a lot of overdraw, or we
// can issue multiple scissor/clear calls. Some GPUs may have a special
// optimization for zeroing out the color buffer.
//
// For now, be lazy and zero the whole thing. At some point we need to
// examine the performance here.
GLES20.glClearColor(0f, 0f, 0f, 1f);
GLES20.glClear(GLES20.GL_COLOR_BUFFER_BIT);
GLES20.glViewport(mVideoRect.left, mVideoRect.top,
mVideoRect.width(), mVideoRect.height());
GLES20.glEnable(GLES20.GL_SCISSOR_TEST);
GLES20.glScissor(mVideoRect.left, mVideoRect.top,
mVideoRect.width(), mVideoRect.height());
return displayContext;
}
When the drawing is completed, the EGLContext needs to be restored back to the UI surface:
public void stopCapture(CaptureContext oldContext, long timeStampNanos) {
if (oldContext == null) {
return;
}
GLES20.glDisable(GLES20.GL_SCISSOR_TEST);
EGLExt.eglPresentationTimeANDROID(mEncoderContext.windowDisplay,
mEncoderContext.windowDrawSurface, timeStampNanos);
EGL14.eglSwapBuffers(mEncoderContext.windowDisplay,
mEncoderContext.windowDrawSurface);
// Restore.
GLES20.glViewport(0, 0, oldContext.getWidth(), oldContext.getHeight());
EGL14.eglMakeCurrent(oldContext.windowDisplay,
oldContext.windowDrawSurface, oldContext.windowReadSurface,
EGL14.eglGetCurrentContext());
}
Add some bookkeeping methods:
public boolean isRecording() {
return mRecording;
}
public void toggleRecording() {
if (isRecording()) {
stopRecording();
} else {
startRecording();
}
}
protected void startRecording() {
mRecording = true;
if (mVideoEncoder == null) {
mVideoEncoder = new TextureMovieEncoder2(mEncoderCore);
}
if (listener != null) {
listener.onVideoRecorderEvent(VideoEvent.RecordingStarted);
}
}
protected void stopRecording() {
mRecording = false;
if (mVideoEncoder != null) {
mVideoEncoder.stopRecording();
}
if (listener != null) {
listener.onVideoRecorderEvent(VideoEvent.RecordingStopped);
}
}
public void setEglConfig(EGLConfig eglConfig) {
this.mEGLConfig = eglConfig;
}
public enum VideoEvent {
RecordingStarted,
RecordingStopped
}
public interface VideoRecorderListener {
void onVideoRecorderEvent(VideoEvent videoEvent);
}
The inner class for the CaptureContext keeps track of the display and surfaces in order to easily handle multiple surfaces being used with the EGL context:
public static class CaptureContext {
EGLDisplay windowDisplay;
EGLSurface windowReadSurface;
EGLSurface windowDrawSurface;
private int mWidth;
private int mHeight;
public void initialize() {
windowDisplay = EGL14.eglGetCurrentDisplay();
windowReadSurface = EGL14.eglGetCurrentSurface(EGL14.EGL_READ);
windowDrawSurface = EGL14.eglGetCurrentSurface(EGL14.EGL_DRAW);
int v[] = new int[1];
EGL14.eglQuerySurface(windowDisplay, windowDrawSurface, EGL14.EGL_WIDTH,
v, 0);
mWidth = v[0];
v[0] = -1;
EGL14.eglQuerySurface(windowDisplay, windowDrawSurface, EGL14.EGL_HEIGHT,
v, 0);
mHeight = v[0];
}
/**
* Returns the surface's width, in pixels.
* <p>
* If this is called on a window surface, and the underlying
* surface is in the process
* of changing size, we may not see the new size right away
* (e.g. in the "surfaceChanged"
* callback). The size should match after the next buffer swap.
*/
public int getWidth() {
if (mWidth < 0) {
int v[] = new int[1];
EGL14.eglQuerySurface(windowDisplay,
windowDrawSurface, EGL14.EGL_WIDTH, v, 0);
mWidth = v[0];
}
return mWidth;
}
/**
* Returns the surface's height, in pixels.
*/
public int getHeight() {
if (mHeight < 0) {
int v[] = new int[1];
EGL14.eglQuerySurface(windowDisplay, windowDrawSurface,
EGL14.EGL_HEIGHT, v, 0);
mHeight = v[0];
}
return mHeight;
}
}
Add VideoEncoder classes
The VideoEncoderCore and TextureMovieEncoder2 classes are copied unchanged from Grafika.
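If you haven't used Grafika before, this summary of the two classes may help orient you (check the repository at https://github.com/google/grafika for the actual source; the notes below describe how they are used here, not their full API):

// VideoEncoderCore(int width, int height, int bitRate, File outputFile)
//     wraps MediaCodec + MediaMuxer; its getInputSurface() returns the Surface
//     that startCapture() above wraps in an EGL window surface.
// TextureMovieEncoder2(VideoEncoderCore encoderCore)
//     drives the encoder on its own thread; frameAvailableSoon() asks it to
//     drain the codec, and stopRecording() signals end of stream.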

Android pinch zoom on Camera delay

I have a custom SurfaceView for my camera preview in my app, and I am trying to implement pinch zoom by implementing these two methods:
@Override
public boolean onTouchEvent(MotionEvent event) {
Camera camera = getCamera();
if (camera == null) {
return true;
}
Camera.Parameters params = camera.getParameters();
int action = event.getAction();
if (event.getPointerCount() > 1) {
if (action == MotionEvent.ACTION_POINTER_DOWN) {
MCLog.v(TAG, "Single ");
mDist = getFingerSpacing(event);
MCLog.w(TAG, "Original distance " + mDist);
} else if (action == MotionEvent.ACTION_MOVE && params.isZoomSupported()) {
camera.cancelAutoFocus();
handleZoom(event, params);
}
} else {
if (action == MotionEvent.ACTION_UP) {
mFirstTime = false;
handleFocus(event, params);
}
}
return true;
}
private void handleZoom(MotionEvent event, Camera.Parameters params) {
if(mFirstTime) {
mDist = getFingerSpacing(event);
mFirstTime = false;
return;
}
List<Integer> zoomRatios = params.getZoomRatios();
int maxZoom = params.getMaxZoom();
int zoom = params.getZoom();
double spacing = getFingerSpacing(event);
MCLog.w(TAG, String.format("Old zoom is: %s", zoom));
//Percentage of displacement
MCLog.w(TAG, String.format("Original distance is: %s, new displacement is %s", mDist, spacing));
double percentage = (mDist + spacing)/mDist;
if(mDist > spacing)
{
percentage *= -1;
}
MCLog.w(TAG, String.format("Percentage is: %s", percentage));
zoom = new Double(zoom + percentage).intValue();
MCLog.w(TAG, String.format("New zoom is: %s", zoom));
if (zoom > maxZoom) {
zoom = maxZoom;
}
if (zoom < 0) {
zoom = 0;
}
mDist = spacing;
params.setZoom(zoom);
if (mZoomListener != null) {
mZoomListener.onZoomChanged(zoomRatios.get(zoom));
}
getCamera().setParameters(params);
}
This seems to work, however the zoom has a slight delay that gets longer the more I zoom into the image. I would stop pinching and the image would still keep zooming in.
I couldn't find any other implementation of pinch zoom for the camera besides this one, so maybe it is doing something wrong.
Since you're seeing the logging continue after you lift your finger, that probably means you're not processing your touch event queue fast enough.
That setParameters call is not particularly fast.
So you'll need to rate-limit somehow, and drop touch events that you don't have time to handle. There are many options with different tradeoffs.
I'm not very familiar with the input APIs, so I'm not sure if there's some parameter you can just tweak to reduce the rate of calls - maybe just don't do anything unless the change in zoom is above a given threshold, and then increase the threshold until the zooming doesn't lag?
Or you can send the zoom calls to another thread to actually invoke setParameters, and just drop a zoom call on the floor if that thread is already busy processing a previous call.
Or better, have a 'nextZoom' parameter that your zoom-setting thread looks at once it finishes its prior call, and have the touch event handler update nextZoom on each invocation. The zoom-setting thread then checks whether the value has changed once it finishes the last set call, and if so, sets it again.
Then you'll always get the newest zoom level, and they won't pile up either.
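A minimal sketch of that last approach, assuming a plain worker thread and a java.util.concurrent.atomic.AtomicInteger holding the newest requested zoom (all names here are illustrative, not from the question's code):

// Newest requested zoom level; -1 means nothing pending. The touch handler
// only writes the latest value, so pinch events never pile up behind
// setParameters().
private final AtomicInteger pendingZoom = new AtomicInteger(-1);

private void startZoomThread() {
    new Thread(new Runnable() {
        @Override
        public void run() {
            while (!Thread.currentThread().isInterrupted()) {
                int zoom = pendingZoom.getAndSet(-1);
                if (zoom >= 0) {
                    Camera camera = getCamera();
                    if (camera != null) {
                        Camera.Parameters params = camera.getParameters();
                        params.setZoom(zoom);
                        camera.setParameters(params); // the slow call, now off the UI thread
                    }
                } else {
                    try {
                        Thread.sleep(10); // idle briefly; wait/notify would be cleaner
                    } catch (InterruptedException e) {
                        return;
                    }
                }
            }
        }
    }, "zoom-setter").start();
}

// Call this from handleZoom() instead of calling setParameters() directly.
private void requestZoom(int zoom) {
    pendingZoom.set(zoom);
}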

Camera PreviewSize Automatically Change when takePicture

I'm using the getOptimalPreviewSize() method in surfaceChanged(). After using it, when I click the take-picture button, the preview size jumps back to the first size and then returns.
It has to stay at the same preview size. Why does it change?
Here is the code:
PictureCallback myPictureCallback_JPG = new PictureCallback(){
@Override
public void onPictureTaken(byte[] data, Camera camera) {
File pictureFile = getOutputMediaFile();
if (pictureFile == null){
return;
}
try {
FileOutputStream fos = new FileOutputStream(pictureFile);
fos.write(data);
fos.close();
} catch (FileNotFoundException e) {
Log.d("Method.PictureCallBack", "File not found: " + e.getMessage());
} catch (IOException e) {
Log.d("Method.PictureCallBack", "Error accessing file: " + e.getMessage());
}
camera.startPreview();
}
};
ShutterCallback myShutterCallback = new ShutterCallback(){
@Override
public void onShutter() {}
};
PictureCallback myPictureCallback_RAW = new PictureCallback(){
@Override
public void onPictureTaken(byte[] arg0, Camera arg1) {}
};
public void takePicture() {
mCamera.takePicture(myShutterCallback, myPictureCallback_RAW, myPictureCallback_JPG);
}
Preview sizes and picture sizes are different. Use getPreviewSize() to get the dimensions of the preview on screen, and getPictureSize() to get the exact dimensions of the picture that will be taken when you call takePicture().
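In practice that means setting both sizes explicitly, once, so takePicture() has nothing to renegotiate. A minimal sketch, reusing the getOptimalPreviewSize() helper from the question (surfaceWidth/surfaceHeight are illustrative names; picking a picture size whose aspect ratio matches the preview is left out):

Camera.Parameters params = mCamera.getParameters();

// Preview size: what surfaceChanged() already computes.
Camera.Size preview = getOptimalPreviewSize(
        params.getSupportedPreviewSizes(), surfaceWidth, surfaceHeight);
params.setPreviewSize(preview.width, preview.height);

// Picture size: set it explicitly too; otherwise the driver may switch to a
// default with a different aspect ratio when takePicture() runs.
Camera.Size picture = params.getSupportedPictureSizes().get(0);
params.setPictureSize(picture.width, picture.height);

mCamera.setParameters(params);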
I know this may be a late answer, but anyways, I wanted to share my solution to this problem.
All you need to do in order to solve this problem is the following:
1. Get all supported picture sizes using the Camera.Parameters.getSupportedPictureSizes() method.
2. Find the most acceptable sizes and store them in a temporary container (an ArrayList will work just fine).
3. Find the size you want to use, and actually provide the camera with that size through Camera.Parameters.setPictureSize(width, height).
4. This is not really a fourth step, rather an addition to the third one: in most cases your camera will have an incorrect orientation set by default, and so will the image if it is not adjusted beforehand. So I would strongly suggest you use Camera.Parameters.setRotation(angle) to change the picture orientation, and Camera.setDisplayOrientation(angle) to change the preview orientation.
And here is the code that handles the picture size:
public static final float CAMERA_SIZE_RATIO_CALIBRATION = 0.1f;
private Camera.Size findTheBestPictureSize() {
List<Camera.Size> supportedSizes = mCamera.getParameters().getSupportedPictureSizes();
ArrayList<Camera.Size> acceptableSizes = new ArrayList<Camera.Size>();
Camera.Size foundSize = null;
Camera.Size tmpSize = null;
float desiredRatio = (mDisplaySize[0] * 1f / mDisplaySize[1]);
float calculatedRatio;
float deltaRatio = 0f;
//Looking for the most acceptable sizes
int itemCount = supportedSizes.size();
for(int i = 0; i < itemCount; i++) {
tmpSize = supportedSizes.get(i);
calculatedRatio = (shouldSizeDimensionsBeFlipped ? (tmpSize.height * 1f / tmpSize.width)
: (tmpSize.width * 1f / tmpSize.height));
deltaRatio = Math.abs(calculatedRatio - desiredRatio);
if(deltaRatio <= CAMERA_SIZE_RATIO_CALIBRATION) {
acceptableSizes.add(tmpSize);
}
}
//Looking for the greatest acceptable size
itemCount = acceptableSizes.size();
for(int i = 0; i < itemCount; i++) {
tmpSize = acceptableSizes.get(i);
if(foundSize == null) {
foundSize = tmpSize;
continue;
}
if(tmpSize.width > foundSize.width && tmpSize.height > foundSize.height) {
foundSize = tmpSize;
}
}
return foundSize;
}
Here shouldSizeDimensionsBeFlipped is a boolean that determines whether the sizes fetched from the camera need to be treated as flipped (width taken as height, and height as width). We only need to do so if the default camera orientation is anything but 0 and 270.
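For the orientation calls mentioned in step 4, the computation documented on Camera.setDisplayOrientation() works for both; a sketch (activity and cameraId are assumed to be available):

private int computeDisplayOrientation(Activity activity, int cameraId) {
    Camera.CameraInfo info = new Camera.CameraInfo();
    Camera.getCameraInfo(cameraId, info);
    int degrees = 0;
    switch (activity.getWindowManager().getDefaultDisplay().getRotation()) {
        case Surface.ROTATION_0:   degrees = 0;   break;
        case Surface.ROTATION_90:  degrees = 90;  break;
        case Surface.ROTATION_180: degrees = 180; break;
        case Surface.ROTATION_270: degrees = 270; break;
    }
    if (info.facing == Camera.CameraInfo.CAMERA_FACING_FRONT) {
        int result = (info.orientation + degrees) % 360;
        return (360 - result) % 360; // compensate for the front camera's mirror
    }
    return (info.orientation - degrees + 360) % 360;
}

The same Camera.CameraInfo.orientation value is also what shouldSizeDimensionsBeFlipped can be derived from.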
I hope this is going to help someone who's having the same problem :)

AndEngine Chase Camera Not Following Body

I'm having a hard time making the chase camera follow the car body. I am tinkering with the Racer Game example project. The tile map is 1024 x 786 and the camera is set to chase the car body. Here's the code:
@Override
public Scene onCreateScene() {
this.mEngine.registerUpdateHandler(new FPSLogger());
this.mScene = new Scene();
//this.mScene.setBackground(new Background(0, 0, 0));
/** Tiled Map Test **/
try {
final TMXLoader tmxLoader = new TMXLoader(this.getAssets(), this.mEngine.getTextureManager(), TextureOptions.BILINEAR_PREMULTIPLYALPHA,
this.getVertexBufferObjectManager(), new ITMXTilePropertiesListener() {
@Override
public void onTMXTileWithPropertiesCreated(final TMXTiledMap pTMXTiledMap, final TMXLayer pTMXLayer, final TMXTile pTMXTile,
final TMXProperties<TMXTileProperty> pTMXTileProperties) {
/* We are going to count the tiles that have the property "box=true" or "boxBool=true" set. */
if(pTMXTileProperties.containsTMXProperty("box", "true")) {
SpeedsterGameActivity.this.numBoxes++;
}
}
});
// Load the TMX file into an Object
this.mTMXTiledMap = tmxLoader.loadFromAsset("tmx/level3.tmx");
this.runOnUiThread(new Runnable() {
@Override
public void run() {
Toast.makeText( SpeedsterGameActivity.this, "Box count in this TMXTiledMap: " + SpeedsterGameActivity.this.numBoxes, Toast.LENGTH_LONG).show();
}
});
} catch (final TMXLoadException e) {
Debug.e(e);
}
// Get the first TMX Layer and add it to the scene
final TMXLayer tmxLayer = this.mTMXTiledMap.getTMXLayers().get(0);
this.mScene.attachChild(tmxLayer);
/* Make the camera not exceed the bounds of the TMXEntity. */
this.mBoundChaseCamera.setBounds(0, 0, tmxLayer.getHeight(), tmxLayer.getWidth());
this.mBoundChaseCamera.setBoundsEnabled(true);
/* Debugging stuff */
Debug.i( "Game Info", "Height & Width: " + tmxLayer.getHeight() + " x " + tmxLayer.getWidth() );
int[] maxTextureSize = new int[1];
GLES20.glGetIntegerv( GLES20.GL_MAX_TEXTURE_SIZE, maxTextureSize, 0);
Debug.i("Game Info", "Max texture size = " + maxTextureSize[0]);
/**********/
/* Calculate the coordinates for the face, so its centered on the camera. */
final float centerX = (CAMERA_WIDTH - this.mVehiclesTextureRegion.getWidth()) / 2;
final float centerY = (CAMERA_HEIGHT - this.mVehiclesTextureRegion.getHeight()) / 2;
/* Create the sprite and add it to the scene. */
final AnimatedSprite player = new AnimatedSprite(centerX, centerY, this.mVehiclesTextureRegion, this.getVertexBufferObjectManager());
this.mBoundChaseCamera.setChaseEntity(player);
/********************/
this.mPhysicsWorld = new FixedStepPhysicsWorld(30, new Vector2(0, 0), false, 8, 1);
//this.initRacetrack();
//this.initRacetrackBorders();
this.initCar();
this.initObstacles();
this.initOnScreenControls();
this.mScene.registerUpdateHandler(this.mPhysicsWorld);
return this.mScene;
}
A possible cause of the problem is that your camera size is 1024x786 too; the full camera rectangle is therefore already shown, and since you enabled bounds, the camera does not follow the car.
Omit the line this.mBoundChaseCamera.setBoundsEnabled(true);.
Another problem: the camera follows the player object, the reference to which you lose once onCreateScene finishes executing. You are not connecting the player object to a physics body using the PhysicsConnector class, so it has no reason to move.
Otherwise, if the car body and entity are created in the initCar method, you are not setting the car as the chase entity (see the sketch below).
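A minimal sketch of the missing connection, using AndEngine's Box2D extension (the fixture values are placeholders):

// Create a dynamic body for the car and keep the sprite glued to it, so the
// chase camera (which follows the sprite) moves with the physics simulation.
final FixtureDef carFixtureDef = PhysicsFactory.createFixtureDef(1f, 0.5f, 0.5f);
final Body carBody = PhysicsFactory.createBoxBody(
        this.mPhysicsWorld, player, BodyType.DynamicBody, carFixtureDef);
this.mPhysicsWorld.registerPhysicsConnector(
        new PhysicsConnector(player, carBody, true, true));
this.mBoundChaseCamera.setChaseEntity(player);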
