Android camera2 output to ImageReader format YUV_420_888 still slow

I'm trying to get the Android camera2 API running in a background service and then process each frame in the ImageReader.OnImageAvailableListener callback. I already use the suggested raw format YUV_420_888 to get the maximum fps, yet I only get around 7 fps at 640x480. This is even slower than what I get with the old Camera interface (I want to upgrade to Camera2 for the higher fps) or with OpenCV's JavaCameraView (which I can't use because I need to run the processing in a background service).
Below is my service class. What am I missing?
My phone is a Redmi Note 3 running Android 5.0.2.
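One note on threading: delivering the camera callbacks on a dedicated background thread, instead of the null (main-looper) handlers my code below passes, would look like this (a minimal sketch, not what I'm running yet):

// Sketch: run ImageReader and capture callbacks off the main thread.
HandlerThread cameraThread = new HandlerThread("CameraBackground");
cameraThread.start();
Handler cameraHandler = new Handler(cameraThread.getLooper());

// Then hand the handler to the camera APIs instead of null:
imageReader.setOnImageAvailableListener(onImageAvailableListener, cameraHandler);
captureSession.setRepeatingRequest(createCaptureRequest(), null, cameraHandler);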
public class Camera2ServiceYUV extends Service {
protected static final String TAG = "VideoProcessing";
protected static final int CAMERACHOICE = CameraCharacteristics.LENS_FACING_BACK;
protected CameraDevice cameraDevice;
protected CameraCaptureSession captureSession;
protected ImageReader imageReader;
// A semaphore to prevent the app from exiting before closing the camera.
private Semaphore mCameraOpenCloseLock = new Semaphore(1);
public static final String RESULT_RECEIVER = "resultReceiver";
private static final int JPEG_COMPRESSION = 90;
public static final int RESULT_OK = 0;
public static final int RESULT_DEVICE_NO_CAMERA= 1;
public static final int RESULT_GET_CAMERA_FAILED = 2;
public static final int RESULT_ALREADY_RUNNING = 3;
public static final int RESULT_NOT_RUNNING = 4;
private static final String START_SERVICE_COMMAND = "startServiceCommands";
private static final int COMMAND_NONE = -1;
private static final int COMMAND_START = 0;
private static final int COMMAND_STOP = 1;
private boolean mRunning = false;
public Camera2ServiceYUV() {
}
public static void startToStart(Context context, ResultReceiver resultReceiver) {
Intent intent = new Intent(context, Camera2ServiceYUV.class);
intent.putExtra(START_SERVICE_COMMAND, COMMAND_START);
intent.putExtra(RESULT_RECEIVER, resultReceiver);
context.startService(intent);
}
public static void startToStop(Context context, ResultReceiver resultReceiver) {
Intent intent = new Intent(context, Camera2ServiceYUV.class);
intent.putExtra(START_SERVICE_COMMAND, COMMAND_STOP);
intent.putExtra(RESULT_RECEIVER, resultReceiver);
context.startService(intent);
}
// SERVICE INTERFACE
@Override
public int onStartCommand(Intent intent, int flags, int startId) {
switch (intent.getIntExtra(START_SERVICE_COMMAND, COMMAND_NONE)) {
case COMMAND_START:
startCamera(intent);
break;
case COMMAND_STOP:
stopCamera(intent);
break;
default:
throw new UnsupportedOperationException("Cannot start the camera service with an illegal command.");
}
return START_STICKY;
}
@Override
public void onDestroy() {
try {
captureSession.abortCaptures();
} catch (CameraAccessException e) {
Log.e(TAG, e.getMessage());
}
captureSession.close();
}
@Override
public IBinder onBind(Intent intent) {
return null;
}
// CAMERA2 INTERFACE
/**
* 1. The android CameraManager class is used to manage all the camera devices in our android device
* Each camera device has a range of properties and settings that describe the device.
* It can be obtained through the camera characteristics.
*/
public void startCamera(Intent intent) {
final ResultReceiver resultReceiver = intent.getParcelableExtra(RESULT_RECEIVER);
if (mRunning) {
resultReceiver.send(RESULT_ALREADY_RUNNING, null);
return;
}
mRunning = true;
CameraManager manager = (CameraManager) getSystemService(CAMERA_SERVICE);
try {
if (!mCameraOpenCloseLock.tryAcquire(2500, TimeUnit.MILLISECONDS)) {
throw new RuntimeException("Time out waiting to lock camera opening.");
}
String pickedCamera = getCamera(manager);
Log.e(TAG,"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA " + pickedCamera);
manager.openCamera(pickedCamera, cameraStateCallback, null);
CameraCharacteristics characteristics = manager.getCameraCharacteristics(pickedCamera);
Size[] jpegSizes = null;
if (characteristics != null) {
jpegSizes = characteristics.get(CameraCharacteristics.SCALER_STREAM_CONFIGURATION_MAP).getOutputSizes(ImageFormat.YUV_420_888);
}
int width = 640;
int height = 480;
// if (jpegSizes != null && 0 < jpegSizes.length) {
// width = jpegSizes[jpegSizes.length -1].getWidth();
// height = jpegSizes[jpegSizes.length - 1].getHeight();
// }
// for(Size s : jpegSizes)
// {
// Log.e(TAG,"Size = " + s.toString());
// }
// DEBUG
StreamConfigurationMap map = characteristics.get(
CameraCharacteristics.SCALER_STREAM_CONFIGURATION_MAP);
if (map == null) {
return;
}
Log.e(TAG,"Width = " + width + ", Height = " + height);
Log.e(TAG,"output stall duration = " + map.getOutputStallDuration(ImageFormat.YUV_420_888, new Size(width,height)) );
Log.e(TAG,"Min output stall duration = " + map.getOutputMinFrameDuration(ImageFormat.YUV_420_888, new Size(width,height)) );
// Size[] sizeList = map.getInputSizes(ImageFormat.YUV_420_888);
// for(Size s : sizeList)
// {
// Log.e(TAG,"Size = " + s.toString());
// }
imageReader = ImageReader.newInstance(width, height, ImageFormat.YUV_420_888, 2 /* images buffered */);
imageReader.setOnImageAvailableListener(onImageAvailableListener, null);
Log.i(TAG, "imageReader created");
} catch (CameraAccessException e) {
Log.e(TAG, e.getMessage());
resultReceiver.send(RESULT_DEVICE_NO_CAMERA, null);
} catch (InterruptedException e) {
resultReceiver.send(RESULT_GET_CAMERA_FAILED, null);
throw new RuntimeException("Interrupted while trying to lock camera opening.", e);
} catch (SecurityException se) {
resultReceiver.send(RESULT_GET_CAMERA_FAILED, null);
throw new RuntimeException("Security permission exception while trying to open the camera.", se);
}
resultReceiver.send(RESULT_OK, null);
}
// We can pick the camera being used, i.e. rear camera in this case.
private String getCamera(CameraManager manager) {
try {
for (String cameraId : manager.getCameraIdList()) {
CameraCharacteristics characteristics = manager.getCameraCharacteristics(cameraId);
int cOrientation = characteristics.get(CameraCharacteristics.LENS_FACING);
if (cOrientation == CAMERACHOICE) {
return cameraId;
}
}
} catch (CameraAccessException e) {
e.printStackTrace();
}
return null;
}
/**
* 1.1 Callbacks when the camera changes its state - opened, disconnected, or error.
*/
protected CameraDevice.StateCallback cameraStateCallback = new CameraDevice.StateCallback() {
@Override
public void onOpened(@NonNull CameraDevice camera) {
Log.i(TAG, "CameraDevice.StateCallback onOpened");
mCameraOpenCloseLock.release();
cameraDevice = camera;
createCaptureSession();
}
@Override
public void onDisconnected(@NonNull CameraDevice camera) {
Log.w(TAG, "CameraDevice.StateCallback onDisconnected");
mCameraOpenCloseLock.release();
camera.close();
cameraDevice = null;
}
@Override
public void onError(@NonNull CameraDevice camera, int error) {
Log.e(TAG, "CameraDevice.StateCallback onError " + error);
mCameraOpenCloseLock.release();
camera.close();
cameraDevice = null;
}
};
/**
* 2. To capture or stream images from a camera device, the application must first create
* a camera capture captureSession.
* The camera capture needs a surface to output what has been captured, in this case
* we use ImageReader in order to access the frame data.
*/
public void createCaptureSession() {
try {
cameraDevice.createCaptureSession(Arrays.asList(imageReader.getSurface()), sessionStateCallback, null);
} catch (CameraAccessException e) {
Log.e(TAG, e.getMessage());
}
}
protected CameraCaptureSession.StateCallback sessionStateCallback = new CameraCaptureSession.StateCallback() {
@Override
public void onConfigured(@NonNull CameraCaptureSession session) {
Log.i(TAG, "CameraCaptureSession.StateCallback onConfigured");
// The camera is already closed
if (null == cameraDevice) {
return;
}
// When the captureSession is ready, we start to grab the frame.
Camera2ServiceYUV.this.captureSession = session;
try {
session.setRepeatingRequest(createCaptureRequest(), null, null);
} catch (CameraAccessException e) {
Log.e(TAG, e.getMessage());
}
}
@Override
public void onConfigureFailed(@NonNull CameraCaptureSession session) {
Log.e(TAG, "CameraCaptureSession.StateCallback onConfigureFailed");
}
};
/**
* 3. The application then needs to construct a CaptureRequest, which defines all the capture parameters
* needed by a camera device to capture a single image.
*/
private CaptureRequest createCaptureRequest() {
try {
/**
* Check other templates for further details.
* TEMPLATE_MANUAL = 6
* TEMPLATE_PREVIEW = 1
* TEMPLATE_RECORD = 3
* TEMPLATE_STILL_CAPTURE = 2
* TEMPLATE_VIDEO_SNAPSHOT = 4
* TEMPLATE_ZERO_SHUTTER_LAG = 5
*
* TODO: can set camera features like auto focus, auto flash here
* captureRequestBuilder.set(CaptureRequest.CONTROL_AF_MODE,CaptureRequest.CONTROL_AF_MODE_CONTINUOUS_PICTURE);
*/
CaptureRequest.Builder captureRequestBuilder = cameraDevice.createCaptureRequest(CameraDevice.TEMPLATE_RECORD);
// captureRequestBuilder.set(CaptureRequest.EDGE_MODE,
// CaptureRequest.EDGE_MODE_OFF);
// captureRequestBuilder.set(
// CaptureRequest.LENS_OPTICAL_STABILIZATION_MODE,
// CaptureRequest.LENS_OPTICAL_STABILIZATION_MODE_ON);
// captureRequestBuilder.set(
// CaptureRequest.COLOR_CORRECTION_ABERRATION_MODE,
// CaptureRequest.COLOR_CORRECTION_ABERRATION_MODE_OFF);
// captureRequestBuilder.set(CaptureRequest.NOISE_REDUCTION_MODE,
// CaptureRequest.NOISE_REDUCTION_MODE_OFF);
// captureRequestBuilder.set(CaptureRequest.CONTROL_AF_TRIGGER,
// CaptureRequest.CONTROL_AF_TRIGGER_CANCEL);
//
// captureRequestBuilder.set(CaptureRequest.CONTROL_AE_LOCK, true);
// captureRequestBuilder.set(CaptureRequest.CONTROL_AWB_LOCK, true);
captureRequestBuilder.addTarget(imageReader.getSurface());
return captureRequestBuilder.build();
} catch (CameraAccessException e) {
Log.e(TAG, e.getMessage());
return null;
}
}
/**
* ImageReader provides a surface for the camera to output what has been captured.
* Upon the image available, call processImage() to process the image as desired.
*/
private long frameTime = 0;
private ImageReader.OnImageAvailableListener onImageAvailableListener = new ImageReader.OnImageAvailableListener() {
@Override
public void onImageAvailable(ImageReader reader) {
Log.i(TAG, "called ImageReader.OnImageAvailable");
Image img = reader.acquireLatestImage();
if (img != null) {
if (frameTime != 0) {
Log.e(TAG, "fps = " + (float) (1000.0 / (SystemClock.elapsedRealtime() - frameTime)) + " fps");
}
frameTime = SystemClock.elapsedRealtime();
img.close();
}
}
};
private void processImage(Image image) {
Mat outputImage = imageToMat(image);
// Convert the planar YUV Mat to RGBA before drawing on it and before matToBitmap
// below (assumes the I420 layout produced by imageToMat).
Imgproc.cvtColor(outputImage, outputImage, Imgproc.COLOR_YUV2RGBA_I420);
Bitmap bmp = Bitmap.createBitmap(outputImage.cols(), outputImage.rows(), Bitmap.Config.ARGB_8888);
Point mid = new Point(0, 0);
Point inEnd = new Point(outputImage.cols(), outputImage.rows());
Imgproc.line(outputImage, mid, inEnd, new Scalar(255, 0, 0), 2, Core.LINE_AA, 0);
Utils.matToBitmap(outputImage, bmp);
Intent broadcast = new Intent();
broadcast.setAction("your_load_photo_action");
broadcast.putExtra("BitmapImage", bmp);
sendBroadcast(broadcast);
}
private Mat imageToMat(Image image) {
ByteBuffer buffer;
int rowStride;
int pixelStride;
int width = image.getWidth();
int height = image.getHeight();
int offset = 0;
Image.Plane[] planes = image.getPlanes();
byte[] data = new byte[image.getWidth() * image.getHeight() * ImageFormat.getBitsPerPixel(ImageFormat.YUV_420_888) / 8];
byte[] rowData = new byte[planes[0].getRowStride()];
for (int i = 0; i < planes.length; i++) {
buffer = planes[i].getBuffer();
rowStride = planes[i].getRowStride();
pixelStride = planes[i].getPixelStride();
int w = (i == 0) ? width : width / 2;
int h = (i == 0) ? height : height / 2;
for (int row = 0; row < h; row++) {
int bytesPerPixel = ImageFormat.getBitsPerPixel(ImageFormat.YUV_420_888) / 8;
if (pixelStride == bytesPerPixel) {
int length = w * bytesPerPixel;
buffer.get(data, offset, length);
// Advance buffer the remainder of the row stride, unless on the last row.
// Otherwise, this will throw an IllegalArgumentException because the buffer
// doesn't include the last padding.
if (h - row != 1) {
buffer.position(buffer.position() + rowStride - length);
}
offset += length;
} else {
// On the last row only read the width of the image minus the pixel stride
// plus one. Otherwise, this will throw a BufferUnderflowException because the
// buffer doesn't include the last padding.
if (h - row == 1) {
buffer.get(rowData, 0, width - pixelStride + 1);
} else {
buffer.get(rowData, 0, rowStride);
}
for (int col = 0; col < w; col++) {
data[offset++] = rowData[col * pixelStride];
}
}
}
}
// Finally, create the Mat.
Mat mat = new Mat(height + height / 2, width, CV_8UC1);
mat.put(0, 0, data);
return mat;
}
private void stopCamera(Intent intent) {
ResultReceiver resultReceiver = intent.getParcelableExtra(RESULT_RECEIVER);
if (!mRunning) {
resultReceiver.send(RESULT_NOT_RUNNING, null);
return;
}
closeCamera();
resultReceiver.send(RESULT_OK, null);
mRunning = false;
Log.d(TAG, "Service is finished.");
}
/**
* Closes the current {@link CameraDevice}.
*/
private void closeCamera() {
try {
mCameraOpenCloseLock.acquire();
if (null != captureSession) {
captureSession.close();
captureSession = null;
}
if (null != cameraDevice) {
cameraDevice.close();
cameraDevice = null;
}
if (null != imageReader) {
imageReader.close();
imageReader = null;
}
} catch (InterruptedException e) {
throw new RuntimeException("Interrupted while trying to lock camera closing.", e);
} finally {
mCameraOpenCloseLock.release();
}
}
}

I bumped into this problem recently when I tried to upgrade my AR app from the camera1 to the camera2 API. I used a mid-range device for testing (Meizu S6), which has an Exynos 7872 CPU and a Mali-G71 GPU, and what I want to achieve is a steady 30 fps AR experience.
Through the migration I found that it's quite tricky to get a decent preview frame rate out of the Camera2 API.
I configured my capture request using TEMPLATE_PREVIEW:
mPreviewBuilder = mCameraDevice.createCaptureRequest(CameraDevice.TEMPLATE_PREVIEW);
Then I put two surfaces: one for the preview, a SurfaceTexture at 1280x720, and another ImageReader at 1280x720 for image processing.
mImageReader = ImageReader.newInstance(
mVideoSize.getWidth(),
mVideoSize.getHeight(),
ImageFormat.YUV_420_888,
2);
List<Surface> surfaces =new ArrayList<>();
Surface previewSurface = new Surface(mSurfaceTexture);
surfaces.add(previewSurface);
mPreviewBuilder.addTarget(previewSurface);
Surface frameCaptureSurface = mImageReader.getSurface();
surfaces.add(frameCaptureSurface);
mPreviewBuilder.addTarget(frameCaptureSurface);
mPreviewBuilder.set(CaptureRequest.CONTROL_AF_MODE,
CameraMetadata.CONTROL_AF_MODE_CONTINUOUS_PICTURE);
mPreviewSession.setRepeatingRequest(mPreviewBuilder.build(), captureCallback, mBackgroundHandler);
Everything works as expected: my TextureView gets updated and the frame callback gets called too. Except... the frame rate is about 10 fps, and I haven't even done any image processing yet.
I have experimented with many Camera2 API settings, including SENSOR_FRAME_DURATION and different ImageFormat and size combinations, but none of them improved the frame rate. Yet if I just remove the ImageReader from the output surfaces, the preview easily reaches 30 fps!
So I guess the problem is that adding an ImageReader as a Camera2 output surface drastically decreases the preview frame rate, at least in my case. So what is the solution?
My solution is glReadPixels.
I know glReadPixels is one of those evil calls, because it copies bytes from the GPU back to main memory and forces OpenGL to flush pending draw commands, so for performance's sake we'd better avoid it. But surprisingly, glReadPixels is actually pretty fast and gives a much better frame rate than ImageReader's YUV_420_888 output.
In addition, to reduce the memory overhead, I make another draw call into a smaller framebuffer, e.g. 360x640 instead of the preview's 720p, dedicated to feature detection; a sketch follows.
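A rough sketch of that readback path (the FBO id, buffer name, and 360x640 size are my own assumptions, not a drop-in implementation):

// Preallocate once; reused every frame.
ByteBuffer pixelBuf = ByteBuffer.allocateDirect(360 * 640 * 4)
        .order(ByteOrder.nativeOrder());

// Render the camera's external-OES texture into a small offscreen FBO first.
GLES20.glBindFramebuffer(GLES20.GL_FRAMEBUFFER, fboId); // hypothetical FBO id
// ... issue the draw call with the usual SurfaceTexture shader here ...
pixelBuf.rewind();
GLES20.glReadPixels(0, 0, 360, 640, GLES20.GL_RGBA, GLES20.GL_UNSIGNED_BYTE, pixelBuf);
// pixelBuf now holds RGBA bytes for CPU-side feature detection.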

Based on the camera2 implementation in the OpenCV library.
I had the same problem; then I noticed this piece of code in OpenCV's JavaCamera2View. You need to change the settings of the CaptureRequest.Builder this way:
CaptureRequest.Builder captureBuilder = mCameraDevice.createCaptureRequest(CameraDevice.TEMPLATE_PREVIEW);
captureBuilder.set(CaptureRequest.CONTROL_AF_MODE, CaptureRequest.CONTROL_AF_MODE_CONTINUOUS_PICTURE);
captureBuilder.set(CaptureRequest.CONTROL_AE_MODE, CaptureRequest.CONTROL_AE_MODE_ON_AUTO_FLASH);
It changed the fps from 10 to around 28-30 for me. It worked with two target surfaces: one for the preview TextureView, the second for the ImageReader:
Surface readerSurface = imageReader.getSurface();
Surface surface = new Surface(surfaceTexture);
captureBuilder.addTarget(surface);
captureBuilder.addTarget(readerSurface);

Cannot post a comment (not enough rep), but I'm running into the same issue on a Redmi 6.
When using a TextureView to preview the camera output I get around 30 fps, but replacing it with an ImageReader drops it to 8-9 fps. All the camera configs are the same in either case.
Interestingly enough, trying out CameraXBasic showed the same issue: the updates from the camera were sluggish. But android-Camera2Basic (using TextureView) ran without any issues.
Update 1:
Tested with the preview size lowered from 1280x720 to 640x480 and, as expected, saw better performance; a size-selection sketch is below.
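For completeness, a sketch of how that smaller size can be picked from the supported YUV_420_888 outputs (assuming characteristics has already been fetched from CameraManager):

StreamConfigurationMap map = characteristics.get(
        CameraCharacteristics.SCALER_STREAM_CONFIGURATION_MAP);
Size preview = new Size(640, 480); // fallback if no exact match is listed
for (Size s : map.getOutputSizes(ImageFormat.YUV_420_888)) {
    if (s.getWidth() == 640 && s.getHeight() == 480) {
        preview = s;
        break;
    }
}
imageReader = ImageReader.newInstance(preview.getWidth(), preview.getHeight(),
        ImageFormat.YUV_420_888, 2);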

This is what I know after tweaking with it a little: the problem lies in ImageReader's maxImages parameter. I changed it from 2 to 3 and up to 56, and it changed the fps quite a lot. My theory is that the surface ImageReader hands to camera2 tends to block the camera from saving a frame to a buffer while the Image from ImageReader.OnImageAvailableListener is being processed or hasn't been released. In other words, the camera wants a buffer but doesn't have a free one, so by increasing ImageReader's maxImages we give camera2 room to keep saving images.
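A sketch of that tweak (maxImages bumped, Image always closed promptly; backgroundHandler is assumed to exist):

imageReader = ImageReader.newInstance(width, height, ImageFormat.YUV_420_888,
        4 /* maxImages: more head-room than the default 2 used above */);
imageReader.setOnImageAvailableListener(new ImageReader.OnImageAvailableListener() {
    @Override
    public void onImageAvailable(ImageReader reader) {
        Image img = reader.acquireLatestImage(); // drops stale frames
        if (img == null) return;
        try {
            // process the frame here
        } finally {
            img.close(); // hands the buffer back to the camera immediately
        }
    }
}, backgroundHandler);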

Related

Opencv4android Mediarecorder.getSurface() no View display

I want to use the OpenCV4Android sample Tutorial1 to record video.
I used this solution, but my smartphone does not display anything on the screen, just a black view. Can anyone help me?
Here is my code.
Tutorial1Activity
public class Tutorial1Activity extends Activity implements CvCameraViewListener2 {
private static final String TAG = "OCVSample::Activity";
private CameraBridgeViewBase mOpenCvCameraView;
private boolean mIsJavaCamera = true;
private MenuItem mItemSwitchCamera = null;
//*****************writetoSD***********************//
public FileWriter fw; // = new FileWriter(folder_path, false);
public BufferedWriter bw;// = new BufferedWriter(fw);
boolean first_in = true;
String showTimefile = null;
String showTime = null;
String folder_path = Environment.getExternalStorageDirectory().getAbsolutePath();
String folder_name = "Face Detection Signal";
String folder_pathforfile = null;
SimpleDateFormat sdf = new SimpleDateFormat("yyyyMMdd HH_mm_ss");
SimpleDateFormat sdf_fileintxt = new SimpleDateFormat("yyyy/MM/dd HH:mm:ss");
//*****************writetoSD***********************//
//---------------MediaRecorder-------------------//
public MediaRecorder mediaRecorder;
Button bt_Record;
boolean isRecord = false;
Handler mThreadHandler;
HandlerThread mHandlerThread;
//---------------MediaRecorder-------------------//
private BaseLoaderCallback mLoaderCallback = new BaseLoaderCallback(this) {
@Override
public void onManagerConnected(int status) {
switch (status) {
case LoaderCallbackInterface.SUCCESS:
{
Log.i(TAG, "OpenCV loaded successfully");
mOpenCvCameraView.enableView();
} break;
default:
{
super.onManagerConnected(status);
} break;
}
}
};
public Tutorial1Activity() {
Log.i(TAG, "Instantiated new " + this.getClass());
}
/** Called when the activity is first created. */
@Override
public void onCreate(Bundle savedInstanceState) {
Log.i(TAG, "called onCreate");
super.onCreate(savedInstanceState);
getWindow().addFlags(WindowManager.LayoutParams.FLAG_KEEP_SCREEN_ON);
setContentView(R.layout.tutorial1_surface_view);
mOpenCvCameraView = (CameraBridgeViewBase) findViewById(R.id.tutorial1_activity_java_surface_view);
mOpenCvCameraView.setVisibility(SurfaceView.VISIBLE);
mOpenCvCameraView.setCvCameraViewListener(this);
//---------------------------------------------------------//
bt_Record = (Button)findViewById(R.id.bt_recorder);
folder_pathforfile = folder_path + File.separator + folder_name
+ File.separator + "opencv" + "_";
CreateSDfolder();
ongetTime();
//---------------------------------------------------------//
}
@Override
public void onPause()
{
super.onPause();
if (mOpenCvCameraView != null)
mOpenCvCameraView.disableView();
}
@Override
public void onResume()
{
super.onResume();
if (!OpenCVLoader.initDebug()) {
Log.d(TAG, "Internal OpenCV library not found. Using OpenCV Manager for initialization");
OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION_3_0_0, this, mLoaderCallback);
} else {
Log.d(TAG, "OpenCV library found inside package. Using it!");
mLoaderCallback.onManagerConnected(LoaderCallbackInterface.SUCCESS);
}
}
public void onDestroy() {
super.onDestroy();
if (mOpenCvCameraView != null)
mOpenCvCameraView.disableView();
}
public void onCameraViewStarted(int width, int height) {
}
public void onCameraViewStopped() {
}
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
return inputFrame.rgba();
}
private void CreateSDfolder() {
String filefolderpath = folder_path + File.separator + folder_name;
File dir = new File(filefolderpath);
if (!dir.exists()){
Log.e("folder", "not exist");
try{
//dir.createNewFile(true);
dir.mkdir();
Log.e("folder", "creat exist");
}catch(Exception e){
Log.e("folder", "creat not exist");
e.printStackTrace();
}
}
else{
Log.e("folder", "exist");
}
}
private void ongetTime() {
Date dt=new Date();
showTime=sdf_fileintxt.format(dt);
showTimefile =sdf.format(dt);
}
private void WritetoSD(String data) {
try {
fw = new FileWriter(folder_pathforfile + showTimefile+".txt", true);
bw = new BufferedWriter(fw);
if (first_in == true) {
first_in = false;
bw.append(showTime);
bw.newLine();
}
bw.append(data);
bw.newLine();
bw.flush();
bw.close();
} catch (IOException e) {
Log.e("WriteToSD", "Write To SD ERROR");
e.printStackTrace();
}
}
public void onRecordSignal (View v){
if(!isRecord){
isRecord = true;
Log.e(TAG, "button click " + isRecord);
bt_Record.setText("Stop");
//new MediaPrepareTask().execute(null, null, null);
if (prepareMediaRecorder()) {
// Camera is available and unlocked, MediaRecorder is prepared,
// now you can start recording
Log.e("debug_mediarecorder", "prepareMediaRecorder in if");
mOpenCvCameraView.setRecorder(mediaRecorder);
mediaRecorder.start();
} else {
// prepare didn't work, release the camera
Log.e("debug_mediarecorder", "prepareMediaRecorder in else");
// mediaRecorder.stop();
releaseMediaRecorder();
}
} else{
isRecord = false;
Log.e(TAG, "button click " + isRecord);
bt_Record.setText("Record");
try {
if(mediaRecorder != null)
mediaRecorder.stop(); // stop the recording
else
Log.e(TAG,"onRecordSignal mediaRecorder is null");
} catch (RuntimeException e) {
// RuntimeException is thrown when stop() is called immediately after start().
// In this case the output file is not properly constructed and should be deleted.
Log.d(TAG, "RuntimeException: stop() is called immediately after start()");
//noinspection ResultOfMethodCallIgnored
}
releaseMediaRecorder(); // release the MediaRecorder object
}
}
public void releaseMediaRecorder() {
Log.e("debug","releaseMediaRecorder");
if (mediaRecorder != null) {
mediaRecorder.reset(); // clear recorder configuration
mediaRecorder.release(); // release the recorder object
mediaRecorder = null;
JavaCameraView.mCamera.lock();
mOpenCvCameraView.releaseRecord();
}
}
private String recordfilepath() {
// TODO Auto-generated method stub
ongetTime();
File sddir = Environment.getExternalStorageDirectory();
File vrdir = new File(sddir, folder_name);
File file = new File(vrdir, showTimefile+"_.mp4");
String filepath = file.getAbsolutePath();
Log.e("debug mediarecorder", filepath);
return filepath;
}
public boolean prepareMediaRecorder() {
// TODO Auto-generated method stub
Log.e("debug mediarecorder", "in prepareMediaRecorder");
mediaRecorder = new MediaRecorder();
try {
JavaCameraView.mCamera.lock();
JavaCameraView.mCamera.unlock();
}catch (RuntimeException e){
Log.e("debug mediarecorder","JavaCameraView.mCamera.unlock() fail");
}
/*mediaRecorder.reset();
mediaRecorder.setAudioSource(MediaRecorder.AudioSource.MIC);
mediaRecorder.setVideoSource(MediaRecorder.VideoSource.SURFACE);
mediaRecorder.setProfile(CamcorderProfile.get(CamcorderProfile.QUALITY_HIGH));
//mediaRecorder.setPreviewDisplay(CameraBridgeViewBase.mSurfaceHolder.getSurface());
mediaRecorder.setOutputFile(recordfilepath());
//mediaRecorder.setOnInfoListener((MediaRecorder.OnInfoListener) this);
//mediaRecorder.setOnErrorListener((MediaRecorder.OnErrorListener) this);*/
mediaRecorder.reset();
mediaRecorder.setVideoSource(MediaRecorder.VideoSource.SURFACE);
mediaRecorder.setAudioSource(MediaRecorder.AudioSource.MIC);
CamcorderProfile cpHigh = CamcorderProfile.get(CamcorderProfile.QUALITY_HIGH);
mediaRecorder.setProfile(cpHigh);
//mediaRecorder.setOutputFile("out.mp4");
mediaRecorder.setOutputFile(recordfilepath());
mediaRecorder.setVideoSize(mOpenCvCameraView.mFrameWidth, mOpenCvCameraView.mFrameHeight);
//mediaRecorder.setOnInfoListener(this);
//mediaRecorder.setOnErrorListener(this);
try {
mediaRecorder.prepare();
} catch (IllegalStateException e) {
Log.e("debug mediarecorder", "not prepare");
releaseMediaRecorder();
return false;
} catch (IOException e) {
Log.e("debug mediarecorder", "not prepare IOException");
//releaseMediaRecorder();
}
return true;
}
}
CameraBridgeViewBase
public abstract class CameraBridgeViewBase extends SurfaceView implements SurfaceHolder.Callback {
private static final String TAG = "CameraBridge";
private static final int MAX_UNSPECIFIED = -1;
private static final int STOPPED = 0;
private static final int STARTED = 1;
private int mState = STOPPED;
private Bitmap mCacheBitmap;
private CvCameraViewListener2 mListener;
private boolean mSurfaceExist;
private Object mSyncObject = new Object();
public int mFrameWidth;
public int mFrameHeight;
protected int mMaxHeight;
protected int mMaxWidth;
protected float mScale = 0;
protected int mPreviewFormat = RGBA;
protected int mCameraIndex = CAMERA_ID_ANY;
protected boolean mEnabled;
protected FpsMeter mFpsMeter = null;
public static final int CAMERA_ID_ANY = -1;
public static final int CAMERA_ID_BACK = 99;
public static final int CAMERA_ID_FRONT = 98;
public static final int RGBA = 1;
public static final int GRAY = 2;
//-------------------------------------//
protected MediaRecorder mRecorder;
protected Surface mSurface = null;
public void setRecorder(MediaRecorder rec) {
mRecorder = rec;
//Log.e(TAG,mRecorder.toString());
if (mRecorder != null) {
mSurface = mRecorder.getSurface();
Log.e(TAG,"mRecorder is not null");
Log.e(TAG,"mSurface = "+mSurface.toString());
}
else{
Log.e(TAG,"mRecorder is null");
}
}
public void releaseRecord(){
mSurface.release();
}
//-------------------------------------//
public CameraBridgeViewBase(Context context, int cameraId) {
super(context);
mCameraIndex = cameraId;
getHolder().addCallback(this);
mMaxWidth = MAX_UNSPECIFIED;
mMaxHeight = MAX_UNSPECIFIED;
}
public CameraBridgeViewBase(Context context, AttributeSet attrs) {
super(context, attrs);
int count = attrs.getAttributeCount();
Log.d(TAG, "Attr count: " + Integer.valueOf(count));
TypedArray styledAttrs = getContext().obtainStyledAttributes(attrs, R.styleable.CameraBridgeViewBase);
if (styledAttrs.getBoolean(R.styleable.CameraBridgeViewBase_show_fps, false))
enableFpsMeter();
mCameraIndex = styledAttrs.getInt(R.styleable.CameraBridgeViewBase_camera_id, -1);
getHolder().addCallback(this);
mMaxWidth = MAX_UNSPECIFIED;
mMaxHeight = MAX_UNSPECIFIED;
styledAttrs.recycle();
}
/**
* Sets the camera index
* @param cameraIndex new camera index
*/
public void setCameraIndex(int cameraIndex) {
this.mCameraIndex = cameraIndex;
}
public interface CvCameraViewListener {
/**
* This method is invoked when camera preview has started. After this method is invoked
* the frames will start to be delivered to client via the onCameraFrame() callback.
* @param width - the width of the frames that will be delivered
* @param height - the height of the frames that will be delivered
*/
public void onCameraViewStarted(int width, int height);
/**
* This method is invoked when camera preview has been stopped for some reason.
* No frames will be delivered via onCameraFrame() callback after this method is called.
*/
public void onCameraViewStopped();
/**
* This method is invoked when delivery of the frame needs to be done.
* The returned values - is a modified frame which needs to be displayed on the screen.
* TODO: pass the parameters specifying the format of the frame (BPP, YUV or RGB and etc)
*/
public Mat onCameraFrame(Mat inputFrame);
}
public interface CvCameraViewListener2 {
/**
* This method is invoked when camera preview has started. After this method is invoked
* the frames will start to be delivered to client via the onCameraFrame() callback.
* @param width - the width of the frames that will be delivered
* @param height - the height of the frames that will be delivered
*/
public void onCameraViewStarted(int width, int height);
/**
* This method is invoked when camera preview has been stopped for some reason.
* No frames will be delivered via onCameraFrame() callback after this method is called.
*/
public void onCameraViewStopped();
/**
* This method is invoked when delivery of the frame needs to be done.
* The returned values - is a modified frame which needs to be displayed on the screen.
* TODO: pass the parameters specifying the format of the frame (BPP, YUV or RGB and etc)
*/
public Mat onCameraFrame(CvCameraViewFrame inputFrame);
};
protected class CvCameraViewListenerAdapter implements CvCameraViewListener2 {
public CvCameraViewListenerAdapter(CvCameraViewListener oldStypeListener) {
mOldStyleListener = oldStypeListener;
}
public void onCameraViewStarted(int width, int height) {
mOldStyleListener.onCameraViewStarted(width, height);
}
public void onCameraViewStopped() {
mOldStyleListener.onCameraViewStopped();
}
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
Mat result = null;
switch (mPreviewFormat) {
case RGBA:
result = mOldStyleListener.onCameraFrame(inputFrame.rgba());
break;
case GRAY:
result = mOldStyleListener.onCameraFrame(inputFrame.gray());
break;
default:
Log.e(TAG, "Invalid frame format! Only RGBA and Gray Scale are supported!");
};
return result;
}
public void setFrameFormat(int format) {
mPreviewFormat = format;
}
private int mPreviewFormat = RGBA;
private CvCameraViewListener mOldStyleListener;
};
/**
* This class interface is abstract representation of single frame from camera for onCameraFrame callback
* Attention: Do not use objects, that represents this interface out of onCameraFrame callback!
*/
public interface CvCameraViewFrame {
/**
* This method returns RGBA Mat with frame
*/
public Mat rgba();
/**
* This method returns single channel gray scale Mat with frame
*/
public Mat gray();
};
public void surfaceChanged(SurfaceHolder arg0, int arg1, int arg2, int arg3) {
Log.d(TAG, "call surfaceChanged event");
synchronized(mSyncObject) {
if (!mSurfaceExist) {
mSurfaceExist = true;
checkCurrentState();
} else {
/** Surface changed. We need to stop camera and restart with new parameters */
/* Pretend that old surface has been destroyed */
mSurfaceExist = false;
checkCurrentState();
/* Now use new surface. Say we have it now */
mSurfaceExist = true;
checkCurrentState();
}
}
}
public void surfaceCreated(SurfaceHolder holder) {
/* Do nothing. Wait until surfaceChanged delivered */
}
public void surfaceDestroyed(SurfaceHolder holder) {
synchronized(mSyncObject) {
mSurfaceExist = false;
checkCurrentState();
}
}
/**
* This method is provided for clients, so they can enable the camera connection.
* The actual onCameraViewStarted callback will be delivered only after both this method is called and surface is available
*/
public void enableView() {
synchronized(mSyncObject) {
mEnabled = true;
checkCurrentState();
}
}
/**
* This method is provided for clients, so they can disable camera connection and stop
* the delivery of frames even though the surface view itself is not destroyed and still stays on the screen
*/
public void disableView() {
synchronized(mSyncObject) {
mEnabled = false;
checkCurrentState();
}
}
/**
* This method enables label with fps value on the screen
*/
public void enableFpsMeter() {
if (mFpsMeter == null) {
mFpsMeter = new FpsMeter();
mFpsMeter.setResolution(mFrameWidth, mFrameHeight);
}
}
public void disableFpsMeter() {
mFpsMeter = null;
}
/**
*
* @param listener
*/
public void setCvCameraViewListener(CvCameraViewListener2 listener) {
mListener = listener;
}
public void setCvCameraViewListener(CvCameraViewListener listener) {
CvCameraViewListenerAdapter adapter = new CvCameraViewListenerAdapter(listener);
adapter.setFrameFormat(mPreviewFormat);
mListener = adapter;
}
/**
* This method sets the maximum size that the camera frame is allowed to be. When selecting
* a size, the biggest size which is less than or equal to the set maximum will be selected.
* As an example: if we call setMaxFrameSize(200,200) and the sizes 176x152 and 320x240 are available,
* the preview frame will be selected with the 176x152 size.
* This method is useful when you need to restrict the size of the preview frame for some reason (for example for video recording).
* @param maxWidth - the maximum width allowed for camera frame.
* @param maxHeight - the maximum height allowed for camera frame
*/
public void setMaxFrameSize(int maxWidth, int maxHeight) {
mMaxWidth = maxWidth;
mMaxHeight = maxHeight;
}
public void SetCaptureFormat(int format)
{
mPreviewFormat = format;
if (mListener instanceof CvCameraViewListenerAdapter) {
CvCameraViewListenerAdapter adapter = (CvCameraViewListenerAdapter) mListener;
adapter.setFrameFormat(mPreviewFormat);
}
}
/**
* Called when mSyncObject lock is held
*/
private void checkCurrentState() {
Log.d(TAG, "call checkCurrentState");
int targetState;
if (mEnabled && mSurfaceExist && getVisibility() == VISIBLE) {
targetState = STARTED;
} else {
targetState = STOPPED;
}
if (targetState != mState) {
/* The state change detected. Need to exit the current state and enter target state */
processExitState(mState);
mState = targetState;
processEnterState(mState);
}
}
private void processEnterState(int state) {
Log.d(TAG, "call processEnterState: " + state);
switch(state) {
case STARTED:
onEnterStartedState();
if (mListener != null) {
mListener.onCameraViewStarted(mFrameWidth, mFrameHeight);
}
break;
case STOPPED:
onEnterStoppedState();
if (mListener != null) {
mListener.onCameraViewStopped();
}
break;
};
}
private void processExitState(int state) {
Log.d(TAG, "call processExitState: " + state);
switch(state) {
case STARTED:
onExitStartedState();
break;
case STOPPED:
onExitStoppedState();
break;
};
}
private void onEnterStoppedState() {
/* nothing to do */
}
private void onExitStoppedState() {
/* nothing to do */
}
// NOTE: The order of bitmap constructor and camera connection is important for android 4.1.x
// Bitmap must be constructed before surface
private void onEnterStartedState() {
Log.d(TAG, "call onEnterStartedState");
/* Connect camera */
if (!connectCamera(getWidth(), getHeight())) {
AlertDialog ad = new AlertDialog.Builder(getContext()).create();
ad.setCancelable(false); // This blocks the 'BACK' button
ad.setMessage("It seems that you device does not support camera (or it is locked). Application will be closed.");
ad.setButton(DialogInterface.BUTTON_NEUTRAL, "OK", new DialogInterface.OnClickListener() {
public void onClick(DialogInterface dialog, int which) {
dialog.dismiss();
((Activity) getContext()).finish();
}
});
ad.show();
}
}
private void onExitStartedState() {
disconnectCamera();
if (mCacheBitmap != null) {
mCacheBitmap.recycle();
}
}
/**
* This method shall be called by the subclasses when they have valid
* object and want it to be delivered to external client (via callback) and
* then displayed on the screen.
* @param frame - the current frame to be delivered
*/
protected void deliverAndDrawFrame(CvCameraViewFrame frame) {
Mat modified;
if (mListener != null) {
modified = mListener.onCameraFrame(frame);
} else {
modified = frame.rgba();
}
boolean bmpValid = true;
if (modified != null) {
try {
Utils.matToBitmap(modified, mCacheBitmap);
} catch(Exception e) {
Log.e(TAG, "Mat type: " + modified);
Log.e(TAG, "Bitmap type: " + mCacheBitmap.getWidth() + "*" + mCacheBitmap.getHeight());
Log.e(TAG, "Utils.matToBitmap() throws an exception: " + e.getMessage());
bmpValid = false;
}
}
if (bmpValid && mCacheBitmap != null) {
Canvas canvas;
if (mRecorder != null) {
canvas = mSurface.lockCanvas(null);
canvas.drawColor(0, android.graphics.PorterDuff.Mode.CLEAR);
Log.d(TAG, "mStretch value: " + mScale);
if (mScale != 0) {
canvas.drawBitmap(mCacheBitmap, new Rect(0,0,mCacheBitmap.getWidth(), mCacheBitmap.getHeight()),
new Rect((int)((canvas.getWidth() - mScale*mCacheBitmap.getWidth()) / 2),
(int)((canvas.getHeight() - mScale*mCacheBitmap.getHeight()) / 2),
(int)((canvas.getWidth() - mScale*mCacheBitmap.getWidth()) / 2 + mScale*mCacheBitmap.getWidth()),
(int)((canvas.getHeight() - mScale*mCacheBitmap.getHeight()) / 2 + mScale*mCacheBitmap.getHeight())), null);
} else {
canvas.drawBitmap(mCacheBitmap, new Rect(0,0,mCacheBitmap.getWidth(), mCacheBitmap.getHeight()),
new Rect((canvas.getWidth() - mCacheBitmap.getWidth()) / 2,
(canvas.getHeight() - mCacheBitmap.getHeight()) / 2,
(canvas.getWidth() - mCacheBitmap.getWidth()) / 2 + mCacheBitmap.getWidth(),
(canvas.getHeight() - mCacheBitmap.getHeight()) / 2 + mCacheBitmap.getHeight()), null);
}
if (mFpsMeter != null) {
mFpsMeter.measure();
mFpsMeter.draw(canvas, 20, 30);
}
mSurface.unlockCanvasAndPost(canvas);
}
}
}
/**
* This method, when invoked, shall perform the concrete operations needed to initialize the camera.
* CONTRACT: as a result of this method the variables mFrameWidth and mFrameHeight MUST be
* initialized with the size of the Camera frames that will be delivered to the external processor.
* @param width - the width of this SurfaceView
* @param height - the height of this SurfaceView
*/
protected abstract boolean connectCamera(int width, int height);
/**
* Disconnects and releases the particular camera object connected to this surface view.
* Called when syncObject lock is held
*/
protected abstract void disconnectCamera();
// NOTE: On Android 4.1.x the function must be called before the SurfaceTexture constructor!
protected void AllocateCache()
{
mCacheBitmap = Bitmap.createBitmap(mFrameWidth, mFrameHeight, Bitmap.Config.ARGB_8888);
}
public interface ListItemAccessor {
public int getWidth(Object obj);
public int getHeight(Object obj);
};
/**
* This helper method can be called by subclasses to select camera preview size.
* It goes over the list of the supported preview sizes and selects the maximum one which
* fits both values set via setMaxFrameSize() and surface frame allocated for this view
* @param supportedSizes
* @param surfaceWidth
* @param surfaceHeight
* @return optimal frame size
*/
protected Size calculateCameraFrameSize(List<?> supportedSizes, ListItemAccessor accessor, int surfaceWidth, int surfaceHeight) {
int calcWidth = 0;
int calcHeight = 0;
int maxAllowedWidth = (mMaxWidth != MAX_UNSPECIFIED && mMaxWidth < surfaceWidth)? mMaxWidth : surfaceWidth;
int maxAllowedHeight = (mMaxHeight != MAX_UNSPECIFIED && mMaxHeight < surfaceHeight)? mMaxHeight : surfaceHeight;
for (Object size : supportedSizes) {
int width = accessor.getWidth(size);
int height = accessor.getHeight(size);
if (width <= maxAllowedWidth && height <= maxAllowedHeight) {
if (width >= calcWidth && height >= calcHeight) {
calcWidth = (int) width;
calcHeight = (int) height;
}
}
}
return new Size(calcWidth, calcHeight);
}
}
And I added these permissions to the manifest:
<uses-feature android:name="android.hardware.camera" android:required="false"/>
<uses-feature android:name="android.hardware.camera.autofocus" android:required="false"/>
<uses-feature android:name="android.hardware.camera.front" android:required="false"/>
<uses-feature android:name="android.hardware.camera.front.autofocus" android:required="false"/>
<uses-permission android:name="android.permission.WRITE_EXTERNAL_STORAGE" />
<uses-permission android:name="android.permission.WAKE_LOCK" />
<uses-permission android:name="android.permission.RECORD_VIDEO" />
<uses-permission android:name="android.permission.RECORD_AUDIO"/>

Camera2 callback equivalent

I have an AR application using the Camera API and I want to migrate it to Camera2.
Does an equivalent exist for getting each preview frame, in order to send it to jMonkeyEngine?
Here is my code using the Camera API:
public void onResume() {
mStopPreview = false;
try {
mCamera = CameraOldManager.getCamera();
if (mCamera != null) {
// initialize camera parameters
CameraOldManager.initializeCameraParameters(mCameraWidth, mCameraHeight);
CameraOldManager.setCameraDisplayOrientation(mActivity, 0, mCamera);
preparePreviewCallbackBuffer();
mJmeApp.setCameraVideoResolution(mCameraWidth, mCameraHeight);
mPreview = new CameraPreview(mActivity, mCamera, mCameraCallback);
ViewGroup.LayoutParams lp = new ViewGroup.LayoutParams(1, 1);
mActivity.addContentView(mPreview, lp);
}
} catch (Exception e) {
Log.e(TAG, "Exception init camera: " + e.getMessage());
}
}
// prepares the Camera preview callback buffers.
public void preparePreviewCallbackBuffer() {
int pformat;
pformat = mCamera.getParameters().getPreviewFormat();
Log.e(TAG, "PREVIEW format: " + pformat);
// Get pixel format information to compute buffer size.
PixelFormat info = new PixelFormat();
PixelFormat.getPixelFormatInfo(pformat, info);
// The actual preview width and height.
// They can differ from the requested width mDesiredmCameraWidth
//mPreviewWidth = mCamera.getParameters().getPreviewSize().width;
//mPreviewHeight = mCamera.getParameters().getPreviewSize().height;
int bufferSizeRGB565 = mCameraWidth * mCameraHeight * 2 + 4096;
//Delete buffer before creating a new one.
mPreviewBufferRGB565 = null;
mPreviewBufferRGB565 = new byte[bufferSizeRGB565];
mPreviewByteBufferRGB565 = ByteBuffer.allocateDirect(mPreviewBufferRGB565.length);
mCameraJMEImageRGB565 = new Image(Image.Format.RGB565, mCameraWidth,
mCameraHeight, mPreviewByteBufferRGB565);
}
// Implement the interface for getting copies of preview frames
private final Camera.PreviewCallback mCameraCallback = new Camera.PreviewCallback() {
public void onPreviewFrame(byte[] data, Camera c) {
if (c != null && !mStopPreview && mJmeApp != null && mJmeApp.isFirstModelLoaded()) {
mPreviewByteBufferRGB565.clear();
// Perform processing on the camera preview data.
if (mPixelFormatConversionNeeded) {
RAUtils.yCbCrToRGB565(data, mCameraWidth, mCameraHeight,
mPreviewBufferRGB565);
mPreviewByteBufferRGB565.put(mPreviewBufferRGB565);
} else {
mPreviewByteBufferRGB565.put(data);
}
mCameraJMEImageRGB565.setData(mPreviewByteBufferRGB565);
mJmeApp.setVideoBGTexture(mCameraJMEImageRGB565);
}
}
};
What I need is to get the frames so I can hand them to JME.
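The closest Camera2 analogue to Camera.PreviewCallback that I know of is an ImageReader surface added to the repeating request (a sketch; the YUV-to-RGB565 conversion and JME hand-off go where onPreviewFrame's body used to be, and backgroundHandler is an assumed HandlerThread handler):

ImageReader reader = ImageReader.newInstance(mCameraWidth, mCameraHeight,
        ImageFormat.YUV_420_888, 2);
reader.setOnImageAvailableListener(new ImageReader.OnImageAvailableListener() {
    @Override
    public void onImageAvailable(ImageReader r) {
        Image image = r.acquireLatestImage();
        if (image == null) return;
        // image.getPlanes() replaces the byte[] from onPreviewFrame: convert
        // YUV_420_888 to RGB565 here and feed mJmeApp as before.
        image.close();
    }
}, backgroundHandler);
// Finally, add reader.getSurface() as a target of the repeating CaptureRequest.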

Recreate muxer for new video file while encoder works

I use CameraToMpegTest from Bigflake, but I modified it to allow starting and stopping the muxer many times while the encoder keeps working. When no video is being recorded, I still use the encoder to get frames from the texture for other processing.
The first start/stop works fine, but there is a problem with the second one. I get an exception when I close the app: "stop() is called in invalid state 3", "...Caused by: java.lang.IllegalStateException: Failed to stop the muxer...". The second video file also doesn't open, although it's not empty.
Is there a way to restart the muxer while the encoder runs continuously? What am I doing wrong?
UPDATE: I found out that the problem is in the keyframes. When I set format.setInteger(MediaFormat.KEY_I_FRAME_INTERVAL, 0), everything is fine. So, how do I request a keyframe when the muxer starts recording again?
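One way to do that on API 19+ is MediaCodec.setParameters with the documented sync-frame request key (this is my addition, not something from CameraToMpegTest itself), called whenever the muxer restarts:

// Ask the running encoder to emit a keyframe as soon as possible (API 19+).
Bundle params = new Bundle();
params.putInt(MediaCodec.PARAMETER_KEY_REQUEST_SYNC_FRAME, 0);
mEncoder.setParameters(params);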
public class MainFrameProcessor {
public static final String TAG = "MainFrameProcessor";
public static final boolean VERBOSE = false; // lots of logging
// parameters for the encoder
private static final String MIME_TYPE = "video/avc"; // H.264 Advanced Video Coding
private static final int FRAME_RATE = 30; // 30fps
private static final int IFRAME_INTERVAL = 5; // 5 seconds between I-frames
public static final int encWidth = 1920;
public static final int encHeight = 1080;
private static final int encBitRate = 10000000;//6000000; // Mbps //http://static.googleusercontent.com/media/source.android.com/en//compatibility/android-cdd.pdf
public ByteBuffer mPixelBuf; // used by saveFrame()
private boolean isWrite=false;
private boolean isWork=true;
// encoder / muxer state
private MediaCodec mEncoder;
private CodecInputSurface mInputSurface;
private Writer writer;
private String path;
// camera state
private Camera mCamera;
private SurfaceTextureManager mStManager;
private Handler messageHandler;
public MainFrameProcessor(String path, Handler messageHandler){
mPixelBuf = ByteBuffer.allocateDirect(encHeight * encWidth * 4);
mPixelBuf.order(ByteOrder.LITTLE_ENDIAN);
this.path=path;
this.messageHandler=messageHandler;
}
/** test entry point */
public void startProcessor() throws Throwable {
CameraToMpegWrapper.runTest(this);
}
public void stopProcessor() {
isWork=false;
}
synchronized public void release(){
stopProcessor();
releaseCamera();
releaseEncoderAndWriter();
releaseSurfaceTexture();
}
/**
* Wraps processCameraFrames(). This is necessary because SurfaceTexture will try to use
* the looper in the current thread if one exists, and the CTS tests create one on the
* test thread.
*
* The wrapper propagates exceptions thrown by the worker thread back to the caller.
*/
private static class CameraToMpegWrapper implements Runnable {
private Throwable mThrowable;
private MainFrameProcessor mTest;
private CameraToMpegWrapper(MainFrameProcessor test) {
mTest = test;
}
@Override
public void run() {
try {
mTest.processCameraFrames();
} catch (Throwable th) {
mThrowable = th;
}
}
/** Entry point. */
public static void runTest(MainFrameProcessor obj) throws Throwable {
CameraToMpegWrapper wrapper = new CameraToMpegWrapper(obj);
Thread th = new Thread(wrapper, "codec test");
th.start();
//th.join(); //http://stackoverflow.com/questions/22457623/surfacetextures-onframeavailable-method-always-called-too-late
if (wrapper.mThrowable != null) {
throw wrapper.mThrowable;
}
}
}
boolean to_start=false;
public void startRecord(){
to_start=true;
}
boolean to_stop=false;
public void stopRecord(){
to_stop=true;
}
/**
* Tests encoding of AVC video from Camera input. The output is saved as an MP4 file.
*/
private void processCameraFrames() {
// arbitrary but popular values
Log.d(TAG, MIME_TYPE + " output " + encWidth + "x" + encHeight + " #" + encBitRate);
try {
prepareCamera(encWidth, encHeight);
prepareEncoderAndWriter(encWidth, encHeight, encBitRate);
mInputSurface.makeCurrent();
prepareSurfaceTexture();
mCamera.startPreview();
SurfaceTexture st = mStManager.getSurfaceTexture();
while (isWork) {
if (to_start){
writer.startWriter();
isWrite=true;
to_start=false;
}
if (to_stop){
isWrite=false;
writer.stopWriter();
to_stop=false;
}
// Acquire a new frame of input, and render it to the Surface. If we had a
// GLSurfaceView we could switch EGL contexts and call drawImage() a second
// time to render it on screen. The texture can be shared between contexts by
// passing the GLSurfaceView's EGLContext as eglCreateContext()'s share_context
// argument.
mStManager.awaitNewImage();
mStManager.drawImage();
synchronized (mPixelBuf) {
mPixelBuf.rewind();
GLES20.glReadPixels(0, 0, encWidth, encHeight, GLES20.GL_RGBA, GLES20.GL_UNSIGNED_BYTE,
mPixelBuf);
}
if (isWrite) {
if (writer.checkDurationEnd()) {
stopRecord();
Message msg = messageHandler.obtainMessage(MainActivity.MESSAGE_STOP_REC);
messageHandler.sendMessage(msg);
} else
writer.write(st, mInputSurface);
}
}
} finally {
// release everything we grabbed
release();
}
}
/**
* Configures Camera for video capture. Sets mCamera.
* <p>
* Opens a Camera and sets parameters. Does not start preview.
*/
private void prepareCamera(int encWidth, int encHeight) {
if (mCamera != null) {
throw new RuntimeException("camera already initialized");
}
Camera.CameraInfo info = new Camera.CameraInfo();
mCamera = Camera.open(); // opens first back-facing camera
if (mCamera == null) {
throw new RuntimeException("Unable to open camera");
}
Camera.Parameters parms = mCamera.getParameters();
choosePreviewSize(parms, encWidth, encHeight);
// leave the frame rate set to default
mCamera.setParameters(parms);
Camera.Size size = parms.getPreviewSize();
Log.d(TAG, "Camera preview size is " + size.width + "x" + size.height);
}
/**
* Attempts to find a preview size that matches the provided width and height (which
* specify the dimensions of the encoded video). If it fails to find a match it just
* uses the default preview size.
* <p>
* TODO: should do a best-fit match.
*/
private static void choosePreviewSize(Camera.Parameters parms, int width, int height) {
// We should make sure that the requested MPEG size is less than the preferred
// size, and has the same aspect ratio.
Camera.Size ppsfv = parms.getPreferredPreviewSizeForVideo();
if (VERBOSE && ppsfv != null) {
Log.d(TAG, "Camera preferred preview size for video is " +
ppsfv.width + "x" + ppsfv.height);
}
for (Camera.Size size : parms.getSupportedPreviewSizes()) {
if (size.width == width && size.height == height) {
parms.setPreviewSize(width, height);
return;
}
}
Log.w(TAG, "Unable to set preview size to " + width + "x" + height);
if (ppsfv != null) {
parms.setPreviewSize(ppsfv.width, ppsfv.height);
}
}
/**
* Stops camera preview, and releases the camera to the system.
*/
private void releaseCamera() {
if (VERBOSE) Log.d(TAG, "releasing camera");
if (mCamera != null) {
mCamera.stopPreview();
mCamera.release();
mCamera = null;
}
}
/**
* Configures SurfaceTexture for camera preview. Initializes mStManager, and sets the
* associated SurfaceTexture as the Camera's "preview texture".
* <p>
* Configure the EGL surface that will be used for output before calling here.
*/
private void prepareSurfaceTexture() {
mStManager = new SurfaceTextureManager(mPixelBuf);
SurfaceTexture st = mStManager.getSurfaceTexture();
try {
mCamera.setPreviewTexture(st);
} catch (IOException ioe) {
throw new RuntimeException("setPreviewTexture failed", ioe);
}
}
/**
* Releases the SurfaceTexture.
*/
private void releaseSurfaceTexture() {
if (mStManager != null) {
mStManager.release();
mStManager = null;
}
}
/**
* Configures encoder and muxer state, and prepares the input Surface. Initializes
* mEncoder, mMuxer, mInputSurface, mBufferInfo, mTrackIndex, and mMuxerStarted.
*/
private void prepareEncoderAndWriter(int width, int height, int bitRate) {
MediaFormat format = MediaFormat.createVideoFormat(MIME_TYPE, width, height);
// Set some properties. Failing to specify some of these can cause the MediaCodec
// configure() call to throw an unhelpful exception.
format.setInteger(MediaFormat.KEY_COLOR_FORMAT,
MediaCodecInfo.CodecCapabilities.COLOR_FormatSurface);
format.setInteger(MediaFormat.KEY_BIT_RATE, bitRate);
format.setInteger(MediaFormat.KEY_FRAME_RATE, FRAME_RATE);
format.setInteger(MediaFormat.KEY_I_FRAME_INTERVAL, IFRAME_INTERVAL);
if (VERBOSE) Log.d(TAG, "format: " + format);
// Create a MediaCodec encoder, and configure it with our format. Get a Surface
// we can use for input and wrap it with a class that handles the EGL work.
//
// If you want to have two EGL contexts -- one for display, one for recording --
// you will likely want to defer instantiation of CodecInputSurface until after the
// "display" EGL context is created, then modify the eglCreateContext call to
// take eglGetCurrentContext() as the share_context argument.
try {
mEncoder = MediaCodec.createEncoderByType(MIME_TYPE);
} catch (IOException e) {
e.printStackTrace();
}
mEncoder.configure(format, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE);
mInputSurface = new CodecInputSurface(mEncoder.createInputSurface());
mEncoder.start();
writer=new Writer(path, mEncoder);
}
/**
* Releases encoder resources.
*/
private void releaseEncoderAndWriter() {
if (VERBOSE) Log.d(TAG, "releasing encoder objects");
if (writer!=null)
writer.releaseWriter();
if (mEncoder != null) {
mEncoder.stop();
mEncoder.release();
mEncoder = null;
}
if (mInputSurface != null) {
mInputSurface.release();
mInputSurface = null;
}
}
}
Class for Muxer:
public class Writer {
private static final long DURATION_SEC = 1000; // seconds of video
private MediaCodec mEncoder;
private MediaMuxer mMuxer;
private String path;
private int mTrackIndex;
private long startWhen;
private long desiredEnd;
private boolean mMuxerStarted;
// allocate one of these up front so we don't need to do it every time
private MediaCodec.BufferInfo mBufferInfo;
boolean new_track=false;
public Writer(String path, MediaCodec mEncoder){
this.path=path;
this.mEncoder=mEncoder;
}
public void startWriter(){
// Output filename. Ideally this would use Context.getFilesDir() rather than a
// hard-coded output directory.
String outputPath= CameraUtils.createPathFile(path, "mp4");
Log.i(MainFrameProcessor.TAG, "Output file is " + outputPath);
// Create a MediaMuxer. We can't add the video track and start() the muxer here,
// because our MediaFormat doesn't have the Magic Goodies. These can only be
// obtained from the encoder after it has started processing data.
//
// We're not actually interested in multiplexing audio. We just want to convert
// the raw H.264 elementary stream we get from MediaCodec into a .mp4 file.
try {
mMuxer = new MediaMuxer(outputPath, MediaMuxer.OutputFormat.MUXER_OUTPUT_MPEG_4);
} catch (IOException ioe) {
throw new RuntimeException("MediaMuxer creation failed", ioe);
}
mBufferInfo = new MediaCodec.BufferInfo();
mTrackIndex = -1;
mMuxerStarted = false;
new_track=true;
startWhen = System.nanoTime();
desiredEnd = startWhen + DURATION_SEC * 1000000000L;
}
synchronized public void stopWriter(){
// send end-of-stream to encoder, and drain remaining output
if (mMuxer != null) {
Log.w(MainFrameProcessor.TAG,"stop");
//if (mEncoder!=null) drainEncoder(true);
mMuxer.stop();
mMuxer.release();
mMuxer = null;
}
}
public void releaseWriter() {
stopWriter();
}
public void write(SurfaceTexture st, CodecInputSurface mInputSurface) {
// Set the presentation time stamp from the SurfaceTexture's time stamp. This
// will be used by MediaMuxer to set the PTS in the video.
if (MainFrameProcessor.VERBOSE) {
Log.d(MainFrameProcessor.TAG, "present: " +
((st.getTimestamp() - startWhen) / 1000000.0) + "ms");
}
mInputSurface.setPresentationTime(st.getTimestamp());
// Submit it to the encoder. The eglSwapBuffers call will block if the input
// is full, which would be bad if it stayed full until we dequeued an output
// buffer (which we can't do, since we're stuck here). So long as we fully drain
// the encoder before supplying additional input, the system guarantees that we
// can supply another frame without blocking.
if (MainFrameProcessor.VERBOSE) Log.d(MainFrameProcessor.TAG, "sending frame to encoder");
mInputSurface.swapBuffers();
drainEncoder(false);
}
/**
* Extracts all pending data from the encoder and forwards it to the muxer.
* <p>
* If endOfStream is not set, this returns when there is no more data to drain. If it
* is set, we send EOS to the encoder, and then iterate until we see EOS on the output.
* Calling this with endOfStream set should be done once, right before stopping the muxer.
* <p>
* We're just using the muxer to get a .mp4 file (instead of a raw H.264 stream). We're
* not recording audio.
*/
private void drainEncoder(boolean endOfStream) {
final int TIMEOUT_USEC = 10000;
if (MainFrameProcessor.VERBOSE) Log.d(MainFrameProcessor.TAG, "drainEncoder(" + endOfStream + ")");
if (endOfStream) {
if (MainFrameProcessor.VERBOSE) Log.d(MainFrameProcessor.TAG, "sending EOS to encoder");
mEncoder.signalEndOfInputStream();
}
ByteBuffer[] encoderOutputBuffers = mEncoder.getOutputBuffers();
while (true) {
int encoderStatus = mEncoder.dequeueOutputBuffer(mBufferInfo, TIMEOUT_USEC);
Log.w("ddd", ""+encoderStatus+"; "+new_track);
if (encoderStatus == MediaCodec.INFO_TRY_AGAIN_LATER) {
// no output available yet
if (!endOfStream) {
break; // out of while
} else {
if (MainFrameProcessor.VERBOSE) Log.d(MainFrameProcessor.TAG, "no output available, spinning to await EOS");
}
} else if (encoderStatus == MediaCodec.INFO_OUTPUT_BUFFERS_CHANGED) {
// not expected for an encoder
encoderOutputBuffers = mEncoder.getOutputBuffers();
} else if (encoderStatus == MediaCodec.INFO_OUTPUT_FORMAT_CHANGED) {
// should happen before receiving buffers, and should only happen once
/*if (mMuxerStarted) {
throw new RuntimeException("format changed twice");
}
MediaFormat newFormat = mEncoder.getOutputFormat();
Log.d(MainFrameProcessor.TAG, "encoder output format changed: " + newFormat);
// now that we have the Magic Goodies, start the muxer
mTrackIndex = mMuxer.addTrack(newFormat);
mMuxer.start();
mMuxerStarted = true;*/
} else if (encoderStatus < 0) {
Log.w(MainFrameProcessor.TAG, "unexpected result from encoder.dequeueOutputBuffer: " +
encoderStatus);
// let's ignore it
} else if (new_track){
MediaFormat newFormat = mEncoder.getOutputFormat();
Log.d(MainFrameProcessor.TAG, "encoder output format changed: " + newFormat);
// now that we have the Magic Goodies, start the muxer
mTrackIndex = mMuxer.addTrack(newFormat);
mMuxer.start();
mMuxerStarted = true;
new_track=false;
} else {
ByteBuffer encodedData = encoderOutputBuffers[encoderStatus];
if (encodedData == null) {
throw new RuntimeException("encoderOutputBuffer " + encoderStatus +
" was null");
}
if ((mBufferInfo.flags & MediaCodec.BUFFER_FLAG_CODEC_CONFIG) != 0) {
// The codec config data was pulled out and fed to the muxer when we got
// the INFO_OUTPUT_FORMAT_CHANGED status. Ignore it.
if (MainFrameProcessor.VERBOSE) Log.d(MainFrameProcessor.TAG, "ignoring BUFFER_FLAG_CODEC_CONFIG");
mBufferInfo.size = 0;
}
if (mBufferInfo.size != 0) {
if (!mMuxerStarted) {
throw new RuntimeException("muxer hasn't started");
}
// adjust the ByteBuffer values to match BufferInfo (not needed?)
encodedData.position(mBufferInfo.offset);
encodedData.limit(mBufferInfo.offset + mBufferInfo.size);
mMuxer.writeSampleData(mTrackIndex, encodedData, mBufferInfo);
if (MainFrameProcessor.VERBOSE) Log.d(MainFrameProcessor.TAG, "sent " + mBufferInfo.size + " bytes to muxer");
}
mEncoder.releaseOutputBuffer(encoderStatus, false);
if ((mBufferInfo.flags & MediaCodec.BUFFER_FLAG_END_OF_STREAM) != 0) {
if (!endOfStream) {
Log.w(MainFrameProcessor.TAG, "reached end of stream unexpectedly");
} else {
if (MainFrameProcessor.VERBOSE) Log.d(MainFrameProcessor.TAG, "end of stream reached");
}
break; // out of while
}
}
}
}
public boolean checkDurationEnd() {
return System.nanoTime() >= desiredEnd;
}
}
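For context, here is a minimal sketch of how this writer might be driven from a GL render loop. The loop shape and the drawFrameWithGl() call are illustrative assumptions; only checkDurationEnd(), write(), drainEncoder() and stopWriter() come from the class above.
// Hypothetical driver loop for the writer above (inside the same class).
void recordLoop(SurfaceTexture st, CodecInputSurface inputSurface) {
    while (!checkDurationEnd()) {
        st.updateTexImage();       // latch the newest camera frame into the GL texture
        drawFrameWithGl();         // assumed: render the frame onto the encoder's input surface
        write(st, inputSurface);   // set the PTS, swap buffers, and drain the encoder
    }
    drainEncoder(true);            // send EOS and drain the remaining output
    stopWriter();                  // then stop and release the muxer
}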

IplImage crop and rotate - Android

I'm using ffmpeg to capture video for 30 seconds.
@Override
public void onPreviewFrame(byte[] data, Camera camera) {
if (yuvIplimage != null && recording && rec)
{
new SaveFrame().execute(data);
}
}
}
The SaveFrame class is below:
private class SaveFrame extends AsyncTask<byte[], Void, File> {
long t;
protected File doInBackground(byte[]... arg) {
t = 1000 * (System.currentTimeMillis() - firstTime - pausedTime);
toSaveFrames++;
File pathCache = new File(Environment.getExternalStorageDirectory()+"/DCIM", (System.currentTimeMillis() / 1000L)+ "_" + toSaveFrames + ".tmp");
BufferedOutputStream bos;
try {
bos = new BufferedOutputStream(new FileOutputStream(pathCache));
bos.write(arg[0]);
bos.flush();
bos.close();
} catch (FileNotFoundException e) {
e.printStackTrace();
pathCache = null;
toSaveFrames--;
} catch (IOException e) {
e.printStackTrace();
pathCache = null;
toSaveFrames--;
}
return pathCache;
}
@Override
protected void onPostExecute(File filename)
{
if(filename!=null)
{
savedFrames++;
tempList.add(new FileFrame(t,filename));
}
}
}
Finally, I add all the frames, with cropping and rotation:
private class AddFrame extends AsyncTask<Void, Integer, Void> {
private int serial = 0;
@Override
protected Void doInBackground(Void... params) {
for(int i=0; i<tempList.size(); i++)
{
byte[] bytes = new byte[(int) tempList.get(i).file.length()];
try {
BufferedInputStream buf = new BufferedInputStream(new FileInputStream(tempList.get(i).file));
buf.read(bytes, 0, bytes.length);
buf.close();
IplImage image = IplImage.create(imageWidth, imageHeight, IPL_DEPTH_8U, 2);
// final int startY = 640*(480-480)/2;
// final int lenY = 640*480;
// yuvIplimage.getByteBuffer().put(bytes, startY, lenY);
// final int startVU = 640*480+ 640*(480-480)/4;
// final int lenVU = 640* 480/2;
// yuvIplimage.getByteBuffer().put(bytes, startVU, lenVU);
if (tempList.get(i).time > recorder.getTimestamp()) {
recorder.setTimestamp(tempList.get(i).time);
}
image = cropImage(image);
image = rotate(image, 270);
// image = rotateImage(image);
recorder.record(image);
Log.i(LOG_TAG, "record " + i);
image = null;
serial++;
publishProgress(serial);
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
} catch (com.googlecode.javacv.FrameRecorder.Exception e) {
e.printStackTrace();
}
}
return null;
}
@Override
protected void onProgressUpdate(Integer... serial) {
int value = serial[0];
creatingProgress.setProgress(value);
}
@Override
protected void onPostExecute(Void v)
{
creatingProgress.dismiss();
if (recorder != null && recording) {
recording = false;
Log.v(LOG_TAG,"Finishing recording, calling stop and release on recorder");
try {
recorder.stop();
recorder.release();
finish();
startActivity(new Intent(RecordActivity.this,AnswerViewActivity.class));
} catch (FFmpegFrameRecorder.Exception e) {
e.printStackTrace();
}
recorder = null;
}
}
}
My crop and rotate methods are below:
private IplImage cropImage(IplImage src)
{
cvSetImageROI(src, r);
IplImage cropped = IplImage.create(imageHeight, imageHeight, IPL_DEPTH_8U, 2);
cvCopy(src, cropped);
return cropped;
}
public static IplImage rotate(IplImage image, double angle) {
IplImage copy = opencv_core.cvCloneImage(image);
IplImage rotatedImage = opencv_core.cvCreateImage(opencv_core.cvGetSize(copy), copy.depth(), copy.nChannels());
CvMat mapMatrix = opencv_core.cvCreateMat( 2, 3, opencv_core.CV_32FC1 );
//Define Mid Point
CvPoint2D32f centerPoint = new CvPoint2D32f();
centerPoint.x(copy.width()/2);
centerPoint.y(copy.height()/2);
//Get Rotational Matrix
opencv_imgproc.cv2DRotationMatrix(centerPoint, angle, 1.0, mapMatrix);
//Rotate the Image
opencv_imgproc.cvWarpAffine(copy, rotatedImage, mapMatrix, opencv_imgproc.CV_INTER_CUBIC + opencv_imgproc.CV_WARP_FILL_OUTLIERS, opencv_core.cvScalarAll(170));
opencv_core.cvReleaseImage(copy);
opencv_core.cvReleaseMat(mapMatrix);
return rotatedImage;
}
My final video is cropped and rotated, but green frames and discolored frames are mixed into it.
How do I fix this? I'm not familiar with IplImage. Some blogs mention that it is in YUV format: first you need to convert the Y plane, and then the UV planes.
How can I solve this problem?
I have modified the onPreviewFrame method of this open-source Android Touch-To-Record library to transpose and resize a captured frame.
I defined "yuvIplImage" as follows in my setCameraParams() method:
IplImage yuvIplImage = IplImage.create(mPreviewSize.height, mPreviewSize.width, opencv_core.IPL_DEPTH_8U, 2);
Also initialize your videoRecorder object as follows, passing the width as the height and vice versa:
//Call initVideoRecorder() like this to initialize the videoRecorder object of the FFmpegFrameRecorder class.
initVideoRecorder(strVideoPath, mPreview.getPreviewSize().height, mPreview.getPreviewSize().width, recorderParameters);
//method implementation
public void initVideoRecorder(String videoPath, int width, int height, RecorderParameters recorderParameters)
{
Log.e(TAG, "initVideoRecorder");
videoRecorder = new FFmpegFrameRecorder(videoPath, width, height, 1);
videoRecorder.setFormat(recorderParameters.getVideoOutputFormat());
videoRecorder.setSampleRate(recorderParameters.getAudioSamplingRate());
videoRecorder.setFrameRate(recorderParameters.getVideoFrameRate());
videoRecorder.setVideoCodec(recorderParameters.getVideoCodec());
videoRecorder.setVideoQuality(recorderParameters.getVideoQuality());
videoRecorder.setAudioQuality(recorderParameters.getVideoQuality());
videoRecorder.setAudioCodec(recorderParameters.getAudioCodec());
videoRecorder.setVideoBitrate(1000000);
videoRecorder.setAudioBitrate(64000);
}
This is my onPreviewFrame() method:
@Override
public void onPreviewFrame(byte[] data, Camera camera)
{
long frameTimeStamp = 0L;
if(FragmentCamera.mAudioTimestamp == 0L && FragmentCamera.firstTime > 0L)
{
frameTimeStamp = 1000L * (System.currentTimeMillis() - FragmentCamera.firstTime);
}
else if(FragmentCamera.mLastAudioTimestamp == FragmentCamera.mAudioTimestamp)
{
frameTimeStamp = FragmentCamera.mAudioTimestamp + FragmentCamera.frameTime;
}
else
{
long l2 = (System.nanoTime() - FragmentCamera.mAudioTimeRecorded) / 1000L;
frameTimeStamp = l2 + FragmentCamera.mAudioTimestamp;
FragmentCamera.mLastAudioTimestamp = FragmentCamera.mAudioTimestamp;
}
synchronized(FragmentCamera.mVideoRecordLock)
{
if(FragmentCamera.recording && FragmentCamera.rec && lastSavedframe != null && lastSavedframe.getFrameBytesData() != null && yuvIplImage != null)
{
FragmentCamera.mVideoTimestamp += FragmentCamera.frameTime;
if(lastSavedframe.getTimeStamp() > FragmentCamera.mVideoTimestamp)
{
FragmentCamera.mVideoTimestamp = lastSavedframe.getTimeStamp();
}
try
{
yuvIplImage.getByteBuffer().put(lastSavedframe.getFrameBytesData());
IplImage bgrImage = IplImage.create(mPreviewSize.width, mPreviewSize.height, opencv_core.IPL_DEPTH_8U, 4);// In my case, mPreviewSize.width = 1280 and mPreviewSize.height = 720
IplImage transposed = IplImage.create(mPreviewSize.height, mPreviewSize.width, yuvIplImage.depth(), 4);
IplImage squared = IplImage.create(mPreviewSize.height, mPreviewSize.height, yuvIplImage.depth(), 4);
int[] _temp = new int[mPreviewSize.width * mPreviewSize.height];
Util.YUV_NV21_TO_BGR(_temp, data, mPreviewSize.width, mPreviewSize.height);
bgrImage.getIntBuffer().put(_temp);
opencv_core.cvTranspose(bgrImage, transposed);
opencv_core.cvFlip(transposed, transposed, 1);
opencv_core.cvSetImageROI(transposed, opencv_core.cvRect(0, 0, mPreviewSize.height, mPreviewSize.height));
opencv_core.cvCopy(transposed, squared, null);
opencv_core.cvResetImageROI(transposed);
videoRecorder.setTimestamp(lastSavedframe.getTimeStamp());
videoRecorder.record(squared);
}
catch(com.googlecode.javacv.FrameRecorder.Exception e)
{
e.printStackTrace();
}
}
lastSavedframe = new SavedFrames(data, frameTimeStamp);
}
}
This code uses a method, "YUV_NV21_TO_BGR", which I found at this link.
Basically, this method is used to resolve what I call "the Green Devil problem on Android", just like yours. I was having the same issue and wasted almost 3-4 days. Before adding the "YUV_NV21_TO_BGR" method, when I just took the transpose of the YuvIplImage (and, more importantly, used a combination of transpose and flip, with or without resizing), there was greenish output in the resulting video. The "YUV_NV21_TO_BGR" method saved the day. Thanks to @David Han from the Google Groups thread above.
You should also know that all this processing (transpose, flip and resize) in onPreviewFrame takes a lot of time, which causes a very serious hit to the frames-per-second (FPS) rate. When I used this code inside the onPreviewFrame method, the FPS of the recorded video dropped from 30fps to 3fps.
I would advise against this approach. Instead, you can post-process your video file (transpose, flip and resize) using JavaCV in an AsyncTask. Hope this helps.
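For reference, here is a minimal sketch of the kind of NV21-to-RGB conversion such a helper performs. This is my own illustrative version using the standard YUV-to-RGB coefficients, not the exact code behind YUV_NV21_TO_BGR:
// Illustrative NV21 -> packed ARGB conversion (out.length must be width * height).
public static void nv21ToArgb(int[] out, byte[] nv21, int width, int height) {
    final int frameSize = width * height;
    for (int y = 0; y < height; y++) {
        int uvRow = frameSize + (y >> 1) * width; // interleaved VU rows follow the Y plane
        for (int x = 0; x < width; x++) {
            int yVal = (nv21[y * width + x] & 0xFF) - 16;
            if (yVal < 0) yVal = 0;
            int uvIndex = uvRow + (x & ~1);
            int v = (nv21[uvIndex] & 0xFF) - 128;     // NV21 stores V before U
            int u = (nv21[uvIndex + 1] & 0xFF) - 128;
            int r = (1192 * yVal + 1634 * v) >> 10;
            int g = (1192 * yVal - 833 * v - 400 * u) >> 10;
            int b = (1192 * yVal + 2066 * u) >> 10;
            r = Math.min(255, Math.max(0, r));
            g = Math.min(255, Math.max(0, g));
            b = Math.min(255, Math.max(0, b));
            out[y * width + x] = 0xFF000000 | (r << 16) | (g << 8) | b;
        }
    }
}
Note that a per-pixel Java loop like this is itself expensive, which is consistent with the FPS drop described above.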
@Override
public void onPreviewFrame(byte[] data, Camera camera) {
//IplImage newImage = cvCreateImage(cvGetSize(yuvIplimage), IPL_DEPTH_8U, 1);
if (recording) {
videoTimestamp = 1000 * (System.currentTimeMillis() - startTime);
yuvimage = IplImage.create(imageWidth, imageHeight * 3 / 2, IPL_DEPTH_8U,1);
yuvimage.getByteBuffer().put(data);
rgbimage = IplImage.create(imageWidth, imageHeight, IPL_DEPTH_8U, 3);
opencv_imgproc.cvCvtColor(yuvimage, rgbimage, opencv_imgproc.CV_YUV2BGR_NV21);
IplImage rotateimage=null;
try {
recorder.setTimestamp(videoTimestamp);
int rot=0;
switch (degrees) {
case 0:
rot =1;
rotateimage=rotate(rgbimage,rot);
break;
case 180:
rot = -1;
rotateimage=rotate(rgbimage,rot);
break;
default:
rotateimage=rgbimage;
}
recorder.record(rotateimage);
} catch (FFmpegFrameRecorder.Exception e) {
e.printStackTrace();
}
}
}
IplImage rotate(IplImage IplSrc,int angle) {
IplImage img= IplImage.create(IplSrc.height(), IplSrc.width(), IplSrc.depth(), IplSrc.nChannels());
cvTranspose(IplSrc, img);
cvFlip(img, img, angle);
return img;
}
}
After much searching, this works for me.

startPreview failed but not all devices

I'm getting a startPreview failed error, but not on all devices.
In Motorola RAZR and Samsung Galaxy S3, it's working very well.
Some people told me they got the same problem on other devices (Galaxy SII Lite, Galaxy Ace Duos, Samsung Galaxy Y, etc.).
I'm testing on a Samsung Galaxy Y, and here's what I get in Logcat:
java.lang.RuntimeException: startPreview failed
at android.hardware.Camera.startPreview(Native Method)
at br.com.timo.tubagram.CameraSurfaceView.surfaceCreated(CameraSurfaceView.java:47)
at android.view.SurfaceView.updateWindow(SurfaceView.java:601)
at android.view.SurfaceView.updateWindow(SurfaceView.java:413)
at android.view.SurfaceView.dispatchDraw(SurfaceView.java:358)
at android.view.View.draw(View.java:7083)
at android.view.SurfaceView.draw(SurfaceView.java:344)
at android.view.View.buildDrawingCache(View.java:6842)
at android.view.View.getDrawingCache(View.java:6628)
at android.view.ViewGroup.drawChild(ViewGroup.java:1571)
at android.view.ViewGroup.dispatchDraw(ViewGroup.java:1373)
at android.view.View.draw(View.java:7083)
at android.widget.FrameLayout.draw(FrameLayout.java:357)
at android.view.ViewGroup.drawChild(ViewGroup.java:1646)
at android.view.ViewGroup.dispatchDraw(ViewGroup.java:1373)
at android.view.ViewGroup.drawChild(ViewGroup.java:1644)
at android.view.ViewGroup.dispatchDraw(ViewGroup.java:1373)
at android.view.ViewGroup.drawChild(ViewGroup.java:1644)
at android.view.ViewGroup.dispatchDraw(ViewGroup.java:1373)
at android.view.View.draw(View.java:7083)
at android.widget.FrameLayout.draw(FrameLayout.java:357)
at com.android.internal.policy.impl.PhoneWindow$DecorView.draw(PhoneWindow.java:2108)
at android.view.ViewRoot.draw(ViewRoot.java:1540)
at android.view.ViewRoot.performTraversals(ViewRoot.java:1276)
at android.view.ViewRoot.handleMessage(ViewRoot.java:1878)
at android.os.Handler.dispatchMessage(Handler.java:99)
at android.os.Looper.loop(Looper.java:130)
at android.app.ActivityThread.main(ActivityThread.java:3770)
at java.lang.reflect.Method.invokeNative(Native Method)
at java.lang.reflect.Method.invoke(Method.java:507)
at com.android.internal.os.ZygoteInit$MethodAndArgsCaller.run(ZygoteInit.java:912)
at com.android.internal.os.ZygoteInit.main(ZygoteInit.java:670)
at dalvik.system.NativeStart.main(Native Method)
And here is my code
public class CameraSurfaceView extends SurfaceView implements SurfaceHolder.Callback, PreviewCallback{
private SurfaceHolder holder;
private Camera camera;
private Camera.Parameters parameters;
boolean front = false;
public CameraSurfaceView(Context context) {
super(context);
this.holder = this.getHolder();
this.holder.addCallback(this);
}
@Override
public void surfaceCreated(SurfaceHolder holder) {
try {
this.holder = holder;
camera = Camera.open();
camera.setPreviewDisplay(holder);
parameters = parametrosCamera();
camera.setParameters(parameters);
camera.setDisplayOrientation(90);
camera.startPreview();
} catch (IOException ioe) {
}
}
@Override
public void surfaceDestroyed(SurfaceHolder holder) {
camera.stopPreview();
camera.release();
camera = null;
}
private Parameters parametrosCamera(){
Parameters parameters = camera.getParameters();
List<Camera.Size> sizes = parameters.getSupportedPreviewSizes();
if (sizes != null){
Size min = sizes.get(0);
for (Size size : sizes){
if (size.width < min.width){
min = size;
}else{
parameters.setPreviewSize(min.width, min.height);
parameters.setPictureSize(min.width, min.height);
}
}
parameters.set("orientation", "portrait");
parameters.setRotation(90);
}
if (parameters.getFlashMode() != null){
parameters.setFlashMode(Parameters.FLASH_MODE_AUTO);
}
return parameters;
}
}
And my PrincipalActivity
public class PrincipalActivity extends Activity{
Camera camera;
File sdImageMainDirectory;
CameraSurfaceView cameraSurfaceView;
Button principalActivity_bt_TirarFoto;
Button principalActivity_bt_VirarFoto;
Button principalActivity_bt_Aceitar;
Button principalActivity_bt_Cancelar;
FrameLayout preview;
ImageView principalActivity_iv_UltimaFoto;
HandlePictureStorage hps;
int wid = 0;
boolean imageSelected = false;
boolean front = false;
String caminhoImagens;
String url;
// File storagePath = new File(Environment.getExternalStorageDirectory() + "/Tubagram/");
@SuppressWarnings("deprecation")
@SuppressLint("NewApi")
public void onCreate(Bundle savedInstanceState) {
Log.i("PrincipalActivity","onCreate");
super.onCreate(savedInstanceState);
setContentView(R.layout.principal_activity);
principalActivity_bt_TirarFoto = (Button) findViewById(R.id.principalActivity_bt_TirarFoto);
setRequestedOrientation(ActivityInfo.SCREEN_ORIENTATION_PORTRAIT);
cameraSurfaceView = new CameraSurfaceView(principalActivity_bt_TirarFoto.getContext());
cameraSurfaceView.setSoundEffectsEnabled(true);
cameraSurfaceView.setDrawingCacheEnabled(true);
preview = (FrameLayout) findViewById(R.id.principalActivity_fl_Camera);
preview.addView(cameraSurfaceView);
principalActivity_bt_TirarFoto.setSoundEffectsEnabled(true);
principalActivity_bt_TirarFoto.setOnClickListener(new OnClickListener() {
@SuppressLint("NewApi")
@Override
public void onClick(View v) {
Log.i("PrincipalActivity","onClick - TirarFoto");
camera = cameraSurfaceView.getCamera();
camera.takePicture(new ShutterCallback() {
@Override
public void onShutter() {
AudioManager mgr = (AudioManager) getSystemService(Context.AUDIO_SERVICE);
mgr.playSoundEffect(AudioManager.FLAG_PLAY_SOUND);
}
}, null, hps = new HandlePictureStorage());
imageSelected = false;
mostrarBotoesConfirma();
}
});
principalActivity_bt_Aceitar = (Button) findViewById(R.id.principalActivity_bt_Aceitar);
principalActivity_bt_Aceitar.setOnClickListener(new OnClickListener() {
@Override
public void onClick(View v) {
Log.i("PrincipalActivity","onClick - Aceitar");
if (verificaInstagram()){
ByteArrayOutputStream stream = new ByteArrayOutputStream();
if (imageSelected){
Bitmap bitmap = ((BitmapDrawable)cameraSurfaceView.getBackground()).getBitmap();
bitmap.compress(Bitmap.CompressFormat.PNG, 100, stream);
}else{
hps.getBitmap().compress(Bitmap.CompressFormat.PNG, 100, stream);
}
salvarImagemSelecionada(stream.toByteArray());
Intent shareIntent = new Intent(android.content.Intent.ACTION_SEND);
shareIntent.setType("image/*");
caminhoImagens = getRealPathFromURI(Uri.parse(url));
Log.i("Caminho imagem: ", caminhoImagens);
shareIntent.putExtra(Intent.EXTRA_STREAM, Uri.parse("file://" + caminhoImagens));
shareIntent.setPackage("com.instagram.android");
startActivity(shareIntent);
// }
}else{
Toast.makeText(v.getContext(), "Você não possui o Instagram no seu smartphone!", Toast.LENGTH_SHORT).show();
}
}
});
principalActivity_bt_Cancelar = (Button) findViewById(R.id.principalActivity_bt_Cancelar);
principalActivity_bt_Cancelar.setOnClickListener(new OnClickListener() {
@Override
public void onClick(View v) {
Log.i("PrincipalActivity","onClick - Cancelar");
esconderBotoesConfirma();
cameraSurfaceView.setBackgroundColor(Color.TRANSPARENT);
cameraSurfaceView.voltarCamera();
}
});
int qtdCameras = Camera.getNumberOfCameras();
principalActivity_bt_VirarFoto = (Button) findViewById(R.id.principalActivity_bt_VirarFoto);
if(qtdCameras > 1){
principalActivity_bt_VirarFoto.setOnClickListener(new OnClickListener() {
public void onClick(View v) {
Log.i("PrincipalActivity","onClick - VirarCamera");
Thread t = new Thread(){
@Override
public void run() {
cameraSurfaceView.flipit();
front = cameraSurfaceView.getFront();
}
};
t.start();
}
});
}else{
principalActivity_bt_VirarFoto.setVisibility(View.INVISIBLE);
}
LinearLayout principalActivity_ll_Molduras = (LinearLayout) findViewById(R.id.principalActivity_ll_Molduras);
principalActivity_ll_Molduras.setVisibility(View.VISIBLE);
List<Integer> listaMoldurasMenores = new ArrayList<Integer>();
final List<Integer> listaMoldurasMaiores = new ArrayList<Integer>();
// listaMoldurasMenores.add(R.drawable.moldura_menor0);
listaMoldurasMenores.add(R.drawable.moldura_menor1);
listaMoldurasMenores.add(R.drawable.moldura_menor2);
listaMoldurasMenores.add(R.drawable.moldura_menor3);
listaMoldurasMenores.add(R.drawable.moldura_menor4);
listaMoldurasMenores.add(R.drawable.moldura_menor5);
listaMoldurasMenores.add(R.drawable.moldura_menor6);
listaMoldurasMaiores.add(R.drawable.moldura_maior1);
listaMoldurasMaiores.add(R.drawable.moldura_maior2);
listaMoldurasMaiores.add(R.drawable.moldura_maior3);
listaMoldurasMaiores.add(R.drawable.moldura_maior4);
listaMoldurasMaiores.add(R.drawable.moldura_maior5);
listaMoldurasMaiores.add(R.drawable.moldura_maior6);
for (int i = 0; i < listaMoldurasMenores.size(); i++) {
final ImageView imagem_moldura = new ImageView(this);
imagem_moldura.setScaleType(ScaleType.FIT_XY);
imagem_moldura.setId(i);
imagem_moldura.setImageResource(listaMoldurasMenores.get(i));
imagem_moldura.setOnClickListener(new OnClickListener() {
@Override
public void onClick(View v) {
Log.i("PrincipalActivity","onClick - Molduras");
ImageView imagem_moldura_maior = new ImageView(v.getContext());
imagem_moldura_maior.setImageResource(listaMoldurasMaiores.get(imagem_moldura.getId()));
preview.setForeground(imagem_moldura_maior.getDrawable());
}
});
principalActivity_ll_Molduras.addView(imagem_moldura,i);
}
principalActivity_iv_UltimaFoto = (ImageView) findViewById(R.id.principalActivity_iv_UltimaFoto);
principalActivity_iv_UltimaFoto.setAdjustViewBounds(false);
Uri uri = buscaUltimaFotoAlbum();
Drawable backgroundGaleria;
if (uri != null){
String caminhosImagensGaleria = getRealPathFromURI(uri);
backgroundGaleria = Drawable.createFromPath(caminhosImagensGaleria);
principalActivity_iv_UltimaFoto.setBackgroundDrawable(backgroundGaleria);
}else{
principalActivity_iv_UltimaFoto.setBackgroundResource(R.drawable.box_imagem_album);
}
principalActivity_iv_UltimaFoto.setOnClickListener(new OnClickListener() {
@Override
public void onClick(View v) {
Intent intent = new Intent(Intent.ACTION_PICK,android.provider.MediaStore.Images.Media.INTERNAL_CONTENT_URI);
final int ACTION_SELECT_IMAGE = 1;
intent.putExtra("crop", "true");
intent.putExtra("aspectX", 10);
intent.putExtra("aspectY", 10);
intent.putExtra("outputX", 256);
intent.putExtra("outputY", 256);
intent.putExtra("scale", true);
intent.putExtra("return-data", true);
startActivityForResult(intent,ACTION_SELECT_IMAGE);
}
});
}
EDIT 1 - 06/20/2013
I changed my code a little and it now runs on my Galaxy Y, but my FrameLayout with the camera isn't working. I'm getting a black screen in the FrameLayout.
Does anyone know why?
What I changed:
I added this line to my constructor:
this.holder.setType(SurfaceHolder.SURFACE_TYPE_PUSH_BUFFERS);
I changed my surfaceCreated and surfaceChanged to this
@Override
public void surfaceChanged(SurfaceHolder holder, int format, int width,
int height) {
try {
camera.stopPreview();
parameters = parametrosCamera();
camera.setParameters(parameters);
camera.setDisplayOrientation(90);
camera.setPreviewDisplay(holder);
camera.startPreview();
}
catch (IOException e) {
}
catch (Exception e){
}
}
@SuppressLint("NewApi")
@Override
public void surfaceCreated(SurfaceHolder holder) {
try {
camera = Camera.open(0);
camera.setPreviewDisplay(this.holder);
camera.startPreview();
} catch (IOException ioe) {
} catch (Exception e){
}
}
P.S.
I added some logs to see which size my setPreviewSize call ends up with in my method:
private Parameters parametersCamera(){
Parameters parameters = camera.getParameters();
List<Camera.Size> sizes = parameters.getSupportedPreviewSizes();
if (sizes != null){
Size min = sizes.get(0);
for (Size size : sizes){
if (size.width < min.width){
min = size;
}else{
parameters.setPreviewSize(min.width, min.height);
parameters.setPictureSize(min.width, min.height);
Log.i("Ultima Camera Width: " + min.width, "Ultima Camera Height: " + min.height);
}
}
parameters.set("orientation", "portrait");
parameters.setRotation(90);
}
if (parameters.getFlashMode() != null){
parameters.setFlashMode(Parameters.FLASH_MODE_AUTO);
}
return parameters;
}
I also added a Log in surfaceChanged to get the width and height (I don't use these values) and to see whether they match. I get these results:
Motorola RAZR HD (works very well)
surfaceChanged: 540 x 573
method parametrosCamera: 640 x 480
Samsung Galaxy Y (doesn't show my preview)
surfaceChanged: 240 x 162 (W x H)
method parametrosCamera: 320 x 240
In my parametersCamera method I use these two lines:
parameters.set("orientation", "portrait");
parameters.setRotation(90);
I think the answer to my question is in this part of my code.
You seem to be looking for the minimal supported preview size. But if this size is not supported as a picture size, your code will initialize the camera into an unsupported state, and some devices may fail to open the preview in such a state.
I am not sure you really need the picture size to be equal to the preview frame size. If you don't, you can simply iterate separately over the values from parameters.getSupportedPictureSizes().
Another common mistake (not necessarily applicable to your use case) is rooted in the fact that the supported sizes for the rear-facing camera do not match those of the front-facing camera. Therefore, the sizes must always be recalculated when you switch cameras.
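As a sketch of that suggestion (the method name is illustrative, not the asker's code; it uses the deprecated android.hardware.Camera API and java.util.List, matching the question), you could select the preview size and the picture size from their own supported lists:
// Pick the smallest supported preview size and, independently, the smallest
// supported picture size, rather than assuming one list contains the other.
private void configureSizes(Camera.Parameters parameters) {
    List<Camera.Size> previewSizes = parameters.getSupportedPreviewSizes();
    Camera.Size preview = previewSizes.get(0);
    for (Camera.Size s : previewSizes) {
        if (s.width < preview.width) preview = s;
    }
    parameters.setPreviewSize(preview.width, preview.height);
    // Picture sizes come from their own list; it may not contain the preview size.
    List<Camera.Size> pictureSizes = parameters.getSupportedPictureSizes();
    Camera.Size picture = pictureSizes.get(0);
    for (Camera.Size s : pictureSizes) {
        if (s.width < picture.width) picture = s;
    }
    parameters.setPictureSize(picture.width, picture.height);
}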
The question isn't using a camera-manager library, so this won't be directly useful to the original poster (the question is ancient; I assume it has long been fixed or no longer matters), but this answer may help users who face the startPreview error only on certain devices.
Causes
After deep debugging, I found that the camera-manager library tries to use the largest suitable resolution when there is no exact size match.
As in CameraConfiguration.findBestPreviewSizeValue():
// If no exact match, use largest preview size. This was not a great
// idea on older devices because
// of the additional computation needed. We're likely to get here on
// newer Android 4+ devices, where
// the CPU is much more powerful.
if (!supportedPreviewSizes.isEmpty()) {
Camera.Size largestPreview = supportedPreviewSizes.get(0);
Point largestSize = new Point(largestPreview.width, largestPreview.height);
Log.i(TAG, "Using largest suitable preview size: " + largestSize);
return largestSize;
}
This is fine when the device manufacturer has properly set the supported preview sizes.
But things sometimes go wrong! A device may report a ridiculously high resolution that overwhelms it in startPreview(), causing an internal driver error ('preview timeout') that is visible only in logcat.
Let's fix it!
Use the resolution closest to the screen instead!
Modify CameraConfiguration.java (or possibly CameraConfigurationUtils.java):
... Skip lines until you find the next pattern (note that your source may be slightly different!)
+ Add lines
- Remove lines
...
...
public static Point findBestPreviewSizeValue(Camera.Parameters parameters, Point screenResolution) {
...
...
// Find a suitable size, with max resolution
int maxResolution = 0;
Camera.Size maxResPreviewSize = null;
+ double closestResolutionDrift = 1024*1024;
+ Camera.Size closestResolution = null;
for (Camera.Size size : rawSupportedSizes) {
...
...
Log.i(TAG, "Found preview size exactly matching screen size: " + exactPoint);
return exactPoint;
}
+
+ double drift = (maybeFlippedWidth * maybeFlippedHeight)-(screenResolution.x * screenResolution.y);
+ if (drift < closestResolutionDrift && resolution>(screenResolution.x * screenResolution.y)) {
+ closestResolutionDrift = drift;
+ closestResolution = size;
+ }
...
...
//If no exact match, use largest preview size. This was not a great idea on older devices because
//of the additional computation needed. We're likely to get here on newer Android 4+ devices, where
//the CPU is much more powerful.
- if (maxResPreviewSize != null) {
- Point largestSize = new Point(maxResPreviewSize.width, maxResPreviewSize.height);
- Log.i(TAG, "Using largest suitable preview size: " + largestSize);
- return largestSize;
- }
+ //if (maxResPreviewSize != null) {
+ // Point largestSize = new Point(maxResPreviewSize.width, maxResPreviewSize.height);
+ // Log.i(TAG, "Using largest suitable preview size: " + largestSize);
+ // return largestSize;
+ //}
+
+ // Dont! Some low end device still report ridiculous high resolution which overwhelming startPreview!
+ if(closestResolution!=null){
+ Point closestSize = new Point(closestResolution.width, closestResolution.height);
+ Log.i(TAG, "Using closest suitable preview size: " + closestSize);
+ return closestSize;
+ }
