Note: I'm new to Android and OpenGL.
I'm building an augmented reality app based on ARToolKitX (GitHub: https://github.com/artoolkitx/artoolkitx/tree/8c6bd4e7be5e80c8439066b23473506aebbb496c/Source/ARXJ/ARXJProj/arxj/src/main/java/org/artoolkitx/arx/arxj).
The application shows the camera frame and renders objects with OpenGL on top of it.
My problem:
ARToolKitX forces the app into landscape mode:
setRequestedOrientation(ActivityInfo.SCREEN_ORIENTATION_LANDSCAPE);
but when I change the screen orientation to SCREEN_ORIENTATION_PORTRAIT, the camera image and the OpenGL objects don't rotate to the correct orientation and stay in landscape mode.
Inside the ARRenderer I can use the drawVideoSettings method to rotate the camera image by itself, but that doesn't apply to the OpenGL objects.
ARToolKitX also provides a surfaceChanged method inside the CameraSurface class, with the comment: "This is where [...] to create transformation matrix to scale and then rotate surface view, if the app is going to handle orientation changes."
But I have no idea what that transformation matrix has to look like or how to apply it.
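Is it something along these lines? This is only an untested sketch of what I imagine: query the display rotation and multiply a z-axis rotation into the projection matrix before handing it to the shader. The rotateProjection helper is made up; only the android.opengl.Matrix and android.view.Surface calls are real API.

// displayRotation would come from getWindowManager().getDefaultDisplay().getRotation()
private float[] rotateProjection(float[] projectionMatrix, int displayRotation) {
    // Map Surface.ROTATION_0/90/180/270 to degrees.
    float degrees = 0f;
    switch (displayRotation) {
        case Surface.ROTATION_90:  degrees = 90f;  break;
        case Surface.ROTATION_180: degrees = 180f; break;
        case Surface.ROTATION_270: degrees = 270f; break;
    }
    float[] rotation = new float[16];
    float[] rotated = new float[16];
    // Rotate around the z axis (the axis pointing out of the screen).
    Matrix.setRotateM(rotation, 0, degrees, 0f, 0f, 1f);
    Matrix.multiplyMM(rotated, 0, rotation, 0, projectionMatrix, 0);
    return rotated;
}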
Any help is appreciated.
ARRenderer:
public abstract class ARRenderer implements GLSurfaceView.Renderer {
private MyShaderProgram shaderProgram;
private int width, height, cameraIndex;
private int[] viewport = new int[4];
private boolean firstRun = true;
private final static String TAG = ARRenderer.class.getName();
/**
* Allows subclasses to load markers and prepare the scene. This is called after
* initialisation is complete.
*/
public boolean configureARScene() {
return true;
}
public void onSurfaceCreated(GL10 unused, EGLConfig config) {
// Transparent background
GLES20.glClearColor(0.0f, 0.0f, 0.0f, 0.f);
this.shaderProgram = new MyShaderProgram(new MyVertexShader(), new MyFragmentShader());
GLES20.glUseProgram(shaderProgram.getShaderProgramHandle());
}
public void onSurfaceChanged(GL10 unused, int w, int h) {
this.width = w;
this.height = h;
if(ARController.getInstance().isRunning()) {
//Update the frame settings for native rendering
ARController.getInstance().drawVideoSettings(cameraIndex, w, h, false, false, false, ARX_jni.ARW_H_ALIGN_CENTRE, ARX_jni.ARW_V_ALIGN_CENTRE, ARX_jni.ARW_SCALE_MODE_FILL, viewport);
}
}
public void onDrawFrame(GL10 unused) {
if (ARController.getInstance().isRunning()) {
// Initialize artoolkitX video background rendering.
if (firstRun) {
boolean isDisplayFrameInited = ARController.getInstance().drawVideoInit(cameraIndex);
if (!isDisplayFrameInited) {
Log.e(TAG, "Display Frame not inited");
}
if (!ARController.getInstance().drawVideoSettings(cameraIndex, this.width, this.height, false, false,
false, ARX_jni.ARW_H_ALIGN_CENTRE, ARX_jni.ARW_V_ALIGN_CENTRE,
ARX_jni.ARW_SCALE_MODE_FILL, viewport)) {
Log.e(TAG, "Error during call of displayFrameSettings.");
} else {
Log.i(TAG, "Viewport {" + viewport[0] + ", " + viewport[1] + ", " + viewport[2] + ", " + viewport[3] + "}.");
}
firstRun = false;
}
GLES20.glClear(GLES20.GL_COLOR_BUFFER_BIT | GLES20.GL_DEPTH_BUFFER_BIT);
if (!ARController.getInstance().drawVideoSettings(cameraIndex)) {
Log.e(TAG, "Error during call of displayFrame.");
}
draw();
}
}
/**
* Should be overridden in subclasses and used to perform rendering.
*/
public void draw() {
GLES20.glViewport(viewport[0], viewport[1], viewport[2], viewport[3]);
//TODO: Check how to refactor near and far plane
shaderProgram.setProjectionMatrix(ARController.getInstance().getProjectionMatrix(10.0f, 10000.0f));
float[] camPosition = {1f, 1f, 1f};
shaderProgram.render(camPosition);
}
@SuppressWarnings("unused")
public ShaderProgram getShaderProgram() {
return shaderProgram;
}
public void setCameraIndex(int cameraIndex) {
this.cameraIndex = cameraIndex;
}
}
CameraSurface:
class CameraSurfaceImpl implements CameraSurface {
/**
* Android logging tag for this class.
*/
private static final String TAG = CameraSurfaceImpl.class.getSimpleName();
private CameraDevice mCameraDevice;
private ImageReader mImageReader;
private Size mImageReaderVideoSize;
private final Context mAppContext;
private final CameraDevice.StateCallback mCamera2DeviceStateCallback = new CameraDevice.StateCallback() {
@Override
public void onOpened(@NonNull CameraDevice camera2DeviceInstance) {
mCameraDevice = camera2DeviceInstance;
startCaptureAndForwardFramesSession();
}
@Override
public void onDisconnected(@NonNull CameraDevice camera2DeviceInstance) {
camera2DeviceInstance.close();
mCameraDevice = null;
}
@Override
public void onError(@NonNull CameraDevice camera2DeviceInstance, int error) {
camera2DeviceInstance.close();
mCameraDevice = null;
}
};
/**
* Listener to inform of camera related events: start, frame, and stop.
*/
private final CameraEventListener mCameraEventListener;
/**
* Tracks if SurfaceView instance was created.
*/
private boolean mImageReaderCreated;
public CameraSurfaceImpl(CameraEventListener cameraEventListener, Context appContext){
this.mCameraEventListener = cameraEventListener;
this.mAppContext = appContext;
}
private final ImageReader.OnImageAvailableListener mImageAvailableAndProcessHandler = new ImageReader.OnImageAvailableListener() {
@Override
public void onImageAvailable(ImageReader reader)
{
Image imageInstance = reader.acquireLatestImage();
if (imageInstance == null) {
//Note: This seems to happen quite often.
Log.v(TAG, "onImageAvailable(): unable to acquire new image");
return;
}
// Get a ByteBuffer for each plane.
final Image.Plane[] imagePlanes = imageInstance.getPlanes();
final int imagePlaneCount = Math.min(4, imagePlanes.length); // We can handle up to 4 planes max.
final ByteBuffer[] imageBuffers = new ByteBuffer[imagePlaneCount];
final int[] imageBufferPixelStrides = new int[imagePlaneCount];
final int[] imageBufferRowStrides = new int[imagePlaneCount];
for (int i = 0; i < imagePlaneCount; i++) {
imageBuffers[i] = imagePlanes[i].getBuffer();
// For ImageFormat.YUV_420_888 the order of planes in the array returned by Image.getPlanes()
// is guaranteed such that plane #0 is always Y, plane #1 is always U (Cb), and plane #2 is always V (Cr).
// The Y-plane is guaranteed not to be interleaved with the U/V planes (in particular, pixel stride is
// always 1 in yPlane.getPixelStride()). The U/V planes are guaranteed to have the same row stride and
// pixel stride (in particular, uPlane.getRowStride() == vPlane.getRowStride() and uPlane.getPixelStride() == vPlane.getPixelStride(); ).
imageBufferPixelStrides[i] = imagePlanes[i].getPixelStride();
imageBufferRowStrides[i] = imagePlanes[i].getRowStride();
}
if (mCameraEventListener != null) {
mCameraEventListener.cameraStreamFrame(imageBuffers, imageBufferPixelStrides, imageBufferRowStrides);
}
imageInstance.close();
}
};
@Override
public void surfaceCreated() {
Log.i(TAG, "surfaceCreated(): called");
SharedPreferences prefs = PreferenceManager.getDefaultSharedPreferences(mAppContext);
int defaultCameraIndexId = mAppContext.getResources().getIdentifier("pref_defaultValue_cameraIndex","string", mAppContext.getPackageName());
mCamera2DeviceID = Integer.parseInt(prefs.getString("pref_cameraIndex", mAppContext.getResources().getString(defaultCameraIndexId)));
Log.i(TAG, "surfaceCreated(): will attempt to open camera \"" + mCamera2DeviceID +
"\", set orientation, set preview surface");
/*
Set the resolution from the settings as size for the glView. Because the video stream capture
is requested based on this size.
WARNING: While coding the preferences are taken from the res/xml/preferences.xml!!!
When building for Unity the actual used preferences are taken from the UnityARPlayer project!!!
*/
int defaultCameraValueId = mAppContext.getResources().getIdentifier("pref_defaultValue_cameraResolution","string",mAppContext.getPackageName());
String camResolution = prefs.getString("pref_cameraResolution", mAppContext.getResources().getString(defaultCameraValueId));
String[] dims = camResolution.split("x", 2);
mImageReaderVideoSize = new Size(Integer.parseInt(dims[0]),Integer.parseInt(dims[1]));
// Note that maxImages should be at least 2 for acquireLatestImage() to be any different than acquireNextImage() -
// discarding all-but-the-newest Image requires temporarily acquiring two Images at once. Or more generally,
// calling acquireLatestImage() with less than two images of margin, that is (maxImages - currentAcquiredImages < 2)
// will not discard as expected.
mImageReader = ImageReader.newInstance(mImageReaderVideoSize.getWidth(),mImageReaderVideoSize.getHeight(), ImageFormat.YUV_420_888, /* The maximum number of images the user will want to access simultaneously:*/ 2 );
mImageReader.setOnImageAvailableListener(mImageAvailableAndProcessHandler, null);
mImageReaderCreated = true;
} // end: public void surfaceCreated(SurfaceHolder holder)
/* Interface implemented by this SurfaceView subclass
holder: SurfaceHolder instance associated with SurfaceView instance that changed
format: pixel format of the surface
width: of the SurfaceView instance
height: of the SurfaceView instance
*/
@Override
public void surfaceChanged() {
Log.i(TAG, "surfaceChanged(): called");
// This is where to calculate the optimal size of the display and set the aspect ratio
// of the surface view (probably the service holder). Also where to Create transformation
// matrix to scale and then rotate surface view, if the app is going to handle orientation
// changes.
if (!mImageReaderCreated) {
surfaceCreated();
}
if (!isCamera2DeviceOpen()) {
openCamera2(mCamera2DeviceID);
}
if (isCamera2DeviceOpen() && (null == mYUV_CaptureAndSendSession)) {
startCaptureAndForwardFramesSession();
}
}
private void openCamera2(int camera2DeviceID) {
Log.i(TAG, "openCamera2(): called");
CameraManager camera2DeviceMgr = (CameraManager)mAppContext.getSystemService(Context.CAMERA_SERVICE);
try {
if (PackageManager.PERMISSION_GRANTED == ContextCompat.checkSelfPermission(mAppContext, Manifest.permission.CAMERA)) {
camera2DeviceMgr.openCamera(Integer.toString(camera2DeviceID), mCamera2DeviceStateCallback, null);
return;
}
} catch (CameraAccessException ex) {
Log.e(TAG, "openCamera2(): CameraAccessException caught, " + ex.getMessage());
} catch (Exception ex) {
Log.e(TAG, "openCamera2(): exception caught, " + ex.getMessage());
}
if (null == camera2DeviceMgr) {
Log.e(TAG, "openCamera2(): Camera2 DeviceMgr not set");
}
Log.e(TAG, "openCamera2(): abnormal exit");
}
private int mCamera2DeviceID = -1;
private CaptureRequest.Builder mCaptureRequestBuilder;
private CameraCaptureSession mYUV_CaptureAndSendSession;
private void startCaptureAndForwardFramesSession() {
if ((null == mCameraDevice) || (!mImageReaderCreated) /*|| (null == mPreviewSize)*/) {
return;
}
closeYUV_CaptureAndForwardSession();
try {
mCaptureRequestBuilder = mCameraDevice.createCaptureRequest(CameraDevice.TEMPLATE_PREVIEW);
List<Surface> surfaces = new ArrayList<>();
Surface surfaceInstance;
surfaceInstance = mImageReader.getSurface();
surfaces.add(surfaceInstance);
mCaptureRequestBuilder.addTarget(surfaceInstance);
mCameraDevice.createCaptureSession(
surfaces, // Output surfaces
new CameraCaptureSession.StateCallback() {
@Override
public void onConfigured(@NonNull CameraCaptureSession session) {
try {
if (mCameraEventListener != null) {
mCameraEventListener.cameraStreamStarted(mImageReaderVideoSize.getWidth(), mImageReaderVideoSize.getHeight(), "YUV_420_888", mCamera2DeviceID, false);
}
mYUV_CaptureAndSendSession = session;
// Session to repeat request to update passed in camSensorSurface
mYUV_CaptureAndSendSession.setRepeatingRequest(mCaptureRequestBuilder.build(), /* CameraCaptureSession.CaptureCallback cameraEventListener: */null, /* Background thread: */ null);
} catch (CameraAccessException e) {
e.printStackTrace();
}
}
@Override
public void onConfigureFailed(@NonNull CameraCaptureSession session) {
Toast.makeText(mAppContext, "Unable to setup camera sensor capture session", Toast.LENGTH_SHORT).show();
}
}, // Callback for capture session state updates
null); // Secondary thread message queue
} catch (CameraAccessException ex) {
ex.printStackTrace();
}
}
@Override
public void closeCameraDevice() {
closeYUV_CaptureAndForwardSession();
if (null != mCameraDevice) {
mCameraDevice.close();
mCameraDevice = null;
}
if (null != mImageReader) {
mImageReader.close();
mImageReader = null;
}
if (mCameraEventListener != null) {
mCameraEventListener.cameraStreamStopped();
}
mImageReaderCreated = false;
}
private void closeYUV_CaptureAndForwardSession() {
if (mYUV_CaptureAndSendSession != null) {
mYUV_CaptureAndSendSession.close();
mYUV_CaptureAndSendSession = null;
}
}
/**
* Indicates whether or not camera2 device instance is available, opened, enabled.
*/
@Override
public boolean isCamera2DeviceOpen() {
return (null != mCameraDevice);
}
@Override
public boolean isImageReaderCreated() {
return mImageReaderCreated;
}
}
Edit:
/**
* Override the draw function from ARRenderer.
*/
@Override
public void draw() {
super.draw();
fpsCounter.frame();
if(maxfps<fpsCounter.getFPS()){
maxfps= fpsCounter.getFPS();
}
logger.log(Level.INFO, "FPS: " + maxfps);
// Initialize GL
GLES20.glEnable(GLES20.GL_CULL_FACE);
GLES20.glEnable(GLES20.GL_DEPTH_TEST);
GLES20.glFrontFace(GLES20.GL_CCW);
// Look for trackables, and draw on each found one.
for (int trackableUID : trackables.keySet()) {
// If the trackable is visible, apply its transformation, and render the object
float[] modelViewMatrix = new float[16];
if (ARController.getInstance().queryTrackableVisibilityAndTransformation(trackableUID, modelViewMatrix)) {
float[] projectionMatrix = ARController.getInstance().getProjectionMatrix(10.0f, 10000.0f);
trackables.get(trackableUID).draw(projectionMatrix, modelViewMatrix);
}
}
}
Related
I'm making an Android application that uses GLSurfaceView and Android media effects to apply filters to images. The implementation is working fine, except it appears as if there's a memory leak in the app. With each addition of an effect, the graphics memory used by the app increases significantly, and this memory is never released.
Does anyone know how to reduce the memory footprint of this class, or at least what is causing the high memory usage, and point me in the right direction to solving this?
I've tried to call GLES20.glDeleteTextures(2, mTextures, 0) just before GLES20.glGenTextures(2, mTextures, 0), but it didn't help. I've also experimented with GLES20.glFinish(), GLES20.glFlush() and mEffectContext.release(), with no success or any observable change.
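One direction I'm considering (an untested sketch using the mEffect/mEffectContext/mTextures fields from the class below; I don't know whether it actually fixes the leak) is to release everything together on the GL thread when the view is torn down, instead of per frame:

// Call e.g. from onPause(); must run on the GL thread, hence queueEvent() on the GLSurfaceView.
void releaseGlResources() {
    queueEvent(new Runnable() {
        @Override
        public void run() {
            if (mEffect != null) {
                mEffect.release();
                mEffect = null;
            }
            if (mEffectContext != null) {
                mEffectContext.release();
                mEffectContext = null;
            }
            GLES20.glDeleteTextures(2, mTextures, 0);
            mInitialized = false;
        }
    });
}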
I've attached a screenshot from the profiling I've performed and also relevant pieces of the class itself below.
This is my first Stack Overflow question, so please feel free to correct me in case I've broken any community guidelines.
class ImageFilterView extends GLSurfaceView implements GLSurfaceView.Renderer {
private int[] mTextures = new int[2];
private void init() {
setEGLContextClientVersion(2);
setRenderer(this);
setRenderMode(GLSurfaceView.RENDERMODE_WHEN_DIRTY);
setFilterEffect(NONE);
}
@Override
public void onSurfaceChanged(GL10 gl, int width, int height) {
if (mTexRenderer != null) {
mTexRenderer.updateViewSize(width, height);
}
}
@Override
public void onDrawFrame(GL10 gl) {
if (!mInitialized) {
//Only need to do this once
mEffectContext = EffectContext.createWithCurrentGlContext();
mTexRenderer.init();
loadTextures();
mInitialized = true;
}
if (mCurrentEffect != NONE || mCustomEffect != null) {
//if an effect is chosen initialize it and apply it to the texture
initEffect();
applyEffect();
}
renderResult();
if (isSaveImage) {
final Bitmap mFilterBitmap = BitmapUtil.createBitmapFromGLSurface(this, gl);
Log.e(TAG, "onDrawFrame: " + mFilterBitmap);
isSaveImage = false;
if (mOnSaveBitmap != null) {
new Handler(Looper.getMainLooper()).post(new Runnable() {
@Override
public void run() {
mOnSaveBitmap.onBitmapReady(mFilterBitmap);
}
});
}
}
}
void setFilterEffect(PhotoFilter effect) {
mCurrentEffect = effect;
mCustomEffect = null;
requestRender();
}
void saveBitmap(OnSaveBitmap onSaveBitmap) {
mOnSaveBitmap = onSaveBitmap;
isSaveImage = true;
requestRender();
}
private void loadTextures() {
GLES20.glGenTextures(2, mTextures, 0);
// Load input bitmap
if (mSourceBitmap != null) {
mImageWidth = mSourceBitmap.getWidth();
mImageHeight = mSourceBitmap.getHeight();
mTexRenderer.updateTextureSize(mImageWidth, mImageHeight);
// Upload to texture
GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, mTextures[0]);
GLUtils.texImage2D(GLES20.GL_TEXTURE_2D, 0, mSourceBitmap, 0);
// Set texture parameters
GLToolbox.initTexParams();
}
}
private void initEffect() {
EffectFactory effectFactory = mEffectContext.getFactory();
if (mEffect != null) {
mEffect.release();
}
if (mCustomEffect != null) {
mEffect = effectFactory.createEffect(mCustomEffect.getEffectName());
Map<String, Object> parameters = mCustomEffect.getParameters();
for (Map.Entry<String, Object> param : parameters.entrySet()) {
mEffect.setParameter(param.getKey(), param.getValue());
}
} else {
// Initialize the correct effect based on the selected menu/action item
switch (mCurrentEffect) {
case VIGNETTE:
mEffect = effectFactory.createEffect(EFFECT_VIGNETTE);
mEffect.setParameter("scale", .5f);
break;
}
}
}
private void applyEffect() {
mEffect.apply(mTextures[0], mImageWidth, mImageHeight, mTextures[1]);
}
private void renderResult() {
if (mCurrentEffect != NONE || mCustomEffect != null) {
// An effect is chosen, so render the result of applyEffect()
mTexRenderer.renderTexture(mTextures[1]);
} else {
// No effect is chosen, so just render the original bitmap
mTexRenderer.renderTexture(mTextures[0]);
}
}
}
Screenshot of profiling: memory increases every time a filter is added.
My use case is:
tap on the screen and save the "point" as starting anchor
tap on the screen second time and save the "point" as end anchor
push the button that will move the object from starting to end anchor
I've built my own node that uses an ObjectAnimator, similar to the one in the solar system example. My only problem is that I don't know how to determine the start and end points for the evaluator. My first thought was to take the x, y, z from the Pose of the start and end anchors:
Vector3 start = new Vector3(startAnchor.getPose().tx(), startAnchor.getPose().ty(), startAnchor.getPose().tz());
Vector3 end = new Vector3(endAnchor.getPose().tx(), endAnchor.getPose().ty(), endAnchor.getPose().tz());
…
movingAnimation.setObjectValues(startingPoint, endPoint);
movingAnimation.setPropertyName("localPosition");
movingAnimation.setEvaluator(new Vector3Evaluator());
but when I do that, the animation runs between completely different places.
I haven't found any reference to built-in tools for such an operation.
I'm using Sceneform.
So the question is: How to make a fluent animation (a simple slide is enough) from anchor A to anchor B?
I did this in the HelloSceneform sample. I created the first AnchorNode and added the "andy" node as a child. On the next tap, I created the endPosition AnchorNode and started the animation to move to that position.
The thing to remember is that if you are using the positions of objects with a different parent, you want to use worldPosition vs. localPosition.
private void onPlaneTap(HitResult hitResult, Plane plane, MotionEvent motionEvent) {
if (andyRenderable == null) {
return;
}
// Create the Anchor.
Anchor anchor = hitResult.createAnchor();
// Create the starting position.
if (startNode == null) {
startNode = new AnchorNode(anchor);
startNode.setParent(arFragment.getArSceneView().getScene());
// Create the transformable andy and add it to the anchor.
andy = new Node();
andy.setParent(startNode);
andy.setRenderable(andyRenderable);
} else {
// Create the end position and start the animation.
endNode = new AnchorNode(anchor);
endNode.setParent(arFragment.getArSceneView().getScene());
startWalking();
}
}
private void startWalking() {
objectAnimation = new ObjectAnimator();
objectAnimation.setAutoCancel(true);
objectAnimation.setTarget(andy);
// All the positions should be world positions
// The first position is the start, and the second is the end.
objectAnimation.setObjectValues(andy.getWorldPosition(), endNode.getWorldPosition());
// Use setWorldPosition to position andy.
objectAnimation.setPropertyName("worldPosition");
// The Vector3Evaluator is used to evaluate between two Vector3s and return the next
// Vector3. The default is to use lerp.
objectAnimation.setEvaluator(new Vector3Evaluator());
// This makes the animation linear (smooth and uniform).
objectAnimation.setInterpolator(new LinearInterpolator());
// Duration in ms of the animation.
objectAnimation.setDuration(500);
objectAnimation.start();
}
/**
* This is an example activity that uses the Sceneform UX package to make common AR tasks easier.
*/
public class MainActivity extends AppCompatActivity {
private static final String TAG = MainActivity.class.getSimpleName();
private static final double MIN_OPENGL_VERSION = 3.1;
Session mSession;
private ArFragment arFragment;
private ArSceneView arSceneView;
private ModelRenderable andyRenderable;
private boolean shouldConfigureSession = false;
private boolean modelAdded = false;
private ObjectAnimator objectAnimation;
private TransformableNode andy;
private AnchorNode endNode;
private GestureDetector trackableGestureDetector;
/**
* Returns false and displays an error message if Sceneform can not run, true if Sceneform can run
* on this device.
* <p>
* <p>Sceneform requires Android N on the device as well as OpenGL 3.1 capabilities.
* <p>
* <p>Finishes the activity if Sceneform can not run
*/
public static boolean checkIsSupportedDeviceOrFinish(final Activity activity) {
if (Build.VERSION.SDK_INT < VERSION_CODES.N) {
Log.e(TAG, "Sceneform requires Android N or later");
Toast.makeText(activity, "Sceneform requires Android N or later", Toast.LENGTH_LONG).show();
activity.finish();
return false;
}
String openGlVersionString =
((ActivityManager) activity.getSystemService(Context.ACTIVITY_SERVICE))
.getDeviceConfigurationInfo()
.getGlEsVersion();
if (Double.parseDouble(openGlVersionString) < MIN_OPENGL_VERSION) {
Log.e(TAG, "Sceneform requires OpenGL ES 3.1 later");
Toast.makeText(activity, "Sceneform requires OpenGL ES 3.1 or later", Toast.LENGTH_LONG)
.show();
activity.finish();
return false;
}
return true;
}
@Override
@SuppressWarnings({"AndroidApiChecker", "FutureReturnValueIgnored"})
// CompletableFuture requires api level 24
// FutureReturnValueIgnored is not valid
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
if (!checkIsSupportedDeviceOrFinish(this)) {
return;
}
setContentView(R.layout.activity_main);
ActivityCompat.requestPermissions(this, new String[]{Manifest.permission.READ_EXTERNAL_STORAGE}, 105);
arFragment = (ArFragment) getSupportFragmentManager().findFragmentById(R.id.ux_fragment);
if (arFragment != null) {
arFragment.getPlaneDiscoveryController().hide();
arFragment.getPlaneDiscoveryController().setInstructionView(null);
}
arSceneView = arFragment.getArSceneView();
arSceneView.getScene().addOnUpdateListener((this::onUpdateFrame));
arFragment.getArSceneView().getScene().addOnPeekTouchListener(this::handleOnTouch);
this.trackableGestureDetector = new GestureDetector(this, new GestureDetector.SimpleOnGestureListener() {
public boolean onSingleTapUp(MotionEvent e) {
onSingleTap(e);
return true;
}
public boolean onDown(MotionEvent e) {
return true;
}
});
// When you build a Renderable, Sceneform loads its resources in the background while returning
// a CompletableFuture. Call thenAccept(), handle(), or check isDone() before calling get().
File file = new File(Environment.getExternalStorageDirectory(), "model.sfb");
Uri photoURI = Uri.fromFile(file);
Callable callable = () -> (InputStream) new FileInputStream(file);
FutureTask task = new FutureTask<>(callable);
new Thread(task).start();
ModelRenderable.builder()
.setSource(this, R.raw.model) //.setSource(this, callable)
.build()
.thenAccept(renderable -> andyRenderable = renderable)
.exceptionally(
throwable -> {
Toast toast =
Toast.makeText(this, "Unable to load andy renderable", Toast.LENGTH_LONG);
toast.setGravity(Gravity.CENTER, 0, 0);
toast.show();
return null;
});
arFragment.setOnTapArPlaneListener(
(HitResult hitResult, Plane plane, MotionEvent motionEvent) -> {
if (andyRenderable == null) {
return;
}
if (modelAdded) {
endNode = new AnchorNode(hitResult.createAnchor());
endNode.setParent(arFragment.getArSceneView().getScene());
startWalking();
}
});
}
private void handleOnTouch(HitTestResult hitTestResult, MotionEvent motionEvent) {
// First call ArFragment's listener to handle TransformableNodes.
arFragment.onPeekTouch(hitTestResult, motionEvent);
// Check for touching a Sceneform node
if (hitTestResult.getNode() != null) {
return;
}
// Otherwise call gesture detector.
trackableGestureDetector.onTouchEvent(motionEvent);
}
private void onSingleTap(MotionEvent motionEvent) {
Frame frame = arFragment.getArSceneView().getArFrame();
if (frame != null && motionEvent != null && frame.getCamera().getTrackingState() == TrackingState.TRACKING) {
for (HitResult hit : frame.hitTest(motionEvent)) {
Trackable trackable = hit.getTrackable();
if (trackable instanceof Plane && ((Plane) trackable).isPoseInPolygon(hit.getHitPose())) {
Plane plane = (Plane) trackable;
endNode = new AnchorNode(plane.createAnchor(plane.getCenterPose()));
endNode.setParent(arFragment.getArSceneView().getScene());
startWalking();
// Handle plane hits.
break;
} else if (trackable instanceof Point) {
// Handle point hits
Point point = (Point) trackable;
endNode = new AnchorNode(point.createAnchor(hit.getHitPose()));
endNode.setParent(arFragment.getArSceneView().getScene());
startWalking();
} else if (trackable instanceof AugmentedImage) {
// Handle image hits.
AugmentedImage image = (AugmentedImage) trackable;
endNode = new AnchorNode(image.createAnchor(image.getCenterPose()));
endNode.setParent(arFragment.getArSceneView().getScene());
startWalking();
}
}
}
}
private void startWalking() {
objectAnimation = new ObjectAnimator();
objectAnimation.setAutoCancel(true);
objectAnimation.setTarget(andy);
// All the positions should be world positions
// The first position is the start, and the second is the end.
objectAnimation.setObjectValues(andy.getWorldPosition(), endNode.getWorldPosition());
// Use setWorldPosition to position andy.
objectAnimation.setPropertyName("worldPosition");
// The Vector3Evaluator is used to evaluate between two Vector3s and return the next
// Vector3. The default is to use lerp.
objectAnimation.setEvaluator(new Vector3Evaluator());
// This makes the animation linear (smooth and uniform).
objectAnimation.setInterpolator(new LinearInterpolator());
// Duration in ms of the animation.
objectAnimation.setDuration(500);
objectAnimation.start();
}
private void configureSession() {
Config config = new Config(mSession);
if (!setupAugmentedImageDb(config)) {
Toast.makeText(this, "Could not setup augmented", Toast.LENGTH_SHORT).show();
}
config.setUpdateMode(Config.UpdateMode.LATEST_CAMERA_IMAGE);
mSession.configure(config);
}
@Override
public void onPause() {
super.onPause();
if (mSession != null) {
// Note that the order matters - GLSurfaceView is paused first so that it does not try
// to query the session. If Session is paused before GLSurfaceView, GLSurfaceView may
// still call session.update() and get a SessionPausedException.
arSceneView.pause();
mSession.pause();
}
}
@Override
protected void onResume() {
super.onResume();
if (mSession == null) {
String message = null;
Exception exception = null;
try {
mSession = new Session(this);
} catch (UnavailableArcoreNotInstalledException e) {
message = "Please install ARCore";
exception = e;
} catch (UnavailableApkTooOldException e) {
message = "Please update ARCore";
exception = e;
} catch (UnavailableSdkTooOldException e) {
message = "Please update this app";
exception = e;
} catch (Exception e) {
message = "This device does not support AR";
exception = e;
}
if (message != null) {
Toast.makeText(this, message, Toast.LENGTH_SHORT).show();
Log.e(TAG, "Exception creating session", exception);
return;
}
shouldConfigureSession = true;
}
if (shouldConfigureSession) {
configureSession();
shouldConfigureSession = false;
arSceneView.setupSession(mSession);
}
}
private void onUpdateFrame(FrameTime frameTime) {
Frame frame = arSceneView.getArFrame();
Collection<AugmentedImage> updatedAugmentedImages =
frame.getUpdatedTrackables(AugmentedImage.class);
Log.d("size----", String.valueOf(updatedAugmentedImages.size()));
for (AugmentedImage augmentedImage : updatedAugmentedImages) {
if (augmentedImage.getTrackingState() == TrackingState.TRACKING) {
// Check camera image matches our reference image
if (augmentedImage.getName().contains("car")) {
if (!modelAdded) {
modelAdded = true;
Anchor anchor = augmentedImage.createAnchor(augmentedImage.getCenterPose());
AnchorNode anchorNode = new AnchorNode(anchor);
anchorNode.setParent(arFragment.getArSceneView().getScene());
// Create the transformable andy and add it to the anchor.
andy = new TransformableNode(arFragment.getTransformationSystem());
andy.setParent(anchorNode);
andy.setRenderable(andyRenderable);
andy.select();
}
}
}
}
}
private boolean setupAugmentedImageDb(Config config) {
AugmentedImageDatabase augmentedImageDatabase;
Bitmap augmentedImageBitmap = loadAugmentedImage();
if (augmentedImageBitmap == null) {
return false;
}
augmentedImageDatabase = new AugmentedImageDatabase(mSession);
augmentedImageDatabase.addImage("car", augmentedImageBitmap);
config.setAugmentedImageDatabase(augmentedImageDatabase);
return true;
}
private Bitmap loadAugmentedImage() {
try (InputStream is = getAssets().open("car.jpeg")) {
return BitmapFactory.decodeStream(is);
} catch (IOException e) {
Log.e(TAG, "IO exception loading augmented image bitmap.", e);
}
return null;
}
}
I've been trying to route the images from the camera to an ImageReader so that I can manipulate the images directly using the Camera2 API. When I have the capture session stream to a SurfaceView, the stream works just fine. When I then set the capture session stream to my ImageReader, I notice that the images are somehow invalid.
In my ImageReader's OnImageAvailable callback function, I pull the next available Image and try to read it. This is where I have the problem. The Image isn't null and the planes are there, but the planes' buffers are null at first. When I try to grab the buffers, they are suddenly not null, but trying to read from them crashes the app without a stack trace. Further, the pixel and row strides in the planes are set to 0. The width and height of the image are properly set, though.
Therefore, I think that I'm not setting up my ImageReader correctly. The question, then, is: what am I not doing correctly?
Code:
public class CompatibleCamera {
private static final int CAMERA2_API_LEVEL = 23;
public static final int FORMAT_RAW = ImageFormat.RAW_SENSOR;
public static final int FORMAT_JPEG = ImageFormat.JPEG;
private static final int MAX_IMAGES = 2;
// Interface for the user to use. User supplies the function to manipulate the image
public interface ImageTransform
{
void doTransform(Image image);
}
//***********Camera 2 API Members***********
// The camera2 API CameraManager. Used to access the camera device
private CameraManager mCamera2Manager;
// The information used by the device to reference the camera. Not a camera object itself
private CameraDevice mCamera2Device;
private String mCamera2DeviceID = "";
// The class that allows us to get the camera's image
private ImageReader mImageReader;
// This listener is where we have the programmer deal with the image. Just edit the interface
private ImageReader.OnImageAvailableListener mListener;
// This is the thread for the handler. It keeps it off the UI thread so we don't block the GUI
private HandlerThread mCameraCaptureHandlerThread;
// This runs in the background and handles the camera feed, activating the OnImageAvailableListener
private Handler mCameraCaptureHandler;
private HandlerThread mImageAvailableHandlerThread;
// This runs in the background and handles the camera feed, activating the OnImageAvailableListener
private Handler mImageAvailableHandler;
// This object is the camera feed, essentially. We store it so we can properly close it later
private CameraCaptureSession cameraCaptureSession;
// DEBUG
private boolean TEST_SURFACE_VIEW = false;
private Surface dbSurface;
// Mutex lock. Locks and unlocks when the ImageReader is pulling and processing an image
private Semaphore imageReaderLock = new Semaphore(1);
//***********Common Members***********
// The context of the activity holding this object
private Context mContext;
// Our ImageTransform implementation to alter the image as it comes in
private ImageTransform mTransform;
private int iImageFormat= FORMAT_RAW;
//==========Methods==========
public CompatibleCamera(Context context, ImageTransform transform, int imageFormat)
{
mContext = context;
mTransform = transform;
mListener = new ImageReader.OnImageAvailableListener() {
@Override
public void onImageAvailable(ImageReader imageReader) {
try {
imageReaderLock.acquire();
Image image = imageReader.acquireNextImage();
//<--------------Problem With Image is Here-------------->
mTransform.doTransform(image);
image.close();
imageReaderLock.release();
}
catch(InterruptedException ex)
{
ex.printStackTrace();
}
}
};
}
private boolean camera2GetManager()
{
//----First, get the CameraManager and a Camera Device----
mCamera2Manager = (CameraManager) mContext.getSystemService(Context.CAMERA_SERVICE);
if (mCamera2Manager == null) {
System.out.println(" DEBUG: Manager is null");
return false;
}
else {
System.out.println(" DEBUG: Camera Manager obtained");
try {
String[] cameraIDs = mCamera2Manager.getCameraIdList();
for (String cameraID : cameraIDs) {
CameraCharacteristics cameraCharacteristics = mCamera2Manager.getCameraCharacteristics(cameraID);
if (cameraCharacteristics.get(CameraCharacteristics.LENS_FACING) ==
CameraCharacteristics.LENS_FACING_BACK) {
mCamera2DeviceID = cameraID;
break;
}
}
if (mCamera2DeviceID.equals("")) {
System.out.println("No back camera, exiting");
return false;
}
System.out.println(" DEBUG: Camera Device obtained");
// Open the Camera Device
} catch (Exception ex) {
ex.printStackTrace();
return false;
}
return camera2OpenCamera();
}
}
private boolean camera2SetupImageReader()
{
// Get the largest image size available
CameraCharacteristics cameraCharacteristics;
try {
cameraCharacteristics= mCamera2Manager.getCameraCharacteristics(mCamera2DeviceID);
} catch(Exception e) {
e.printStackTrace();
return false;
}
StreamConfigurationMap map = cameraCharacteristics.get(
CameraCharacteristics.SCALER_STREAM_CONFIGURATION_MAP);
Size largestSize = Collections.max(
Arrays.asList(map.getOutputSizes(iImageFormat)),
new CompareSizesByArea());
// Set up the handler
mCameraCaptureHandlerThread = new HandlerThread("cameraCaptureHandlerThread");
mCameraCaptureHandlerThread.start();
mCameraCaptureHandler = new Handler(mCameraCaptureHandlerThread.getLooper());
mImageAvailableHandlerThread = new HandlerThread("imageReaderHandlerThread");
mImageAvailableHandlerThread.start();
mImageAvailableHandler = new Handler(mImageAvailableHandlerThread.getLooper());
mImageReader = ImageReader.newInstance( largestSize.getWidth(),
largestSize.getHeight(),
iImageFormat,
MAX_IMAGES);
mImageReader.setOnImageAvailableListener(mListener, mImageAvailableHandler);
// This callback is used to asynchronously set up the capture session on our end
final CameraCaptureSession.StateCallback captureStateCallback = new CameraCaptureSession.StateCallback() {
// When configured, set the target surface
@Override
public void onConfigured(@NonNull CameraCaptureSession session) {
try
{
CaptureRequest.Builder requestBuilder = session.getDevice().createCaptureRequest(CameraDevice.TEMPLATE_RECORD);
if (TEST_SURFACE_VIEW)
requestBuilder.addTarget(dbSurface);
else
requestBuilder.addTarget(mImageReader.getSurface());
//set to null - image data will be produced but will not receive metadata
session.setRepeatingRequest(requestBuilder.build(), null, mCameraCaptureHandler);
cameraCaptureSession = session;
}
catch (Exception ex)
{
ex.printStackTrace();
}
}
@Override
public void onConfigureFailed(@NonNull CameraCaptureSession cameraCaptureSession) {
System.out.println("Failed to configure the capture session :(");
}
};
ArrayList<Surface> surfaces = new ArrayList<>();
if (TEST_SURFACE_VIEW)
surfaces.add(dbSurface);
else
surfaces.add(mImageReader.getSurface());
try
{
mCamera2Device.createCaptureSession(surfaces, captureStateCallback, mCameraCaptureHandler);
}
catch(Exception ex)
{
ex.printStackTrace();
}
return true;
}
}
RAW_SENSOR is a special beast of formats.
General raw camera sensor image format, usually representing a single-channel Bayer-mosaic image. Each pixel color sample is stored with 16 bits of precision.
The layout of the color mosaic, the maximum and minimum encoding values of the raw pixel data, the color space of the image, and all other needed information to interpret a raw sensor image must be queried from the android.hardware.camera2.CameraDevice which produced the image.
You should not attempt to use its stride info directly, as if it were a YUV frame.
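As a rough illustration of that last point, the interpretation metadata lives in the CameraCharacteristics, and the single RAW_SENSOR plane is simply 16 bits per sample. This is an untested sketch that reuses the mCamera2Manager/mCamera2DeviceID names and the acquired image from the question's code; imports and exception handling are omitted:

// Metadata needed to interpret the raw frame comes from the camera, not from the Image planes.
CameraCharacteristics chars = mCamera2Manager.getCameraCharacteristics(mCamera2DeviceID);
Integer cfaLayout  = chars.get(CameraCharacteristics.SENSOR_INFO_COLOR_FILTER_ARRANGEMENT); // Bayer pattern
Integer whiteLevel = chars.get(CameraCharacteristics.SENSOR_INFO_WHITE_LEVEL);              // max raw value
BlackLevelPattern blackLevels = chars.get(CameraCharacteristics.SENSOR_BLACK_LEVEL_PATTERN);

// Plane 0 of a RAW_SENSOR Image holds one 16-bit sample per pixel.
Image.Plane rawPlane = image.getPlanes()[0];
ShortBuffer rawSamples = rawPlane.getBuffer().order(ByteOrder.nativeOrder()).asShortBuffer();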
I have developed a barcode decoding application for Android, using the Google Vision library for GS1 DataMatrix and the ZBar library for GS1-128 barcodes. I am unable to read the FNC1 character at the first position of a GS1-128 barcode using the ZBar library.
The ZBar library does not show any sign of the FNC1 character at the start of the barcode!
Any solutions?
Help is appreciated.
Below is my ZBar Scanner Activity
@SuppressWarnings("deprecation")
public class ZBarFirstScannerActivity extends AppCompatActivity{
//TextView tv;
ImageView iv;
LinearLayout ll;
private Camera mCamera;
private CameraPreview mPreview;
private Handler autoFocusHandler;
private ImageScanner scanner;
private boolean barcodeScanned = false;
private boolean previewing = true;
TextView tv;
static {
System.loadLibrary("iconv");
}
static {
System.loadLibrary("zbarjni");
}
public void onCreate(Bundle savedInstanceState)
{
super.onCreate(savedInstanceState);
setContentView(R.layout.barcode_capture1d);
tv = (TextView) findViewById(R.id.textVertical);
tv.setRotation(90);
initToolbar();
autoFocusHandler = new Handler();
mCamera = getCameraInstance();
// Instance barcode scanner
scanner = new ImageScanner();
scanner.setConfig(0, Config.X_DENSITY, 1);
scanner.setConfig(0, Config.Y_DENSITY, 1);
scanner.setConfig(Symbol.CODE128, Config.ENABLE,1);
scanner.setConfig(Symbol.EAN13, Config.ENABLE,1);
mPreview = new CameraPreview(this, mCamera, previewCb, autoFocusCB);
FrameLayout preview = (FrameLayout)findViewById(R.id.cameraPreview);
preview.addView(mPreview);
}
private void initToolbar() {
final Toolbar toolbar = (Toolbar) findViewById(R.id.toolbar);
setSupportActionBar(toolbar);
final ActionBar actionBar = getSupportActionBar();
if (actionBar != null) {
actionBar.setHomeButtonEnabled(true);
actionBar.setHomeAsUpIndicator(ContextCompat.getDrawable(this, R.drawable.abc_ic_ab_back_mtrl_am_alpha));
actionBar.setDisplayHomeAsUpEnabled(true);
}
}
/** A safe way to get an instance of the Camera object. */
public static Camera getCameraInstance()
{
Camera c = null;
try
{
c = Camera.open();
} catch (Exception e)
{
//nada
}
return c;
}
private void releaseCamera()
{
if (mCamera != null)
{
previewing = false;
mCamera.setPreviewCallback(null);
mCamera.release();
mCamera = null;
}
}
PreviewCallback previewCb = new PreviewCallback()
{
public void onPreviewFrame(byte[] data, Camera camera)
{
Camera.Parameters parameters = camera.getParameters();
Size size = parameters.getPreviewSize();
Image barcode = new Image(size.width, size.height, "Y800");
barcode.setData(data);
int result = scanner.scanImage(barcode);
if (result != 0)
{
previewing = false;
mCamera.setPreviewCallback(null);
mCamera.stopPreview();
SymbolSet syms = scanner.getResults();
for (Symbol sym : syms)
{
barcodeScanned = true;
Intent returnIntent = new Intent();
returnIntent.putExtra("BARCODE", sym.getData());
setResult(MainActivity.BAR_CODE_TYPE_128,returnIntent);
releaseCamera();
finish();
break;
}
}
}
};
// Mimic continuous auto-focusing
AutoFocusCallback autoFocusCB = new AutoFocusCallback()
{
public void onAutoFocus(boolean success, Camera camera)
{
autoFocusHandler.postDelayed(doAutoFocus, 3000);
}
};
private Runnable doAutoFocus = new Runnable()
{
public void run()
{
if (previewing)
mCamera.autoFocus(autoFocusCB);
}
};
public void onPause() {
super.onPause();
releaseCamera();
}
public void onResume(){
super.onResume();
new ZBarFirstScannerActivity();
}
@Override
public void onBackPressed() {
releaseCamera();
finish();
}
@Override
public boolean onOptionsItemSelected(MenuItem item) {
int id = item.getItemId();
if (id == android.R.id.home) {
onBackPressed();
return true;
}
return super.onOptionsItemSelected(item);
}
}
Below is my Google Scanner Activity
public final class GoogleScannerActivity extends AppCompatActivity {
private static final String TAG = "Barcode-reader";
// intent request code to handle updating play services if needed.
private static final int RC_HANDLE_GMS = 9001;
// permission request codes need to be < 256
private static final int RC_HANDLE_CAMERA_PERM = 2;
// constants used to pass extra data in the intent
public static final String AutoFocus = "AutoFocus";
public static final String UseFlash = "UseFlash";
public static final String BarcodeObject = "Barcode";
Bitmap bmp;
FileOutputStream fos = null;
private Camera c;
Switch aSwitch;
private CameraSource mCameraSource;
private CameraSourcePreview mPreview;
private GraphicOverlay<BarcodeGraphic> mGraphicOverlay;
// helper objects for detecting taps and pinches.
private ScaleGestureDetector scaleGestureDetector;
private GestureDetector gestureDetector;
/**
* Initializes the UI and creates the detector pipeline.
*/
@Override
public void onCreate(Bundle icicle) {
super.onCreate(icicle);
setContentView(R.layout.barcode_capture2d);
initToolbar();
ActivitySource.caller = this;
mPreview = (CameraSourcePreview) findViewById(R.id.preview);
mGraphicOverlay = (GraphicOverlay<BarcodeGraphic>) findViewById(R.id.graphicOverlay);
boolean autoFocus = true;
boolean useFlash = false;
// Check for the camera permission before accessing the camera. If the
// permission is not granted yet, request permission.
int rc = ActivityCompat.checkSelfPermission(this, Manifest.permission.CAMERA);
if (rc == PackageManager.PERMISSION_GRANTED) {
createCameraSource(autoFocus, useFlash);
} else {
requestCameraPermission();
}
gestureDetector = new GestureDetector(this, new CaptureGestureListener());
scaleGestureDetector = new ScaleGestureDetector(this, new ScaleListener());
/*Snackbar.make(mGraphicOverlay, "Tap to capture. Pinch/Stretch to zoom",
Snackbar.LENGTH_LONG)
.show();*/
}
private void initToolbar() {
final Toolbar toolbar = (Toolbar) findViewById(R.id.toolbar);
setSupportActionBar(toolbar);
final ActionBar actionBar = getSupportActionBar();
if (actionBar != null) {
actionBar.setHomeButtonEnabled(true);
actionBar.setHomeAsUpIndicator(ContextCompat.getDrawable(this, R.drawable.abc_ic_ab_back_mtrl_am_alpha));
actionBar.setDisplayHomeAsUpEnabled(true);
}
}
private Camera.Size getBestPreviewSize(int width, int height, Camera.Parameters parameters){
Camera.Size bestSize = null;
List<Camera.Size> sizeList = parameters.getSupportedPreviewSizes();
bestSize = sizeList.get(0);
for(int i = 1; i < sizeList.size(); i++){
if((sizeList.get(i).width * sizeList.get(i).height) >
(bestSize.width * bestSize.height)){
bestSize = sizeList.get(i);
}
}
return bestSize;
}
/**
* Handles the requesting of the camera permission. This includes
* showing a "Snackbar" message of why the permission is needed then
* sending the request.
*/
private void requestCameraPermission() {
Log.w(TAG, "Camera permission is not granted. Requesting permission");
final String[] permissions = new String[]{Manifest.permission.CAMERA};
if (!ActivityCompat.shouldShowRequestPermissionRationale(this,
Manifest.permission.CAMERA)) {
ActivityCompat.requestPermissions(this, permissions, RC_HANDLE_CAMERA_PERM);
return;
}
final Activity thisActivity = this;
View.OnClickListener listener = new View.OnClickListener() {
@Override
public void onClick(View view) {
ActivityCompat.requestPermissions(thisActivity, permissions,
RC_HANDLE_CAMERA_PERM);
}
};
Snackbar.make(mGraphicOverlay, R.string.permission_camera_rationale,
Snackbar.LENGTH_INDEFINITE)
.setAction(R.string.ok, listener)
.show();
}
@Override
public boolean onTouchEvent(MotionEvent e) {
boolean b = scaleGestureDetector.onTouchEvent(e);
boolean c = gestureDetector.onTouchEvent(e);
return b || c || super.onTouchEvent(e);
}
/**
* Creates and starts the camera. Note that this uses a higher resolution in comparison
* to other detection examples to enable the barcode detector to detect small barcodes
* at long distances.
*
* Suppressing InlinedApi since there is a check that the minimum version is met before using
* the constant.
*/
@SuppressLint("InlinedApi")
private void createCameraSource(boolean autoFocus, boolean useFlash) {
Context context = getApplicationContext();
// A barcode detector is created to track barcodes. An associated multi-processor instance
// is set to receive the barcode detection results, track the barcodes, and maintain
// graphics for each barcode on screen. The factory is used by the multi-processor to
// create a separate tracker instance for each barcode.
BarcodeDetector barcodeDetector = new BarcodeDetector.Builder(context).setBarcodeFormats(Barcode.CODE_128 | Barcode.DATA_MATRIX | Barcode.QR_CODE).build();
BarcodeTrackerFactory barcodeFactory = new BarcodeTrackerFactory(mGraphicOverlay);
barcodeDetector.setProcessor(
new MultiProcessor.Builder<>(barcodeFactory).build());
if (!barcodeDetector.isOperational()) {
// Note: The first time that an app using the barcode or face API is installed on a
// device, GMS will download a native libraries to the device in order to do detection.
// Usually this completes before the app is run for the first time. But if that
// download has not yet completed, then the above call will not detect any barcodes
// and/or faces.
//
// isOperational() can be used to check if the required native libraries are currently
// available. The detectors will automatically become operational once the library
// downloads complete on device.
Log.w(TAG, "Detector dependencies are not yet available.");
// Check for low storage. If there is low storage, the native library will not be
// downloaded, so detection will not become operational.
IntentFilter lowstorageFilter = new IntentFilter(Intent.ACTION_DEVICE_STORAGE_LOW);
boolean hasLowStorage = registerReceiver(null, lowstorageFilter) != null;
if (hasLowStorage) {
Toast.makeText(this, R.string.low_storage_error, Toast.LENGTH_LONG).show();
Log.w(TAG, getString(R.string.low_storage_error));
}
}
// Creates and starts the camera. Note that this uses a higher resolution in comparison
// to other detection examples to enable the barcode detector to detect small barcodes
// at long distances.
CameraSource.Builder builder = new CameraSource.Builder(getApplicationContext(), barcodeDetector)
.setFacing(CameraSource.CAMERA_FACING_BACK)
.setRequestedPreviewSize(1100, 844)
.setRequestedFps(15.0f);
// make sure that auto focus is an available option
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.ICE_CREAM_SANDWICH) {
builder = builder.setFocusMode(
autoFocus ? Camera.Parameters.FOCUS_MODE_CONTINUOUS_PICTURE : null);
}
mCameraSource = builder
.setFlashMode(useFlash ? Camera.Parameters.FLASH_MODE_TORCH : null)
.build();
}
/**
* Restarts the camera.
*/
@Override
protected void onResume() {
super.onResume();
startCameraSource();
}
/**
* Stops the camera.
*/
@Override
protected void onPause() {
super.onPause();
if (mPreview != null) {
mPreview.stop();
}
}
/**
* Releases the resources associated with the camera source, the associated detectors, and the
* rest of the processing pipeline.
*/
@Override
protected void onDestroy() {
super.onDestroy();
if (mPreview != null) {
mPreview.release();
}
}
@Override
public void onRequestPermissionsResult(int requestCode,
@NonNull String[] permissions,
@NonNull int[] grantResults) {
if (requestCode != RC_HANDLE_CAMERA_PERM) {
Log.d(TAG, "Got unexpected permission result: " + requestCode);
super.onRequestPermissionsResult(requestCode, permissions, grantResults);
return;
}
if (grantResults.length != 0 && grantResults[0] == PackageManager.PERMISSION_GRANTED) {
Log.d(TAG, "Camera permission granted - initialize the camera source");
// we have permission, so create the camerasource
boolean autoFocus = getIntent().getBooleanExtra(AutoFocus,false);
boolean useFlash = getIntent().getBooleanExtra(UseFlash, false);
createCameraSource(autoFocus, useFlash);
return;
}
Log.e(TAG, "Permission not granted: results len = " + grantResults.length +
" Result code = " + (grantResults.length > 0 ? grantResults[0] : "(empty)"));
DialogInterface.OnClickListener listener = new DialogInterface.OnClickListener() {
public void onClick(DialogInterface dialog, int id) {
finish();
}
};
AlertDialog.Builder builder = new AlertDialog.Builder(this);
builder.setTitle("Multitracker sample")
.setMessage(R.string.no_camera_permission)
.setPositiveButton(R.string.ok, listener)
.show();
}
/**
* Starts or restarts the camera source, if it exists. If the camera source doesn't exist yet
* (e.g., because onResume was called before the camera source was created), this will be called
* again when the camera source is created.
*/
private void startCameraSource() throws SecurityException {
// check that the device has play services available.
int code = GoogleApiAvailability.getInstance().isGooglePlayServicesAvailable(
getApplicationContext());
if (code != ConnectionResult.SUCCESS) {
Dialog dlg =
GoogleApiAvailability.getInstance().getErrorDialog(this, code, RC_HANDLE_GMS);
dlg.show();
}
if (mCameraSource != null) {
try {
mPreview.start(mCameraSource, mGraphicOverlay);
} catch (IOException e) {
Log.e(TAG, "Unable to start camera source.", e);
mCameraSource.release();
mCameraSource = null;
}
}
}
/**
* onTap is called to capture the oldest barcode currently detected and
* return it to the caller.
*
* @param rawX - the raw position of the tap
* @param rawY - the raw position of the tap.
* @return true if the activity is ending.
*/
private boolean onTap(float rawX, float rawY) {
//TODO: use the tap position to select the barcode.
BarcodeGraphic graphic = mGraphicOverlay.getFirstGraphic();
Barcode barcode = null;
if (graphic != null) {
barcode = graphic.getBarcode();
if (barcode != null) {
Intent data = new Intent();
data.putExtra(BarcodeObject, barcode);
setResult(CommonStatusCodes.SUCCESS, data);
finish();
}
else {
Log.d(TAG, "barcode data is null");
}
}
else {
Log.d(TAG,"no barcode detected");
}
return barcode != null;
}
private class CaptureGestureListener extends GestureDetector.SimpleOnGestureListener {
@Override
public boolean onSingleTapConfirmed(MotionEvent e) {
return onTap(e.getRawX(), e.getRawY()) || super.onSingleTapConfirmed(e);
}
}
private class ScaleListener implements ScaleGestureDetector.OnScaleGestureListener {
/**
* Responds to scaling events for a gesture in progress.
* Reported by pointer motion.
*
* @param detector The detector reporting the event - use this to
* retrieve extended info about event state.
* @return Whether or not the detector should consider this event
* as handled. If an event was not handled, the detector
* will continue to accumulate movement until an event is
* handled. This can be useful if an application, for example,
* only wants to update scaling factors if the change is
* greater than 0.01.
*/
@Override
public boolean onScale(ScaleGestureDetector detector) {
return false;
}
/**
* Responds to the beginning of a scaling gesture. Reported by
* new pointers going down.
*
* @param detector The detector reporting the event - use this to
* retrieve extended info about event state.
* @return Whether or not the detector should continue recognizing
* this gesture. For example, if a gesture is beginning
* with a focal point outside of a region where it makes
* sense, onScaleBegin() may return false to ignore the
* rest of the gesture.
*/
@Override
public boolean onScaleBegin(ScaleGestureDetector detector) {
return true;
}
/**
* Responds to the end of a scale gesture. Reported by existing
* pointers going up.
* <p/>
* Once a scale has ended, {@link ScaleGestureDetector#getFocusX()}
* and {@link ScaleGestureDetector#getFocusY()} will return focal point
* of the pointers remaining on the screen.
*
* @param detector The detector reporting the event - use this to
* retrieve extended info about event state.
*/
@Override
public void onScaleEnd(ScaleGestureDetector detector) {
mCameraSource.doZoom(detector.getScaleFactor());
}
}
@Override
public boolean onOptionsItemSelected(MenuItem item) {
int id = item.getItemId();
if (id == android.R.id.home) {
onBackPressed();
return true;
}
return super.onOptionsItemSelected(item);
}
}
When scanning a GS1-128 symbol, the FNC1 in the first position acts as a flag character to indicate the presence of GS1 Application Identifier standard format data and is intentionally omitted from the scanned data, whilst any inter-field FNC1 formatting character is transmitted as GS (ASCII 29).
The implicit, leading FNC1 can be inferred if your reader is configured to emit symbology identifiers at the start of the scanned data. In this case your GS1-128 scanned data will begin with ]C1 rather than ]C0 for generic Code 128.
Unfortunately it does not appear as though either the ZBar library or the Google Vision library can be configured to return symbology identifiers, which is a disappointing limitation.
Additionally, the Google Vision library erroneously returns a leading GS representing the FNC1 in first position.
Reading of GS1 formatted data is described in detail by this answer.
Specifically, the ISO/IEC 15417 Code 128 bar code symbology specification says:
"Any application which utilizes Code 128 symbols with FNC1 in the first or second data position should require the transmission of symbology identifiers to be enabled. When FNC1 is used in the first or second position it shall not be represented in the transmitted message, although its presence is indicated by the use of modifier values 1 or 2 respectively in the symbology identifier."
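In practice, once the data reaches your app the remaining inter-field separators can simply be split on the GS character. A rough, untested sketch against the sym.getData() string from the ZBar callback above (it assumes the leading FNC1 has already been consumed by the decoder):

// Inter-field FNC1 separators arrive as ASCII 29 (GS).
String raw = sym.getData();
String[] elementStrings = raw.split("\u001D");
for (String element : elementStrings) {
    // Each element string starts with its Application Identifier, e.g. "01" + GTIN.
    Log.d(TAG, "GS1 element string: " + element);
}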
I have a barcode decoding application: https://play.google.com/store/apps/details?id=com.barcodereader
I have used the ZBar library and the Google Vision API for scanning.
Now what I want is: while scanning the barcode, if the user taps a button in the app bar to turn on the torch (flash), it should turn on, and turn off again on the next tap.
But the problem is that the camera is already running with all its parameters set, so when the user taps the button to turn on the torch we would need to interrupt the ongoing camera parameters, and I don't want to do that.
I am searching for another way to turn the torch on without changing the existing camera parameters.
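The closest I have found so far is to update only the flash mode on the live parameters; I am not sure whether that counts as interrupting them. An untested sketch for the ZBar/legacy camera path, assuming mCamera is the open android.hardware.Camera instance from the activity below:

private void setTorch(boolean on) {
    if (mCamera == null) return;
    Camera.Parameters params = mCamera.getParameters();
    params.setFlashMode(on ? Camera.Parameters.FLASH_MODE_TORCH
                           : Camera.Parameters.FLASH_MODE_OFF);
    mCamera.setParameters(params); // only the flash mode changes; the preview keeps running
}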
Below are the camera activities for ZBar and Google Vision; both use separate camera classes for the camera preview.
@SuppressWarnings("deprecation")
public class ZBarFirstScannerActivity extends AppCompatActivity{
//TextView tv;
ImageView iv;
LinearLayout ll;
private Camera mCamera;
private CameraPreview mPreview;
private Handler autoFocusHandler;
private ImageScanner scanner;
private boolean barcodeScanned = false;
private boolean previewing = true;
TextView tv;
static {
System.loadLibrary("iconv");
}
static {
System.loadLibrary("zbarjni");
}
public void onCreate(Bundle savedInstanceState)
{
super.onCreate(savedInstanceState);
setContentView(R.layout.barcode_capture1d);
tv = (TextView) findViewById(R.id.textVertical);
tv.setRotation(90);
initToolbar();
autoFocusHandler = new Handler();
mCamera = getCameraInstance();
// Instance barcode scanner
scanner = new ImageScanner();
scanner.setConfig(0, Config.X_DENSITY, 1);
scanner.setConfig(0, Config.Y_DENSITY, 1);
scanner.setConfig(Symbol.CODE128, Config.ENABLE,1);
scanner.setConfig(Symbol.EAN13, Config.ENABLE,1);
mPreview = new CameraPreview(this, mCamera, previewCb, autoFocusCB);
FrameLayout preview = (FrameLayout)findViewById(R.id.cameraPreview);
preview.addView(mPreview);
}
private void initToolbar() {
final Toolbar toolbar = (Toolbar) findViewById(R.id.toolbar);
setSupportActionBar(toolbar);
final ActionBar actionBar = getSupportActionBar();
if (actionBar != null) {
actionBar.setHomeButtonEnabled(true);
actionBar.setHomeAsUpIndicator(ContextCompat.getDrawable(this, R.drawable.abc_ic_ab_back_mtrl_am_alpha));
actionBar.setDisplayHomeAsUpEnabled(true);
}
}
/** A safe way to get an instance of the Camera object. */
public static Camera getCameraInstance()
{
Camera c = null;
try
{
c = Camera.open();
} catch (Exception e)
{
//nada
}
return c;
}
private void releaseCamera()
{
if (mCamera != null)
{
previewing = false;
mCamera.setPreviewCallback(null);
mCamera.release();
mCamera = null;
}
}
PreviewCallback previewCb = new PreviewCallback()
{
public void onPreviewFrame(byte[] data, Camera camera)
{
Camera.Parameters parameters = camera.getParameters();
Size size = parameters.getPreviewSize();
Image barcode = new Image(size.width, size.height, "Y800");
barcode.setData(data);
int result = scanner.scanImage(barcode);
if (result != 0)
{
previewing = false;
mCamera.setPreviewCallback(null);
mCamera.stopPreview();
SymbolSet syms = scanner.getResults();
for (Symbol sym : syms)
{
barcodeScanned = true;
Intent returnIntent = new Intent();
returnIntent.putExtra("BARCODE", sym.getData());
setResult(MainActivity.BAR_CODE_TYPE_128,returnIntent);
releaseCamera();
finish();
break;
}
}
}
};
// Mimic continuous auto-focusing
AutoFocusCallback autoFocusCB = new AutoFocusCallback()
{
public void onAutoFocus(boolean success, Camera camera)
{
autoFocusHandler.postDelayed(doAutoFocus, 3000);
}
};
private Runnable doAutoFocus = new Runnable()
{
public void run()
{
if (previewing)
mCamera.autoFocus(autoFocusCB);
}
};
public void onPause() {
super.onPause();
releaseCamera();
}
public void onResume() {
super.onResume();
// Re-acquire the camera and rebuild the preview released in onPause().
if (mCamera == null) {
mCamera = getCameraInstance();
previewing = true;
mPreview = new CameraPreview(this, mCamera, previewCb, autoFocusCB);
FrameLayout preview = (FrameLayout) findViewById(R.id.cameraPreview);
preview.removeAllViews();
preview.addView(mPreview);
}
}
@Override
public void onBackPressed() {
releaseCamera();
finish();
}
@Override
public boolean onOptionsItemSelected(MenuItem item) {
int id = item.getItemId();
if (id == android.R.id.home) {
onBackPressed();
return true;
}
return super.onOptionsItemSelected(item);
}
}
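One possible approach for the ZBar activity above (a sketch under the assumption that the toolbar button's click handler can reach mCamera, not a tested drop-in fix) is to change only the flash mode on the already-open Camera object. With the deprecated android.hardware.Camera API, setParameters() can be called while the preview is running, so the torch can usually be toggled without restarting anything, although behaviour varies by device:
// Sketch: toggle the torch on the already-running camera.
// Assumes android.hardware.Camera and java.util.List are imported.
private void toggleTorch(Camera camera) {
    if (camera == null) return;
    Camera.Parameters params = camera.getParameters();
    List<String> modes = params.getSupportedFlashModes();
    if (modes == null || !modes.contains(Camera.Parameters.FLASH_MODE_TORCH)) {
        return; // this device has no torch
    }
    boolean torchOn = Camera.Parameters.FLASH_MODE_TORCH.equals(params.getFlashMode());
    params.setFlashMode(torchOn ? Camera.Parameters.FLASH_MODE_OFF
                                : Camera.Parameters.FLASH_MODE_TORCH);
    camera.setParameters(params); // applied in place; the preview keeps running
}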
And the GoogleScannerActivity:
public final class GoogleScannerActivity extends AppCompatActivity {
private static final String TAG = "Barcode-reader";
// intent request code to handle updating play services if needed.
private static final int RC_HANDLE_GMS = 9001;
// permission request codes need to be < 256
private static final int RC_HANDLE_CAMERA_PERM = 2;
// constants used to pass extra data in the intent
public static final String AutoFocus = "AutoFocus";
public static final String UseFlash = "UseFlash";
public static final String BarcodeObject = "Barcode";
Bitmap bmp;
FileOutputStream fos = null;
private Camera c;
Switch aSwitch;
private CameraSource mCameraSource;
private CameraSourcePreview mPreview;
private GraphicOverlay<BarcodeGraphic> mGraphicOverlay;
// helper objects for detecting taps and pinches.
private ScaleGestureDetector scaleGestureDetector;
private GestureDetector gestureDetector;
/**
* Initializes the UI and creates the detector pipeline.
*/
@Override
public void onCreate(Bundle icicle) {
super.onCreate(icicle);
setContentView(R.layout.barcode_capture2d);
initToolbar();
ActivitySource.caller = this;
mPreview = (CameraSourcePreview) findViewById(R.id.preview);
mGraphicOverlay = (GraphicOverlay<BarcodeGraphic>)findViewById(R.id.graphicOverlay);
boolean autoFocus = true;
boolean useFlash = false;
// Check for the camera permission before accessing the camera. If the
// permission is not granted yet, request permission.
int rc = ActivityCompat.checkSelfPermission(this, Manifest.permission.CAMERA);
if (rc == PackageManager.PERMISSION_GRANTED) {
createCameraSource(autoFocus, useFlash);
} else {
requestCameraPermission();
}
gestureDetector = new GestureDetector(this, new CaptureGestureListener());
scaleGestureDetector = new ScaleGestureDetector(this, new ScaleListener());
/*Snackbar.make(mGraphicOverlay, "Tap to capture. Pinch/Stretch to zoom",
Snackbar.LENGTH_LONG)
.show();*/
}
private void initToolbar() {
final Toolbar toolbar = (Toolbar) findViewById(R.id.toolbar);
setSupportActionBar(toolbar);
final ActionBar actionBar = getSupportActionBar();
if (actionBar != null) {
actionBar.setHomeButtonEnabled(true);
actionBar.setHomeAsUpIndicator(ContextCompat.getDrawable(this, R.drawable.abc_ic_ab_back_mtrl_am_alpha));
actionBar.setDisplayHomeAsUpEnabled(true);
}
}
private Camera.Size getBestPreviewSize(int width, int height, Camera.Parameters parameters){
Camera.Size bestSize = null;
List<Camera.Size> sizeList = parameters.getSupportedPreviewSizes();
bestSize = sizeList.get(0);
for(int i = 1; i < sizeList.size(); i++){
if((sizeList.get(i).width * sizeList.get(i).height) >
(bestSize.width * bestSize.height)){
bestSize = sizeList.get(i);
}
}
return bestSize;
}
/**
* Handles the requesting of the camera permission. This includes
* showing a "Snackbar" message of why the permission is needed then
* sending the request.
*/
private void requestCameraPermission() {
Log.w(TAG, "Camera permission is not granted. Requesting permission");
final String[] permissions = new String[]{Manifest.permission.CAMERA};
if (!ActivityCompat.shouldShowRequestPermissionRationale(this,
Manifest.permission.CAMERA)) {
ActivityCompat.requestPermissions(this, permissions, RC_HANDLE_CAMERA_PERM);
return;
}
final Activity thisActivity = this;
View.OnClickListener listener = new View.OnClickListener() {
@Override
public void onClick(View view) {
ActivityCompat.requestPermissions(thisActivity, permissions,
RC_HANDLE_CAMERA_PERM);
}
};
Snackbar.make(mGraphicOverlay, R.string.permission_camera_rationale,
Snackbar.LENGTH_INDEFINITE)
.setAction(R.string.ok, listener)
.show();
}
@Override
public boolean onTouchEvent(MotionEvent e) {
boolean b = scaleGestureDetector.onTouchEvent(e);
boolean c = gestureDetector.onTouchEvent(e);
return b || c || super.onTouchEvent(e);
}
/**
* Creates and starts the camera. Note that this uses a higher resolution in comparison
* to other detection examples to enable the barcode detector to detect small barcodes
* at long distances.
*
* Suppressing InlinedApi since there is a check that the minimum version is met before using
* the constant.
*/
@SuppressLint("InlinedApi")
private void createCameraSource(boolean autoFocus, boolean useFlash) {
Context context = getApplicationContext();
// A barcode detector is created to track barcodes. An associated multi-processor instance
// is set to receive the barcode detection results, track the barcodes, and maintain
// graphics for each barcode on screen. The factory is used by the multi-processor to
// create a separate tracker instance for each barcode.
BarcodeDetector barcodeDetector = new BarcodeDetector.Builder(context).setBarcodeFormats(Barcode.CODE_128 | Barcode.DATA_MATRIX | Barcode.QR_CODE).build();
BarcodeTrackerFactory barcodeFactory = new BarcodeTrackerFactory(mGraphicOverlay);
barcodeDetector.setProcessor(
new MultiProcessor.Builder<>(barcodeFactory).build());
if (!barcodeDetector.isOperational()) {
// Note: The first time that an app using the barcode or face API is installed on a
// device, GMS will download the native libraries to the device in order to do detection.
// Usually this completes before the app is run for the first time. But if that
// download has not yet completed, then the above call will not detect any barcodes
// and/or faces.
//
// isOperational() can be used to check if the required native libraries are currently
// available. The detectors will automatically become operational once the library
// downloads complete on device.
Log.w(TAG, "Detector dependencies are not yet available.");
// Check for low storage. If there is low storage, the native library will not be
// downloaded, so detection will not become operational.
IntentFilter lowstorageFilter = new IntentFilter(Intent.ACTION_DEVICE_STORAGE_LOW);
boolean hasLowStorage = registerReceiver(null, lowstorageFilter) != null;
if (hasLowStorage) {
Toast.makeText(this, R.string.low_storage_error, Toast.LENGTH_LONG).show();
Log.w(TAG, getString(R.string.low_storage_error));
}
}
// Creates and starts the camera. Note that this uses a higher resolution in comparison
// to other detection examples to enable the barcode detector to detect small barcodes
// at long distances.
CameraSource.Builder builder = new CameraSource.Builder(getApplicationContext(), barcodeDetector)
.setFacing(CameraSource.CAMERA_FACING_BACK)
.setRequestedPreviewSize(1100, 844)
.setRequestedFps(15.0f);
// make sure that auto focus is an available option
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.ICE_CREAM_SANDWICH) {
builder = builder.setFocusMode(
autoFocus ? Camera.Parameters.FOCUS_MODE_CONTINUOUS_PICTURE : null);
}
mCameraSource = builder
.setFlashMode(useFlash ? Camera.Parameters.FLASH_MODE_TORCH : null)
.build();
}
/**
* Restarts the camera.
*/
@Override
protected void onResume() {
super.onResume();
startCameraSource();
}
/**
* Stops the camera.
*/
@Override
protected void onPause() {
super.onPause();
if (mPreview != null) {
mPreview.stop();
}
}
/**
* Releases the resources associated with the camera source, the associated detectors, and the
* rest of the processing pipeline.
*/
@Override
protected void onDestroy() {
super.onDestroy();
if (mPreview != null) {
mPreview.release();
}
}
@Override
public void onRequestPermissionsResult(int requestCode,
@NonNull String[] permissions,
@NonNull int[] grantResults) {
if (requestCode != RC_HANDLE_CAMERA_PERM) {
Log.d(TAG, "Got unexpected permission result: " + requestCode);
super.onRequestPermissionsResult(requestCode, permissions, grantResults);
return;
}
if (grantResults.length != 0 && grantResults[0] == PackageManager.PERMISSION_GRANTED) {
Log.d(TAG, "Camera permission granted - initialize the camera source");
// we have permission, so create the camerasource
boolean autoFocus = getIntent().getBooleanExtra(AutoFocus,false);
boolean useFlash = getIntent().getBooleanExtra(UseFlash, false);
createCameraSource(autoFocus, useFlash);
return;
}
Log.e(TAG, "Permission not granted: results len = " + grantResults.length +
" Result code = " + (grantResults.length > 0 ? grantResults[0] : "(empty)"));
DialogInterface.OnClickListener listener = new DialogInterface.OnClickListener() {
public void onClick(DialogInterface dialog, int id) {
finish();
}
};
AlertDialog.Builder builder = new AlertDialog.Builder(this);
builder.setTitle("Multitracker sample")
.setMessage(R.string.no_camera_permission)
.setPositiveButton(R.string.ok, listener)
.show();
}
/**
* Starts or restarts the camera source, if it exists. If the camera source doesn't exist yet
* (e.g., because onResume was called before the camera source was created), this will be called
* again when the camera source is created.
*/
private void startCameraSource() throws SecurityException {
// check that the device has play services available.
int code = GoogleApiAvailability.getInstance().isGooglePlayServicesAvailable(
getApplicationContext());
if (code != ConnectionResult.SUCCESS) {
Dialog dlg =
GoogleApiAvailability.getInstance().getErrorDialog(this, code, RC_HANDLE_GMS);
dlg.show();
}
if (mCameraSource != null) {
try {
mPreview.start(mCameraSource, mGraphicOverlay);
} catch (IOException e) {
Log.e(TAG, "Unable to start camera source.", e);
mCameraSource.release();
mCameraSource = null;
}
}
}
/**
* onTap is called to capture the oldest barcode currently detected and
* return it to the caller.
*
* @param rawX - the raw position of the tap
* @param rawY - the raw position of the tap.
* @return true if the activity is ending.
*/
private boolean onTap(float rawX, float rawY) {
//TODO: use the tap position to select the barcode.
BarcodeGraphic graphic = mGraphicOverlay.getFirstGraphic();
Barcode barcode = null;
if (graphic != null) {
barcode = graphic.getBarcode();
if (barcode != null) {
Intent data = new Intent();
data.putExtra(BarcodeObject, barcode);
setResult(CommonStatusCodes.SUCCESS, data);
finish();
}
else {
Log.d(TAG, "barcode data is null");
}
}
else {
Log.d(TAG,"no barcode detected");
}
return barcode != null;
}
private class CaptureGestureListener extends GestureDetector.SimpleOnGestureListener {
@Override
public boolean onSingleTapConfirmed(MotionEvent e) {
return onTap(e.getRawX(), e.getRawY()) || super.onSingleTapConfirmed(e);
}
}
private class ScaleListener implements ScaleGestureDetector.OnScaleGestureListener {
/**
* Responds to scaling events for a gesture in progress.
* Reported by pointer motion.
*
* @param detector The detector reporting the event - use this to
* retrieve extended info about event state.
* @return Whether or not the detector should consider this event
* as handled. If an event was not handled, the detector
* will continue to accumulate movement until an event is
* handled. This can be useful if an application, for example,
* only wants to update scaling factors if the change is
* greater than 0.01.
*/
@Override
public boolean onScale(ScaleGestureDetector detector) {
return false;
}
/**
* Responds to the beginning of a scaling gesture. Reported by
* new pointers going down.
*
* @param detector The detector reporting the event - use this to
* retrieve extended info about event state.
* @return Whether or not the detector should continue recognizing
* this gesture. For example, if a gesture is beginning
* with a focal point outside of a region where it makes
* sense, onScaleBegin() may return false to ignore the
* rest of the gesture.
*/
@Override
public boolean onScaleBegin(ScaleGestureDetector detector) {
return true;
}
/**
* Responds to the end of a scale gesture. Reported by existing
* pointers going up.
* <p/>
* Once a scale has ended, {@link ScaleGestureDetector#getFocusX()}
* and {@link ScaleGestureDetector#getFocusY()} will return the focal point
* of the pointers remaining on the screen.
*
* @param detector The detector reporting the event - use this to
* retrieve extended info about event state.
*/
@Override
public void onScaleEnd(ScaleGestureDetector detector) {
mCameraSource.doZoom(detector.getScaleFactor());
}
}
@Override
public boolean onOptionsItemSelected(MenuItem item) {
int id = item.getItemId();
if (id == android.R.id.home) {
onBackPressed();
return true;
}
return super.onOptionsItemSelected(item);
}
}
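For the GoogleScannerActivity, the bundled Vision CameraSource does not expose the underlying camera or a public torch toggle once it is running, so a common (and admittedly fragile) workaround is to locate the private android.hardware.Camera field by type via reflection and change its flash mode in place. This is a hedged sketch, assuming Play Services keeps a single Camera-typed field inside CameraSource; it relies on an implementation detail and may break in future versions:
// Sketch: best-effort torch toggle on a running CameraSource by finding its
// internal android.hardware.Camera field via reflection.
private boolean setTorch(CameraSource cameraSource, boolean on) {
    if (cameraSource == null) return false;
    for (java.lang.reflect.Field field : CameraSource.class.getDeclaredFields()) {
        if (field.getType() != Camera.class) continue;
        field.setAccessible(true);
        try {
            Camera camera = (Camera) field.get(cameraSource);
            if (camera == null) return false;
            Camera.Parameters params = camera.getParameters();
            params.setFlashMode(on ? Camera.Parameters.FLASH_MODE_TORCH
                                   : Camera.Parameters.FLASH_MODE_OFF);
            camera.setParameters(params); // preview keeps running
            return true;
        } catch (Exception e) {
            Log.e(TAG, "Could not toggle torch via reflection", e);
            return false;
        }
    }
    return false;
}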