How to take a picture with the camera using ARCore - Android

ARCore camera doesn't seem to support takePicture.
https://developers.google.com/ar/reference/java/com/google/ar/core/Camera
Does anyone know how I can take pictures with ARCore?

I am assuming you mean a picture of what the camera is seeing plus the AR objects. At a high level, you need permission to write to external storage so you can save the picture, then copy the frame from OpenGL, and finally save it as a PNG (for example). Here are the specifics:
Add the WRITE_EXTERNAL_STORAGE permission to the AndroidManifest.xml
<uses-permission android:name="android.permission.WRITE_EXTERNAL_STORAGE" />
Then change CameraPermissionHelper to iterate over both the CAMERA and WRITE_EXTERNAL_STORAGE permissions to make sure they are granted:
private static final String REQUIRED_PERMISSIONS[] = {
Manifest.permission.WRITE_EXTERNAL_STORAGE,
Manifest.permission.CAMERA
};
/**
* Check to see we have the necessary permissions for this app.
*/
public static boolean hasCameraPermission(Activity activity) {
for (String p : REQUIRED_PERMISSIONS) {
if (ContextCompat.checkSelfPermission(activity, p) !=
PackageManager.PERMISSION_GRANTED) {
return false;
}
}
return true;
}
/**
* Check to see we have the necessary permissions for this app,
* and ask for them if we don't.
*/
public static void requestCameraPermission(Activity activity) {
ActivityCompat.requestPermissions(activity, REQUIRED_PERMISSIONS,
CAMERA_PERMISSION_CODE);
}
/**
* Check to see if we need to show the rationale for this permission.
*/
public static boolean shouldShowRequestPermissionRationale(Activity activity) {
for (String p : REQUIRED_PERMISSIONS) {
if (ActivityCompat.shouldShowRequestPermissionRationale(activity, p)) {
return true;
}
}
return false;
}
Next, add a couple of fields to HelloARActivity to keep track of the dimensions of the frame, and a boolean to indicate when to save the picture.
private int mWidth;
private int mHeight;
private boolean capturePicture = false;
Set the width and height in onSurfaceChanged()
public void onSurfaceChanged(GL10 gl, int width, int height) {
mDisplayRotationHelper.onSurfaceChanged(width, height);
GLES20.glViewport(0, 0, width, height);
mWidth = width;
mHeight = height;
}
At the bottom of onDrawFrame(), add a check for the capture flag. This should be done after all the other drawing happens.
if (capturePicture) {
capturePicture = false;
try {
SavePicture();
} catch (IOException e) {
// SavePicture() is declared to throw IOException; log rather than crash.
Log.e(TAG, "Failed to save picture", e);
}
}
Then add the onClick method for a button to take the picture, and the actual code to save the image:
public void onSavePicture(View view) {
// Here we just set a flag so we can copy
// the image from the onDrawFrame() method.
// This is required because for OpenGL we must be on the rendering thread.
this.capturePicture = true;
}
/**
* Call from the GLThread to save a picture of the current frame.
*/
public void SavePicture() throws IOException {
int pixelData[] = new int[mWidth * mHeight];
// Read the pixels from the current GL frame.
IntBuffer buf = IntBuffer.wrap(pixelData);
buf.position(0);
GLES20.glReadPixels(0, 0, mWidth, mHeight,
GLES20.GL_RGBA, GLES20.GL_UNSIGNED_BYTE, buf);
// Create a file in the Pictures/HelloAR album.
final File out = new File(Environment.getExternalStoragePublicDirectory(
Environment.DIRECTORY_PICTURES) + "/HelloAR", "Img" +
Long.toHexString(System.currentTimeMillis()) + ".png");
// Make sure the directory exists
if (!out.getParentFile().exists()) {
out.getParentFile().mkdirs();
}
// Convert the pixel data from RGBA to what Android wants, ARGB.
int bitmapData[] = new int[pixelData.length];
for (int i = 0; i < mHeight; i++) {
for (int j = 0; j < mWidth; j++) {
int p = pixelData[i * mWidth + j];
int b = (p & 0x00ff0000) >> 16;
int r = (p & 0x000000ff) << 16;
int ga = p & 0xff00ff00;
bitmapData[(mHeight - i - 1) * mWidth + j] = ga | r | b;
}
}
// Create a bitmap.
Bitmap bmp = Bitmap.createBitmap(bitmapData,
mWidth, mHeight, Bitmap.Config.ARGB_8888);
// Write it to disk.
FileOutputStream fos = new FileOutputStream(out);
bmp.compress(Bitmap.CompressFormat.PNG, 100, fos);
fos.flush();
fos.close();
runOnUiThread(new Runnable() {
@Override
public void run() {
showSnackbarMessage("Wrote " + out.getName(), false);
}
});
}
The last step is to add the button to the end of the activity_main.xml layout:
<Button
android:id="@+id/fboRecord_button"
android:layout_width="wrap_content"
android:layout_height="wrap_content"
android:layout_alignStart="@+id/surfaceview"
android:layout_alignTop="@+id/surfaceview"
android:onClick="onSavePicture"
android:text="Snap"
tools:ignore="OnClick"/>

Acquiring the image buffer
In the latest ARCore SDK, we get access to the image buffer via the Frame class. Below is sample code that gives us access to the image buffer:
private void onSceneUpdate(FrameTime frameTime) {
try {
Frame currentFrame = sceneView.getArFrame();
Image currentImage = currentFrame.acquireCameraImage();
int imageFormat = currentImage.getFormat();
if (imageFormat == ImageFormat.YUV_420_888) {
Log.d("ImageFormat", "Image format is YUV_420_888");
}
currentImage.close();
} catch (NotYetAvailableException e) {
// acquireCameraImage() throws if the camera image is not ready yet.
Log.e("onSceneUpdate", "Camera image not yet available", e);
}
}
onSceneUpdate() will be called for every frame if you register it with the setOnUpdateListener() callback. The image will be in YUV_420_888 format, but it will have the full field of view of the native high-resolution camera.
Also, do not forget to close the received image by calling currentImage.close(). Otherwise you will receive a ResourceExhaustedException on the next run of onSceneUpdate().
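For reference, here is a minimal sketch of how the callback gets registered, assuming Sceneform's ArFragment (the fragment id ux_fragment and the field names are assumptions, not part of the original answer; in recent Sceneform releases the registration method is addOnUpdateListener()):
ArFragment arFragment = (ArFragment) getSupportFragmentManager()
        .findFragmentById(R.id.ux_fragment); // assumed fragment id
sceneView = arFragment.getArSceneView();
// Sceneform invokes the listener once per frame.
sceneView.getScene().addOnUpdateListener(this::onSceneUpdate);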
Writing the acquired image buffer to a file
The following implementation converts the YUV buffer to a compressed JPEG byte array:
private static byte[] NV21toJPEG(byte[] nv21, int width, int height) {
ByteArrayOutputStream out = new ByteArrayOutputStream();
YuvImage yuv = new YuvImage(nv21, ImageFormat.NV21, width, height, null);
yuv.compressToJpeg(new Rect(0, 0, width, height), 100, out);
return out.toByteArray();
}
public static void WriteImageInformation(Image image, String path) throws IOException {
byte[] data = null;
data = NV21toJPEG(YUV_420_888toNV21(image),
image.getWidth(), image.getHeight());
BufferedOutputStream bos = new BufferedOutputStream(new FileOutputStream(path));
bos.write(data);
bos.flush();
bos.close();
}
private static byte[] YUV_420_888toNV21(Image image) {
byte[] nv21;
ByteBuffer yBuffer = image.getPlanes()[0].getBuffer();
ByteBuffer uBuffer = image.getPlanes()[1].getBuffer();
ByteBuffer vBuffer = image.getPlanes()[2].getBuffer();
int ySize = yBuffer.remaining();
int uSize = uBuffer.remaining();
int vSize = vBuffer.remaining();
nv21 = new byte[ySize + uSize + vSize];
//U and V are swapped
yBuffer.get(nv21, 0, ySize);
vBuffer.get(nv21, ySize, vSize);
uBuffer.get(nv21, ySize + vSize, uSize);
return nv21;
}
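To tie the pieces together, a hedged usage sketch calling WriteImageInformation() from onSceneUpdate() (the output location and log tag are assumptions, not part of the original answer):
// Inside onSceneUpdate(), after confirming the YUV_420_888 format:
File outFile = new File(getExternalFilesDir(Environment.DIRECTORY_PICTURES),
        "ar_frame_" + System.currentTimeMillis() + ".jpg"); // assumed path
try {
    WriteImageInformation(currentImage, outFile.getAbsolutePath());
} catch (IOException e) {
    Log.e("WriteImage", "Failed to write JPEG", e);
} finally {
    currentImage.close(); // always release the Image back to ARCore
}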

Sorry for the late answer. You can use the following code to take a picture in ARCore:
private String generateFilename() {
String date =
new SimpleDateFormat("yyyyMMddHHmmss", java.util.Locale.getDefault()).format(new Date());
return Environment.getExternalStoragePublicDirectory(
Environment.DIRECTORY_PICTURES) + File.separator + "Sceneform/" + date + "_screenshot.jpg";
}
private void saveBitmapToDisk(Bitmap bitmap, String filename) throws IOException {
File out = new File(filename);
if (!out.getParentFile().exists()) {
out.getParentFile().mkdirs();
}
try (FileOutputStream outputStream = new FileOutputStream(filename);
ByteArrayOutputStream outputData = new ByteArrayOutputStream()) {
bitmap.compress(Bitmap.CompressFormat.PNG, 100, outputData);
outputData.writeTo(outputStream);
outputStream.flush();
outputStream.close();
} catch (IOException ex) {
throw new IOException("Failed to save bitmap to disk", ex);
}
}
private void takePhoto() {
final String filename = generateFilename();
/*ArSceneView view = fragment.getArSceneView();*/
mSurfaceView = findViewById(R.id.surfaceview);
// Create a bitmap the size of the scene view.
final Bitmap bitmap = Bitmap.createBitmap(mSurfaceView.getWidth(), mSurfaceView.getHeight(),
Bitmap.Config.ARGB_8888);
// Create a handler thread to offload the processing of the image.
final HandlerThread handlerThread = new HandlerThread("PixelCopier");
handlerThread.start();
// Make the request to copy.
PixelCopy.request(mSurfaceView, bitmap, (copyResult) -> {
if (copyResult == PixelCopy.SUCCESS) {
try {
saveBitmapToDisk(bitmap, filename);
} catch (IOException e) {
Toast toast = Toast.makeText(DrawAR.this, e.toString(),
Toast.LENGTH_LONG);
toast.show();
return;
}
Snackbar snackbar = Snackbar.make(findViewById(android.R.id.content),
"Photo saved", Snackbar.LENGTH_LONG);
snackbar.setAction("Open in Photos", v -> {
File photoFile = new File(filename);
Uri photoURI = FileProvider.getUriForFile(DrawAR.this,
DrawAR.this.getPackageName() + ".ar.codelab.name.provider",
photoFile);
Intent intent = new Intent(Intent.ACTION_VIEW, photoURI);
intent.setDataAndType(photoURI, "image/*");
intent.addFlags(Intent.FLAG_GRANT_READ_URI_PERMISSION);
startActivity(intent);
});
snackbar.show();
} else {
Log.d("DrawAR", "Failed to copyPixels: " + copyResult);
Toast toast = Toast.makeText(DrawAR.this,
"Failed to copyPixels: " + copyResult, Toast.LENGTH_LONG);
toast.show();
}
handlerThread.quitSafely();
}, new Handler(handlerThread.getLooper()));
}
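One caveat: the FileProvider.getUriForFile() call in the snackbar action only resolves if a matching provider is declared in AndroidManifest.xml. A sketch of such a declaration, assuming the support-library FileProvider and a res/xml/paths.xml resource (both of these names are assumptions; the authority must match the string built in the code above):
<provider
    android:name="android.support.v4.content.FileProvider"
    android:authorities="${applicationId}.ar.codelab.name.provider"
    android:exported="false"
    android:grantUriPermissions="true">
    <meta-data
        android:name="android.support.FILE_PROVIDER_PATHS"
        android:resource="@xml/paths" />
</provider>
paths.xml would then whitelist the Pictures directory, e.g. with an <external-path> entry.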

Related

Bitmap Image Saved to Internal Storage is Corrupt

I want to design an app that generates a QR code and gives the user the possibility to save the generated image to internal storage only. I successfully generate the bitmap and save it as a .PNG image, but when I try to open it from the gallery it appears broken or corrupt.
Below is the code to generate the bitmap and display it on an ImageView (qrCode):
bitmap = encodeAsBitmap(value);
qrCode.setImageBitmap(bitmap);
Bitmap encodeAsBitmap(String str) throws WriterException {
BitMatrix result;
try {
result = new MultiFormatWriter().encode(str,
BarcodeFormat.QR_CODE, WIDTH, WIDTH, null);
} catch (IllegalArgumentException iae) {
// Unsupported format
return null;
}
int w = result.getWidth();
int h = result.getHeight();
int[] pixels = new int[w * h];
for (int y = 0; y < h; y++) {
int offset = y * w;
for (int x = 0; x < w; x++) {
pixels[offset + x] = result.get(x, y) ? getResources().getColor(R.color.colorBlack) :
getResources().getColor(R.color.colorWhite);
}
}
Bitmap bitmap = Bitmap.createBitmap(w, h, Bitmap.Config.ARGB_8888);
bitmap.setPixels(pixels, 0, 500, 0, 0, w, h);
return bitmap;
}
It works perfectly up to this level. The user can then click a button in order to save this image to their device's internal storage, thanks to the below method:
public void onClickSaveCode(View view) {
String title = getResources().getString(R.string.saved_image_title_prepend) + stringDate;
String format = getResources().getString(R.string.saved_image_format);
String directory = getResources().getString(R.string.saved_image_directory);
// Method call to save image
saveImageToInternalStorage(bitmap, directory, title, format);
}
public boolean saveImageToInternalStorage(Bitmap bitmap, String directory, String title, String format) {
ContextWrapper contextWrapper = new ContextWrapper(getApplicationContext());
File imageDirectory = contextWrapper.getDir(directory, Context.MODE_WORLD_READABLE);
File path = new File(imageDirectory, title + format);
try {
FileOutputStream fos = new FileOutputStream(path);
// Use the compress method on the Bitmap object to write image to the OutputStream
bitmap.compress(Bitmap.CompressFormat.PNG, QUALITY, fos);
fos.close();
new SingleMediaScanner(this, path);
Toast.makeText(this, getString(R.string.save_success), Toast.LENGTH_LONG).show();
return true;
} catch (Exception e) {
e.printStackTrace();
Toast.makeText(this, getString(R.string.save_failure), Toast.LENGTH_LONG).show();
return false;
}
}
And finally below is the MediaScannerConnection class to scan for all images saved to the device and display them in the gallery:
public class SingleMediaScanner implements MediaScannerConnectionClient {
private MediaScannerConnection mSC;
private File file;
public SingleMediaScanner(Context context, File f) {
file = f;
mSC = new MediaScannerConnection(context, this);
mSC.connect();
}
@Override
public void onMediaScannerConnected() {
mSC.scanFile(file.getAbsolutePath(), null);
}
@Override
public void onScanCompleted(String path, Uri uri) {
mSC.disconnect();
}
}
The images are saved, yet they appear in the gallery as broken files.
Any help will be greatly appreciated.
The following snippet (C#/Xamarin, but the same flow applies in Java) saves a compressed bitmap:
string path = Android.OS.Environment.ExternalStorageDirectory.AbsolutePath;
string filePath = System.IO.Path.Combine(path, "compressed.png");
//Bitmap bmp = ((BitmapDrawable)imgV.Drawable).Bitmap;
Bitmap b = newBitmap;
FileStream ms = new FileStream(filePath, FileMode.Create);
//FileOutputStream fos = new FileOutputStream(filePath,true);
await b.CompressAsync(Bitmap.CompressFormat.Png, 100, ms);
ms.Close();
//ByteArrayOutputStream opstream = new ByteArrayOutputStream();
//b.Compress(Bitmap.CompressFormat.Png, 100, opstream);
//byte[] bytArray = opstream.ToByteArray();
Toast.MakeText(Application.Context, "Compressed : " , ToastLength.Short).Show();
imgCompress.SetImageBitmap(b);

Accessing Full Frame Using ZXing on Android

We are developing an Android application and using ZXing for scanning QR codes. Is there a way to get the full frame of a decoded QR code using ZXing?
The following method was added to the PlanarYUVLuminanceSource class.
public void saveFrame() {
try {
YuvImage image = new YuvImage(yuvData, ImageFormat.NV21, getWidth(), getHeight(), null);
File file = new File(Environment.getExternalStorageDirectory().getPath() + "/frame.jpeg");
FileOutputStream fos = new FileOutputStream(file);
image.compressToJpeg(new Rect(0, 0, image.getWidth(), image.getHeight()), 100, fos);
fos.close();
} catch (IOException e) {
Log.e(TAG, "IOException!", e);
}
}
It is called within the DecodeHandler class as below:
private void decode(byte[] data, int width, int height) {
long start = System.currentTimeMillis();
Result rawResult = null;
PlanarYUVLuminanceSource source = CameraManager.get()
.buildLuminanceSource(data, width, height);
BinaryBitmap bitmap = new BinaryBitmap(new HybridBinarizer(source));
try {
rawResult = multiFormatReader.decodeWithState(bitmap);
} catch (ReaderException re) {
// continue
} finally {
multiFormatReader.reset();
}
if (rawResult != null) {
// Don't log the barcode contents for security.
long end = System.currentTimeMillis();
Log.d(TAG, "Found barcode in " + (end - start) + " ms");
Message message = Message.obtain(activity.getHandler(),
R.id.zxinglib_decode_succeeded, rawResult);
Bundle bundle = new Bundle();
bundle.putParcelable(DecodeThread.BARCODE_BITMAP,
source.renderCroppedGreyscaleBitmap());
source.saveFrame();
message.setData(bundle);
message.sendToTarget();
} else {
Message message = Message.obtain(activity.getHandler(),
R.id.zxinglib_decode_failed);
message.sendToTarget();
}
}
Here is the result image (image omitted).
Finally I found a solution. It is based on the renderCroppedGreyscaleBitmap() method of the PlanarYUVLuminanceSource class. Here is how I changed the decode() method.
private void decode(byte[] data, int width, int height) {
long start = System.currentTimeMillis();
Result rawResult = null;
PlanarYUVLuminanceSource source = CameraManager.get().buildLuminanceSource(data, width, height);
BinaryBitmap bitmap = new BinaryBitmap(new HybridBinarizer(source));
try {
rawResult = multiFormatReader.decodeWithState(bitmap);
} catch (ReaderException re) {
// continue
} finally {
multiFormatReader.reset();
}
if (rawResult != null) {
// Don't log the barcode contents for security.
long end = System.currentTimeMillis();
Log.d(TAG, "Found barcode in " + (end - start) + " ms");
// Grab & save frame
Bitmap wholeBmp = renderGrayScaleBitmap(data, width, height);
if(wholeBmp != null)
saveBitmap(wholeBmp, "frame.png");
else
Log.e(TAG, "Bitmap of frame is empty!");
Message message = Message.obtain(activity.getHandler(), R.id.zxinglib_decode_succeeded, rawResult);
Bundle bundle = new Bundle();
bundle.putParcelable(DecodeThread.BARCODE_BITMAP, source.renderCroppedGreyscaleBitmap());
message.setData(bundle);
message.sendToTarget();
} else {
Message message = Message.obtain(activity.getHandler(), R.id.zxinglib_decode_failed);
message.sendToTarget();
}
}
Create a bitmap from the decoded data:
private Bitmap renderGrayScaleBitmap(byte[] data, int width, int height) {
int[] pixels = new int[width * height];
int inputOffset = 0; // luma data starts at offset 0
for (int y = 0; y < height; y++) {
int outputOffset = y * width;
for (int x = 0; x < width; x++) {
int grey = data[inputOffset + x] & 0xff;
pixels[outputOffset + x] = 0xFF000000 | (grey * 0x00010101);
}
inputOffset += width;
}
Bitmap bitmap = Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_8888);
bitmap.setPixels(pixels, 0, width, 0, 0, width, height);
return bitmap;
}
Save the bitmap to the SD card:
private void saveBitmap(Bitmap bmp, String name) {
FileOutputStream out = null;
try {
String filename = Environment.getExternalStorageDirectory().toString() + "/" + name;
Log.i(TAG, "writtenPath=" + filename);
out = new FileOutputStream(filename);
bmp.compress(Bitmap.CompressFormat.PNG, 100, out);
} catch (Exception e) {
e.printStackTrace();
} finally {
try {
if (out != null) {
out.close();
}
} catch (IOException e) {
e.printStackTrace();
}
}
}

Android: mPrevCallback to JPG, results in black pictures

I am trying to capture the camera preview on a SurfaceView, to save it as a JPEG in internal memory. I found some code here on this site that does mostly what I want, but it saves the image to the SD card. I changed that, and came up with the following code.
Camera.PreviewCallback mPrevCallback = new Camera.PreviewCallback()
{
@Override
public void onPreviewFrame( byte[] data, Camera Cam ) {
//Log.d(TAG, "FRAME");
Camera.Parameters parameters = Cam.getParameters();
int format = parameters.getPreviewFormat();
//Log.d(TAG, "FORMAT:" + format);
//YUV formats require more conversion
if (format == ImageFormat.NV21 || format == ImageFormat.YUY2 || format == ImageFormat.NV16) {
int w = parameters.getPreviewSize().width;
int h = parameters.getPreviewSize().height;
// Get the YuV image
YuvImage yuv_image = new YuvImage(data, format, w, h, null);
// Convert YuV to Jpeg
Rect rect = new Rect(0, 0, w, h);
ByteArrayOutputStream output_stream = new ByteArrayOutputStream();
yuv_image.compressToJpeg(rect, 100, output_stream);
byte[] byt = output_stream.toByteArray();
FileOutputStream outStream = null;
try {
outStream = new FileOutputStream("/data/data/com.example.max.camtest/files/test"+System.currentTimeMillis()+".jpg");
outStream.write(byt);
outStream.close();
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
} finally {
}
}
}
};
The preview is shown on the SurfaceView and mPrevCallback is triggered. It successfully saves pictures of different sizes (250~500 KB), but they are all black. When I try to capture a picture with the camera.takePicture function, it is also black.
What am I doing wrong? How can I debug this?
Thanks!
Use this intent to take the picture:
Intent intent = new Intent(MediaStore.ACTION_IMAGE_CAPTURE);
File f = new File(android.os.Environment.getExternalStorageDirectory(), AppInfo.getInstance().getCurrentLoginUserInfo().getId()+".jpg");
intent.putExtra(MediaStore.EXTRA_OUTPUT, Uri.fromFile(f));
intent.putExtra("return-data", true);
startActivityForResult(intent, 1);
And in your activity result, note Bitmap bitmap = getScaledBitmap(uri.getPath(), 200, true); where 200 is your maximum image size.
if(requestCode == 1)
{
String base = Environment.getExternalStorageDirectory().getAbsolutePath().toString();
final String imgPath = base + "/" +AppInfo.getInstance().getCurrentLoginUserInfo().getId()+".jpg";
File file = new File(imgPath);
if (file.exists())
{
Uri uri = Uri.fromFile(file);
Log.d(TAG, "Image Uri path: " + uri.getPath());
Bitmap bitmap = getScaledBitmap(uri.getPath(), 200, true);
}}
This method will return the image bitmap after resizing it:
private Bitmap getScaledBitmap(String imagePath, float maxImageSize, boolean filter) {
FileInputStream in;
BufferedInputStream buf;
try {
in = new FileInputStream(imagePath);
buf = new BufferedInputStream(in);
Bitmap realImage = BitmapFactory.decodeStream(buf);
float ratio = Math.min(
(float) maxImageSize / realImage.getWidth(),
(float) maxImageSize / realImage.getHeight());
int width = Math.round((float) ratio * realImage.getWidth());
int height = Math.round((float) ratio * realImage.getHeight());
Bitmap newBitmap = Bitmap.createScaledBitmap(realImage, width, height, filter);
return newBitmap;
} catch (FileNotFoundException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
return null;
}
Now you have the scaled bitmap image.
Hope this helps.

How to get Alpha of a Bitmap?

I have two problems.
First, I am changing the alpha of a Bitmap and setting it on an ImageView, but whenever I get the Bitmap back from the ImageView it is different from how it looks in the ImageView; the RGB values are changed.
Second, I am wondering how to get the alpha of a bitmap.
imageview=(ImageView)findViewById(R.id.image);
public Bitmap ColorDodgeBlend(Bitmap source, Bitmap layer,int alpha) {
Bitmap base = source.copy(Config.ARGB_8888, true);
Bitmap blend = layer.copy(Config.ARGB_8888, false);
IntBuffer buffBase = IntBuffer.allocate(base.getWidth() * base.getHeight());
base.copyPixelsToBuffer(buffBase);
buffBase.rewind();
IntBuffer buffBlend = IntBuffer.allocate(blend.getWidth() * blend.getHeight());
blend.copyPixelsToBuffer(buffBlend);
buffBlend.rewind();
IntBuffer buffOut = IntBuffer.allocate(base.getWidth() * base.getHeight());
buffOut.rewind();
while (buffOut.position() < buffOut.limit()) {
int filterInt = buffBlend.get();
int srcInt = buffBase.get();
int redValueFilter = Color.red(filterInt);
int greenValueFilter = Color.green(filterInt);
int blueValueFilter = Color.blue(filterInt);
int redValueSrc = Color.red(srcInt);
int greenValueSrc = Color.green(srcInt);
int blueValueSrc = Color.blue(srcInt);
int redValueFinal = colordodge(redValueFilter, redValueSrc);
int greenValueFinal = colordodge(greenValueFilter, greenValueSrc);
int blueValueFinal = colordodge(blueValueFilter, blueValueSrc);
int pixel = Color.argb(alpha, redValueFinal, greenValueFinal, blueValueFinal);
buffOut.put(pixel);
}
buffOut.rewind();
base.copyPixelsFromBuffer(buffOut);
blend.recycle();
return base;
};
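The colordodge() helper called above is not shown in the question. A common per-channel color-dodge implementation (an assumption, not the asker's exact code) looks like this:
// Color-dodge blend for a single channel: brightens the base value ("image")
// driven by the filter value ("mask"). Assumed helper, not from the question.
private int colordodge(int in1, int in2) {
    float image = (float) in2;
    float mask = (float) in1;
    return (int) ((image == 255) ? image
            : Math.min(255, (((long) mask << 8) / (255 - image))));
}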
bmp = ColorDodgeBlend(source, layer, alpha); imageview.setImageBitmap(bmp);
But when I try to save the bitmap from the ImageView, the RGB of the saved bitmap is different from how it appears in the ImageView; changing the alpha changes the RGB values.
public Bitmap loadBitmapFromView(View v) {
Bitmap b = Bitmap.createBitmap(v.getWidth(), v.getHeight(),
Bitmap.Config.ARGB_8888);
Canvas c = new Canvas(b);
v.draw(c);
return b;
}
Bitmap b = loadBitmapFromView(imageview);
saveBitmap(b);
private void saveBitmap(Bitmap bmp) {
try {
File f = new File(Environment.getExternalStorageDirectory()
+ "/Pictures/SketchPhoto/");
f.mkdirs();
Date d = new Date();
CharSequence s = DateFormat
.format("MM-dd-yy hh-mm-ss", d.getTime());
fileName = s.toString() + ".jpeg";
String fullf = f + "/" + fileName;
FileOutputStream fos = new FileOutputStream(fullf);
bmp.compress(Bitmap.CompressFormat.JPEG, 100, fos);
fos.close();
Toast.makeText(getApplicationContext(), "Sketch Saved", 100).show();
} catch (Exception ex) {
ex.printStackTrace();
}
}
I have done some research and found that it only happens when the value of alpha is smaller than 255.

download Bitmap and write into existing one

I'm downloading a bitmap from a URL with the following code. If I do this cyclically (like streaming images from a camera), the bitmap will be reallocated again and again. So I wonder if there is a way to write the newly downloaded byte array into the existing bitmap, which is already allocated in memory.
public static Bitmap downloadBitmap(String url) {
try {
URL newUrl = new URL(url);
return BitmapFactory.decodeStream(newUrl.openConnection()
.getInputStream());
} catch (MalformedURLException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
return null;
}
Within the bitmap memory-management documentation, the section entitled 'Manage Memory on Android 3.0 and Higher' describes how to manipulate bitmaps so that the Bitmap allocation itself can be reused and does not need to be re-allocated. If you are indeed using the stream from the camera, this applies back to Honeycomb, since the frames will all be the same size. Otherwise, it may only help on 4.4 KitKat and later.
Alternatively, you could store a WeakReference (if you want it to be collected in case of memory pressure) within the downloadBitmap class, re-use that allocation, and return it instead of creating a new bitmap on each call.
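A sketch of that idea, combining a WeakReference cache with the inBitmap reuse described above (all names here are illustrative; note that inBitmap has size/format constraints, requiring exact equality before 4.4):
import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import java.io.IOException;
import java.io.InputStream;
import java.lang.ref.WeakReference;
import java.net.URL;

public final class BitmapDownloader {
    // Weakly held so the allocation can be collected under memory pressure.
    private static WeakReference<Bitmap> reusable;

    public static Bitmap downloadBitmap(String url) {
        try (InputStream in = new URL(url).openConnection().getInputStream()) {
            BitmapFactory.Options options = new BitmapFactory.Options();
            options.inMutable = true;
            Bitmap old = (reusable != null) ? reusable.get() : null;
            if (old != null) {
                // Decode into the existing allocation; decodeStream throws
                // IllegalArgumentException if the old bitmap cannot be reused.
                options.inBitmap = old;
            }
            Bitmap bmp = BitmapFactory.decodeStream(in, null, options);
            reusable = new WeakReference<>(bmp);
            return bmp;
        } catch (IOException e) {
            e.printStackTrace();
            return null;
        }
    }
}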
The app is slowed down because it allocates and de-allocates memory in each cycle. There are three ways to avoid that.
The first version works without OpenCV but still allocates some memory in each cycle. But the amount is much smaller, and therefore it is at least two times faster. How? By re-using an existing, already-allocated buffer (byte[]). I'm using it with a pre-allocated StreamInfo buffer of 1,000,000 bytes (about double the size I'm expecting).
By the way, reading the input stream in chunks and using BitmapFactory.decodeByteArray is much faster than putting the URL's input stream directly into BitmapFactory.decodeStream.
public static class StreamInfo {
public byte[] buffer;
public int length;
public StreamInfo(int length) {
buffer = new byte[length];
}
}
public static StreamInfo imageByte(StreamInfo buffer, String url) {
try {
URL newUrl = new URL(url);
InputStream is = (InputStream) newUrl.getContent();
byte[] tempBuffer = new byte[8192];
int bytesRead;
int position = 0;
if (buffer != null) {
// re-using existing buffer
while ((bytesRead = is.read(tempBuffer)) != -1) {
System.arraycopy(tempBuffer, 0, buffer.buffer, position,
bytesRead);
position += bytesRead;
}
buffer.length = position;
return buffer;
} else {
// allocating new buffer
ByteArrayOutputStream output = new ByteArrayOutputStream();
while ((bytesRead = is.read(tempBuffer)) != -1) {
output.write(tempBuffer, 0, bytesRead);
position += bytesRead;
}
byte[] result = output.toByteArray();
buffer = new StreamInfo(result.length * 2);
buffer.length = position;
System.arraycopy(result, 0, buffer.buffer, 0, result.length);
return buffer;
}
} catch (MalformedURLException e) {
e.printStackTrace();
return null;
} catch (IOException e) {
e.printStackTrace();
return null;
}
}
The second version uses OpenCV Mat and a pre-allocated Bitmap. Receiving the stream is done as in version one, so it no longer needs further memory allocation (for details check out this link). This version works fine, but it is a bit slower because it contains conversions between OpenCV Mat and Bitmap.
private NetworkCameraFrame frame;
private HttpUtils.StreamInfo buffer = new HttpUtils.StreamInfo(1000000);
private MatOfByte matForConversion;
private NetworkCameraFrame receive() {
buffer = HttpUtils.imageByte(buffer, uri);
if (buffer == null || buffer.length == 0)
return null;
Log.d(TAG, "Received image with byte-array of length: "
+ buffer.length / 1024 + "kb");
if (frame == null) {
final BitmapFactory.Options options = new BitmapFactory.Options();
options.inJustDecodeBounds = true;
Bitmap bmp = BitmapFactory.decodeByteArray(buffer.buffer, 0,
buffer.length);
frame = new NetworkCameraFrame(bmp.getWidth(), bmp.getHeight());
Log.d(TAG, "NetworkCameraFrame created");
bmp.recycle();
}
if (matForConversion == null)
matForConversion = new MatOfByte(buffer.buffer);
else
matForConversion.fromArray(buffer.buffer);
Mat newImage = Highgui.imdecode(matForConversion,
Highgui.IMREAD_UNCHANGED);
frame.put(newImage);
return frame;
}
private class NetworkCameraFrame implements CameraFrame {
Mat mat;
private int mWidth;
private int mHeight;
private Bitmap mCachedBitmap;
private boolean mBitmapConverted;
public NetworkCameraFrame(int width, int height) {
this.mWidth = width;
this.mHeight = height;
this.mat = new Mat(new Size(width, height), CvType.CV_8U);
this.mCachedBitmap = Bitmap.createBitmap(width, height,
Bitmap.Config.ARGB_8888);
}
@Override
public Mat gray() {
return mat.submat(0, mHeight, 0, mWidth);
}
@Override
public Mat rgba() {
return mat;
}
// @Override
// public Mat yuv() {
// return mYuvFrameData;
// }
@Override
public synchronized Bitmap toBitmap() {
if (mBitmapConverted)
return mCachedBitmap;
Mat rgba = this.rgba();
Utils.matToBitmap(rgba, mCachedBitmap);
mBitmapConverted = true;
return mCachedBitmap;
}
public synchronized void put(Mat frame) {
mat = frame;
invalidate();
}
public void release() {
mat.release();
mCachedBitmap.recycle();
}
public void invalidate() {
mBitmapConverted = false;
}
};
The third version follows the "Usage of BitmapFactory" instructions on BitmapFactory.Options and uses a mutable Bitmap that is then re-used while decoding. It even worked for me on Android Jelly Bean. Make sure you're using the correct BitmapFactory.Options when creating the very first Bitmap.
BitmapFactory.Options options = new BitmapFactory.Options();
options.inBitmap = bmp; // the old Bitmap that should be reused
options.inMutable = true;
options.inSampleSize = 1;
bmp = BitmapFactory.decodeByteArray(buffer, 0, buffer.length, options);
options.inBitmap = bmp; // reuse the freshly decoded Bitmap on the next cycle
This was actually the fastest streaming variant.
