Android ContentResolver - save photo metadata

I'm building a photo-taking app using the Camera2 API.
I want to attach additional information to the photo (date, location), but I'm getting an "Unknown URL" exception.
When I comment out the contentResolver call, the photo is saved but lacks any additional information, and I need access to the location: I will be filtering the gallery down to photos taken in close proximity.
internal class ImageSaver(
    private val image: Image,
    private val file: File,
    private val watermark: Bitmap,
    private val mContext: Context
) : Runnable {

    private val saveImageExecutor: Executor = Executors.newSingleThreadExecutor()

    override fun run() {
        val jpegByteBuffer = image.planes[0].buffer
        val jpegByteArray = ByteArray(jpegByteBuffer.remaining())
        jpegByteBuffer.get(jpegByteArray)
        val width = image.width
        val height = image.height

        saveImageExecutor.execute {
            val date = System.currentTimeMillis()
            val location = getLocation(mContext)
            val longitude = location?.longitude ?: 0.0
            val latitude = location?.latitude ?: 0.0

            // watermark
            val options = BitmapFactory.Options()
            options.inMutable = true
            val original =
                BitmapFactory.decodeByteArray(jpegByteArray, 0, jpegByteArray.size, options)
            val overlayed = overlay(original, watermark)
            val watermarkedByteArrayOS = ByteArrayOutputStream()
            overlayed!!.compress(Bitmap.CompressFormat.JPEG, 100, watermarkedByteArrayOS)
            val watermarkedByteArray = watermarkedByteArrayOS.toByteArray()

            Log.d(TAG, "saving pic meta-data")
            val values = ContentValues()
            values.put(MediaStore.Images.ImageColumns.TITLE, file.name)
            values.put(MediaStore.Images.ImageColumns.DISPLAY_NAME, file.name)
            values.put(MediaStore.Images.ImageColumns.DATA, file.path)
            values.put(MediaStore.Images.ImageColumns.DATE_TAKEN, date)
            values.put(MediaStore.Images.ImageColumns.WIDTH, width)
            values.put(MediaStore.Images.ImageColumns.HEIGHT, height)
            values.put(MediaStore.Images.ImageColumns.LONGITUDE, longitude)
            values.put(MediaStore.Images.ImageColumns.LATITUDE, latitude)
            // Note: the labels were swapped in the original logs
            Log.d(TAG, "LON: ${values.get(MediaStore.Images.ImageColumns.LONGITUDE)}")
            Log.d(TAG, "LAT: ${values.get(MediaStore.Images.ImageColumns.LATITUDE)}")

            var output: FileOutputStream? = null
            try {
                output = FileOutputStream(file).apply {
                    write(watermarkedByteArray)
                }
            } catch (e: IOException) {
                Log.e(TAG, e.toString())
            } finally {
                image.close()
                output?.let {
                    try {
                        it.close()
                    } catch (e: IOException) {
                        Log.e(TAG, e.toString())
                    }
                }
            }
            mContext.contentResolver.insert(Uri.fromFile(file), values)
        }
    }
}
Output:
java.lang.IllegalArgumentException: Unknown URL file:///storage/emulated/0/Android/data/(...)/DCIM/20200610165428492.jpg
at android.content.ContentResolver.insert(ContentResolver.java:1831)
at ...ImageSaver$run$1.run(ImageSaver.kt:86)
What should the URI be? Is there a better way to store the location of a photo?
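The "Unknown URL" IllegalArgumentException comes from passing a file:// URI to ContentResolver.insert(), which only accepts content:// URIs backed by a provider. A minimal sketch of the usual fix, assuming the rest of ImageSaver stays as above: insert the same values into the MediaStore images table instead. Note that the LATITUDE and LONGITUDE columns were deprecated in API 29; on newer versions, writing the location into the file's EXIF data (androidx ExifInterface) is the common alternative and also answers the "better way" part.

// Sketch: insert the same ContentValues into the MediaStore content URI
// instead of a file:// URI.
val insertedUri = mContext.contentResolver.insert(
    MediaStore.Images.Media.EXTERNAL_CONTENT_URI,
    values
)

// API 29+ alternative: store the location in the JPEG's EXIF tags
// (assumes the androidx.exifinterface dependency).
ExifInterface(file.absolutePath).apply {
    setLatLong(latitude, longitude)
    saveAttributes()
}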

Related

Share Image Intent [The file format is not supported]

I implemented sharing an image to other apps using an intent, following the recommended approach of saving the image first and then getting its local bitmap URI. But when I run the app and share an image, I get the toast message [The file format is not supported], and I can't seem to figure out what I did wrong. Thanks.
fun shareItem(url: String?, context: Context, scope: CoroutineScope) {
    scope.launch {
        withContext(Dispatchers.IO) {
            val i = Intent(Intent.ACTION_SEND)
            i.type = "image/*"
            i.putExtra(Intent.EXTRA_STREAM,
                getBitmap("d", context, "https://cdn2.thecatapi.com/images/3o8.jpg")?.let {
                    getLocalBitmapUri(
                        it,
                        context
                    )
                })
            startActivity(context, Intent.createChooser(i, "Share Image"), null)
        }
    }
}

fun getLocalBitmapUri(bmp: Bitmap, context: Context): Uri? {
    val builder = VmPolicy.Builder()
    StrictMode.setVmPolicy(builder.build())
    var bmpUri: Uri? = null
    try {
        val file = File(
            context.getExternalFilesDir(Environment.DIRECTORY_PICTURES),
            "share_image_jj" + System.currentTimeMillis() + ".png"
        )
        val out = FileOutputStream(file)
        bmp.compress(Bitmap.CompressFormat.PNG, 90, out)
        out.close()
        bmpUri = Uri.fromFile(file)
    } catch (e: IOException) {
        e.printStackTrace()
    }
    return bmpUri
}

private suspend fun getBitmap(tag: String, context: Context, imageUrl: String): Bitmap? {
    var bitmap: Bitmap? = null
    val imageRequest = ImageRequest.Builder(context)
        .data(imageUrl)
        .target(
            ...//,
            onSuccess = { result ->
                Log.e(tag, "Coil loader success.")
                bitmap = result.toBitmap()
            }
        )
        .build()
    context.imageLoader.execute(imageRequest)
    return bitmap
}
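The [The file format is not supported] toast most likely comes from handing the receiving app a raw file:// URI pointing at this app's private getExternalFilesDir() storage (the StrictMode VM-policy line above only silences the FileUriExposedException; it doesn't make the file readable). The usual fix is to share a content:// URI through FileProvider and grant read permission. A sketch, assuming a matching <provider> entry and file-paths XML are declared in the manifest; the "fileprovider" authority suffix is an assumption:

// Sketch: share a content:// URI from FileProvider instead of Uri.fromFile().
fun shareViaFileProvider(file: File, context: Context) {
    val uri = FileProvider.getUriForFile(context, "${context.packageName}.fileprovider", file)
    val intent = Intent(Intent.ACTION_SEND).apply {
        type = "image/png"
        putExtra(Intent.EXTRA_STREAM, uri)
        addFlags(Intent.FLAG_GRANT_READ_URI_PERMISSION) // let the receiver read the URI
    }
    context.startActivity(Intent.createChooser(intent, "Share Image"))
}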

How to upload encrypted image file on google drive from android using Google Drive API

I want to upload an encrypted image file from Android to a user-specific Google Drive, and also decrypt the image when loading it in-app.
Use the following function. It uploads the encrypted image file to Google Drive; the functions further down download the file, decrypt it, and save it to the app's private files folder.
suspend fun uploadFileToGDrive(path: String?, suffix: String?): String {
    getDriveService()?.let { googleDriveService ->
        try {
            if (path != null) {
                Log.e("pathGD", path)
            }
            googleDriveService.fetchOrCreateAppFolder(
                context.getString(R.string.application_folder),
                preferenceHelper
            )
            val encryptedData = ImageCrypto().encryptFile("$path")
            Log.e("encryptedData", encryptedData)
            val actualFile = File(encryptedData)
            if (!actualFile.exists()) error("File $actualFile does not exist.")
            val gFile = com.google.api.services.drive.model.File()
            // gFile.name = actualFile.name
            val formatter = SimpleDateFormat("yyyyMMddHHmmss")
            val dateString = formatter.format(Date())
            gFile.name = dateString + suffix
            gFile.parents = listOf(preferenceHelper.getFolderId())
            val fileContent = FileContent("image/jpeg", actualFile)
            val create = googleDriveService.files().create(gFile, fileContent)
                .setFields("id, parents")
                .execute()
            driveFilePathId = create.id
        } catch (exception: Exception) {
            exception.printStackTrace()
        }
    } ?: run {
        // Only prompt for login when there is no Drive service
        Toast.makeText(context, "Please Log In first!", LENGTH_LONG).show()
    }
    return driveFilePathId
}
I have uploaded the AES image encryption and decryption code to my GitHub profile; please check:
https://github.com/meshramaravind/FileEncryptedAES
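The repo contains the actual ImageCrypto implementation. Purely as an illustration of the idea, a hypothetical AES/CBC file encryptor in the same spirit might look like this (key and IV handling are simplified here and not production-ready):

// Hypothetical sketch of an AES/CBC file encryptor similar in spirit to
// ImageCrypto.encryptFile(); the real implementation lives in the linked repo.
// Uses javax.crypto.Cipher, CipherOutputStream, SecretKeySpec, IvParameterSpec.
fun encryptFile(path: String, key: ByteArray, iv: ByteArray): String {
    val cipher = Cipher.getInstance("AES/CBC/PKCS5Padding")
    cipher.init(Cipher.ENCRYPT_MODE, SecretKeySpec(key, "AES"), IvParameterSpec(iv))
    val outFile = File("$path.enc")
    CipherOutputStream(FileOutputStream(outFile), cipher).use { out ->
        FileInputStream(File(path)).use { input -> input.copyTo(out) }
    }
    return outFile.absolutePath
}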
When you display an image from Google Drive, you need to download it and decrypt it.
First, the function call that downloads the image from Google Drive:
fun downloadFileFromGDrive(id: String) {
    getDriveService()?.let { googleDriveService ->
        CoroutineScope(Dispatchers.IO).launch {
            Log.e("idDownload", id)
            val file = File(context.filesDir, "${id}.jpg")
            if (!file.exists()) {
                try {
                    val gDriveFile = googleDriveService.files().get(id).execute()
                    createDirectoryAndSaveImagePackage(gDriveFile.id)
                } catch (e: Exception) {
                    println("!!! Handle Exception $e")
                }
            }
        }
    } ?: ""
    //Toast.makeText(context, "Please Log In first!", LENGTH_SHORT).show()
}
and save it to the app's private files folder:
fun createDirectoryAndSaveImagePackage(id: String?) {
    getDriveService()?.let { googleDriveService ->
        CoroutineScope(Dispatchers.IO).launch {
            val file = File(context.filesDir, "${id}.jpg")
            Log.e("fileEncryptedDirGD", "$file")
            try {
                val outputStream = FileOutputStream(file)
                googleDriveService.files()[id]
                    .executeMediaAndDownloadTo(outputStream)
                outputStream.flush()
                outputStream.close()
                if (id != null) {
                    googleDriveService.readFile(id)
                }
                // Decrypt only after the download has been fully flushed to disk
                val decryptedDataDir = ImageCrypto().decryptFile("$file")
                Log.e("decryptedDataDir", decryptedDataDir)
            } catch (e: Exception) {
                e.printStackTrace()
            }
        }
    }
}
and display the image with Jetpack Compose:
fun displayImage() {
    val fileGD = File(context.filesDir, "${it1}.jpg")
    if (fileGD.exists()) {
        val decryptedData2 =
            ImageCrypto().decryptFile("$fileGD")
        val painterBitmap = rememberImagePainter(
            data = File(decryptedData2),
            builder = {
                crossfade(500)
            })
        val painterState = painterBitmap.state
        val painter =
            rememberImagePainter(
                data = File(decryptedData2),
                builder = {
                    placeholder(R.drawable.placeholder)
                    error(R.drawable.placeholder)
                })
        if (painterState is ImagePainter.State.Loading) {
            CircularProgressIndicator(
                modifier = Modifier.align(Alignment.Center),
                color = MaterialTheme.colors.secondary
            )
        } else {
            Image(
                painter = painter,
                contentScale = ContentScale.Inside,
                modifier = Modifier
                    .width(200.dp)
                    .height(100.dp)
                    .padding(PADDING / 2),
                contentDescription = null,
            )
        }
    }
}

Image produced is incomplete - Cannot copy to a TensorFlowLite tensor (input_1) with bytes

I am trying to load a tflite model and run it on an image.
My tflite model has the dimensions you see in the image.
Right now, I am receiving:
Cannot copy to a TensorFlowLite tensor (input_1) with 49152 bytes from a Java Buffer with 175584 bytes.
I can't understand how to work with the input and output tensor sizes. Right now, I am initializing using the input image size, and the output image size will be input * 4.
At which point do I have to "add" the 1 * 64 * 64 * 3 dimensions, since I need to handle every input image size?
try {
    tflitemodel = loadModelFile()
    tflite = Interpreter(tflitemodel, options)
} catch (e: IOException) {
    Log.e(TAG, "Fail to load model", e)
}

val imageTensorIndex = 0
val imageShape: IntArray =
    tflite.getInputTensor(imageTensorIndex).shape()
val imageDataType: DataType = tflite.getInputTensor(imageTensorIndex).dataType()

// Build a TensorImage object
var inputImageBuffer = TensorImage(imageDataType)

// Load the Bitmap
inputImageBuffer.load(bitmap)

// Preprocess image
val imgprocessor = ImageProcessor.Builder()
    .add(ResizeOp(inputImageBuffer.height,
        inputImageBuffer.width,
        ResizeOp.ResizeMethod.NEAREST_NEIGHBOR))
    //.add(NormalizeOp(127.5f, 127.5f))
    //.add(QuantizeOp(128.0f, 1 / 128.0f))
    .build()

// Process the image
val processedImage = imgprocessor.process(inputImageBuffer)

// Access the buffer ( byte[] ) of the processedImage
val imageBuffer = processedImage.buffer
val imageTensorBuffer = processedImage.tensorBuffer

// output result
val outputImageBuffer = TensorBuffer.createFixedSize(
    intArrayOf(inputImageBuffer.height * 4,
        inputImageBuffer.width * 4),
    DataType.FLOAT32)

// Normalize image
val tensorProcessor = TensorProcessor.Builder()
    // Normalize the tensor given the mean and the standard deviation
    .add(NormalizeOp(127.5f, 127.5f))
    .add(CastOp(DataType.FLOAT32))
    .build()

val processedOutputTensor = tensorProcessor.process(outputImageBuffer)
tflite.run(imageTensorBuffer.buffer, processedOutputTensor.buffer)
I tried casting the output tensor to both FLOAT32 and UINT8.
UPDATE
I also tried this:
try {
    tflitemodel = loadModelFile()
    tflite = Interpreter(tflitemodel, options)
} catch (e: IOException) {
    Log.e(TAG, "Fail to load model", e)
}

val imageTensorIndex = 0
val imageDataType: DataType = tflite.getInputTensor(imageTensorIndex).dataType()

val imgprocessor = ImageProcessor.Builder()
    .add(ResizeOp(64,
        64,
        ResizeOp.ResizeMethod.NEAREST_NEIGHBOR)
    )
    .add(NormalizeOp(0.0f, 255.0f))
    .add(CastOp(DataType.FLOAT32))
    .build()

val inpIm = TensorImage(imageDataType)
inpIm.load(bitmap)
val processedImage = imgprocessor.process(inpIm)

val output = TensorBuffer.createFixedSize(
    intArrayOf(
        124 * 4,
        118 * 4,
        3,
        1
    ),
    DataType.FLOAT32
)

val tensorProcessor = TensorProcessor.Builder()
    .add(NormalizeOp(0.0f, 255.0f))
    .add(CastOp(DataType.FLOAT32))
    .build()

val processedOutputTensor = tensorProcessor.process(output)
tflite.run(processedImage.buffer, processedOutputTensor.buffer)
which produces a similar error.
Note that the current image I am using as input has 124 * 118 * 3 dimensions.
The output image will have (124 * 4) * (118 * 4) * 3 dimensions.
The model needs 64 * 64 * 3 as its input layer.
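For reference, the two byte counts in the exception follow directly from these shapes, since a FLOAT32 value occupies 4 bytes:

// Where the numbers in the exception come from (4 bytes per FLOAT32 value):
val modelInputBytes = 1 * 64 * 64 * 3 * 4   // = 49,152 bytes: what input_1 expects
val suppliedBytes = 1 * 124 * 118 * 3 * 4   // = 175,584 bytes: a 124x118 RGB float image
// The buffer sizes must match exactly, so either resize the image to 64x64
// before inference, or resize the model's input via Interpreter.resizeInput().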
I took a look at your project; your class will look like this:
class MainActivity : AppCompatActivity() {
    private val TAG = "SuperResolution"
    private val MODEL_NAME = "model_edsr.tflite"
    private val LR_IMAGE_HEIGHT = 24
    private val LR_IMAGE_WIDTH = 24
    private val UPSCALE_FACTOR = 4
    private val SR_IMAGE_HEIGHT = LR_IMAGE_HEIGHT * UPSCALE_FACTOR
    private val SR_IMAGE_WIDTH = LR_IMAGE_WIDTH * UPSCALE_FACTOR
    private lateinit var photoButton: Button
    private lateinit var srButton: Button
    private lateinit var colorizeButton: Button
    private var FILE_NAME = "photo.jpg"
    private lateinit var filename: String
    private var resultImg: Bitmap? = null
    private lateinit var gpuSwitch: Switch
    private lateinit var tflite: Interpreter
    private lateinit var tflitemodel: ByteBuffer
    private val INPUT_SIZE: Int = 96
    private val PIXEL_SIZE: Int = 3
    private val IMAGE_MEAN = 0
    private val IMAGE_STD = 255.0f
    private var bitmap: Bitmap? = null
    private var bitmapResult: Bitmap? = null

    /** A ByteBuffer to hold image data, to be fed into TensorFlow Lite as input/output */
    private lateinit var imgDataInput: ByteBuffer
    private lateinit var imgDataOutput: ByteBuffer

    /** Dimensions of inputs. */
    private val DIM_BATCH_SIZE = 1
    private val DIM_PIXEL_SIZE = 3
    private val DIM_IMG_SIZE_X = 64
    private val DIM_IMG_SIZE_Y = 64

    private lateinit var catBitmap: Bitmap

    /* Preallocated buffers for storing image data in. */
    private val intValues = IntArray(DIM_IMG_SIZE_X * DIM_IMG_SIZE_Y)
    private lateinit var superImage: ImageView

    override fun onCreate(savedInstanceState: Bundle?) {
        super.onCreate(savedInstanceState)
        setContentView(R.layout.activity_main)
        superImage = findViewById(R.id.super_resolution_image)
        //val assetManager = assets
        catBitmap = getBitmapFromAsset("cat.png")
        srButton = findViewById(R.id.super_resolution)
        srButton.setOnClickListener { view: View ->
            val intent = Intent(this, SelectedImage::class.java)
            getImageResult.launch(intent)
        }
    }
    private fun getBitmapFromAsset(filePath: String?): Bitmap {
        val assetManager = assets
        val istr: InputStream
        var bitmap: Bitmap? = null
        try {
            istr = assetManager.open(filePath!!)
            bitmap = BitmapFactory.decodeStream(istr)
        } catch (e: IOException) {
            // handle exception
            Log.e("Bitmap_except", e.toString())
        }
        if (bitmap != null) {
            bitmap = Bitmap.createScaledBitmap(bitmap, 64, 64, true)
        }
        return bitmap ?: Bitmap.createBitmap(10, 10, Bitmap.Config.ARGB_8888)
    }
    private val getImageResult =
        registerForActivityResult(ActivityResultContracts.StartActivityForResult()) { result ->
            if (result.resultCode == Activity.RESULT_OK) {
                var theImageUri: Uri? = null
                theImageUri = result.data?.getParcelableExtra<Uri>("imageuri")
                filename = "SR_" + theImageUri?.getOriginalFileName(this).toString()
                bitmap = uriToBitmap(theImageUri!!)!! //catBitmap//
                Log.v("width", bitmap!!.width.toString())
                if (bitmap != null) {
                    // call DL
                    val options = Interpreter.Options()
                    options.setNumThreads(5)
                    options.setUseNNAPI(true)
                    try {
                        tflitemodel = loadModelFile()
                        tflite = Interpreter(tflitemodel, options)
                        val index = tflite.getInputIndex("input_1")
                        tflite.resizeInput(
                            index,
                            intArrayOf(1, bitmap!!.width, bitmap!!.height, 3)
                        )
                    } catch (e: IOException) {
                        Log.e(TAG, "Fail to load model", e)
                    }
                    val imgprocessor = ImageProcessor.Builder()
                        .add(
                            ResizeOp(bitmap!!.width,
                                bitmap!!.height,
                                ResizeOp.ResizeMethod.NEAREST_NEIGHBOR)
                        )
                        .add(CastOp(DataType.FLOAT32))
                        .build()
                    val inpIm = TensorImage(DataType.FLOAT32)
                    inpIm.load(bitmap)
                    // Process the image
                    val processedImage = imgprocessor.process(inpIm)
                    val output2 = Array(1) { Array(4 * bitmap!!.width) { Array(4 * bitmap!!.height) { FloatArray(3) } } }
                    tflite.run(processedImage.buffer, output2)
                    bitmapResult = convertArrayToBitmap(output2, 4 * bitmap!!.height, 4 * bitmap!!.width)
                    Log.v("widthHR", bitmapResult!!.height.toString())
                    superImage.setImageBitmap(bitmapResult)
                }
            }
        }
    @Throws(IOException::class)
    private fun loadModelFile(): MappedByteBuffer {
        val fileDescriptor = assets.openFd(MODEL_NAME)
        val inputStream = FileInputStream(fileDescriptor.fileDescriptor)
        val fileChannel = inputStream.channel
        val startOffset = fileDescriptor.startOffset
        val declaredLength = fileDescriptor.declaredLength
        return fileChannel.map(FileChannel.MapMode.READ_ONLY, startOffset, declaredLength)
    }
    private fun uriToBitmap(selectedFileUri: Uri): Bitmap? {
        try {
            val parcelFileDescriptor = contentResolver.openFileDescriptor(selectedFileUri, "r")
            val fileDescriptor: FileDescriptor = parcelFileDescriptor!!.fileDescriptor
            val image = BitmapFactory.decodeFileDescriptor(fileDescriptor)
            parcelFileDescriptor.close()
            return image
        } catch (e: IOException) {
            e.printStackTrace()
        }
        return null
    }

    private fun getOutputImage(output: ByteBuffer): Bitmap? {
        output.rewind()
        val outputWidth = 124 * 4
        val outputHeight = 118 * 4
        val bitmap = Bitmap.createBitmap(outputWidth, outputHeight, Bitmap.Config.ARGB_8888)
        val pixels = IntArray(outputWidth * outputHeight)
        for (i in 0 until outputWidth * outputHeight) {
            val a = 0xFF
            val r = output.float * 255.0f
            val g = output.float * 255.0f
            val b = output.float * 255.0f
            pixels[i] = a shl 24 or (r.toInt() shl 16) or (g.toInt() shl 8) or b.toInt()
        }
        bitmap.setPixels(pixels, 0, outputWidth, 0, 0, outputWidth, outputHeight)
        return bitmap
    }
    // save bitmap image to gallery
    private fun saveToGallery(context: Context, bitmap: Bitmap, albumName: String) {
        //val filename = "${System.currentTimeMillis()}.png"
        val write: (OutputStream) -> Boolean = {
            bitmap.compress(Bitmap.CompressFormat.PNG, 100, it)
        }
        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.Q) {
            val contentValues = ContentValues().apply {
                put(MediaStore.MediaColumns.DISPLAY_NAME, filename)
                put(MediaStore.MediaColumns.MIME_TYPE, "image/png")
                put(MediaStore.MediaColumns.RELATIVE_PATH, "${Environment.DIRECTORY_DCIM}/$albumName")
            }
            context.contentResolver.let {
                it.insert(MediaStore.Images.Media.EXTERNAL_CONTENT_URI, contentValues)?.let { uri ->
                    it.openOutputStream(uri)?.let(write)
                }
            }
        } else {
            val imagesDir = Environment.getExternalStoragePublicDirectory(Environment.DIRECTORY_DCIM).toString() + File.separator + albumName
            val file = File(imagesDir)
            if (!file.exists()) {
                file.mkdir()
            }
            val image = File(imagesDir, filename)
            write(FileOutputStream(image))
        }
    }

    // get the filename from an image uri
    private fun Uri.getOriginalFileName(context: Context): String? {
        return context.contentResolver.query(this,
            null,
            null,
            null,
            null)?.use {
            val nameColumnIndex = it.getColumnIndex(OpenableColumns.DISPLAY_NAME)
            it.moveToFirst()
            it.getString(nameColumnIndex)
        }
    }
    fun convertArrayToBitmap(
        imageArray: Array<Array<Array<FloatArray>>>,
        imageWidth: Int,
        imageHeight: Int
    ): Bitmap {
        val conf = Bitmap.Config.ARGB_8888 // see other conf types
        val bitmap = Bitmap.createBitmap(imageWidth, imageHeight, conf)
        for (x in imageArray[0].indices) {
            for (y in imageArray[0][0].indices) {
                // Create bitmap to show on screen after inference
                val color = Color.rgb(
                    (imageArray[0][x][y][0]).toInt(),
                    (imageArray[0][x][y][1]).toInt(),
                    (imageArray[0][x][y][2]).toInt()
                )
                // this y, x is in the correct order!!!
                bitmap.setPixel(y, x, color)
            }
        }
        return bitmap
    }
}
Take a look at how we resize the model's inputs inside Android, how we create the input buffer and output array, and how we convert the produced array to a Bitmap. For these procedures, check whether you can use the phone's GPU for roughly 3x speed, and of course there is plenty to read in the official documentation.
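As a sketch of the GPU suggestion (assuming the org.tensorflow:tensorflow-lite-gpu dependency; this is the standard delegate pattern, not code from the project above):

// Sketch: run the interpreter on the GPU where supported, else fall back to CPU threads.
val compatList = CompatibilityList()
val options = Interpreter.Options().apply {
    if (compatList.isDelegateSupportedOnThisDevice) {
        addDelegate(GpuDelegate(compatList.bestOptionsForThisDevice))
    } else {
        setNumThreads(4)
    }
}
val interpreter = Interpreter(tflitemodel, options)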

Kotlin - Strange behavior when taking screenshots of view

I am trying to take a screenshot of the displayed activity.
In this case, the activity contains a WebView (wvMainView).
The problem is that the main content of the screen (usually a chart) does not appear in the screenshot. The only time I get the full screenshot is when the webpage contains a table.
Here is the code for the screenshot:
var lMainActivityLayout: ConstraintLayout? = findViewById(R.id.lMainActivityLayout)
val bitmap = getScreenShotFromView(lMainActivityLayout!!)
// val bitmap = getScreenShotFromView(wvMainView!!)
if (bitmap != null) { saveMediaToStorage(bitmap) }

private fun getScreenShotFromView(v: View): Bitmap?
{
    Log.i("-", "MainActivity > getScreenShotFromView")
    var screenshot: Bitmap? = null
    try
    {
        screenshot = Bitmap.createBitmap(v.measuredWidth, v.measuredHeight, Bitmap.Config.ARGB_8888)
        val canvas = Canvas(screenshot)
        v.draw(canvas)
    }
    catch (e: Exception)
    {
        Log.e("GFG", "Failed to capture screenshot because:" + e.message)
    }
    return screenshot
}
private fun saveMediaToStorage(bitmap: Bitmap)
{
    Log.i("-", "MainActivity > saveMediaToStorage")
    val filename = "${System.currentTimeMillis()}.jpg"
    var fos: OutputStream? = null
    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.Q)
    {
        this.contentResolver?.also { resolver ->
            val contentValues = ContentValues().apply
            {
                put(MediaStore.MediaColumns.DISPLAY_NAME, filename)
                put(MediaStore.MediaColumns.MIME_TYPE, "image/jpeg") // "image/jpg" is not a registered MIME type
                put(MediaStore.MediaColumns.RELATIVE_PATH, Environment.DIRECTORY_PICTURES)
            }
            val imageUri: Uri? = resolver.insert(MediaStore.Images.Media.EXTERNAL_CONTENT_URI, contentValues)
            fos = imageUri?.let { resolver.openOutputStream(it) }
        }
    }
    else
    {
        val imagesDir = Environment.getExternalStoragePublicDirectory(Environment.DIRECTORY_PICTURES)
        val image = File(imagesDir, filename)
        fos = FileOutputStream(image)
    }
    fos?.use
    {
        bitmap.compress(Bitmap.CompressFormat.JPEG, 100, it)
        Toast.makeText(this, "Image saved to Gallery!", Toast.LENGTH_SHORT).show()
    }
}
As for the screenshot, take a look at the example below. When I run the app and take the screenshot, inside the gray area there is a bar chart that simply won't show up in the screenshot.
I tried taking a screenshot of the main layout as well as of the web view, but with the same result.
The iOS version of the app works fine.
Any idea what causes this strange behavior?
Maybe I should take a screenshot of the entire screen and not of a certain view (is this possible)?
And another small issue: the screenshot does not always appear in the Gallery app, although I can find it using the Files app.
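A likely explanation: View.draw() renders through a software Canvas, which skips hardware-accelerated content such as a WebView's chart. Capturing the actual window pixels with PixelCopy (API 26+) avoids this. A minimal sketch, assuming it runs after the first draw; this is not code from the answer below:

// Sketch: copy the window's real pixels (including hardware-rendered WebView
// content) into a Bitmap via PixelCopy. Requires API 26+.
private fun captureWindow(activity: Activity, view: View, onDone: (Bitmap?) -> Unit)
{
    val bitmap = Bitmap.createBitmap(view.width, view.height, Bitmap.Config.ARGB_8888)
    val location = IntArray(2)
    view.getLocationInWindow(location)
    PixelCopy.request(
        activity.window,
        Rect(location[0], location[1], location[0] + view.width, location[1] + view.height),
        bitmap,
        { result -> onDone(if (result == PixelCopy.SUCCESS) bitmap else null) },
        Handler(Looper.getMainLooper())
    )
}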
I ended up using ScreenShotty for this: https://github.com/bolteu/screenshotty
Add this to build.gradle:
androidTestImplementation 'androidx.test.ext:junit:1.1.2'
Here's the code, maybe it helps someone:
override fun onActivityResult(requestCode: Int, resultCode: Int, data: Intent?)
{
    Log.i("", "MainActivity > onActivityResult")
    super.onActivityResult(requestCode, resultCode, data)
    screenshotManager!!.onActivityResult(requestCode, resultCode, data)
}

fun getFullScreenshot()
{
    Log.i("", "MainActivity > getFullScreenshot")
    val screenshotResult = screenshotManager!!.makeScreenshot()
    val subscription = screenshotResult.observe(
        onSuccess =
        {
            // Add a delay to prevent lag / crash on Android 5.0/5.1.
            // Not sure if this is the correct way but it works for me
            Handler(Looper.getMainLooper()).postDelayed({ editScreenshot(it) }, 1000)
        },
        onError =
        {
            Log.i("", "Screenshot failed!")
        }
    )
}
fun editScreenshot(screenshot: Screenshot)
{
    Log.i("", "MainActivity > editScreenshot")
    val width: Int = Resources.getSystem().getDisplayMetrics().widthPixels
    val height: Int = Resources.getSystem().getDisplayMetrics().heightPixels
    val bitmap = when (screenshot)
    {
        is ScreenshotBitmap -> screenshot.bitmap
    }
    // Multiple resolutions cases go here
    bitmap?.apply {
        cropRectangle(
            xOffset = 50,
            yOffset = 250,
            newWidth = width - 100,
            newHeight = height - 450
        )?.let { saveMediaToStorage(it) }
    }
}
fun saveMediaToStorage(bitmap: Bitmap)
{
    Log.i("", "MainActivity > saveMediaToStorage")
    val screenshotFileName = "${System.currentTimeMillis()}.jpg"
    var fos: OutputStream? = null
    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.Q)
    {
        this.contentResolver?.also { resolver ->
            val contentValues = ContentValues().apply {
                put(MediaStore.MediaColumns.DISPLAY_NAME, screenshotFileName)
                put(MediaStore.MediaColumns.MIME_TYPE, "image/jpeg") // "image/jpg" is not a registered MIME type
                put(MediaStore.MediaColumns.RELATIVE_PATH, Environment.DIRECTORY_PICTURES)
            }
            val imageUri: Uri? = resolver.insert(MediaStore.Images.Media.EXTERNAL_CONTENT_URI, contentValues)
            fos = imageUri?.let { resolver.openOutputStream(it) }
        }
    }
    else
    {
        val imagesDir = Environment.getExternalStoragePublicDirectory(Environment.DIRECTORY_PICTURES)
        val image = File(imagesDir, screenshotFileName)
        fos = FileOutputStream(image)
    }
    fos?.use {
        bitmap.compress(Bitmap.CompressFormat.JPEG, 100, it)
        Toast.makeText(this, "Captured View and saved to Gallery", Toast.LENGTH_SHORT).show()
    }
}
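On the small Gallery issue: on the pre-Q branch the file is written directly into the Pictures directory, so nothing notifies the media scanner, which is why the file only shows up in the Files app. A sketch of the usual fix, placed after the write on that branch:

// Sketch: tell the media scanner about the freshly written file so that
// Gallery apps can index it (only needed on the pre-Q direct-file branch).
MediaScannerConnection.scanFile(
    this,
    arrayOf(image.absolutePath),
    arrayOf("image/jpeg"),
    null
)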

How to fix 'pink' image capture in android Camera2API?

I am trying to capture an image using two different lenses (wide and normal). The preview works fine for both cameras simultaneously using the new multi-camera support in Camera2 API. I am using a Huawei Mate 20 Pro.
However, when I capture the picture, it only saves pink-colored JPEG images, unless the object is close enough, in which case the picture is captured perfectly. This is what a pink JPEG looks like:
And when the object is close enough, the capture is fine. Here is how it looks:
Here is the main activity code:
button.setOnClickListener {
    if (isRunning) {
        handler.removeCallbacksAndMessages(null)
        restartActivity()
    } else {
        button.text = "Stop"
        handler.postDelayed(object : Runnable {
            override fun run() {
                twoLens.reset()
                twoLens.isTwoLensShot = true
                MainActivity.cameraParams.get(dualCamLogicalId).let {
                    if (it?.isOpen == true) {
                        Logd("In onClick. Taking Dual Cam Photo on logical camera: $dualCamLogicalId")
                        takePicture(this@MainActivity, it)
                        Toast.makeText(applicationContext, "Captured!", Toast.LENGTH_SHORT).show()
                    }
                }
                handler.postDelayed(this, 1000)
            }
        }, 2000)
    }
    isRunning = !isRunning
}
}
Here is the picture capture code.
fun captureStillPicture(activity: MainActivity, params: CameraParams) {
    if (!params.isOpen) {
        return
    }
    try {
        Logd("In captureStillPicture.")
        val camera = params.captureSession?.getDevice()
        if (null != camera) {
            params.captureBuilder = camera.createCaptureRequest(CameraDevice.TEMPLATE_STILL_CAPTURE)
            params.captureBuilder?.set(CaptureRequest.CONTROL_AF_MODE, CaptureRequest.CONTROL_AF_MODE_AUTO)
            if (params.id.equals(dualCamLogicalId) && twoLens.isTwoLensShot) {
                val normalParams: CameraParams? = MainActivity.cameraParams.get(normalLensId)
                val wideParams: CameraParams? = MainActivity.cameraParams.get(wideAngleId)
                if (null == normalParams || null == wideParams)
                    return
                Logd("In captureStillPicture. This is a Dual Cam shot.")
                params.captureBuilder?.addTarget(normalParams.imageReader?.surface!!)
                params.captureBuilder?.addTarget(wideParams.imageReader?.surface!!)
                params.captureBuilder?.set(CaptureRequest.CONTROL_AE_EXPOSURE_COMPENSATION, 4)
                params.captureBuilder?.set(CaptureRequest.JPEG_QUALITY, 100)
                if (Build.VERSION.SDK_INT >= 28) {
                    params.captureBuilder?.set(CaptureRequest.DISTORTION_CORRECTION_MODE, CameraMetadata.DISTORTION_CORRECTION_MODE_OFF)
                    //This is REQUIRED to disable HDR+ on Pixel 3 - even though Pixel 3 doesn't have sepia
                    params.captureBuilder?.set(CaptureRequest.CONTROL_EFFECT_MODE, CameraMetadata.CONTROL_EFFECT_MODE_SEPIA)
                } else {
                    //This is REQUIRED to disable HDR+ on Pixel 3 - even though Pixel 3 doesn't have sepia
                    params.captureBuilder?.set(CaptureRequest.CONTROL_EFFECT_MODE, CameraMetadata.CONTROL_EFFECT_MODE_SEPIA)
                    Logd("DUAL CAM DEBUG: I am setting sepia mode.")
                    // Logd("DUAL CAM DEBUG: I am NOT setting sepia mode.")
                }
                val rotation = activity.getWindowManager().getDefaultDisplay().getRotation()
                var capturedImageRotation = getOrientation(params, rotation)
                params.captureBuilder?.set(CaptureRequest.JPEG_ORIENTATION, capturedImageRotation)
                try {
                    params.captureSession?.stopRepeating()
                    // params.captureSession?.abortCaptures()
                } catch (e: CameraAccessException) {
                    e.printStackTrace()
                }
                //Do the capture
                // TODO: Capture BURST HERE
                if (28 <= Build.VERSION.SDK_INT)
                    params.captureSession?.captureSingleRequest(params.captureBuilder?.build(), params.backgroundExecutor, StillCaptureSessionCallback(activity, params))
                else
                    params.captureSession?.capture(params.captureBuilder?.build(), StillCaptureSessionCallback(activity, params),
                        params.backgroundHandler)
            }
        }
    } catch (e: CameraAccessException) {
        e.printStackTrace()
    } catch (e: IllegalStateException) {
        Logd("captureStillPicture IllegalStateException, aborting: " + e)
    }
}
This is how I am grabbing the captured pictures.
fun getImagesCaptured(activity: MainActivity, twoLens: TwoLensCoordinator) {
    Logd("Normal image timestamp: " + twoLens.normalImage?.timestamp)
    Logd("Wide image timestamp: " + twoLens.wideImage?.timestamp)

    val wideBuffer: ByteBuffer? = twoLens.wideImage!!.planes[0].buffer
    val wideBytes = ByteArray(wideBuffer!!.remaining())
    wideBuffer.get(wideBytes)

    val normalBuffer: ByteBuffer? = twoLens.normalImage!!.planes[0].buffer
    val normalBytes = ByteArray(normalBuffer!!.remaining())
    normalBuffer.get(normalBytes)

    val options = BitmapFactory.Options()
    val wideMat: Mat = Mat(twoLens.wideImage!!.height, twoLens.wideImage!!.width, CvType.CV_8UC1)
    val tempWideBitmap = BitmapFactory.decodeByteArray(wideBytes, 0, wideBytes.size, options)
    val normalMat: Mat = Mat(twoLens.normalImage!!.height, twoLens.normalImage!!.width, CvType.CV_8UC1)
    val tempNormalBitmap = BitmapFactory.decodeByteArray(normalBytes, 0, normalBytes.size, options)

    // save() below takes a Bitmap, so pass the decoded bitmaps
    // (the original called save() with the raw byte arrays, which doesn't compile)
    save(tempNormalBitmap, "NormalShot")
    save(tempWideBitmap, "WideShot")
}
The save function is here.
fun save(bytes: Bitmap, tempName: String) {
    val timeStamp = SimpleDateFormat("yyyyMMdd_HHmmss").format(Date())
    val dataDir = File(Environment.getExternalStoragePublicDirectory(Environment.DIRECTORY_DOWNLOADS), "TwoCameraImages")
    if (!dataDir.exists()) {
        dataDir.mkdir()
    }
    val fileName = tempName + "_IMG_$timeStamp.jpg"
    val fileDir = File(dataDir.path + File.separator + fileName)
    try {
        val fileOutputStream = FileOutputStream(fileDir)
        bytes.compress(Bitmap.CompressFormat.JPEG, 100, fileOutputStream)
        //fileOutputStream.write(bytes)
        fileOutputStream.close()
    } catch (e: FileNotFoundException) {
        e.printStackTrace()
    } catch (e: IOException) {
        e.printStackTrace()
    }
}
I built on top of the code given here: https://github.com/google/basicbokeh, switched to the rear cameras, and removed the face calculations. But this pink bitmap is not going away. Any help?
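One side note on the save path, not necessarily the cause of the tint: the ImageReader already delivers fully encoded JPEG bytes in plane 0, so decoding them to a Bitmap and re-compressing at quality 100 is a lossy round trip. The commented-out write(bytes) line hints at the simpler approach; a sketch:

// Sketch: write the encoded JPEG bytes from the ImageReader straight to disk,
// skipping the decode/re-encode round trip.
fun saveJpegBytes(jpegBytes: ByteArray, file: File) {
    FileOutputStream(file).use { it.write(jpegBytes) }
}

If the tint is present even in the directly written bytes, the capture settings themselves (the forced SEPIA effect and the +4 exposure compensation above) would be the next place to look.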
