I'm currently using the CameraX sample code to record a video with audio. I didn't change anything from the sample, but the camera simply won't launch. I have a swipe button in my fragment that is supposed to take me to the VideoCaptureFragment, but all I get are some logs and the fragments don't change. There are no errors or warnings and the app doesn't crash, but it never loads the camera view.
Here's my VideoCaptureFragment:
class VideoCaptureFragment : Fragment() {
// UI with ViewBinding
private var _captureViewBinding: VideoCaptureFragmentBinding? = null
private val captureViewBinding get() = _captureViewBinding!!
private val captureLiveStatus = MutableLiveData<String>()
private val cameraCapabilities = mutableListOf<CameraCapability>()
private lateinit var videoCapture: VideoCapture<Recorder>
private var currentRecording: Recording? = null
private lateinit var recordingState:VideoRecordEvent
// Camera UI states and inputs
enum class UiState {
IDLE, // Not recording, all UI controls are active.
RECORDING, // Camera is recording, only display Pause/Resume & Stop button.
FINALIZED, // Recording just completes, disable all RECORDING UI controls.
RECOVERY // For future use.
}
private var cameraIndex = 0
private var qualityIndex = DEFAULT_QUALITY_IDX
private var audioEnabled = false
private val mainThreadExecutor by lazy { ContextCompat.getMainExecutor(requireContext()) }
private var enumerationDeferred:Deferred<Unit>? = null
// main cameraX capture functions
/**
* Always bind preview + video capture use case combinations in this sample
* (VideoCapture can work on its own). The function should always execute on
* the main thread.
*/
private suspend fun bindCaptureUsecase() {
val cameraProvider = ProcessCameraProvider.getInstance(requireContext()).await()
val cameraSelector = getCameraSelector(cameraIndex)
// create the user required QualitySelector (video resolution): we know this is
// supported, a valid qualitySelector will be created.
val quality = cameraCapabilities[cameraIndex].qualities[qualityIndex]
val qualitySelector = QualitySelector.from(quality)
captureViewBinding.previewView.updateLayoutParams<ConstraintLayout.LayoutParams> {
val orientation = this@VideoCaptureFragment.resources.configuration.orientation
dimensionRatio = quality.getAspectRatioString(quality,
(orientation == Configuration.ORIENTATION_PORTRAIT))
}
val preview = Preview.Builder()
.setTargetAspectRatio(quality.getAspectRatio(quality))
.build().apply {
setSurfaceProvider(captureViewBinding.previewView.surfaceProvider)
}
// build a recorder, which can:
//   - record video/audio to MediaStore (shown here), File, or ParcelFileDescriptor
//   - be used to create recording(s) (the Recording object performs the actual recording)
val recorder = Recorder.Builder()
.setQualitySelector(qualitySelector)
.build()
videoCapture = VideoCapture.withOutput(recorder)
try {
cameraProvider.unbindAll()
cameraProvider.bindToLifecycle(
viewLifecycleOwner,
cameraSelector,
videoCapture,
preview
)
} catch (exc: Exception) {
// we are on main thread, let's reset the controls on the UI.
Log.e(TAG, "Use case binding failed", exc)
resetUIandState("bindToLifecycle failed: $exc")
}
enableUI(true)
}
/**
* Kick start the video recording
* - config Recorder to capture to MediaStoreOutput
* - register RecordEvent Listener
* - apply audio request from user
* - start recording!
* After this function, user could start/pause/resume/stop recording and application listens
* to VideoRecordEvent for the current recording status.
*/
#SuppressLint("MissingPermission")
private fun startRecording() {
// create MediaStoreOutputOptions for our recorder: this is where the resulting recording goes
val name = "CameraX-recording-" +
SimpleDateFormat(FILENAME_FORMAT, Locale.US)
.format(System.currentTimeMillis()) + ".mp4"
val contentValues = ContentValues().apply {
put(MediaStore.Video.Media.DISPLAY_NAME, name)
}
val mediaStoreOutput = MediaStoreOutputOptions.Builder(
requireActivity().contentResolver,
MediaStore.Video.Media.EXTERNAL_CONTENT_URI)
.setContentValues(contentValues)
.build()
// configure Recorder and Start recording to the mediaStoreOutput.
currentRecording = videoCapture.output
.prepareRecording(requireActivity(), mediaStoreOutput)
.apply { if (audioEnabled) withAudioEnabled() }
.start(mainThreadExecutor, captureListener)
Log.i(TAG, "Recording started")
}
/**
* CaptureEvent listener.
*/
private val captureListener = Consumer<VideoRecordEvent> { event ->
// cache the recording state
if (event !is VideoRecordEvent.Status)
recordingState = event
updateUI(event)
if (event is VideoRecordEvent.Finalize) {
// display the captured video
lifecycleScope.launch {
/*navController.navigate(
CaptureFragmentDirections.actionCaptureToVideoViewer(
event.outputResults.outputUri
)
)*/
}
}
}
/**
* Retrieve the asked camera's type(lens facing type). In this sample, only 2 types:
* idx is even number: CameraSelector.LENS_FACING_BACK
* odd number: CameraSelector.LENS_FACING_FRONT
*/
private fun getCameraSelector(idx: Int) : CameraSelector {
if (cameraCapabilities.size == 0) {
Log.i(TAG, "Error: This device does not have any camera, bailing out")
requireActivity().finish()
}
return (cameraCapabilities[idx % cameraCapabilities.size].camSelector)
}
data class CameraCapability(val camSelector: CameraSelector, val qualities:List<Quality>)
/**
* Query and cache this platform's camera capabilities, run only once.
*/
init {
enumerationDeferred = lifecycleScope.async {
whenCreated {
val provider = ProcessCameraProvider.getInstance(requireContext()).await()
provider.unbindAll()
for (camSelector in arrayOf(
CameraSelector.DEFAULT_BACK_CAMERA,
CameraSelector.DEFAULT_FRONT_CAMERA
)) {
try {
// just get the camera.cameraInfo to query capabilities
// we are not binding anything here.
if (provider.hasCamera(camSelector)) {
val camera = provider.bindToLifecycle(requireActivity(), camSelector)
QualitySelector
.getSupportedQualities(camera.cameraInfo)
.filter { quality ->
listOf(Quality.UHD, Quality.FHD, Quality.HD, Quality.SD)
.contains(quality)
}.also {
cameraCapabilities.add(CameraCapability(camSelector, it))
}
}
} catch (exc: java.lang.Exception) {
Log.e(TAG, "Camera Face $camSelector is not supported")
}
}
}
}
}
/**
* One time initialize for CameraFragment (as a part of fragment layout's creation process).
* This function performs the following:
* - initialize but disable all UI controls except the Quality selection.
* - set up the Quality selection recycler view.
* - bind use cases to a lifecycle camera, enable UI controls.
*/
private fun initCameraFragment() {
initializeUI()
Log.i("Camerx","insisde Init Camera")
viewLifecycleOwner.lifecycleScope.launch {
if (enumerationDeferred != null) {
enumerationDeferred!!.await()
enumerationDeferred = null
}
initializeQualitySectionsUI()
bindCaptureUsecase()
}
}
/**
* Initialize UI. Preview and Capture actions are configured in this function.
* Note that preview and capture are both initialized either by UI or CameraX callbacks
* (except the very first time, upon entering this fragment in onCreateView()).
*/
#SuppressLint("ClickableViewAccessibility", "MissingPermission")
private fun initializeUI() {
captureViewBinding.cameraButton.apply {
setOnClickListener {
cameraIndex = (cameraIndex + 1) % cameraCapabilities.size
// camera device change is in effect instantly:
// - reset quality selection
// - restart preview
qualityIndex = DEFAULT_QUALITY_IDX
initializeQualitySectionsUI()
enableUI(false)
viewLifecycleOwner.lifecycleScope.launch {
bindCaptureUsecase()
}
}
isEnabled = false
}
// audioEnabled by default is disabled.
captureViewBinding.audioSelection.isChecked = audioEnabled
captureViewBinding.audioSelection.setOnClickListener {
audioEnabled = captureViewBinding.audioSelection.isChecked
}
Log.i("Camerx","inside init UI")
// React to user touching the capture button
captureViewBinding.captureButton.apply {
setOnClickListener {
if (!this@VideoCaptureFragment::recordingState.isInitialized ||
recordingState is VideoRecordEvent.Finalize)
{
enableUI(false) // Our eventListener will turn on the Recording UI.
startRecording()
} else {
when (recordingState) {
is VideoRecordEvent.Start -> {
currentRecording?.pause()
captureViewBinding.stopButton.visibility = View.VISIBLE
}
is VideoRecordEvent.Pause -> currentRecording?.resume()
is VideoRecordEvent.Resume -> currentRecording?.pause()
else -> throw IllegalStateException("recordingState in unknown state")
}
}
}
isEnabled = false
}
captureViewBinding.stopButton.apply {
setOnClickListener {
// stopping: hide it after getting a click before we go to viewing fragment
captureViewBinding.stopButton.visibility = View.INVISIBLE
if (currentRecording == null || recordingState is VideoRecordEvent.Finalize) {
return@setOnClickListener
}
val recording = currentRecording
if (recording != null) {
recording.stop()
currentRecording = null
}
captureViewBinding.captureButton.setImageResource(R.drawable.ic_start)
}
// ensure the stop button is initialized disabled & invisible
visibility = View.INVISIBLE
isEnabled = false
}
captureLiveStatus.observe(viewLifecycleOwner) {
captureViewBinding.captureStatus.apply {
post { text = it }
}
}
captureLiveStatus.value = getString(R.string.Idle)
}
/**
* UpdateUI according to CameraX VideoRecordEvent type:
* - user starts capture.
* - this app disables all UI selections.
* - this app enables capture run-time UI (pause/resume/stop).
* - user controls recording with run-time UI, eventually tap "stop" to end.
* - this app informs CameraX recording to stop with recording.stop() (or recording.close()).
* - CameraX notify this app that the recording is indeed stopped, with the Finalize event.
* - this app starts VideoViewer fragment to view the captured result.
*/
private fun updateUI(event: VideoRecordEvent) {
val state = if (event is VideoRecordEvent.Status) recordingState.getNameString()
else event.getNameString()
when (event) {
is VideoRecordEvent.Status -> {
// placeholder: we update the UI with new status after this when() block,
// nothing needs to do here.
Log.i("Camerx","you're idk tbh")
}
is VideoRecordEvent.Start -> {
showUI(UiState.RECORDING, event.getNameString())
Log.i("Camerx","you're recording")
}
is VideoRecordEvent.Finalize-> {
showUI(UiState.FINALIZED, event.getNameString())
Log.i("Camerx","you're done, you're done")
}
is VideoRecordEvent.Pause -> {
captureViewBinding.captureButton.setImageResource(R.drawable.ic_resume)
Log.i("Camerx","you're paused")
}
is VideoRecordEvent.Resume -> {
captureViewBinding.captureButton.setImageResource(R.drawable.ic_pause)
Log.i("Camerx","you're resumed")
}
}
val stats = event.recordingStats
val size = stats.numBytesRecorded / 1000
val time = java.util.concurrent.TimeUnit.NANOSECONDS.toSeconds(stats.recordedDurationNanos)
var text = "${state}: recorded ${size}KB, in ${time}second"
if(event is VideoRecordEvent.Finalize)
text = "${text}\nFile saved to: ${event.outputResults.outputUri}"
captureLiveStatus.value = text
Log.i(TAG, "recording event: $text")
}
/**
* Enable/disable UI:
* User could select the capture parameters when recording is not in session
* Once recording is started, we need to disable the UI to avoid conflicts.
*/
private fun enableUI(enable: Boolean) {
Log.i("Camerx","insisde enableuia")
arrayOf(captureViewBinding.cameraButton,
captureViewBinding.captureButton,
captureViewBinding.stopButton,
captureViewBinding.audioSelection,
captureViewBinding.qualitySelection).forEach {
it.isEnabled = enable
}
// disable the camera button if no device to switch
if (cameraCapabilities.size <= 1) {
captureViewBinding.cameraButton.isEnabled = false
}
// disable the resolution list if no resolution to switch
if (cameraCapabilities[cameraIndex].qualities.size <= 1) {
captureViewBinding.qualitySelection.apply { isEnabled = false }
}
}
/**
* initialize UI for recording:
* - at recording: hide audio, qualitySelection,change camera UI; enable stop button
* - otherwise: show all except the stop button
*/
private fun showUI(state: UiState, status:String = "idle") {
Log.i("Camerx","insisde show ui")
captureViewBinding.let {
when(state) {
UiState.IDLE -> {
it.captureButton.setImageResource(R.drawable.ic_start)
it.stopButton.visibility = View.INVISIBLE
it.cameraButton.visibility= View.VISIBLE
it.audioSelection.visibility = View.VISIBLE
it.qualitySelection.visibility=View.VISIBLE
}
UiState.RECORDING -> {
it.cameraButton.visibility = View.INVISIBLE
it.audioSelection.visibility = View.INVISIBLE
it.qualitySelection.visibility = View.INVISIBLE
it.captureButton.setImageResource(R.drawable.ic_pause)
it.captureButton.isEnabled = true
it.stopButton.visibility = View.VISIBLE
it.stopButton.isEnabled = true
}
UiState.FINALIZED -> {
it.captureButton.setImageResource(R.drawable.ic_start)
it.stopButton.visibility = View.INVISIBLE
}
else -> {
val errorMsg = "Error: showUI($state) is not supported"
Log.e(TAG, errorMsg)
return
}
}
it.captureStatus.text = status
}
}
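// NOTE (assumption, restored from the CameraX sample): the snippet as posted
// never inflates the binding, so _captureViewBinding would stay null and the
// fragment would show nothing. The sample assigns it here in onCreateView().
override fun onCreateView(
    inflater: LayoutInflater,
    container: ViewGroup?,
    savedInstanceState: Bundle?
): View {
    _captureViewBinding = VideoCaptureFragmentBinding.inflate(inflater, container, false)
    return captureViewBinding.root
}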
override fun onViewCreated(view: View, savedInstanceState: Bundle?) {
super.onViewCreated(view, savedInstanceState)
initCameraFragment()
}
override fun onDestroyView() {
_captureViewBinding = null
super.onDestroyView()
}
companion object {
// default Quality selection if no input from UI
const val DEFAULT_QUALITY_IDX = 0
val TAG: String = VideoCaptureFragment::class.java.simpleName
private const val FILENAME_FORMAT = "yyyy-MM-dd-HH-mm-ss-SSS"
}
Thank you
Related
I want to avoid multiple function calls when a LaunchedEffect key triggers.
LaunchedEffect(key1 = isEnableState, key2 = viewModel.uiState) {
viewModel.scanState(bluetoothAdapter)
}
On the first composition, isEnableState and viewModel.uiState both trigger, so viewModel.scanState(bluetoothAdapter) gets called twice.
isEnableState is a Boolean and viewModel.uiState is a sealed class of UI types.
var uiState by mutableStateOf<UIState>(UIState.Initial)
private set
var isEnableState by mutableStateOf(false)
private set
So what is the idiomatic way to avoid the duplicate calls?
Thanks
UPDATE
ContentStateful
@Composable
fun ContentStateful(
context: Context = LocalContext.current,
viewModel: ContentViewModel = koinViewModel(),
) {
LaunchedEffect(key1 = viewModel.isEnableState, key2 = viewModel.uiState) {
viewModel.scanState(bluetoothAdapter)
}
LaunchedEffect(viewModel.previous) {
viewModel.changeDeviceSate()
}
ContentStateLess{
viewModel.isEnableState = false
}
}
ContentStateLess
@Composable
fun ContentStateLess(changeAction: () -> Unit) {
Button(onClick = { changeAction() }) {
Text(text = "Click On me")
}
}
ContentViewModel
class ContentViewModel : BaseViewModel() {
var uiState by mutableStateOf<UIState>(UIState.Initial)
var isEnableState by mutableStateOf(false)
fun scanState(bluetoothAdapter: BluetoothAdapter) {
if (isEnableState && isInitialOrScanningUiState()) {
// start scanning
} else {
// stop scanning
}
}
private fun isInitialOrScanningUiState(): Boolean {
return (uiState == UIState.Initial || uiState == UIState.ScanningDevice)
}
fun changeDeviceSate() {
if (previous == BOND_NONE && newState == BONDING) {
uiState = UIState.LoadingState
} else if (previous == BONDING && newState == BONDED) {
uiState = UIState.ConnectedState(it)
} else {
uiState = UIState.ConnectionFailedState
}
}
}
scanState function is start and stop scanning of devices.
I guess the answer below would work, or might require some modification to work. The logic for preventing double clicks can be used only if you wish to prevent actions that happen within a small time interval: to prevent double clicks, you store the current time and check that the elapsed time is above a threshold before invoking the click callback. In your situation, adding states with a delay might also solve the issue.
enum class LaunchState { IDLE, BUSY, READY }

var launchState by remember { mutableStateOf(LaunchState.IDLE) }
LaunchedEffect(key1 = isEnableState, key2 = viewModel.uiState) {
    if (launchState != LaunchState.BUSY) {
        viewModel.scanState(bluetoothAdapter)
        if (launchState == LaunchState.IDLE) { launchState = LaunchState.BUSY }
    }
}
LaunchedEffect(launchState) {
    if (launchState == LaunchState.BUSY) {
        delay(50)
        launchState = LaunchState.READY
    }
}
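Another commonly used pattern (not from the answer above) is to collect the state as a snapshotFlow and drop duplicate emissions, so scanState() only runs when the combined state actually changes. A minimal sketch, assuming the same viewModel and bluetoothAdapter as above:
LaunchedEffect(Unit) {
    // snapshotFlow re-emits only when the read state values change.
    snapshotFlow { viewModel.isEnableState to viewModel.uiState }
        .distinctUntilChanged()
        .collect { viewModel.scanState(bluetoothAdapter) }
}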
I'm developing an Android app for recognizing augmented images and displaying videos directly over them.
I have implemented everything and it's working well. The only issue is that the anchorNode model attached to the augmented image anchor isn't very stable and doesn't stay exactly within the image's frame.
I tried to follow some best practices for improving tracking and got better results, but there is one point I'm not understanding: "If your image will never move from its initial position (for example, a poster affixed to a wall), you can attach a global anchor to the image to increase tracking stability." Could someone explain how to do that, please?
It would also be great to get some tips for improving tracking even more.
Thanks in advance.
Here is the code I'm using for tracking and creating video anchors.
override fun onSessionConfiguration(session: Session?, config: Config?) {
Log.d("ttt","onSessionConfiguration")
this.session = session
// Disable plane detection
config!!.planeFindingMode = Config.PlaneFindingMode.DISABLED
// NOTE: Thread().run { } executes this block synchronously on the current
// thread (run is just a scope function here); the new Thread is never started.
Thread().run {
database = AugmentedImageDatabase(session).also {
for(fileName in requireContext().fileList().filter { it.contains("image.jpg") }){
val file = File(requireContext().filesDir,fileName)
if(file.exists()){
it.addImage(fileName.toString().split("-")[0],BitmapFactory.decodeFile(file.absolutePath),0.25f)
}
}
}
config.setAugmentedImageDatabase(database)
}
arFragment?.setOnAugmentedImageUpdateListener { augmentedImage ->
    onAugmentedImageTrackingUpdate(augmentedImage)
}
}
private fun onAugmentedImageTrackingUpdate(augmentedImage: AugmentedImage) {
attachVideo(augmentedImage)
}
private fun attachVideo(augmentedImage: AugmentedImage){
if (augmentedImage.trackingState == TrackingState.TRACKING
&& augmentedImage.trackingMethod == AugmentedImage.TrackingMethod.FULL_TRACKING){
if (!matrixDetected) {
matrixDetected = true
lastTracketedImageName = augmentedImage.name
anchorNode = AnchorNode(augmentedImage.createAnchor(augmentedImage.centerPose))
anchorNode!!.isSmoothed = true
texture = ExternalTexture().also {
it.surfaceTexture.setOnFrameAvailableListener {
texture!!.surfaceTexture.setOnFrameAvailableListener(null)
val model = ShapeFactory.makeCube(
Vector3(augmentedImage.extentX,0f,augmentedImage.extentZ),
Vector3(0f,0f,0f),
plainVideoMaterial
)
model.isShadowReceiver = false
model.isShadowCaster = false
val renderable = anchorNode!!.setRenderable(model)
renderable.material!!.setExternalTexture("videoTexture",texture)
fadeInVideo(renderable.material)
}
}
val videoFile = File(requireContext().filesDir,"${augmentedImage.name}-video.mp4")
mediaPlayer = MediaPlayer.create(requireContext(),Uri.fromFile(videoFile)).also {
it.isLooping = true
it.setSurface(texture!!.surface)
it.start()
}
arFragment!!.arSceneView.scene.addChild(anchorNode)
}
}else{
if(matrixDetected){
if(anchorNode != null){
arFragment!!.arSceneView.scene.removeChild(anchorNode!!)
anchorNode = null
}
viewLifecycleOwner.lifecycleScope.launch {
val result = viewModel.endArVideo(deviceID,augmentedImage.name)
if(result.isSuccessful){
Log.d("ttt","stop ar request success")
}else{
Log.d("ttt","stop ar request failed ${result.errorBody()}")
}
}
matrixDetected = false
}
mediaPlayer?.let {
it.setSurface(null)
it.stop()
it.release()
mediaPlayer = null
}
}
}
private fun fadeInVideo(material: Material) {
ValueAnimator.ofFloat(0f, 1f).apply {
duration = 500L
interpolator = LinearInterpolator()
addUpdateListener { v ->
material.setFloat("videoAlpha", v.animatedValue as Float)
}
}.start()
}
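Regarding the "global anchor" advice in the question: per the ARCore documentation, it means creating the anchor on the Session at the image's center pose, instead of on the AugmentedImage trackable, so the anchor stays fixed in world space and is no longer moved around by image tracking. A minimal sketch against the code above; imageIsFixedInPlace is a hypothetical flag for images (like wall posters) that never move:
val anchor = if (imageIsFixedInPlace) {
    // Global anchor: fixed in world space, not updated by image tracking.
    session!!.createAnchor(augmentedImage.centerPose)
} else {
    // Image-attached anchor, as in attachVideo() above.
    augmentedImage.createAnchor(augmentedImage.centerPose)
}
anchorNode = AnchorNode(anchor)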
I know this question has been asked quite often here, but none of the answers helped me.
I am writing a gallery app with a thumbnail-regeneration feature. In order to show progress, I added a ProgressBar that counts the number of created thumbnails. After each thumbnail is generated, I dispatch a Redux event and listen to it in my Fragment in order to update the ProgressBar.
Generating all thumbnails for all visible photos/videos
private fun onMenuRefreshThumbs(activity: Activity) {
val mediaPath = Redux.store.currentState.mediaPath
val fileRepository = FileRepository(context = activity, mediaPath = mediaPath)
activity.runOnUiThread {
fileRepository.regenerateThumbs(activity)
}
}
Functions inside the FileRepository used above:
fun regenerateThumbs(context: Context) {
val success = File(getAbsoluteThumbsDir(context, mediaPath)).deleteRecursively()
getMediaItems()
}
fun getMediaItems(): MediaItemList {
val success = File(thumbPath).mkdirs()
val isThumbsEmpty = File(thumbPath).listFiles().isNullOrEmpty()
val mediaFileList = File(mediaPath).listFiles().orEmpty()
    .sortedByDescending { it.lastModified() }
val list = MediaItemList()
mediaFileList.apply {
forEach {
list.add(MediaItem(it.name, 0, 0))
if (isThumbsEmpty) {
getOrCreateThumb(it)
Redux.store.dispatch(FileUpdateAction(it))
}
}
}
return list
}
Subscribing to Redux in the Fragment:
private fun subscribeRedux() {
val handler = Handler(Looper.getMainLooper())
val activity = requireActivity()
subscriber = { state: AppState ->
when (state.action) {
...
is ClearSelection -> {
progressCounter = 0
// fragment_gallery_progress.visibility = View.GONE
}
is FileUpdateAction -> {
Handler().post {
progressCounter++
fragment_gallery_progress.visibility = View.VISIBLE
fragment_gallery_progress.progress = progressCounter
// fragment_gallery_progress.invalidate()
log.d("test: Thumb Index $progressCounter ${state.action.mediaItem.name} was created")
}
Unit
}
}
}.apply {
Redux.store.subscribe(this)
}
}
I tried all the different ways of calling a thread in both cases. But no matter whether it's done with the Handler or with activity.runOnUiThread, the ProgressBar never changes until all thumbs are finished, and then it jumps from 0 to the maximum. I can see the logs being written at the right time, but the ProgressBar doesn't change.
I could fix my problem with the following steps (the root cause was that the thumbnail generation ran on the main thread, which blocked the UI from redrawing until the whole loop had finished):
Removing the runOnUiThread() call:
private fun onMenuRefreshThumbs(activity: Activity) {
val mediaPath = Redux.store.currentState.mediaPath
val fileRepository = FileRepository(context = activity, mediaPath = mediaPath)
fileRepository.regenerateThumbs(activity)
}
Adding a thread for each thumbnail generation:
fun getMediaItems(): MediaItemList {
val success = File(thumbPath).mkdirs()
val isThumbsEmpty = File(thumbPath).listFiles().isNullOrEmpty()
val mediaFileList = File(mediaPath).listFiles().orEmpty()
    .sortedByDescending { it.lastModified() }
val list = MediaItemList()
mediaFileList.apply {
forEach {
list.add(MediaItem(it.name, 0, 0))
if (isThumbsEmpty) {
Thread {
getOrCreateThumb(it)
Redux.store.dispatch(FileUpdateAction(it))
}.start()
}
}
...
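For what it's worth, a single worker thread would avoid spawning one thread per file while still keeping the main thread free to redraw. A minimal sketch of that hypothetical variant, reusing getOrCreateThumb and FileUpdateAction from the snippets above:
fun regenerateThumbsInBackground() {
    Thread {
        File(mediaPath).listFiles().orEmpty()
            .sortedByDescending { it.lastModified() }
            .forEach { file ->
                getOrCreateThumb(file) // heavy work stays off the main thread
                Redux.store.dispatch(FileUpdateAction(file)) // subscriber posts to the UI thread
            }
    }.start()
}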
I added CameraX to my ongoing development app a while ago. I knew it was in alpha, but I was ready to make the change when a beta or final release became available.
So I started working on it today. I have updated from
implementation 'androidx.camera:camera-core:1.0.0-alpha04'
implementation 'androidx.camera:camera-camera2:1.0.0-alpha04'
to this:
implementation 'androidx.camera:camera-core:1.0.0-beta01'
implementation 'androidx.camera:camera-camera2:1.0.0-beta01'
implementation 'androidx.camera:camera-lifecycle:1.0.0-beta01'
My Previous Working Code (alpha-04):
class ScannerX : AppCompatActivity() {
private lateinit var context: Context
var isOtpAuthCode = true
private val immersiveFlagTimeout = 500L
private val flagsFullscreen = View.SYSTEM_UI_FLAG_LOW_PROFILE or View.SYSTEM_UI_FLAG_FULLSCREEN or View.SYSTEM_UI_FLAG_LAYOUT_STABLE or
View.SYSTEM_UI_FLAG_IMMERSIVE_STICKY or View.SYSTEM_UI_FLAG_LAYOUT_HIDE_NAVIGATION or View.SYSTEM_UI_FLAG_HIDE_NAVIGATION
private var preview: Preview? = null
private var lensFacing = CameraX.LensFacing.BACK
private var imageAnalyzer: ImageAnalysis? = null
private lateinit var analyzerThread: HandlerThread
override fun onCreate(savedInstanceState: Bundle?) {
super.onCreate(savedInstanceState)
setContentView(R.layout.activity_scanner_x)
context = this
btnCancel.setOnClickListener {
finish()
}
analyzerThread = if (GoogleApiAvailability.getInstance().isGooglePlayServicesAvailable(context) == ConnectionResult.SUCCESS) {
HandlerThread("BarcodeFirebaseAnalyzer").apply { start() }
} else {
HandlerThread("BarcodeZxingAnalyzer").apply { start() }
}
Dexter.withActivity(this)
.withPermissions(Manifest.permission.CAMERA)
.withListener(object : MultiplePermissionsListener {
override fun onPermissionsChecked(report: MultiplePermissionsReport?) {
textureView.post {
val metrics = DisplayMetrics().also { textureView.display.getRealMetrics(it) }
val screenAspectRatio = Rational(metrics.widthPixels, metrics.heightPixels)
val previewConfig = PreviewConfig.Builder().apply {
setLensFacing(lensFacing)
// We request aspect ratio but no resolution to let CameraX optimize our use cases
setTargetAspectRatio(screenAspectRatio)
// Set initial target rotation, we will have to call this again if rotation changes
// during the lifecycle of this use case
setTargetRotation(textureView.display.rotation)
}.build()
val analyzerConfig = ImageAnalysisConfig.Builder().apply {
setLensFacing(lensFacing)
// Use a worker thread for image analysis to prevent preview glitches
setCallbackHandler(Handler(analyzerThread.looper))
// In our analysis, we care more about the latest image than analyzing *every* image
setImageReaderMode(ImageAnalysis.ImageReaderMode.ACQUIRE_LATEST_IMAGE)
// Set initial target rotation, we will have to call this again if rotation changes
// during the lifecycle of this use case
setTargetRotation(textureView.display.rotation)
}.build()
preview = AutoFitPreviewBuilder.build(previewConfig, textureView)
imageAnalyzer = ImageAnalysis(analyzerConfig).apply {
analyzer = if (GoogleApiAvailability.getInstance().isGooglePlayServicesAvailable(context) == ConnectionResult.SUCCESS) {
BarcodeFirebaseAnalyzer { qrCode ->
if (isOtpAuthCode) {
if (qrCode.startsWith("otpauth")) {
toAddAuth(qrCode)
}
} else {
toAddAuth(qrCode)
}
}
} else {
BarcodeZxingAnalyzer { qrCode ->
if (isOtpAuthCode) {
if (qrCode.startsWith("otpauth")) {
toAddAuth(qrCode)
}
} else {
toAddAuth(qrCode)
}
}
}
}
// Apply declared configs to CameraX using the same lifecycle owner
CameraX.bindToLifecycle(this@ScannerX, preview, imageAnalyzer)
}
}
override fun onPermissionRationaleShouldBeShown(permissions: MutableList<PermissionRequest>?, token: PermissionToken?) {
//
}
}).check()
}
override fun onStart() {
super.onStart()
// Before setting full screen flags, we must wait a bit to let UI settle; otherwise, we may
// be trying to set app to immersive mode before it's ready and the flags do not stick
textureView.postDelayed({
textureView.systemUiVisibility = flagsFullscreen
}, immersiveFlagTimeout)
}
override fun onDestroy() {
analyzerThread.quit()
super.onDestroy()
}
private fun toAddAuth(scannedCode: String) {
if (CameraX.isBound(imageAnalyzer)) {
CameraX.unbind(imageAnalyzer)
}
val intent = Intent()
intent.putExtra("scanResult", scannedCode)
setResult(RESULT_OK, intent)
finish()
}
companion object {
private const val RESULT_OK = 666
}
}
And the code I have changed is as follows (beta-01):
class ScannerX : AppCompatActivity() {
private lateinit var context: Context
var isOtpAuthCode = true
private val immersiveFlagTimeout = 500L
private val flagsFullscreen = View.SYSTEM_UI_FLAG_LOW_PROFILE or View.SYSTEM_UI_FLAG_FULLSCREEN or View.SYSTEM_UI_FLAG_LAYOUT_STABLE or
View.SYSTEM_UI_FLAG_IMMERSIVE_STICKY or View.SYSTEM_UI_FLAG_LAYOUT_HIDE_NAVIGATION or View.SYSTEM_UI_FLAG_HIDE_NAVIGATION
private var preview: Preview? = null
private var lensFacing = CameraSelector.DEFAULT_BACK_CAMERA
private var imageAnalyzer: ImageAnalysis? = null
private lateinit var analysisExecutor: ExecutorService
private lateinit var processCameraProvider: ListenableFuture<ProcessCameraProvider>
override fun onCreate(savedInstanceState: Bundle?) {
super.onCreate(savedInstanceState)
setContentView(R.layout.activity_scanner_x)
context = this
btnCancel.setOnClickListener {
finish()
}
Dexter.withActivity(this)
.withPermissions(Manifest.permission.CAMERA)
.withListener(object : MultiplePermissionsListener {
override fun onPermissionsChecked(report: MultiplePermissionsReport?) {
textureView.post {
analysisExecutor = Executors.newSingleThreadExecutor()
processCameraProvider = ProcessCameraProvider.getInstance(context)
preview = Preview.Builder()
.setTargetAspectRatio(AspectRatio.RATIO_16_9)
.setTargetRotation(textureView.display.rotation)
.build()
imageAnalyzer = ImageAnalysis.Builder()
.setBackpressureStrategy(ImageAnalysis.STRATEGY_KEEP_ONLY_LATEST)
.setTargetRotation(textureView.display.rotation)
.build()
if (GoogleApiAvailability.getInstance().isGooglePlayServicesAvailable(context) == ConnectionResult.SUCCESS) {
imageAnalyzer?.apply {
setAnalyzer(analysisExecutor, BarcodeFirebaseAnalyzer { qrCode ->
if (isOtpAuthCode) {
if (qrCode.startsWith("otpauth")) {
toAddAuth(qrCode)
}
} else {
toAddAuth(qrCode)
}
})
}
} else {
imageAnalyzer?.apply {
setAnalyzer(analysisExecutor, BarcodeZxingAnalyzer { qrCode ->
if (isOtpAuthCode) {
if (qrCode.startsWith("otpauth")) {
toAddAuth(qrCode)
}
} else {
toAddAuth(qrCode)
}
})
}
}
processCameraProvider.get().bindToLifecycle(this@ScannerX, lensFacing, imageAnalyzer)
}
}
override fun onPermissionRationaleShouldBeShown(permissions: MutableList<PermissionRequest>?, token: PermissionToken?) {
//
}
}).check()
}
override fun onStart() {
super.onStart()
// Before setting full screen flags, we must wait a bit to let UI settle; otherwise, we may
// be trying to set app to immersive mode before it's ready and the flags do not stick
textureView.postDelayed({
textureView.systemUiVisibility = flagsFullscreen
}, immersiveFlagTimeout)
}
override fun onDestroy() {
if (!analysisExecutor.isShutdown) {
analysisExecutor.shutdown()
}
super.onDestroy()
}
private fun toAddAuth(scannedCode: String) {
/*if (CameraX.isBound(imageAnalyzer)) {
CameraX.unbind(imageAnalyzer)
}*/
val intent = Intent()
intent.putExtra("scanResult", scannedCode)
setResult(RESULT_OK, intent)
finish()
}
companion object {
private const val RESULT_OK = 666
}
}
After I upgraded there were so many changes in the library that I can't make it work anymore.
I also can't use the Google-provided AutoFitPreviewBuilder class from the initial alpha release with this version. It wasn't strictly necessary even with alpha04: the only problem without it was that the camera view was stretched out a little, but scanning and analyzing worked properly.
/**
* Builder for [Preview] that takes in a [WeakReference] of the view finder and [PreviewConfig],
* then instantiates a [Preview] which automatically resizes and rotates reacting to config changes.
*/
class AutoFitPreviewBuilder private constructor(config: PreviewConfig, viewFinderRef: WeakReference<TextureView>) {
/** Public instance of preview use-case which can be used by consumers of this adapter */
val useCase: Preview
/** Internal variable used to keep track of the use case's output rotation */
private var bufferRotation: Int = 0
/** Internal variable used to keep track of the view's rotation */
private var viewFinderRotation: Int? = null
/** Internal variable used to keep track of the use-case's output dimension */
private var bufferDimens: Size = Size(0, 0)
/** Internal variable used to keep track of the view's dimension */
private var viewFinderDimens: Size = Size(0, 0)
/** Internal variable used to keep track of the view's display */
private var viewFinderDisplay: Int = -1
/** Internal reference of the [DisplayManager] */
private lateinit var displayManager: DisplayManager
/**
* We need a display listener for orientation changes that do not trigger a configuration
* change, for example if we choose to override config change in manifest or for 180-degree
* orientation changes.
*/
private val displayListener = object : DisplayManager.DisplayListener {
override fun onDisplayAdded(displayId: Int) = Unit
override fun onDisplayRemoved(displayId: Int) = Unit
override fun onDisplayChanged(displayId: Int) {
val viewFinder = viewFinderRef.get() ?: return
if (displayId == viewFinderDisplay) {
val display = displayManager.getDisplay(displayId)
val rotation = getDisplaySurfaceRotation(display)
updateTransform(viewFinder, rotation, bufferDimens, viewFinderDimens)
}
}
}
init {
// Make sure that the view finder reference is valid
val viewFinder = viewFinderRef.get() ?:
throw IllegalArgumentException("Invalid reference to view finder used")
// Initialize the display and rotation from texture view information
viewFinderDisplay = viewFinder.display.displayId
viewFinderRotation = getDisplaySurfaceRotation(viewFinder.display) ?: 0
// Initialize public use-case with the given config
useCase = Preview(config)
// Every time the view finder is updated, recompute layout
useCase.onPreviewOutputUpdateListener = Preview.OnPreviewOutputUpdateListener {
val viewFinderI = viewFinderRef.get() ?: return@OnPreviewOutputUpdateListener
Log.d(TAG, "Preview output changed. " +
"Size: ${it.textureSize}. Rotation: ${it.rotationDegrees}")
// To update the SurfaceTexture, we have to remove it and re-add it
val parent = viewFinderI.parent as ViewGroup
parent.removeView(viewFinderI)
parent.addView(viewFinderI, 0)
// Update internal texture
viewFinderI.surfaceTexture = it.surfaceTexture
// Apply relevant transformations
bufferRotation = it.rotationDegrees
val rotation = getDisplaySurfaceRotation(viewFinderI.display)
updateTransform(viewFinderI, rotation, it.textureSize, viewFinderDimens)
}
// Every time the provided texture view changes, recompute layout
viewFinder.addOnLayoutChangeListener { view, left, top, right, bottom, _, _, _, _ ->
val viewFinderII = view as TextureView
val newViewFinderDimens = Size(right - left, bottom - top)
Log.d(TAG, "View finder layout changed. Size: $newViewFinderDimens")
val rotation = getDisplaySurfaceRotation(viewFinderII.display)
updateTransform(viewFinderII, rotation, bufferDimens, newViewFinderDimens)
}
// Every time the orientation of device changes, recompute layout
// NOTE: This is unnecessary if we listen to display orientation changes in the camera
// fragment and call [Preview.setTargetRotation()] (like we do in this sample), which will
// trigger [Preview.OnPreviewOutputUpdateListener] with a new
// [PreviewOutput.rotationDegrees]. CameraX Preview use case will not rotate the frames for
// us, it will just tell us about the buffer rotation with respect to sensor orientation.
// In this sample, we ignore the buffer rotation and instead look at the view finder's
// rotation every time [updateTransform] is called, which gets triggered by
// [CameraFragment] display listener -- but the approach taken in this sample is not the
// only valid one.
displayManager = viewFinder.context
.getSystemService(Context.DISPLAY_SERVICE) as DisplayManager
displayManager.registerDisplayListener(displayListener, null)
// Remove the display listeners when the view is detached to avoid holding a reference to
// it outside of the Fragment that owns the view.
// NOTE: Even though using a weak reference should take care of this, we still try to avoid
// unnecessary calls to the listener this way.
viewFinder.addOnAttachStateChangeListener(object : View.OnAttachStateChangeListener {
override fun onViewAttachedToWindow(view: View?) =
displayManager.registerDisplayListener(displayListener, null)
override fun onViewDetachedFromWindow(view: View?) =
displayManager.unregisterDisplayListener(displayListener)
})
}
/** Helper function that fits a camera preview into the given [TextureView] */
private fun updateTransform(textureView: TextureView?, rotation: Int?, newBufferDimens: Size, newViewFinderDimens: Size) {
// This should not happen anyway, but now the linter knows
val textureViewI = textureView ?: return
if (rotation == viewFinderRotation &&
Objects.equals(newBufferDimens, bufferDimens) &&
Objects.equals(newViewFinderDimens, viewFinderDimens)) {
// Nothing has changed, no need to transform output again
return
}
if (rotation == null) {
// Invalid rotation - wait for valid inputs before setting matrix
return
} else {
// Update internal field with new inputs
viewFinderRotation = rotation
}
if (newBufferDimens.width == 0 || newBufferDimens.height == 0) {
// Invalid buffer dimens - wait for valid inputs before setting matrix
return
} else {
// Update internal field with new inputs
bufferDimens = newBufferDimens
}
if (newViewFinderDimens.width == 0 || newViewFinderDimens.height == 0) {
// Invalid view finder dimens - wait for valid inputs before setting matrix
return
} else {
// Update internal field with new inputs
viewFinderDimens = newViewFinderDimens
}
val matrix = Matrix()
Log.d(TAG, "Applying output transformation.\n" +
"View finder size: $viewFinderDimens.\n" +
"Preview output size: $bufferDimens\n" +
"View finder rotation: $viewFinderRotation\n" +
"Preview output rotation: $bufferRotation")
// Compute the center of the view finder
val centerX = viewFinderDimens.width / 2f
val centerY = viewFinderDimens.height / 2f
// Correct preview output to account for display rotation
matrix.postRotate(-viewFinderRotation!!.toFloat(), centerX, centerY)
// Buffers are rotated relative to the device's 'natural' orientation: swap width and height
val bufferRatio = bufferDimens.height / bufferDimens.width.toFloat()
val scaledWidth: Int
val scaledHeight: Int
// Match longest sides together -- i.e. apply center-crop transformation
if (viewFinderDimens.width > viewFinderDimens.height) {
scaledHeight = viewFinderDimens.width
scaledWidth = (viewFinderDimens.width * bufferRatio).roundToInt()
} else {
scaledHeight = viewFinderDimens.height
scaledWidth = (viewFinderDimens.height * bufferRatio).roundToInt()
}
// Compute the relative scale value
val xScale = scaledWidth / viewFinderDimens.width.toFloat()
val yScale = scaledHeight / viewFinderDimens.height.toFloat()
// Scale input buffers to fill the view finder
matrix.preScale(xScale, yScale, centerX, centerY)
// Finally, apply transformations to our TextureView
textureViewI.setTransform(matrix)
}
companion object {
private val TAG = AutoFitPreviewBuilder::class.java.simpleName
/** Helper function that gets the rotation of a [Display] in degrees */
fun getDisplaySurfaceRotation(display: Display?) = when(display?.rotation) {
Surface.ROTATION_0 -> 0
Surface.ROTATION_90 -> 90
Surface.ROTATION_180 -> 180
Surface.ROTATION_270 -> 270
else -> null
}
/**
* Main entry point for users of this class: instantiates the adapter and returns an instance
* of [Preview] which automatically adjusts in size and rotation to compensate for
* config changes.
*/
fun build(config: PreviewConfig, viewFinder: TextureView) =
AutoFitPreviewBuilder(config, WeakReference(viewFinder)).useCase
}
}
Please Help
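One likely gap in the beta01 code above: the Preview use case is built but never bound and never given a surface; bindToLifecycle() only receives imageAnalyzer, so nothing replaces what AutoFitPreviewBuilder did in alpha04. A minimal sketch of how the binding could look with a PreviewView (from the androidx.camera:camera-view artifact); previewView is a hypothetical replacement for textureView, and the surfaceProvider property follows the current API, which may differ slightly from beta01:
val cameraProviderFuture = ProcessCameraProvider.getInstance(context)
cameraProviderFuture.addListener({
    val cameraProvider = cameraProviderFuture.get()
    val preview = Preview.Builder()
        .setTargetRotation(previewView.display.rotation)
        .build()
        .also { it.setSurfaceProvider(previewView.surfaceProvider) } // replaces AutoFitPreviewBuilder
    cameraProvider.unbindAll()
    // Bind BOTH use cases; binding only the analyzer leaves the preview blank.
    cameraProvider.bindToLifecycle(this@ScannerX, lensFacing, preview, imageAnalyzer)
}, ContextCompat.getMainExecutor(context))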
How can I record the state (clicks) of two buttons over time, and then reproduce those clicks later? I think this could be realized by creating a .midi file and then opening and playing it back, or can it be done through performClick()?
Piano-type applications use this feature: you can record what you played on the piano in .midi or .mid format and then play it back.
example of "piano apps"
You can make a ClickRecorder that records each button's id and the delay since the previous click over a period of time.
data class Click(val id: Int, val timeMs: Long)
class ClickRecorder {
var isRecording = false
private var currentTime: Long = 0
private val clicks = mutableListOf<Click>()
// Recorded clicks are only exposed once recording has stopped.
fun getClicks(): List<Click>? = if (!isRecording) clicks else null
fun recordClick(id: Int) {
    if (isRecording) {
        // Store the delay relative to the previous click.
        val diff = System.currentTimeMillis() - currentTime
        clicks.add(Click(id, diff))
        currentTime = System.currentTimeMillis()
    }
}
fun start() {
    clicks.clear()
    currentTime = System.currentTimeMillis()
    isRecording = true
}
fun stop() {
    isRecording = false
}
}
Use the ClickRecorder instance in the following way:
val clickRecorder = ClickRecorder()
fun onRecordButtonClick(v: View) {
val btRecord = v as Button
// RECORD and STOP are assumed to be string label constants defined elsewhere.
when (btRecord.text.toString()) {
RECORD -> {
clickRecorder.start()
btRecord.text = STOP
}
STOP -> {
clickRecorder.stop()
btRecord.text = RECORD
}
}
}
fun onPianoButtonsClick(v: View) {
clickRecorder.recordClick(v.id)
}
After finishing recording, you can access the recorded clicks via ClickRecorder.getClicks(). To simulate the clicks you need a ClickPlayer:
class ClickPlayer {
    private val index = AtomicInteger(0)
    private val handler = Handler(Looper.getMainLooper()) // main-thread handler; the bare Handler() constructor is deprecated
    fun play(list: List<Click>, callback: (Click) -> Unit) {
        if (list.isEmpty()) return // guard against an empty recording
        index.set(0)
val runnable = object : Runnable {
override fun run() {
if (index.get() < list.size) {
val click = list[index.getAndIncrement()]
callback(click)
val nextIndex = index.get()
if (nextIndex < list.size) handler.postDelayed(this, list[nextIndex].timeMs)
}
}
}
handler.postDelayed(runnable, list[index.get()].timeMs)
}
}
Use the ClickPlayer in the following way:
fun playRecordedClicks(clickPlayer: ClickPlayer, clicks: List<Click>) {
clickPlayer.play(clicks) {
findViewById<Button>(it.id).performClick()
}
}
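As a follow-up on the .midi idea from the question: since Click is a plain data class, the recorded list can be persisted in any simple format and replayed later. A sketch using Android's built-in org.json (JSON here is an assumption, not actual MIDI):
import org.json.JSONArray
import org.json.JSONObject

// Serialize recorded clicks to a JSON string for storage.
fun clicksToJson(clicks: List<Click>): String {
    val array = JSONArray()
    clicks.forEach { array.put(JSONObject().put("id", it.id).put("timeMs", it.timeMs)) }
    return array.toString()
}

// Read clicks back for playback with ClickPlayer.
fun clicksFromJson(json: String): List<Click> {
    val array = JSONArray(json)
    return (0 until array.length()).map { i ->
        val obj = array.getJSONObject(i)
        Click(obj.getInt("id"), obj.getLong("timeMs"))
    }
}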