android: SurfaceTexture, camera frame wait timed out

I'm trying to use MediaCodec and MediaMuxer, and I've run into some trouble.
Here are the errors from logcat:
12-13 11:59:58.238: E/AndroidRuntime(23218): FATAL EXCEPTION: main
12-13 11:59:58.238: E/AndroidRuntime(23218): java.lang.RuntimeException: Unable to resume activity {com.brendon.cameratompeg/com.brendon.cameratompeg.CameraToMpeg}: java.lang.IllegalStateException: Can't stop due to wrong state.
12-13 11:59:58.238: E/AndroidRuntime(23218): at android.app.ActivityThread.performResumeActivity(ActivityThread.java:2918)
The code fails at "mStManager.awaitNewImage();", which is in the onResume function, and the logcat says "camera frame wait timed out".
mStManager is an instance of the class SurfaceTextureManager, and the "camera frame wait timed out" message comes from the awaitNewImage() function. I've added that class to my post.
Part of my code is like this (the onCreate and onResume functions):
@Override
protected void onCreate(Bundle savedInstanceState) {
// arbitrary but popular values
int encWidth = 640;
int encHeight = 480;
int encBitRate = 6000000; // 6 Mbps
Log.d(TAG, MIME_TYPE + " output " + encWidth + "x" + encHeight + " #" + encBitRate);
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_camera_to_mpeg);
prepareCamera(encWidth, encHeight);
prepareEncoder(encWidth, encHeight, encBitRate);
mInputSurface.makeCurrent();
prepareSurfaceTexture();
mCamera.startPreview();
}
@Override
public void onResume(){
try {
long startWhen = System.nanoTime();
long desiredEnd = startWhen + DURATION_SEC * 1000000000L;
SurfaceTexture st = mStManager.getSurfaceTexture();
int frameCount = 0;
while (System.nanoTime() < desiredEnd) {
// Feed any pending encoder output into the muxer.
drainEncoder(false);
// Switch up the colors every 15 frames. Besides demonstrating the use of
// fragment shaders for video editing, this provides a visual indication of
// the frame rate: if the camera is capturing at 15fps, the colors will change
// once per second.
if ((frameCount % 15) == 0) {
String fragmentShader = null;
if ((frameCount & 0x01) != 0) {
fragmentShader = SWAPPED_FRAGMENT_SHADER;
}
mStManager.changeFragmentShader(fragmentShader);
}
frameCount++;
// Acquire a new frame of input, and render it to the Surface. If we had a
// GLSurfaceView we could switch EGL contexts and call drawImage() a second
// time to render it on screen. The texture can be shared between contexts by
// passing the GLSurfaceView's EGLContext as eglCreateContext()'s share_context
// argument.
mStManager.awaitNewImage();
mStManager.drawImage();
// Set the presentation time stamp from the SurfaceTexture's time stamp. This
// will be used by MediaMuxer to set the PTS in the video.
if (VERBOSE) {
Log.d(TAG, "present: " +
((st.getTimestamp() - startWhen) / 1000000.0) + "ms");
}
mInputSurface.setPresentationTime(st.getTimestamp());
// Submit it to the encoder. The eglSwapBuffers call will block if the input
// is full, which would be bad if it stayed full until we dequeued an output
// buffer (which we can't do, since we're stuck here). So long as we fully drain
// the encoder before supplying additional input, the system guarantees that we
// can supply another frame without blocking.
if (VERBOSE) Log.d(TAG, "sending frame to encoder");
mInputSurface.swapBuffers();
}
// send end-of-stream to encoder, and drain remaining output
drainEncoder(true);
} catch(Exception e) {
Log.d(TAG, e.getMessage());
// release everything we grabbed
releaseCamera();
releaseEncoder();
releaseSurfaceTexture();
}
}
Here is the class in the code that is relevant to the error:
private static class SurfaceTextureManager
implements SurfaceTexture.OnFrameAvailableListener {
private SurfaceTexture mSurfaceTexture;
private CameraToMpeg.STextureRender mTextureRender;
private Object mFrameSyncObject = new Object(); // guards mFrameAvailable
private boolean mFrameAvailable;
/**
* Creates instances of TextureRender and SurfaceTexture.
*/
public SurfaceTextureManager() {
mTextureRender = new CameraToMpeg.STextureRender();
mTextureRender.surfaceCreated();
if (VERBOSE) Log.d(TAG, "textureID=" + mTextureRender.getTextureId());
mSurfaceTexture = new SurfaceTexture(mTextureRender.getTextureId());
// This doesn't work if this object is created on the thread that CTS started for
// these test cases.
//
// The CTS-created thread has a Looper, and the SurfaceTexture constructor will
// create a Handler that uses it. The "frame available" message is delivered
// there, but since we're not a Looper-based thread we'll never see it. For
// this to do anything useful, OutputSurface must be created on a thread without
// a Looper, so that SurfaceTexture uses the main application Looper instead.
//
// Java language note: passing "this" out of a constructor is generally unwise,
// but we should be able to get away with it here.
mSurfaceTexture.setOnFrameAvailableListener(this);
}
public void release() {
// this causes a bunch of warnings that appear harmless but might confuse someone:
// W BufferQueue: [unnamed-3997-2] cancelBuffer: BufferQueue has been abandoned!
//mSurfaceTexture.release();
mTextureRender = null;
mSurfaceTexture = null;
}
/**
* Returns the SurfaceTexture.
*/
public SurfaceTexture getSurfaceTexture() {
return mSurfaceTexture;
}
/**
* Replaces the fragment shader.
*/
public void changeFragmentShader(String fragmentShader) {
mTextureRender.changeFragmentShader(fragmentShader);
}
/**
* Latches the next buffer into the texture. Must be called from the thread that created
* the OutputSurface object.
*/
public void awaitNewImage() {
final int TIMEOUT_MS = 2500;
synchronized (mFrameSyncObject) {
while (!mFrameAvailable) {
try {
// Wait for onFrameAvailable() to signal us. Use a timeout to avoid
// stalling the test if it doesn't arrive.
mFrameSyncObject.wait(TIMEOUT_MS);
if (!mFrameAvailable) {
// TODO: if "spurious wakeup", continue while loop
throw new RuntimeException("Camera frame wait timed out");
}
} catch (InterruptedException ie) {
// shouldn't happen
throw new RuntimeException(ie);
}
}
mFrameAvailable = false;
}
// Latch the data.
mTextureRender.checkGlError("before updateTexImage");
mSurfaceTexture.updateTexImage();
}
/**
* Draws the data from SurfaceTexture onto the current EGL surface.
*/
public void drawImage() {
mTextureRender.drawFrame(mSurfaceTexture);
}
@Override
public void onFrameAvailable(SurfaceTexture st) {
if (VERBOSE) Log.d(TAG, "new frame available");
synchronized (mFrameSyncObject) {
if (mFrameAvailable) {
throw new RuntimeException("mFrameAvailable already set, frame could be dropped");
}
mFrameAvailable = true;
mFrameSyncObject.notifyAll();
}
}
}
Does anyone have any ideas? Thank you!

I encountered this issue as well. The reason is that your code is running on a thread that has a Looper. You have to make sure the code runs on a thread that does not have a Looper: if it does, SurfaceTexture.OnFrameAvailableListener will deliver the "frame available" message to that waiting thread rather than posting it to the Handler on the main thread, and since the waiting thread is blocked, you'll get stuck.
Bigflake's examples describe this in detail:
/**
* Wraps testEditVideo, running it in a new thread. Required because of the way
* SurfaceTexture.OnFrameAvailableListener works when the current thread has a Looper
* configured.
*/
private static class VideoEditWrapper implements Runnable {
private Throwable mThrowable;
private DecodeEditEncodeTest mTest;
private VideoEditWrapper(DecodeEditEncodeTest test) {
mTest = test;
}
@Override
public void run() {
try {
mTest.videoEditTest();
} catch (Throwable th) {
mThrowable = th;
}
}
/** Entry point. */
public static void runTest(DecodeEditEncodeTest obj) throws Throwable {
VideoEditWrapper wrapper = new VideoEditWrapper(obj);
Thread th = new Thread(wrapper, "codec test");
th.start();
th.join();
if (wrapper.mThrowable != null) {
throw wrapper.mThrowable;
}
}
}

As Florian correctly explained, the issue is that your code is running on a thread that has a looper. You need to make sure that the code is running on a thread that does not have a looper.
The way I solved it was by modifying the setup() method in OutputSurface and ensuring that setOnFrameAvailableListener() is attached to a separate HandlerThread.
Here is the code:
class OutputSurface implements SurfaceTexture.OnFrameAvailableListener {
private static final String TAG = "OutputSurface";
private static final boolean VERBOSE = false;
private EGLDisplay mEGLDisplay = EGL14.EGL_NO_DISPLAY;
private EGLContext mEGLContext = EGL14.EGL_NO_CONTEXT;
private EGLSurface mEGLSurface = EGL14.EGL_NO_SURFACE;
private SurfaceTexture mSurfaceTexture;
private Surface mSurface;
private Object mFrameSyncObject = new Object();
private boolean mFrameAvailable;
private TextureRender mTextureRender;
private HandlerThread mHandlerThread;
private Handler mHandler;
public OutputSurface(int width, int height) {
if (width <= 0 || height <= 0) {
throw new IllegalArgumentException();
}
eglSetup(width, height);
makeCurrent();
setup();
}
public OutputSurface() {
setup();
}
private void setup() {
mTextureRender = new TextureRender();
mTextureRender.surfaceCreated();
mHandlerThread = new HandlerThread("callback-thread");
mHandlerThread.start();
mHandler = new Handler(mHandlerThread.getLooper());
// Even if we don't access the SurfaceTexture after the constructor returns, we
// still need to keep a reference to it. The Surface doesn't retain a reference
// at the Java level, so if we don't either then the object can get GCed, which
// causes the native finalizer to run.
if (VERBOSE) Log.d(TAG, "textureID=" + mTextureRender.getTextureId());
mSurfaceTexture = new SurfaceTexture(mTextureRender.getTextureId());
// This doesn't work if OutputSurface is created on the thread that CTS started for
// these test cases.
//
// The CTS-created thread has a Looper, and the SurfaceTexture constructor will
// create a Handler that uses it. The "frame available" message is delivered
// there, but since we're not a Looper-based thread we'll never see it. For
// this to do anything useful, OutputSurface must be created on a thread without
// a Looper, so that SurfaceTexture uses the main application Looper instead.
//
// Java language note: passing "this" out of a constructor is generally unwise,
// but we should be able to get away with it here.
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) {
mSurfaceTexture.setOnFrameAvailableListener(this, mHandler);
} else {
mSurfaceTexture.setOnFrameAvailableListener(this);
}
mSurface = new Surface(mSurfaceTexture);
}
}
The rest of the OutputSurface class can remain the same.
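One thing to watch (my addition, not part of the original answer): setup() now starts a HandlerThread, so it should be quit when the surface is torn down or the callback thread will leak. A minimal sketch of a matching release() method, modeled on the CTS original (EGL teardown omitted):
public void release() {
    if (mSurface != null) {
        mSurface.release();
        mSurface = null;
    }
    if (mSurfaceTexture != null) {
        mSurfaceTexture.release();
        mSurfaceTexture = null;
    }
    if (mHandlerThread != null) {
        mHandlerThread.quitSafely(); // stops the callback thread (API 18+)
        mHandlerThread = null;
        mHandler = null;
    }
    mTextureRender = null;
}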

Related

WebRTC cannot record screen

I'm trying to make a screen sharing app using WebRTC. I have code that can get and share a video stream from the camera. I need to modify it to instead get video via the MediaProjection API. Based on this post I have modified my code to use org.webrtc.ScreenCapturerAndroid, but there is no video output shown, only a black screen. If I use the camera, everything works fine (I can see the camera output on screen). Could someone please check my code and maybe point me in the right direction? I have been stuck on this for three days already.
Here is my code:
public class MainActivity extends AppCompatActivity {
private static final String TAG = "VIDEO_CAPTURE";
private static final int CAPTURE_PERMISSION_REQUEST_CODE = 1;
private static final String VIDEO_TRACK_ID = "video_stream";
PeerConnectionFactory peerConnectionFactory;
SurfaceViewRenderer localVideoView;
ProxyVideoSink localSink;
VideoSource videoSource;
VideoTrack localVideoTrack;
EglBase rootEglBase;
boolean camera = false;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
rootEglBase = EglBase.create();
localVideoView = findViewById(R.id.local_gl_surface_view);
localVideoView.init(rootEglBase.getEglBaseContext(), null);
startScreenCapture();
}
@TargetApi(21)
private void startScreenCapture() {
MediaProjectionManager mMediaProjectionManager = (MediaProjectionManager) getApplication().getSystemService(Context.MEDIA_PROJECTION_SERVICE);
startActivityForResult(mMediaProjectionManager.createScreenCaptureIntent(), CAPTURE_PERMISSION_REQUEST_CODE);
}
@Override
public void onActivityResult(int requestCode, int resultCode, Intent data) {
if (requestCode != CAPTURE_PERMISSION_REQUEST_CODE) { return; }
start(data);
}
private void start(Intent permissionData) {
//Initialize PeerConnectionFactory globals.
PeerConnectionFactory.InitializationOptions initializationOptions =
PeerConnectionFactory.InitializationOptions.builder(this)
.setEnableVideoHwAcceleration(true)
.createInitializationOptions();
PeerConnectionFactory.initialize(initializationOptions);
//Create a new PeerConnectionFactory instance - using Hardware encoder and decoder.
PeerConnectionFactory.Options options = new PeerConnectionFactory.Options();
DefaultVideoEncoderFactory defaultVideoEncoderFactory = new DefaultVideoEncoderFactory(
rootEglBase.getEglBaseContext(), true,true);
DefaultVideoDecoderFactory defaultVideoDecoderFactory = new DefaultVideoDecoderFactory(rootEglBase.getEglBaseContext());
peerConnectionFactory = PeerConnectionFactory.builder()
.setOptions(options)
.setVideoDecoderFactory(defaultVideoDecoderFactory)
.setVideoEncoderFactory(defaultVideoEncoderFactory)
.createPeerConnectionFactory();
VideoCapturer videoCapturerAndroid;
if (camera) {
videoCapturerAndroid = createCameraCapturer(new Camera1Enumerator(false));
} else {
videoCapturerAndroid = new ScreenCapturerAndroid(permissionData, new MediaProjection.Callback() {
@Override
public void onStop() {
super.onStop();
Log.e(TAG, "user has revoked permissions");
}
});
}
videoSource = peerConnectionFactory.createVideoSource(videoCapturerAndroid);
DisplayMetrics metrics = new DisplayMetrics();
MainActivity.this.getWindowManager().getDefaultDisplay().getRealMetrics(metrics);
videoCapturerAndroid.startCapture(metrics.widthPixels, metrics.heightPixels, 30);
localVideoTrack = peerConnectionFactory.createVideoTrack(VIDEO_TRACK_ID, videoSource);
localVideoTrack.setEnabled(true);
//localVideoTrack.addRenderer(new VideoRenderer(localRenderer));
localSink = new ProxyVideoSink().setTarget(localVideoView);
localVideoTrack.addSink(localSink);
}
//find first camera, this works without problem
private VideoCapturer createCameraCapturer(CameraEnumerator enumerator) {
final String[] deviceNames = enumerator.getDeviceNames();
// First, try to find front facing camera
Logging.d(TAG, "Looking for front facing cameras.");
for (String deviceName : deviceNames) {
if (enumerator.isFrontFacing(deviceName)) {
Logging.d(TAG, "Creating front facing camera capturer.");
VideoCapturer videoCapturer = enumerator.createCapturer(deviceName, null);
if (videoCapturer != null) {
return videoCapturer;
}
}
}
// Front facing camera not found, try something else
Logging.d(TAG, "Looking for other cameras.");
for (String deviceName : deviceNames) {
if (!enumerator.isFrontFacing(deviceName)) {
Logging.d(TAG, "Creating other camera capturer.");
VideoCapturer videoCapturer = enumerator.createCapturer(deviceName, null);
if (videoCapturer != null) {
return videoCapturer;
}
}
}
return null;
}
}
ProxyVideoSink
public class ProxyVideoSink implements VideoSink {
private VideoSink target;
synchronized ProxyVideoSink setTarget(VideoSink target) { this.target = target; return this; }
@Override
public void onFrame(VideoFrame videoFrame) {
if (target == null) {
Log.w("VideoSink", "Dropping frame in proxy because target is null.");
return;
}
target.onFrame(videoFrame);
}
}
In logcat I can see that some frames are rendered, but nothing is shown (black screen):
06-18 17:42:44.750 11357-11388/com.archona.webrtcscreencapturetest I/org.webrtc.Logging: EglRenderer: local_gl_surface_viewDuration: 4000 ms. Frames received: 117. Dropped: 0. Rendered: 117. Render fps: 29.2. Average render time: 4754 μs. Average swapBuffer time: 2913 μs.
06-18 17:42:48.752 11357-11388/com.archona.webrtcscreencapturetest I/org.webrtc.Logging: EglRenderer: local_gl_surface_viewDuration: 4001 ms. Frames received: 118. Dropped: 0. Rendered: 118. Render fps: 29.5. Average render time: 5015 μs. Average swapBuffer time: 3090 μs.
I'm using the latest version of the WebRTC library: implementation 'org.webrtc:google-webrtc:1.0.23546'.
My device has API level 24 (Android 7.0), but I have tested this code on 3 different devices with different API levels, so I don't suspect a device-specific problem.
I have tried building another app that uses the MediaProjection API (without WebRTC) and I can see the correct output inside a SurfaceView.
I have tried downgrading the WebRTC library, but nothing seems to work.
Thanks for any help.
I faced the same issue using WebRTC library org.webrtc:google-webrtc:1.0.22672 on an Android 7.0 device. Video calls worked fine; the issue was with screen sharing, which always showed a black screen.
Then I added the following:
peerConnectionFactory.setVideoHwAccelerationOptions(rootEglBase.getEglBaseContext(), rootEglBase.getEglBaseContext());
Now it is working perfectly.
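For context, a sketch of where that call would sit in the question's start() method. setVideoHwAccelerationOptions existed on PeerConnectionFactory in the library versions this question uses (it was removed in later releases), and here it hands the shared EGL context to the factory so the texture frames that ScreenCapturerAndroid produces can go through the hardware video paths:
peerConnectionFactory = PeerConnectionFactory.builder()
        .setOptions(options)
        .setVideoDecoderFactory(defaultVideoDecoderFactory)
        .setVideoEncoderFactory(defaultVideoEncoderFactory)
        .createPeerConnectionFactory();
// Share the EGL context used by the SurfaceViewRenderer with the factory.
peerConnectionFactory.setVideoHwAccelerationOptions(
        rootEglBase.getEglBaseContext(),   // local render context
        rootEglBase.getEglBaseContext());  // remote render context
videoSource = peerConnectionFactory.createVideoSource(videoCapturerAndroid);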

Eliminating volume envelope retrigger clicks - Jsyn on Android

I am looking for ideas on how to handle envelope re-triggering of new notes in a monophonic sampler setup, which causes clicks if the previous note's envelope hasn't finished. In the current setup the previous note's instance is killed on the spot when a new note is triggered (the synth.stop() call), causing a click because the envelope doesn't get a chance to finish and reach zero volume. Any hints are welcome.
I have also added in the code below my own unsatisfactory solution: setting the gain of the voice to 0 and then putting the voice to sleep for 70 ms. This introduces 70 ms of latency to the user interaction but gets rid of any clicks. Any value below 70 ms in the sleep doesn't stop the clicking.
The variables are public static at the moment just so I can still play around with where I'm calling them.
Here is my listener code:
buttonNoteC1Get.setOnTouchListener(new View.OnTouchListener() {
@Override
public boolean onTouch(View v, MotionEvent event) {
if (event.getAction() == MotionEvent.ACTION_UP) {
buttonNoteC1Get.setBackgroundColor(myColorWhite); // reset gui color
if (sample.getSustainBegin() > 0) { // trigger release for looping sample
ampEnv.dataQueue.queue(ampEnvelope, 3, 1); // release called
}
limit = 0; // reset action down limiter
return true;
}
if (limit == 0) { // respond only to first touch event
if (samplePlayer != null) { // check if a previous note exists
synth.stop(); // stop instance of previous note
}
buttonNoteC1Get.setBackgroundColor(myColorGrey); // key pressed gui color
samplePitch = octave * 1; // set samplerate multiplier
Sampler.player(); // call setup code for new note
Sampler.play(); // play new note
limit = 1; // prevent stacking of action down touch events
}
return false;
}
}); // end listener
Here is my Sampler code:
public class Sampler {
public static VariableRateDataReader samplePlayer;
public static LineOut lineOut;
public static FloatSample sample;
public static SegmentedEnvelope ampEnvelope;
public static VariableRateMonoReader ampEnv;
public static MixerMonoRamped mixerMono;
public static double[] ampData;
public static FilterStateVariable mMainFilter;
public static Synthesizer synth = JSyn.createSynthesizer(new JSynAndroidAudioDevice());
// load the chosen sample, called by instrument select spinner
static void loadSample(){
SampleLoader.setJavaSoundPreferred(false);
try {
sample = SampleLoader.loadFloatSample(sampleFile);
} catch (IOException e) {
e.printStackTrace();
}
} // end load sample
// initialize sampler voice
static void player() {
// Create an amplitude envelope and fill it with data.
ampData = new double[] {
envA, 0.9, // pair 0, "attack"
envD, envS, // pair 1, "decay"
0, envS, // pair 2, "sustain"
envR, 0.0, // pair 3, "release"
/* 0.04, 0.0 // pair 4, "silence" */
};
// initialize voice
ampEnvelope = new SegmentedEnvelope(ampData);
synth.add(ampEnv = new VariableRateMonoReader());
synth.add(lineOut = new LineOut());
synth.add(mixerMono = new MixerMonoRamped(2));
synth.add(mMainFilter = new FilterStateVariable());
// connect signal flow
mixerMono.output.connect(mMainFilter.input);
mMainFilter.output.connect(0, lineOut.input, 0);
mMainFilter.output.connect(0, lineOut.input, 1);
// set control values
mixerMono.amplitude.set(sliderVal / 100.0f);
mMainFilter.amplitude.set(0.9);
mMainFilter.frequency.set(mainFilterCutFloat);
mMainFilter.resonance.set(mainFilterResFloat);
// initialize and connect sampler voice
if (sample.getChannelsPerFrame() == 1) {
synth.add(samplePlayer = new VariableRateMonoReader());
ampEnv.output.connect(samplePlayer.amplitude);
samplePlayer.output.connect(0, mixerMono.input, 0);
samplePlayer.output.connect(0, mixerMono.input, 1);
} else if (sample.getChannelsPerFrame() == 2) {
synth.add(samplePlayer = new VariableRateStereoReader());
ampEnv.output.connect(samplePlayer.amplitude);
samplePlayer.output.connect(0, mixerMono.input, 0);
samplePlayer.output.connect(1, mixerMono.input, 1);
} else {
throw new RuntimeException("Can only play mono or stereo samples.");
}
} // end player
// play the sample
public static void play() {
if (samplePlayer != null)
{samplePlayer.dataQueue.clear();
samplePlayer.rate.set(sample.getFrameRate() * samplePitch); // set pitch
}
// start the synth engine
synth.start();
lineOut.start();
ampEnv.start();
// play one shot sample
if (sample.getSustainBegin() < 0) {
samplePlayer.dataQueue.queue(sample);
ampEnv.dataQueue.queue( ampEnvelope );
// play sustaining sample
} else {
samplePlayer.dataQueue.queueOn(sample);
ampEnv.dataQueue.queue( ampEnvelope, 0,3);
ampEnv.dataQueue.queueLoop( ampEnvelope, 1, 2 );
}
} }
My unsatisfactory solution that introduces 70 ms of latency, changing the action-down listener's handling of a previous note to this:
if (limit == 0) {
if (samplePlayer != null) {
mixerMono.amplitude.set(0);
try {
synth.sleepFor(0.07);
synth.stop(); // stop instance of previous note
}catch (InterruptedException e) {
e.printStackTrace();
}
}
You should not call synth.start() and synth.stop() for every note. Think of it like powering on a physical synthesizer: just start the synth and the lineOut once. If the ampEnv is connected, directly or indirectly, to something else that is start()ed, then you do not need to start() the ampEnv.
Then just queue your samples and envelopes when you want to start a note.
When you are all done playing notes then call synth.stop().
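A minimal sketch of that restructuring, reusing the field names from the question's Sampler class (the startEngine()/shutdownEngine() split and names are my own, not JSyn API):
// Call once, e.g. at startup after player() has built the voice.
static void startEngine() {
    synth.start();
    lineOut.start();
}

// Per note: no synth.stop()/synth.start(), just re-queue on the running voice.
public static void play() {
    samplePlayer.dataQueue.clear();
    samplePlayer.rate.set(sample.getFrameRate() * samplePitch); // set pitch
    if (sample.getSustainBegin() < 0) {
        samplePlayer.dataQueue.queue(sample);           // one-shot sample
        ampEnv.dataQueue.queue(ampEnvelope);
    } else {
        samplePlayer.dataQueue.queueOn(sample);         // sustaining sample
        ampEnv.dataQueue.queue(ampEnvelope, 0, 3);
        ampEnv.dataQueue.queueLoop(ampEnvelope, 1, 2);
    }
}

// Call only when the app is completely done playing notes.
static void shutdownEngine() {
    synth.stop();
}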

is it safe to use ThreadPoolExecutor inside AsyncTask doInBackground

I have to download a JSON file with a list of files, and then download the files in the list in parallel. I would like to update the ProgressDialog periodically, so I implemented it this way:
I create and show the dialog
I start an AsyncTask
onProgressUpdate receives 2 Integers, current progress and max progress, and updates the progress bar
doInBackground
downloads the json file and obtains the list of files to download
creates a ThreadPoolExecutor (tpe), with a LinkedBlockingQueue<Runnable>
submits a runnable for each file, which downloads the file to disk using Apache commons-io FileUtils.copyURLToFile
calls shutdown on the executor
in a while loop, tpe.awaitTermination(1, TimeUnit.SECONDS) periodically invokes publishProgress((int) tpe.getCompletedTaskCount(), tot) to update the progress bar
onPostExecute hides and dismisses the progress bar, and manages the downloaded files
Is there any problem in using a ThreadPoolExecutor inside an AsyncTask?
I am discussing this with a colleague who claims that there could be problems in the thread management, that it could deadlock, and that it might give us problems on future versions.
Here's the code:
public static void syncFiles(...)
{
PowerManager pm = (PowerManager) context.getSystemService(Context.POWER_SERVICE);
sWakelock = pm.newWakeLock(PowerManager.PARTIAL_WAKE_LOCK, TAG);
sWakelock.acquire();
sProgress = new ProgressDialog(context);
sProgress.setCancelable(false);
sProgress.setTitle("MyTitle");
sProgress.setMessage("Sincronizzazione in corso");
sProgress.setProgressStyle(ProgressDialog.STYLE_HORIZONTAL);
sProgress.setIndeterminate(false);
sProgress.show();
sCurrentTask = new AsyncTask<Void, Integer, Manifest>()
{
@Override
protected void onCancelled()
{
if ((sProgress != null) && sProgress.isShowing())
sProgress.dismiss();
if ((sWakelock != null) && sWakelock.isHeld())
sWakelock.release();
};
@Override
protected Manifest doInBackground(Void... params)
{
ArrayList<String> files = getFiles(...); // download the json file and return the list of files
final String baseurl = ... // get the remote base url
final String baselocal = ... //get the local base path ;
int tot = m.size();
publishProgress(0, tot);
final int MAX_THREADS = Runtime.getRuntime().availableProcessors() * 4;
ThreadPoolExecutor tpe = new ThreadPoolExecutor(
MAX_THREADS,
MAX_THREADS,
1,
TimeUnit.MINUTES,
new LinkedBlockingQueue<Runnable>()
);
for (final String s: files)
{
tpe.submit(new Runnable()
{
#Override
public void run()
{
try
{
URL remoteUrl = new URL(baseurl + s);
File localUrl = new File(baselocal, s);
FileUtils.copyURLToFile(remoteUrl, localUrl, 60000, 60000);
Log.w(TAG, "Downloaded " + localUrl.getAbsolutePath() + " in " + remoteUrl);
} catch (Exception e)
{
e.printStackTrace();
Log.e(TAG, "download error " + e);
// error management logic
}
}
});
}
tpe.shutdown();
int num = 0;
publishProgress(num, tot);
try
{
while (!tpe.awaitTermination(1, TimeUnit.SECONDS))
{
int n = (int) tpe.getCompletedTaskCount();
Log.w(TAG, "COUTN: " + n + "/" + tot);
if (n != num)
{
num = n;
publishProgress(num, tot);
}
}
} catch (InterruptedException e)
{
// TODO Auto-generated catch block
e.printStackTrace();
}
return m;
}
protected void onProgressUpdate(Integer... prog)
{
if (sProgress.getMax() != prog[1]) {
sProgress.setMax(prog[1]);
}
sProgress.setProgress(prog[0]);
}
@Override
protected void onPostExecute(Manifest result)
{
sWakelock.release();
sProgress.hide();
sProgress.dismiss();
// manage results
}
}.execute();
}
If you check out the implementation of AsyncTask, you will find that AsyncTask itself has a thread pool, so it will start the task on a separate thread. Actually, when you call .execute() to start the background task, it can be run with THREAD_POOL_EXECUTOR (via executeOnExecutor) to allow multiple tasks to run in parallel on a pool of threads managed by AsyncTask. So why do you need to implement another one?
Update
Read about executeOnExecutor; maybe this can help you. The documentation clearly says that allowing multiple tasks to run in parallel from a thread pool is generally not what one wants, because the order of their operation is not defined. But here you want to download files, and I don't think the order is important, so in my view you can use it and it won't create any issue.
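For reference, a minimal sketch of running the per-file downloads in parallel with executeOnExecutor (DownloadTask is a hypothetical AsyncTask subclass standing in for the download logic):
// One task per file on AsyncTask's shared thread pool
// instead of the default serial executor.
for (String file : files) {
    new DownloadTask().executeOnExecutor(AsyncTask.THREAD_POOL_EXECUTOR, file);
}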

Repeat AsyncTask

I have a question about the possibility of repeating an AsyncTask in an Android application. I would like to repeat some operations, the download of a file from a server for example, n times if for some reason the download fails. Is there a quick way to do this?
You cannot repeat an AsyncTask but you could repeat the operations it executes.
I've made this little helper class that you might want to extend in place of AsyncTask. The only big differences are that you will use repeatInBackground instead of doInBackground, and that onPostExecute will have a new parameter, the eventual Exception thrown.
Anything inside repeatInBackground will be repeated automatically until the result is non-null and no exception is thrown, for at most maxTries attempts.
The last exception thrown inside the loop will be passed to onPostExecute(Result, Exception).
You can set max tries using the RepeatableAsyncTask(int retries) constructor.
public abstract class RepeatableAsyncTask<A, B, C> extends AsyncTask<A, B, C> {
private static final String TAG = "RepeatableAsyncTask";
public static final int DEFAULT_MAX_RETRY = 5;
private int mMaxRetries = DEFAULT_MAX_RETRY;
private Exception mException = null;
/**
* Default constructor
*/
public RepeatableAsyncTask() {
super();
}
/**
* Constructs an AsyncTask that will repeat itself for up to the given max retries.
* @param retries Max retries.
*/
public RepeatableAsyncTask(int retries) {
super();
mMaxRetries = retries;
}
/**
* Will be repeated for max retries while the result is null or an exception is thrown.
* @param inputs Same as AsyncTask's
* @return Same as AsyncTask's
*/
protected abstract C repeatInBackground(A...inputs);
@Override
protected final C doInBackground(A...inputs) {
int tries = 0;
C result = null;
/* This is the main loop, repeatInBackground will be repeated until result will not be null */
while(tries++ < mMaxRetries && result == null) {
try {
result = repeatInBackground(inputs);
} catch (Exception exception) {
/* If you want to log the exception every time, do it here. */
mException = exception;
}
}
return result;
}
/**
* Like onPostExecute, but also receives the eventual Exception thrown.
* @param c Result, same as AsyncTask's.
* @param exception Exception thrown in the loop, which may be set even if the result is not null.
*/
protected abstract void onPostExecute(C c, Exception exception);
@Override
protected final void onPostExecute(C c) {
super.onPostExecute(c);
onPostExecute(c, mException);
}
}
You cannot reuse the same AsyncTask object because, according to the AsyncTask docs:
The task can be executed only once (an exception will be thrown if a second execution is attempted.)
But you can create as many new objects of that class as you need inside a loop. However, a better way would be to do the download operation n times inside your doInBackground().
If this doesn't answer your question, then please be more specific about your problem.
I did it this way: it tries repeatedly until tries == MAX_RETRY or the result is not null. Slightly modified code from the accepted answer, which works better for me:
private class RssReaderTask extends AsyncTask<String, Void, ArrayList<RssItem>> {
// max number of tries when something is wrong
private static final int MAX_RETRY = 3;
@Override
protected ArrayList<RssItem> doInBackground(String... params) {
ArrayList<RssItem> result = null;
int tries = 0;
while(tries++ < MAX_RETRY && result == null) {
try {
Log.i("RssReaderTask", "********** doInBackground: Processing... Trial: " + tries);
URL url = new URL(params[0]);
RssFeed feed = RssReader.read(url);
result = feed.getRssItems();
} catch (Exception ex) {
Log.i("RssReaderTask", "********** doInBackground: Feed error!");
}
}
return result;
}
@Override
protected void onPostExecute(ArrayList<RssItem> result) {
// deal with result
}
}

Sound recognition in Android

I want my Android app to recognize sound. For example, I want to know if the sound from the microphone is clapping, or knocking, or something else.
Do I need to use math, or can I just use some library for that?
If there are any libraries for sound analysis, please let me know. Thanks.
The Musicg library is useful for whistle detection. For claps, I wouldn't recommend using it, because it reacts to every loud sound (even speech).
For clap and other percussive sound detection I recommend TarsosDSP. It has a simple API with rich functionality (pitch detection and so on). For clap detection you can use something like this (if you use TarsosDSPAndroid-v3):
MicrophoneAudioDispatcher mDispatcher = new MicrophoneAudioDispatcher((int) SAMPLE_RATE, BUFFER_SIZE, BUFFER_OVERLAP);
double threshold = 8;
double sensitivity = 20;
mPercussionDetector = new PercussionOnsetDetector(22050, 1024,
new OnsetHandler() {
@Override
public void handleOnset(double time, double salience) {
Log.d(TAG, "Clap detected!");
}
}, sensitivity, threshold);
mDispatcher.addAudioProcessor(mPercussionDetector);
new Thread(mDispatcher).start();
You can tune your detector by adjusting sensitivity (0-100) and threshold (0-20).
Good luck!
There is an API that works very well for your needs, in my opinion:
http://code.google.com/p/musicg/
Good Luck!!!
You don't need math and you don't need AudioRecord. Just check MediaRecorder.getMaxAmplitude() every 1000 milliseconds.
this code and this code might be helpful.
Here is some code you will need.
public class Clapper
{
private static final String TAG = "Clapper";
private static final long DEFAULT_CLIP_TIME = 1000;
private long clipTime = DEFAULT_CLIP_TIME;
private AmplitudeClipListener clipListener;
private boolean continueRecording;
/**
* how much louder is required to hear a clap 10000, 18000, 25000 are good
* values
*/
private int amplitudeThreshold;
/**
* requires a little of noise by the user to trigger, background noise may
* trigger it
*/
public static final int AMPLITUDE_DIFF_LOW = 10000;
public static final int AMPLITUDE_DIFF_MED = 18000;
/**
* requires a lot of noise by the user to trigger. background noise isn't
* likely to be this loud
*/
public static final int AMPLITUDE_DIFF_HIGH = 25000;
private static final int DEFAULT_AMPLITUDE_DIFF = AMPLITUDE_DIFF_MED;
private MediaRecorder recorder;
private String tmpAudioFile;
public Clapper() throws IOException
{
this(DEFAULT_CLIP_TIME, "/tmp.3gp", DEFAULT_AMPLITUDE_DIFF, null, null);
}
public Clapper(long snipTime, String tmpAudioFile,
int amplitudeDifference, Context context, AmplitudeClipListener clipListener)
throws IOException
{
this.clipTime = snipTime;
this.clipListener = clipListener;
this.amplitudeThreshold = amplitudeDifference;
this.tmpAudioFile = tmpAudioFile;
}
public boolean recordClap()
{
Log.d(TAG, "record clap");
boolean clapDetected = false;
try
{
recorder = AudioUtil.prepareRecorder(tmpAudioFile);
}
catch (IOException io)
{
Log.d(TAG, "failed to prepare recorder ", io);
throw new RecordingFailedException("failed to create recorder", io);
}
recorder.start();
int startAmplitude = recorder.getMaxAmplitude();
Log.d(TAG, "starting amplitude: " + startAmplitude);
do
{
Log.d(TAG, "waiting while recording...");
waitSome();
int finishAmplitude = recorder.getMaxAmplitude();
if (clipListener != null)
{
clipListener.heard(finishAmplitude);
}
int ampDifference = finishAmplitude - startAmplitude;
if (ampDifference >= amplitudeThreshold)
{
Log.d(TAG, "heard a clap!");
clapDetected = true;
}
Log.d(TAG, "finishing amplitude: " + finishAmplitude + " diff: "
+ ampDifference);
} while (continueRecording || !clapDetected);
Log.d(TAG, "stopped recording");
done();
return clapDetected;
}
private void waitSome()
{
try
{
// wait a while
Thread.sleep(clipTime);
} catch (InterruptedException e)
{
Log.d(TAG, "interrupted");
}
}
/**
* need to call this when completely done with recording
*/
public void done()
{
Log.d(TAG, "stop recording");
if (recorder != null)
{
if (isRecording())
{
stopRecording();
}
//now stop the media player
recorder.stop();
recorder.release();
}
}
public boolean isRecording()
{
return continueRecording;
}
public void stopRecording()
{
continueRecording = false;
}
}
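Note that Clapper references an AudioUtil.prepareRecorder() helper that isn't shown above. A minimal sketch of what it presumably looks like (my reconstruction; the original class isn't in the post):
public class AudioUtil
{
    public static MediaRecorder prepareRecorder(String outputPath)
            throws IOException
    {
        MediaRecorder recorder = new MediaRecorder();
        recorder.setAudioSource(MediaRecorder.AudioSource.MIC);
        recorder.setOutputFormat(MediaRecorder.OutputFormat.THREE_GPP);
        recorder.setAudioEncoder(MediaRecorder.AudioEncoder.AMR_NB);
        recorder.setOutputFile(outputPath);
        recorder.prepare(); // may throw IOException
        // Caller invokes start(); getMaxAmplitude() only reports after that.
        return recorder;
    }
}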
I realize this is a year old, but I stumbled across it. I'm pretty sure that general, open-domain sound recognition is not a solved problem. So, no, you're not going to find any kind of library to do what you want on Android, because such code doesn't exist anywhere yet. If you pick some restricted domain, you could train a classifier to recognize the kinds of sounds you're interested in, but that would require lots of math, and lots of examples of each of the potential sounds. It would be pretty cool if the library you wanted existed, but as far as I know, the technology just isn't there yet.
