I've successfully cross-compiled the SDL library for the Android platform. Now I want to display my SDL objects, such as SDL_Surface and SDL_Rect, on the Android screen.
How is that possible?
Here is my first try:
SDLRenderer::SDLRenderer () :
bmp (NULL),
screen (NULL),
imgConvertCtx (NULL),
isInit (false),
quitKeyPressed (false)
{
}
SDLRenderer::~SDLRenderer ()
{
}
bool SDLRenderer::init (int width, int height)
{ LOGI("sdlrenderer init");
this->screen = SDL_SetVideoMode(width, height, 0, 0);
if(!screen){
LOGI("!screen");
return false;
}
this->bmp = SDL_CreateYUVOverlay(width, height, SDL_YV12_OVERLAY, this->screen);
LOGI("SDL_CreateYUVOverlay passed");
return true;
}
bool SDLRenderer::processEvents ()
{
SDL_Event sdlEvent;
while(SDL_PollEvent(&sdlEvent))
{
switch(sdlEvent.type)
{
case SDL_KEYDOWN:
if(sdlEvent.key.keysym.sym == SDLK_ESCAPE)
this->quitKeyPressed = true;
break;
case SDL_QUIT: this->quitKeyPressed = true; break;
}
}
return true;
}
bool SDLRenderer::isQuitKeyPressed ()
{
return this->quitKeyPressed;
}
void SDLRenderer::onVideoDataAvailable (const uint8_t **data, videoFrameProperties* props)
{LOGI("sdlrenderer data availabe");
if(!this->isInit){
this->isInit = this->init(props->width, props->height);
LOGI("sdlrenderer data availabe calling render init");
}
LOGI("before SDL_LockYUVOverlay(bmp);");
SDL_LockYUVOverlay(bmp);
LOGI("after SDL_LockYUVOverlay(bmp);");
AVPicture pict;
LOGI("after AVPicture pict;");
pict.data[0] = bmp->pixels[0];
pict.data[1] = bmp->pixels[2];
pict.data[2] = bmp->pixels[1];
pict.linesize[0] = bmp->pitches[0];
pict.linesize[1] = bmp->pitches[2];
pict.linesize[2] = bmp->pitches[1];
LOGI("after creating avpicture");
// Convert the image into YUV format that SDL uses
if(imgConvertCtx == NULL)
{
int w = props->width;
int h = props->height;
imgConvertCtx = sws_getContext(props->width, props->height, (PixelFormat)props->pxlFmt, w, h, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
if(imgConvertCtx == NULL)
{ LOGI("imgConvertCtx == NULL");
fprintf(stderr, "Cannot initialize the conversion context!\n");
exit(1);
}
}
sws_scale(imgConvertCtx, data, props->linesize, 0, props->height, pict.data, pict.linesize);
LOGI("calling SDL_UnlockYUVOverlay(bmp);");
SDL_UnlockYUVOverlay(bmp);
rect.x = 0;
rect.y = 0;
rect.w = props->width;
rect.h = props->height;
LOGI("sdlrenderer displaying");
SDL_DisplayYUVOverlay(bmp, &rect);
}
Here is my main:
int main(int argc, char *argv[])
{
SDLRenderer *renderer = new SDLRenderer();
DASHReceiver *receiver = new DASHReceiver(30);
receiver->Init("http://www----custom url here");
LibavDecoder *decoder = new LibavDecoder(receiver);
decoder->attachVideoObserver(renderer);
decoder->setFrameRate(24);
decoder->init();
bool eos = false;
while(!renderer->isQuitKeyPressed() && !eos)
{
eos = !decoder->decode();
renderer->processEvents();
}
decoder->stop();
return 0;
}
Thanks in advance!
You are missing an SDL_Flip or SDL_UpdateRect call on your main SDL_Surface, which is what actually updates it on the screen.
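For example, a minimal sketch of what that looks like with the SDL 1.2 API used above (screen is the surface returned by SDL_SetVideoMode):

// After drawing to the main surface, push it to the display.
SDL_Flip(screen);                    // update the whole screen surface
// or refresh just a sub-rectangle (x = y = w = h = 0 means the entire surface):
SDL_UpdateRect(screen, 0, 0, 0, 0);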
As far as I can see, you are trying to port the bitmovin open-source DASH player.
I have already done that, and once SDL was ported to Android, all the other parts of the software were working.
I've got exactly the same code as you, and this part is working well.
Be sure to define the surface in the Java part.
Try googling SDLActivity and use the Java code provided here.
Then take a careful look at http://lists.libsdl.org/pipermail/sdl-libsdl.org/2011-July/081481.html to make some small modifications to the Java code:
// The Unimplemented OpenGL ES API notices *always* indicate you have
// the incorrect context version, which has to be fixed in SDLActivity.java .
int EGL_CONTEXT_CLIENT_VERSION = 0x3098;
int contextAttrs[] = new int[]{
EGL_CONTEXT_CLIENT_VERSION, majorVersion,
EGL10.EGL_NONE
};
EGLContext ctx = egl.eglCreateContext(dpy, config, EGL10.EGL_NO_CONTEXT, contextAttrs);
if (ctx == EGL10.EGL_NO_CONTEXT) {
Log.e("SDL", "Couldn't create context");
return false;
}
/*
EGLContext ctx = egl.eglCreateContext(dpy, config, EGL10.EGL_NO_CONTEXT, null);
if (ctx == EGL10.EGL_NO_CONTEXT) {
Log.e("SDL", "Couldn't create context");
return false;
}
*/
I have a class named FJpegStreamReader that loads a JNI library (.so):
System.loadLibrary("fjpeg");
and has a singleton accessor:
private static volatile FJpegStreamReader mInstances;
public static FJpegStreamReader getInstance() {
if (mInstances == null) {
synchronized (FJpegStreamReader.class) {
if (mInstances == null) {
mInstances = new FJpegStreamReader();
}
}
}
return mInstances;
}
When I invoke it through the singleton, it works well:
FJpegStreamReader.getInstance().open("/sdcard/markers.jpg", FJpeg.MODEL_OPEN);
But now I don't want to use the singleton pattern. I tried to change FJpegStreamReader and invoke it like this:
FJpegStreamReader readerA = new FJpegStreamReader();
FJpegStreamReader readerB = new FJpegStreamReader();
readerA.open("/sdcard/markers1.jpg", FJpeg.MODEL_OPEN);
readerB.open("/sdcard/markers2.jpg", FJpeg.MODEL_OPEN);
Bitmap bmA = readerA.getBackgroundImage();
Bitmap bmB = readerB.getBackgroundImage();
readerA works well, but readerB gets an error. I want it to be instantiable more than once at the same time. How can I make both readerA and readerB work?
25884-25884 I: JNI_OnLoad Called
25884-25884 I: FilePath: /sdcard/markers1.jpg, and OpenMode: 0
25884-25884 I: offset is:15079
25884-25884 I: Start to Parser Data
25884-25884 I: Open Succeed
25884-25884 I: FilePath: /sdcard/markers2.jpg, and OpenMode: 0
25884-25884 E: Failed to Open File(code: -1001)
This is my C++ method:
unique_ptr<FJpeg> f(new FJpeg);
static jboolean
FJpeg_open(JNIEnv *env, jobject thiz, jstring fileName, jint model) {
const char *c_path = NULL;
c_path = env->GetStringUTFChars(fileName, NULL);
LOG_I("FilePath: %s, and OpenMode: %d", c_path, model);
int rel = f->openFile(c_path, model);
if (rel < 0) {
LOG_E("Fired to Open File(code: %d)", rel);
return false;
}
LOG_I("Open Succeed");
return true;
}
...
int FJpeg::openFile(const char *file_path, int open_mode) {
if (file_path == NULL || *file_path == '\0' || (open_mode != 0 && open_mode != 1)) return ERROR_CODE_UNSUPPORTED_OPERAND;
if (open_mode == 0) {
f_in.open(file_path, ios::in | ios::binary);
if (f_in.fail()) return ERROR_CODE_FILE_NOT_FOUND;
return checkOffsetAndEnd();
} else {
f_out.open(file_path, ios::out | ios::binary);
if (f_out.fail()) return ERROR_CODE_FILE_NOT_FOUND;
return 0;
}
}
It looks like all your Java FJpegStreamReader objects share a single C++ FJpeg object (the global unique_ptr<FJpeg> f above).
You will need to change that to associate each Java object with its own FJpeg object, for example by storing the raw pointer address in a long field of the Java class (or a handle to it), and converting it back to a pointer in your JNI methods.
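A minimal sketch of that pattern (hypothetical names; it assumes you add a private long field, e.g. mNativeHandle, to the Java class and register these functions alongside your existing ones):

// In the same JNI file as FJpeg_open above (jni.h already included).
// Called from the Java constructor: mNativeHandle = nativeCreate();
static jlong FJpeg_create(JNIEnv *env, jobject thiz) {
    // One native FJpeg per Java object, instead of the single shared global.
    return reinterpret_cast<jlong>(new FJpeg());
}

// Each native method takes the handle and converts it back to a pointer.
static jboolean FJpeg_open(JNIEnv *env, jobject thiz, jlong handle,
                           jstring fileName, jint model) {
    FJpeg *self = reinterpret_cast<FJpeg *>(handle);
    const char *c_path = env->GetStringUTFChars(fileName, NULL);
    int rel = self->openFile(c_path, model);
    env->ReleaseStringUTFChars(fileName, c_path);  // avoid leaking the UTF chars
    return rel >= 0 ? JNI_TRUE : JNI_FALSE;
}

// Called from a Java close() method so each instance is freed exactly once.
static void FJpeg_destroy(JNIEnv *env, jobject thiz, jlong handle) {
    delete reinterpret_cast<FJpeg *>(handle);
}

With this, readerA and readerB each pass their own mNativeHandle into the native calls, so each one drives its own FJpeg instance.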
I am trying to access the depth map generated by the TOF camera through the Android camera2 NDK API, but the app always crashes when I copy the depth map.
Android os : 10
Android api : 28
arch : armv8a
Qt version : Felgo 3.6.0
Develop on : windows 10 64 bits
ndk : r18b
phone : mate30
Error messages : F libc : Fatal signal 11 (SIGSEGV), code 2 (SEGV_ACCERR), fault addr 0x741c259980 in tid 31512 (ImageReader-480), pid 31412 (P.Androidndkcam)
Find the back camera that supports DEPTH16
std::tuple<std::string, bool> get_camera_depth_id(ACameraManager *cam_manager, int camera_facing)
{
auto camera_ids = get_camera_id_list(cam_manager);
if(camera_ids){
qInfo()<<__func__<<": found camera count "<<camera_ids->numCameras;
for(int i = 0; i < camera_ids->numCameras; ++i){
const char *id = camera_ids->cameraIds[i];
camera_status_t ret = ACAMERA_OK;
auto chars = get_camera_characteristics(cam_manager, id, &ret);
if(ret != ACAMERA_OK){
qInfo()<<__func__<<": cannot obtain characteristics of camera id = "<<id;
continue;
}
auto const entry = get_camera_capabilities(chars.get(), &ret);
if(ret != ACAMERA_OK){
qInfo()<<__func__<<": cannot obtain capabilities of camera id = "<<id;
continue;
}
ACameraMetadata_const_entry lens_info;
ACameraMetadata_getConstEntry(chars.get(), ACAMERA_LENS_FACING, &lens_info);
auto const facing = static_cast<acamera_metadata_enum_android_lens_facing_t>(lens_info.data.u8[0]);
bool is_right_face = facing == camera_facing;
bool support_bc = false, support_depth = false;
for(uint32_t i = 0; i < entry.count; i++) {
if(entry.data.u8[i] == ACAMERA_REQUEST_AVAILABLE_CAPABILITIES_BACKWARD_COMPATIBLE){
support_bc = true;
}
if(entry.data.u8[i] == ACAMERA_REQUEST_AVAILABLE_CAPABILITIES_DEPTH_OUTPUT){
support_depth = true;
}
}
qInfo()<<__func__<<" support bc = "<<support_bc<<", support depth = "<<support_depth
<<", is right face = "<<is_right_face;
if(is_right_face && support_depth){
qInfo()<<__func__<<": obtain depth camera id = "<<id;
return {id, support_bc};
}
}
}else{
qInfo()<<__func__<<": cannot get depth cam";
}
return {};
}
Open the camera
void initCam()
{
qDebug()<<__func__<<": init camera manager";
cameraManager = ACameraManager_create();
qDebug()<<__func__<<": get back facing camera id";
auto [id, support_bc] = get_camera_depth_id(cameraManager, ACAMERA_LENS_FACING_BACK);
//auto const id = get_camera_id(cameraManager, ACAMERA_LENS_FACING_BACK);
qInfo()<<__func__<<": back camera id = "<<id.c_str();
if(!id.empty()){
auto const cam_status =
ACameraManager_openCamera(cameraManager, id.c_str(), &cameraDeviceCallbacks, &cameraDevice);
qInfo()<<__func__<<" cam status = "<<cam_status;
qDebug()<<__func__<<": open camera";
android_cam_info cam_info(*cameraManager, id.c_str());
qInfo()<<__func__<<" print depth stream configuration info";
//print the format, width, height, is input information
cam_info.stream_config(ACAMERA_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS).print();
//obtain minimum width and height for the depth map
std::tie(width_, height_) =
cam_info.stream_config(ACAMERA_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS).get_minimum_dimension();
imageReader = createJpegReader();
if(imageReader){
imageWindow = createSurface(imageReader);
ANativeWindow_acquire(imageWindow);
ACameraDevice_createCaptureRequest(cameraDevice, TEMPLATE_PREVIEW, &request);
ACameraOutputTarget_create(imageWindow, &imageTarget);
ACaptureRequest_addTarget(request, imageTarget);
ACaptureSessionOutput_create(imageWindow, &imageOutput);
ACaptureSessionOutputContainer_create(&outputs);
ACaptureSessionOutputContainer_add(outputs, imageOutput);
ACameraDevice_createCaptureSession(cameraDevice, outputs, &sessionStateCallbacks, &textureSession);
// Start capturing continuously
ACameraCaptureSession_setRepeatingRequest(textureSession, &captureCallbacks, 1, &request, nullptr);
}
}
}
The way I create the imageReader
AImageReader* createJpegReader()
{
AImageReader* reader = nullptr;
media_status_t status = AImageReader_new(width_, height_, AIMAGE_FORMAT_DEPTH16, 1, &reader);
if(status != AMEDIA_OK){
qInfo()<<__func__<<": cannot create AImageReader, error code is = "<<status;
return nullptr;
}
AImageReader_ImageListener listener;
listener.context = this;
listener.onImageAvailable = imageCallback;
AImageReader_setImageListener(reader, &listener);
return reader;
}
Create surface
ANativeWindow* createSurface(AImageReader* reader)
{
ANativeWindow *nativeWindow;
AImageReader_getWindow(reader, &nativeWindow);
return nativeWindow;
}
The callback of the AImageReader
static void process_depth_16(void* context, AImage *image)
{
uint16_t *data = nullptr;
int len = 0;
auto const status = AImage_getPlaneData(image, 0, reinterpret_cast<uint8_t**>(&data), &len);
if(status != AMEDIA_OK){
qInfo()<<__func__<<": AImage_getPlaneData fail, error code = "<<status;
return;
}
qInfo()<<__func__<<": image len = "<<len;
auto *impl = static_cast<pimpl*>(context);
convert_depth_16_to_cvmat(data, impl->width_, impl->height_);
}
static void imageCallback(void* context, AImageReader* reader)
{
qDebug()<<__func__;
int status = -1;
auto image = get_next_image(reader, &status);
if(status != AMEDIA_OK){
qInfo()<<__func__<<": cannot acquire next image, error code = "<<status;
return;
}
int32_t format = -1;
AImage_getFormat(image.get(), &format);
if(format == AIMAGE_FORMAT_DEPTH16){
process_depth_16(context, image.get());
}else{
qInfo()<<__func__<<": do not support format = "<<format;
}
}
Copy the depth map (this function causes the app to crash)
cv::Mat convert_depth_16_to_cvmat(uint16_t *data, int32_t width, int32_t height)
{
auto *depth_ptr = data;
cv::Mat output(height, width, CV_16U);
for(int32_t row = 0; row != height; ++row){
depth_ptr += width * row;
auto *output_ptr = output.ptr<ushort>(row);
qInfo()<<__func__<<": row = "<<row; //crash when row equal to 27,sometimes over 50
for(int32_t col = 0; col != width; ++col){
output_ptr[col] = depth_ptr[col];
}
}
return output;
}
The minimum width and height I obtain from ACAMERA_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS are 480x360. Is anything wrong with the code?
Other utility functions are on pastebin.
Edit: The length of the data is 345600, which means there should be 172800 (480x360) pixels of type uint16_t (172800 pixels * 2 bytes = 345600 bytes).
I found the answer: the Mate 30 does not support the depth map, but the native API does not tell us that. Maybe the depth-map-related functions of the native API are not that mature yet.
We are working on a project that uses the FFMPEG library for video frame extraction on the Android platform.
On Windows, we have observed:
Using the CLI, ffmpeg is capable of extracting frames at 30 fps using the command ffmpeg -i input.flv -vf fps=1 out%d.png.
Using Xuggler, we are able to extract frames at 30 fps.
Using FFMPEG APIs directly in code, we are getting frames at 30 fps.
But when we use the FFMPEG APIs directly on Android (see hardware details), we get the following results:
720p video (1280 x 720) - 16 fps (approx. 60 ms/frame)
1080p video (1920 x 1080) - 7 fps (approx. 140 ms/frame)
We haven't tested Xuggler/CLI on Android yet.
Ideally, we should be able to get the data in constant time (approx. 30 ms/frame).
How can we get 30 fps on Android?
Code being used on Android:
if (avformat_open_input(&pFormatCtx, pcVideoFile, NULL, NULL)) {
iError = -1; //Couldn't open file
}
if (!iError) {
//Retrieve stream information
if (avformat_find_stream_info(pFormatCtx, NULL) < 0)
iError = -2; //Couldn't find stream information
}
//Find the first video stream
if (!iError) {
for (i = 0; i < pFormatCtx->nb_streams; i++) {
if (AVMEDIA_TYPE_VIDEO
== pFormatCtx->streams[i]->codec->codec_type) {
iFramesInVideo = pFormatCtx->streams[i]->nb_index_entries;
duration = pFormatCtx->streams[i]->duration;
begin = pFormatCtx->streams[i]->start_time;
time_base = (pFormatCtx->streams[i]->time_base.num * 1.0f)
/ pFormatCtx->streams[i]->time_base.den;
pCodecCtx = avcodec_alloc_context3(NULL);
if (!pCodecCtx) {
iError = -6;
break;
}
AVCodecParameters params = { 0 };
iReturn = avcodec_parameters_from_context(&params,
pFormatCtx->streams[i]->codec);
if (iReturn < 0) {
iError = -7;
break;
}
iReturn = avcodec_parameters_to_context(pCodecCtx, &params);
if (iReturn < 0) {
iError = -7;
break;
}
//pCodecCtx = pFormatCtx->streams[i]->codec;
iVideoStreamIndex = i;
break;
}
}
}
if (!iError) {
if (iVideoStreamIndex == -1) {
iError = -3; // Didn't find a video stream
}
}
if (!iError) {
// Find the decoder for the video stream
pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
if (pCodec == NULL) {
iError = -4;
}
}
if (!iError) {
// Open codec
if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0)
iError = -5;
}
if (!iError) {
iNumBytes = av_image_get_buffer_size(AV_PIX_FMT_RGB24, pCodecCtx->width,
pCodecCtx->height, 1);
// initialize SWS context for software scaling
sws_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height,
AV_PIX_FMT_RGB24,
SWS_BILINEAR,
NULL,
NULL,
NULL);
if (!sws_ctx) {
iError = -7;
}
}
clock_gettime(CLOCK_MONOTONIC_RAW, &end);
delta_us = (end.tv_sec - start.tv_sec) * 1000000
+ (end.tv_nsec - start.tv_nsec) / 1000;
start = end;
//LOGI("Starting_Frame_Extraction: %lld", delta_us);
if (!iError) {
while (av_read_frame(pFormatCtx, &packet) == 0) {
// Is this a packet from the video stream?
if (packet.stream_index == iVideoStreamIndex) {
pFrame = av_frame_alloc();
if (NULL == pFrame) {
iError = -8;
break;
}
// Decode video frame
avcodec_decode_video2(pCodecCtx, pFrame, &iFrameFinished,
&packet);
if (iFrameFinished) {
//OUR CODE
}
av_frame_free(&pFrame);
pFrame = NULL;
}
av_packet_unref(&packet);
}
}
You need some structures and functions from libavfilter.
The -vf option means "video filter". The command line ffmpeg -i input -vf fps=30 out%d.png will output video_length_in_seconds * 30 frames regardless of the original video fps; for example, a 10-second 25 fps input still produces 300 output frames. That means if the video is 25 fps, you'll get some duplicated frames, while if the video is above 30 fps, you'll lose some frames.
To achieve this, you have to initialize a filter context. See the filtering_video.c example in the ffmpeg source.
AVFilter* buffersrc = avfilter_get_by_name("buffer");
AVFilter* buffersink = avfilter_get_by_name("buffersink");
AVFilterInOut* outputs = avfilter_inout_alloc();
AVFilterInOut* inputs = avfilter_inout_alloc();
AVRational time_base = p_format_ctx->streams[video_stream]->time_base;
enum AVPixelFormat pix_fmts[] = { p_codec_ctx->pix_fmt, AV_PIX_FMT_NONE };
filter_graph = avfilter_graph_alloc();
if (!outputs || !inputs || !filter_graph) {
// failed, goto cleanup
}
char args[512];
snprintf(args, sizeof(args),
"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
p_codec_ctx->width, p_codec_ctx->height, p_codec_ctx->pix_fmt,
time_base.num, time_base.den,
p_codec_ctx->sample_aspect_ratio.num, p_codec_ctx->sample_aspect_ratio.den);
int ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
args, NULL, filter_graph);
if (ret < 0) {
LOG(ERROR) << "Cannot create buffer source";
avfilter_inout_free(&inputs);
avfilter_inout_free(&outputs);
return false;
}
ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
NULL, NULL, filter_graph);
if (ret < 0) {
// failed... blabla
}
ret = av_opt_set_int_list(buffersink_ctx, "pix_fmts", pix_fmts,
AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
if (ret < 0) {
// failed... blabla
}
outputs->name = av_strdup("in");
outputs->filter_ctx = buffersrc_ctx;
outputs->pad_idx = 0;
outputs->next = NULL;
inputs->name = av_strdup("out");
inputs->filter_ctx = buffersink_ctx;
inputs->pad_idx = 0;
inputs->next = NULL;
const char* filter_description = "fps=fps=30";
if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_description,
&inputs, &outputs, NULL)) < 0) {
// failed...
}
if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0) {
// failed...
}
OK, this is all the initialization needed.
And add some code to the decoding part:
avcodec_decode_video2(p_codec_ctx, p_frame, &got_frame, &packet);
if (got_frame) {
p_frame->pts = av_frame_get_best_effort_timestamp(p_frame);
if (av_buffersrc_add_frame_flags(buffersrc_ctx, p_frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
// failed... blabla
}
while (1) {
int ret = av_buffersink_get_frame(buffersink_ctx, p_frame_stage);
// p_frame_stage is an AVFrame struct, same size as p_frame. It needs to be allocated beforehand.
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
break;
if (ret < 0) {
// something wrong. filter failed.
}
// Do something with p_frame_stage here.
}
}
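One more note, as a small sketch under the same assumptions as the snippet above: release each filtered frame inside the loop, and free the graph when decoding finishes (avfilter_graph_free also destroys the filter contexts it owns):

// Inside the while loop, after using the filtered frame:
av_frame_unref(p_frame_stage);

// After decoding finishes:
avfilter_inout_free(&inputs);
avfilter_inout_free(&outputs);
avfilter_graph_free(&filter_graph);  // also frees buffersrc_ctx and buffersink_ctx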
Please take a look at https://gitter.im/mobile-ffmpeg/Lobby?at=5c5bb384f04ef00644f1bb4e. A few lines below, they mention options to accelerate the process, such as -preset ultrafast, -threads 10, -tune zerolatency, and -x264-params sliced-threads=1.
I have two files: the main file (main.cpp) and one for multi-threading (threads.cpp).
I use SDL_PushEvent() in threads.cpp and SDL_PollEvent() in main.cpp.
Below is the logic of my sample code.
main.cpp
bool Init() {
if (SDL_Init(SDL_INIT_VIDEO) < 0)
return false;
SDL_DisplayMode mode;
SDL_GetDisplayMode(0, 0, &mode);
this->win_width = mode.w;
this->win_height = mode.h;
this->win = SDL_CreateWindow(NULL, 0, 0, win_width, win_height, SDL_WINDOW_SHOWN | SDL_WINDOW_FULLSCREEN | SDL_WINDOW_OPENGL);
if (this->win == NULL) {
LOGE("[Init] SDL Window Created failed : %s", SDL_GetError());
return false;
}
this->renderer = SDL_CreateRenderer(win, -1, SDL_RENDERER_ACCELERATED);
if (this->renderer == NULL) {
LOGE("[Init] SDL Renderer Created failed : %s", SDL_GetError());
return false;
}
this->bmp = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_RGB565, SDL_TEXTUREACCESS_STREAMING, win_width, win_height);
if (this->bmp == NULL) {
LOGE("[Init] SDL Texture Created failed : %s", SDL_GetError());
return false;
}
return true;
}
void DisplayEvent (SDL_Event e) {
FrameObject obj = *(FrameObject*) e.user.data1;
SDL_Rect rect;
rect.x = rect.y = 0;
rect.w = obj.frameWidth;
rect.h = obj.frameHeight;
int r = SDL_UpdateTexture(this->bmp, NULL, obj.FrameData.RGB, rect.w*2);
LOGI("[DisplayEvent] - UpdateTexture");
// Reneder this Frame
SDL_RenderClear(this->renderer);
LOGI("[DisplayEvent] - RenderClear");
SDL_RenderCopy(this->renderer, this->bmp, NULL, &rect);
LOGI("[DisplayEvent] - RenderCopy");
SDL_RenderPresent(this->renderer);
LOGI("[DisplayEvent] - RenderPresent");
}
int main (int argc, char **argv) {
Init();
while (!quit) {
SDL_Event e;
// Event Polling
while (SDL_PollEvent(&e)) {
switch (e.type) {
case MY_EVENT:
LOGI("[main] - Get MY_EVENT");
DisplayEvent(e);
LOGI("[main] - %s more MY_EVENT", SDL_HasEvent(MY_EVENT) ? "Has" : "Hasn't");
break;
default:
break;
}
}
}
SDL_Quit();
}
threads.cpp
void* push_event(void *arg) {
FrameObject *obj = (FrameObject*) arg;
while (!quit) {
SDL_Event event;
SDL_zero(event);
event.type = MY_EVENT;
event.user.data1 = obj;
event.user.data2 = 0;
if (SDL_PushEvent(&event) == 1) LOGI("[push_event] - Push MY_EVENT");
else LOGE("[push_event] - Event Push Error : %s", SDL_GetError());
sleep(1);
}
return NULL;
}
EDIT:
I added more sample code. I found the problem is not a missing SDL event. The problem is that the SDL thread (the main thread) is blocked at SDL_RenderClear().
The log outputs "[DisplayEvent] - UpdateTexture" but never prints "[DisplayEvent] - RenderClear". It's weird. Creating a single thread to run push_event works fine, but when I create two threads to run push_event, the SDL thread is blocked.
Could the problem be the hardware, i.e. the GPU?
The SDL wiki page on SDL_PushEvent says that it is thread-safe, so I'm assuming it is OK to call it from other threads. However, it also says that you first need to call SDL_RegisterEvents to get an event ID suitable for application-specific events, and then use that ID to push your events.
It is not clear from your code: are you calling SDL_RegisterEvents? This might be the problem. Also, have you checked whether it works if you push the event on the same thread where you initialized the video? That test will make sure the problem isn't related to threads.
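A minimal sketch of that, assuming SDL2 and that MY_EVENT is a Uint32 shared between the two files rather than a hard-coded constant:

// Once at startup (e.g. in Init()), before any thread pushes events:
Uint32 MY_EVENT = SDL_RegisterEvents(1);
if (MY_EVENT == (Uint32)-1) {
    LOGE("[Init] No user event IDs left");
}

// In the pushing thread:
SDL_Event event;
SDL_zero(event);
event.type = MY_EVENT;       // use the registered ID
event.user.data1 = obj;
SDL_PushEvent(&event);

// In the polling loop, compare against the same variable:
if (e.type == MY_EVENT) {
    DisplayEvent(e);
}

Note that because the registered ID is only known at runtime, it cannot appear as a case label in a switch; compare it with an if instead.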
I have changed the default media framework in Android from Stagefright to GStreamer. This was done to make it flexible for our project.
But when I run some APKs, all the sounds of the app are played at app start, and nothing plays after that; SoundPool shows the error "sample # not READY". For example, in a baby piano app, the piano syllables are played when I start the application, and when I actually tap the piano after entering play mode, nothing is played.
The problem, I think, is that when the sounds are loaded into the SoundPool, the GStreamer MediaPlayer object is created and played, and this happens at app start.
The log shows "Sample channel count (0) out of range". It comes from the section of SoundPool.cpp below:
status_t Sample::doLoad() {
uint32_t sampleRate;
int numChannels;
int format;
sp<IMemory> p;
LOGW("Start decode");
if (mUrl) {
p = MediaPlayer::decode(mUrl, &sampleRate, &numChannels, &format);
} else {
p = MediaPlayer::decode(mFd, mOffset, mLength, &sampleRate, &numChannels, &format);
LOGW("close(%d)", mFd);
::close(mFd);
mFd = -1;
}
if (p == 0) {
LOGE("Unable to load sample: %s", mUrl);
return -1;
}
LOGW("pointer = %p, size = %u, sampleRate = %u, numChannels = %d",
p->pointer(), p->size(), sampleRate, numChannels);
if (sampleRate > kMaxSampleRate) {
LOGE("Sample rate (%u) out of range", sampleRate);
return -1;
}
if ((numChannels < 1) || (numChannels > 2)) {
LOGE("Sample channel count (%d) out of range", numChannels);
return -1;
}
//_dumpBuffer(p->pointer(), p->size());
uint8_t* q = static_cast<uint8_t*>(p->pointer()) + p->size() - 10;
//_dumpBuffer(q, 10, 10, false);
mData = p;
mSize = p->size();
mSampleRate = sampleRate;
mNumChannels = numChannels;
mFormat = format;
mState = READY;
return 0;
}
The MediaPlayerService decode function returns all the values as zero, in the code section below:
sp<IMemory> MediaPlayerService::decode(int fd, int64_t offset, int64_t length, uint32_t *pSampleRate, int* pNumChannels, int* pFormat)
{
LOGD("decode(%d, %lld, %lld)", fd, offset, length);
sp<MemoryBase> mem;
sp<MediaPlayerBase> player;
player_type playerType = getPlayerType(fd, offset, length);
LOGD("player type = %d", playerType);
// create the right type of player
sp<AudioCache> cache = new AudioCache("decode_fd");
player = android::createPlayer(playerType, cache.get(), cache->notify);
if (player == NULL) goto Exit;
if (player->hardwareOutput()) goto Exit;
static_cast<MediaPlayerInterface*>(player.get())->setAudioSink(cache);
// set data source
if (player->setDataSource(fd, offset, length) != NO_ERROR) goto Exit;
LOGD("prepare");
player->prepareAsync();
LOGD("wait for prepare");
if (cache->wait() != NO_ERROR) goto Exit;
LOGD("start");
player->start();
LOGD("wait for playback complete");
if (cache->wait() != NO_ERROR) goto Exit;
mem = new MemoryBase(cache->getHeap(), 0, cache->size());
*pSampleRate = cache->sampleRate();//Nes
*pNumChannels = cache->channelCount();
*pFormat = cache->format();
LOGD("return memory # %p, sampleRate=%u, channelCount = %d, format = %d", mem->pointer(), *pSampleRate, *pNumChannels, *pFormat);
Exit:
if (player != 0) player->reset();
::close(fd);
return mem;
}
The sample rate, channel count, etc. are all returned as 0 from this function.
After this, when the samples are played, it shows the error "sample # not READY":
int SoundPool::play(int sampleID, float leftVolume, float rightVolume,
int priority, int loop, float rate)
{
LOGW("sampleID=%d, leftVolume=%f, rightVolume=%f, priority=%d, loop=%d, rate=%f",
sampleID, leftVolume, rightVolume, priority, loop, rate);
sp<Sample> sample;
SoundChannel* channel;
int channelID;
// scope for lock
{
Mutex::Autolock lock(&mLock);
// is sample ready?
sample = findSample(sampleID);
if ((sample == 0) || (sample->state() != Sample::READY)) {
LOGW(" sample %d not READY", sampleID);
return 0;
}
dump();
// allocate a channel
channel = allocateChannel(priority);
// no channel allocated - return 0
if (!channel) {
LOGW("No channel allocated");
return 0;
}
channelID = ++mNextChannelID;
}
LOGW("channel state = %d", channel->state());
channel->play(sample, channelID, leftVolume, rightVolume, priority, loop, rate);
return channelID;
}
Is there a solution for this problem? Please help.
You can also use this method:
public void loadSound (String strSound, int stream) {
mSoundPool.setOnLoadCompleteListener(new OnLoadCompleteListener() {
@Override
public void onLoadComplete(SoundPool soundPool, int sampleId,
int status) {
// Play the sample that has just finished loading.
mSoundPool.play(sampleId, streamVolume, streamVolume, 1, LOOP_1_TIME, 1f);
}
});
try {
stream = mSoundPool.load(aMan.openFd(strSound), 1);
} catch (IOException e) {
e.printStackTrace();
}
}
"Sample not ready" usually indicates that the sample hasn't been loaded yet, i.e. it's still loading. (Perhaps the framework you switched to takes longer to load it.)
You should subscribe to its OnLoadCompleteListener, and when you receive the callback, the sound is ready to be played. Before that point, it won't be playable.
I had the same problem on Android 2.0 and solved it by using the .ogg format instead of .mp3 for the sounds I use, as mentioned here. I hope this solves your problem.