I am trying to access the depth map generated by the ToF camera through the Android camera2 NDK API, but the app always crashes when I copy the depth map.
Android OS : 10
Android API : 28
arch : armv8a
Qt version : Felgo 3.6.0
Develop on : Windows 10 64-bit
NDK : r18b
phone : Mate 30
Error message : F libc : Fatal signal 11 (SIGSEGV), code 2 (SEGV_ACCERR), fault addr 0x741c259980 in tid 31512 (ImageReader-480), pid 31412 (P.Androidndkcam)
Find the back camera that supports DEPTH16:
std::tuple<std::string, bool> get_camera_depth_id(ACameraManager *cam_manager, int camera_facing)
{
    auto camera_ids = get_camera_id_list(cam_manager);
    if(camera_ids){
        qInfo()<<__func__<<": found camera count "<<camera_ids->numCameras;
        for(int i = 0; i < camera_ids->numCameras; ++i){
            const char *id = camera_ids->cameraIds[i];
            camera_status_t ret = ACAMERA_OK;
            auto chars = get_camera_characteristics(cam_manager, id, &ret);
            if(ret != ACAMERA_OK){
                qInfo()<<__func__<<": cannot obtain characteristics of camera id = "<<id;
                continue;
            }
            auto const entry = get_camera_capabilities(chars.get(), &ret);
            if(ret != ACAMERA_OK){
                qInfo()<<__func__<<": cannot obtain capabilities of camera id = "<<id;
                continue;
            }
            ACameraMetadata_const_entry lens_info;
            ACameraMetadata_getConstEntry(chars.get(), ACAMERA_LENS_FACING, &lens_info);
            auto const facing = static_cast<acamera_metadata_enum_android_lens_facing_t>(lens_info.data.u8[i]);
            bool is_right_face = facing == camera_facing;
            bool support_bc = false, support_depth = false;
            for(uint32_t i = 0; i < entry.count; i++) {
                if(entry.data.u8[i] == ACAMERA_REQUEST_AVAILABLE_CAPABILITIES_BACKWARD_COMPATIBLE){
                    support_bc = true;
                }
                if(entry.data.u8[i] == ACAMERA_REQUEST_AVAILABLE_CAPABILITIES_DEPTH_OUTPUT){
                    support_depth = true;
                }
            }
            qInfo()<<__func__<<" support bc = "<<support_bc<<", support depth = "<<support_depth
                   <<", is right face = "<<is_right_face;
            if(is_right_face && support_depth){
                qInfo()<<__func__<<": obtain depth camera id = "<<id;
                return {id, support_bc};
            }
        }
    }else{
        qInfo()<<__func__<<": cannot get depth cam";
    }
    return {};
}
Open the camera:
void initCam()
{
    qDebug()<<__func__<<": init camera manager";
    cameraManager = ACameraManager_create();
    qDebug()<<__func__<<": get back facing camera id";
    auto [id, support_bc] = get_camera_depth_id(cameraManager, ACAMERA_LENS_FACING_BACK);
    //auto const id = get_camera_id(cameraManager, ACAMERA_LENS_FACING_BACK);
    qInfo()<<__func__<<": back camera id = "<<id.c_str();
    if(!id.empty()){
        auto const cam_status =
            ACameraManager_openCamera(cameraManager, id.c_str(), &cameraDeviceCallbacks, &cameraDevice);
        qInfo()<<__func__<<" cam status = "<<cam_status;
        qDebug()<<__func__<<": open camera";
        android_cam_info cam_info(*cameraManager, id.c_str());
        qInfo()<<__func__<<" print depth stream configuration info";
        //print the format, width, height, is-input information
        cam_info.stream_config(ACAMERA_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS).print();
        //obtain minimum width and height for the depth map
        std::tie(width_, height_) =
            cam_info.stream_config(ACAMERA_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS).get_minimum_dimension();
        imageReader = createJpegReader();
        if(imageReader){
            imageWindow = createSurface(imageReader);
            ANativeWindow_acquire(imageWindow);
            ACameraDevice_createCaptureRequest(cameraDevice, TEMPLATE_PREVIEW, &request);
            ACameraOutputTarget_create(imageWindow, &imageTarget);
            ACaptureRequest_addTarget(request, imageTarget);
            ACaptureSessionOutput_create(imageWindow, &imageOutput);
            ACaptureSessionOutputContainer_create(&outputs);
            ACaptureSessionOutputContainer_add(outputs, imageOutput);
            ACameraDevice_createCaptureSession(cameraDevice, outputs, &sessionStateCallbacks, &textureSession);
            // Start capturing continuously
            ACameraCaptureSession_setRepeatingRequest(textureSession, &captureCallbacks, 1, &request, nullptr);
        }
    }
}
The way I create the imageReader
AImageReader* createJpegReader()
{
    AImageReader* reader = nullptr;
    media_status_t status = AImageReader_new(width_, height_, AIMAGE_FORMAT_DEPTH16, 1, &reader);
    if(status != AMEDIA_OK){
        qInfo()<<__func__<<": cannot create AImageReader, error code is = "<<status;
        return nullptr;
    }
    AImageReader_ImageListener listener;
    listener.context = this;
    listener.onImageAvailable = imageCallback;
    AImageReader_setImageListener(reader, &listener);
    return reader;
}
Create surface
ANativeWindow* createSurface(AImageReader* reader)
{
    ANativeWindow *nativeWindow;
    AImageReader_getWindow(reader, &nativeWindow);
    return nativeWindow;
}
The callbacks of the AImageReader:
static void process_depth_16(void* context, AImage *image)
{
    uint16_t *data = nullptr;
    int len = 0;
    auto const status = AImage_getPlaneData(image, 0, reinterpret_cast<uint8_t**>(&data), &len);
    if(status != AMEDIA_OK){
        qInfo()<<__func__<<": AImage_getPlaneData fail, error code = "<<status;
        return;
    }
    qInfo()<<__func__<<": image len = "<<len;
    auto *impl = static_cast<pimpl*>(context);
    convert_depth_16_to_cvmat(data, impl->width_, impl->height_);
}

static void imageCallback(void* context, AImageReader* reader)
{
    qDebug()<<__func__;
    int status = -1;
    auto image = get_next_image(reader, &status);
    if(status != AMEDIA_OK){
        qInfo()<<__func__<<": cannot acquire next image, error code = "<<status;
        return;
    }
    int32_t format = -1;
    AImage_getFormat(image.get(), &format);
    if(format == AIMAGE_FORMAT_DEPTH16){
        process_depth_16(context, image.get());
    }else{
        qInfo()<<__func__<<": do not support format = "<<format;
    }
}
Copy the depth map (this function causes the crash):
cv::Mat convert_depth_16_to_cvmat(uint16_t *data, int32_t width, int32_t height)
{
    auto *depth_ptr = data;
    cv::Mat output(height, width, CV_16U);
    for(int32_t row = 0; row != height; ++row){
        depth_ptr += width * row;
        auto *output_ptr = output.ptr<ushort>(row);
        qInfo()<<__func__<<": row = "<<row; //crash when row equals 27, sometimes over 50
        for(int32_t col = 0; col != width; ++col){
            output_ptr[col] = depth_ptr[col];
        }
    }
    return output;
}
The minimum width and height I obtain from ACAMERA_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS are 480x360. Is anything wrong with the code?
Other utility functions are posted on pastebin.
Edit: The length of the data is 345600 bytes, which means there should be 172800 (480x360) pixels of type uint16_t.
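For reference, each DEPTH16 sample packs the range in millimetres into the lower 13 bits and a confidence value into the upper 3 bits (per the Android ImageFormat.DEPTH16 documentation). A minimal decoding sketch; the struct and helper name below are mine, not part of the project:

#include <cstdint>

// Split one DEPTH16 sample into range and confidence.
struct DepthSample
{
    uint16_t range_mm;   // bits 0-12: distance in millimetres
    uint8_t  confidence; // bits 13-15: 0 = full confidence, otherwise (value - 1) / 7
};

inline DepthSample decode_depth_16(uint16_t raw)
{
    return DepthSample{
        static_cast<uint16_t>(raw & 0x1FFF),
        static_cast<uint8_t>((raw >> 13) & 0x7)
    };
}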
I found out the answer: the Mate 30 does not support the depth map, but the native API does not tell us that; maybe the depth-related functions of the native API are not that mature yet.
Related
I need to pass the FFMPEG 'raw' data back to my Java code in order to display it on the screen.
I have a native method that deals with FFMPEG and after that calls a method in Java that takes Byte[] (so far) as an argument.
The byte array that is passed is read by Java, but when doing BitmapFactory.decodeByteArray(bitmap, 0, bitmap.length); it returns null. I have printed out the array and I get 200k elements (which is expected), but it cannot be decoded. So far what I'm doing is taking data from AvFrame->data, casting it to unsigned char *, and then casting that to jbyteArray. After all the casting, I pass the jbyteArray as an argument to my Java method. Is there something I'm missing here? Why won't BitmapFactory decode the array into an image for display?
EDIT 1.0
Currently I am trying to obtain my image via
public void setImage(ByteBuffer bmp) {
    bmp.rewind();
    Bitmap bitmap = Bitmap.createBitmap(1920, 1080, Bitmap.Config.ARGB_8888);
    bitmap.copyPixelsFromBuffer(bmp);
    runOnUiThread(() -> {
        ImageView imgViewer = findViewById(R.id.mSurfaceView);
        imgViewer.setImageBitmap(bitmap);
    });
}
But I keep getting an exception
JNI DETECTED ERROR IN APPLICATION: JNI NewDirectByteBuffer called with pending exception java.lang.RuntimeException: Buffer not large enough for pixels
at void android.graphics.Bitmap.copyPixelsFromBuffer(java.nio.Buffer) (Bitmap.java:657)
at void com.example.asmcpp.MainActivity.setSurfaceImage(java.nio.ByteBuffer)
Edit 1.1
So, here is the full code that is executed every time a frame comes in. Note that the ByteBuffer is created and passed from within this method:
void VideoClientInterface::onEncodedFrame(video::encoded_frame_t &encodedFrame) {
    AVFrame *filt_frame = av_frame_alloc();
    auto frame = std::shared_ptr<video::encoded_frame_t>(new video::encoded_frame_t,
                                                         [](video::encoded_frame_t *p) { if (p) delete p; });
    if (frame) {
        frame->size = encodedFrame.size;
        frame->ssrc = encodedFrame.ssrc;
        frame->width = encodedFrame.width;
        frame->height = encodedFrame.height;
        frame->dataType = encodedFrame.dataType;
        frame->timestamp = encodedFrame.timestamp;
        frame->frameIndex = encodedFrame.frameIndex;
        frame->isKeyFrame = encodedFrame.isKeyFrame;
        frame->isDroppable = encodedFrame.isDroppable;
        frame->data = new char[frame->size];
        if (frame->data) {
            memcpy(frame->data, encodedFrame.data, frame->size);
            AVPacket packet;
            av_init_packet(&packet);
            packet.dts = AV_NOPTS_VALUE;
            packet.pts = encodedFrame.timestamp;
            packet.data = (uint8_t *) encodedFrame.data;
            packet.size = encodedFrame.size;
            int ret = avcodec_send_packet(m_avCodecContext, &packet);
            if (ret == 0) {
                ret = avcodec_receive_frame(m_avCodecContext, m_avFrame);
                if (ret == 0) {
                    m_transform = sws_getCachedContext(
                            m_transform,                                              // previous context ptr
                            m_avFrame->width, m_avFrame->height, AV_PIX_FMT_YUV420P,  // src
                            m_avFrame->width, m_avFrame->height, AV_PIX_FMT_RGB24,    // dst
                            SWS_BILINEAR, nullptr, nullptr, nullptr                   // options
                    );
                    auto decodedFrame = std::make_shared<video::decoded_frame_t>();
                    decodedFrame->width = m_avFrame->width;
                    decodedFrame->height = m_avFrame->height;
                    decodedFrame->size = m_avFrame->width * m_avFrame->height * 3;
                    decodedFrame->timeStamp = m_avFrame->pts;
                    decodedFrame->data = new unsigned char[decodedFrame->size];
                    if (decodedFrame->data) {
                        uint8_t *dstSlice[] = {decodedFrame->data,
                                               0,
                                               0}; // outFrame.bits(), outFrame.bits(), outFrame.bits()
                        const int dstStride[] = {decodedFrame->width * 3, 0, 0};
                        sws_scale(m_transform, m_avFrame->data, m_avFrame->linesize,
                                  0, m_avFrame->height, dstSlice, dstStride);
                        auto m_rawData = decodedFrame->data;
                        auto len = strlen(reinterpret_cast<char *>(m_rawData));
                        if (frameCounter == 10) {
                            jobject newArray = GetJniEnv()->NewDirectByteBuffer(m_rawData, len);
                            GetJniEnv()->CallVoidMethod(m_obj, setSurfaceImage, newArray);
                            frameCounter = 0;
                        }
                        frameCounter++;
                    }
                } else {
                    av_packet_unref(&packet);
                }
            } else {
                av_packet_unref(&packet);
            }
        }
    }
}
I am not entirely sure I am even doing that part correctly. If you see any errors in this, feel free to point them out.
You cannot cast native byte arrays to jbyteArray and expect it to work. A byte[] is an actual object with a length field, a reference count, and so on.
Use NewDirectByteBuffer instead to wrap your byte buffer into a Java ByteBuffer, from where you can grab the actual byte[] using .array().
Note that this JNI operation is relatively expensive, so if you expect to do this on a per-frame basis, you might want to pre-allocate some bytebuffers and tell FFmpeg to write directly into those buffers.
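To illustrate the pre-allocation idea: allocate the pixel buffer once on the native side, wrap it in a direct ByteBuffer a single time, and only notify Java after each sws_scale. This is a minimal sketch, not the poster's code; the class, the Java method name, and the use of RGBA output are assumptions. Note that Bitmap.Config.ARGB_8888 expects 4 bytes per pixel, so a width*height*3 RGB24 buffer will always be too small for copyPixelsFromBuffer.

#include <jni.h>
#include <cstddef>
#include <cstdint>

// Hypothetical helper: one RGBA buffer allocated up front, wrapped once,
// then reused for every decoded frame (cleanup via DeleteGlobalRef/delete[] omitted).
class FrameBridge
{
public:
    FrameBridge(JNIEnv *env, jobject activity, int width, int height)
        : size_(static_cast<std::size_t>(width) * height * 4),   // ARGB_8888: 4 bytes per pixel
          pixels_(new uint8_t[size_])
    {
        jobject local = env->NewDirectByteBuffer(pixels_, static_cast<jlong>(size_));
        buffer_ = env->NewGlobalRef(local);
        jclass cls = env->GetObjectClass(activity);
        // Assumed Java method: void setImage(java.nio.ByteBuffer)
        setImage_ = env->GetMethodID(cls, "setImage", "(Ljava/nio/ByteBuffer;)V");
    }

    uint8_t *pixels() { return pixels_; }                  // have sws_scale write RGBA here
    std::size_t size() const { return size_; }

    void notify(JNIEnv *env, jobject activity)
    {
        env->CallVoidMethod(activity, setImage_, buffer_); // Java side just rewinds and copies
    }

private:
    std::size_t size_;
    uint8_t   *pixels_;
    jobject    buffer_   = nullptr;
    jmethodID  setImage_ = nullptr;
};

The JNIEnv pointer passed to notify() has to belong to the calling thread, just like the GetJniEnv() call in the question.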
I am encoding raw data on Android using ffmpeg libraries. The native code reads the audio data from an external device and encodes it into AAC format in an mp4 container. I am finding that the audio data is successfully encoded (I can play it with Groove Music, my default Windows audio player). But the metadata, as reported by ffprobe, has an incorrect duration of 0.05 secs - it's actually several seconds long. Also the bitrate is reported wrongly as around 65kbps even though I specified 192kbps.
I've tried recordings of various durations but the result is always similar - the (very small) duration and bitrate. I've tried various other audio players such as Quicktime but they play only the first 0.05 secs or so of the audio.
I've removed error-checking from the following. The actual code checks every call and no problems are reported.
Initialisation:
void AudioWriter::initialise( const char *filePath )
{
    AVCodecID avCodecID = AVCodecID::AV_CODEC_ID_AAC;
    int bitRate = 192000;
    char *containerFormat = "mp4";
    int sampleRate = 48000;
    int nChannels = 2;

    mAvCodec = avcodec_find_encoder(avCodecID);
    mAvCodecContext = avcodec_alloc_context3(mAvCodec);
    mAvCodecContext->codec_id = avCodecID;
    mAvCodecContext->codec_type = AVMEDIA_TYPE_AUDIO;
    mAvCodecContext->sample_fmt = AV_SAMPLE_FMT_FLTP;
    mAvCodecContext->bit_rate = bitRate;
    mAvCodecContext->sample_rate = sampleRate;
    mAvCodecContext->channels = nChannels;
    mAvCodecContext->channel_layout = AV_CH_LAYOUT_STEREO;
    avcodec_open2( mAvCodecContext, mAvCodec, nullptr );

    mAvFormatContext = avformat_alloc_context();
    avformat_alloc_output_context2(&mAvFormatContext, nullptr, containerFormat, nullptr);
    mAvFormatContext->audio_codec = mAvCodec;
    mAvFormatContext->audio_codec_id = avCodecID;

    mAvOutputStream = avformat_new_stream(mAvFormatContext, mAvCodec);
    avcodec_parameters_from_context(mAvOutputStream->codecpar, mAvCodecContext);

    if (!(mAvFormatContext->oformat->flags & AVFMT_NOFILE))
    {
        avio_open(&mAvFormatContext->pb, filePath, AVIO_FLAG_WRITE);
    }
    if ( mAvFormatContext->oformat->flags & AVFMT_GLOBALHEADER )
    {
        mAvCodecContext->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
    }
    avformat_write_header(mAvFormatContext, NULL);

    mAvAudioFrame = av_frame_alloc();
    mAvAudioFrame->nb_samples = mAvCodecContext->frame_size;
    mAvAudioFrame->format = mAvCodecContext->sample_fmt;
    mAvAudioFrame->channel_layout = mAvCodecContext->channel_layout;
    av_samples_get_buffer_size(NULL, mAvCodecContext->channels, mAvCodecContext->frame_size,
                               mAvCodecContext->sample_fmt, 0);
    av_frame_get_buffer(mAvAudioFrame, 0);
    av_frame_make_writable(mAvAudioFrame);

    mAvPacket = av_packet_alloc();
}
Encoding:
// SoundRecording is a custom class with the raw samples to be encoded
bool AudioWriter::encodeToContainer( SoundRecording *soundRecording )
{
    int ret;
    int frameCount = mAvCodecContext->frame_size;
    int nChannels = mAvCodecContext->channels;
    float *buf = new float[frameCount*nChannels];

    while ( soundRecording->hasReadableData() )
    {
        // Populate the frame
        int samplesRead = soundRecording->read( buf, frameCount*nChannels );
        // Planar data
        int nFrames = samplesRead/nChannels;
        for ( int i = 0; i < nFrames; ++i )
        {
            for (int c = 0; c < nChannels; ++c )
            {
                samples[c][i] = buf[nChannels*i +c];
            }
        }
        // Fill a gap at the end with silence
        if ( samplesRead < frameCount*nChannels )
        {
            for ( int i = samplesRead; i < frameCount*nChannels; ++i )
            {
                for (int c = 0; c < nChannels; ++c )
                {
                    samples[c][i] = 0.0;
                }
            }
        }
        encodeFrame( mAvAudioFrame );
    }
    finish();
    return true;
}
bool AudioWriter::encodeFrame( AVFrame *frame )
{
    // send the frame for encoding
    if ( frame != nullptr )
    {
        frame->pts = mAudFrameCounter++;
    }
    int ret = avcodec_send_frame(mAvCodecContext, frame );
    while (ret >= 0)
    {
        ret = avcodec_receive_packet(mAvCodecContext, mAvPacket);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF )
        {
            break;
        }
        else if (ret < 0)
        {
            return false;
        }
        av_packet_rescale_ts(mAvPacket, mAvCodecContext->time_base, mAvOutputStream->time_base);
        mAvPacket->stream_index = mAvOutputStream->index;
        av_interleaved_write_frame(mAvFormatContext, mAvPacket);
        av_packet_unref(mAvPacket);
    }
    return true;
}
void AudioWriter::finish()
{
    // Flush by sending a null frame
    encodeFrame( nullptr );
    av_write_trailer(mAvFormatContext);
}
Since the resultant file contains the recorded music, the code to manipulate the audio data seems to be correct (unless I am overwriting other memory somehow).
The inaccurate duration and bitrate suggest that information concerning time is not being properly managed. I set the pts of the frames using a simple increasing integer. I'm unclear what the code that sets the timestamp and stream index achieves - and whether it's even necessary: I copied it from supposedly working code but I've seen other code without it.
Can anyone see what I'm doing wrong?
The timestamps need to be correct. Set the time_base to 1/sample_rate and increment the timestamp by 1024 each frame. Note: 1024 is AAC specific. If you change codecs, you need to change the frame size.
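A minimal sketch of that change, using the member names from the question (my reading of the answer, not tested against the poster's project):

// In AudioWriter::initialise(), before avcodec_open2():
mAvCodecContext->time_base = AVRational{1, sampleRate};    // count timestamps in samples

// After avformat_new_stream():
mAvOutputStream->time_base = AVRational{1, sampleRate};    // the mp4 muxer may still adjust this in avformat_write_header()

// In AudioWriter::encodeFrame(), instead of frame->pts = mAudFrameCounter++;
if ( frame != nullptr )
{
    frame->pts = mAudFrameCounter;
    mAudFrameCounter += mAvCodecContext->frame_size;       // 1024 samples per AAC frame
}

With av_packet_rescale_ts already in place, the packets then carry sample-accurate timestamps into the container, which should give ffprobe the correct duration and bitrate.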
I am trying to do 3DR texturing, but it always uses only vertex colors in the texture.
On every frame I store the frame as a PNG:
RGBImage frame(t3dr_image, 4);
std::ostringstream ss;
ss << dataset_.c_str();
ss << "/";
ss << poses_.size();
ss << ".png";
frame.Write(ss.str().c_str());
poses_.push_back(t3dr_image_pose);
timestamps_.push_back(t3dr_image.timestamp);
In the Save method I do the texturing:
1) I extract the full mesh from the context
Tango3DR_Mesh* mesh = 0;
Tango3DR_Status ret;
ret = Tango3DR_extractFullMesh(t3dr_context_, &mesh);
if (ret != TANGO_3DR_SUCCESS)
    std::exit(EXIT_SUCCESS);
2) Create a texturing context using the extracted mesh
Tango3DR_ConfigH textureConfig;
textureConfig = Tango3DR_Config_create(TANGO_3DR_CONFIG_TEXTURING);
ret = Tango3DR_Config_setDouble(textureConfig, "min_resolution", 0.01);
if (ret != TANGO_3DR_SUCCESS)
    std::exit(EXIT_SUCCESS);

Tango3DR_TexturingContext context;
context = Tango3DR_createTexturingContext(textureConfig, dataset.c_str(), mesh);
if (context == nullptr)
    std::exit(EXIT_SUCCESS);

Tango3DR_Config_destroy(textureConfig);
3) Call Tango3DR_updateTexture with the data I stored before (this does not work)
for (unsigned int i = 0; i < poses_.size(); i++) {
    std::ostringstream ss;
    ss << dataset_.c_str();
    ss << "/";
    ss << i;
    ss << ".png";
    RGBImage frame(ss.str());

    Tango3DR_ImageBuffer image;
    image.width = frame.GetWidth();
    image.height = frame.GetHeight();
    image.stride = frame.GetWidth() * 3;
    image.timestamp = timestamps_[i];
    //data are for sure in this format
    image.format = TANGO_3DR_HAL_PIXEL_FORMAT_RGB_888;
    image.data = frame.GetData();

    ret = Tango3DR_updateTexture(context, &image, &poses_[i]);
    if (ret != TANGO_3DR_SUCCESS)
        std::exit(EXIT_SUCCESS);
}
4) Texture the mesh
ret = Tango3DR_Mesh_destroy(mesh);
if (ret != TANGO_3DR_SUCCESS)
    std::exit(EXIT_SUCCESS);

mesh = 0;
ret = Tango3DR_getTexturedMesh(context, &mesh);
if (ret != TANGO_3DR_SUCCESS)
    std::exit(EXIT_SUCCESS);
5) Save it as OBJ (the resulting texture contains only the vertex colors, why?)
ret = Tango3DR_Mesh_saveToObj(mesh, filename.c_str());
if (ret != TANGO_3DR_SUCCESS)
    std::exit(EXIT_SUCCESS);

ret = Tango3DR_destroyTexturingContext(context);
if (ret != TANGO_3DR_SUCCESS)
    std::exit(EXIT_SUCCESS);

ret = Tango3DR_Mesh_destroy(mesh);
if (ret != TANGO_3DR_SUCCESS)
    std::exit(EXIT_SUCCESS);
All methods returned TANGO_3DR_SUCCESS.
Full code here: https://github.com/lvonasek/tango
Thanks for reaching out and providing the detailed code breakdown.
The error is on our end - the library currently doesn't support RGB texture inputs. It assumes YUV for all input images. I've opened a ticket to track this bug and we'll fix it for the next release, by allowing RGB input and providing better return values for invalid image formats.
Edit: Found another bug on our end. The API states image_pose should be the pose of the image, but our implementation actually expects the pose of the device. I've opened a bug, and this will be fixed in the next release (release-H).
You can try working around this for now by passing in the device pose without multiplying the device-to-camera extrinsic calibration, although of course that's just a temp bandaid.
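For anyone who wants to try feeding YUV in the meantime, below is a minimal, library-agnostic sketch of an RGB888-to-NV21 conversion using the usual BT.601 integer approximation. Whether the 3DR ImageBuffer accepts NV21, and which format constant and stride it expects, are assumptions to verify against the 3DR headers.

#include <cstddef>
#include <cstdint>
#include <vector>

// Convert tightly packed RGB888 to NV21: a full-resolution Y plane followed by
// an interleaved VU plane at quarter resolution. Width and height are assumed even.
std::vector<uint8_t> rgb888_to_nv21(const uint8_t *rgb, int width, int height)
{
    std::vector<uint8_t> yuv(static_cast<std::size_t>(width) * height * 3 / 2);
    uint8_t *y_plane  = yuv.data();
    uint8_t *vu_plane = yuv.data() + width * height;

    for (int j = 0; j < height; ++j) {
        for (int i = 0; i < width; ++i) {
            const uint8_t *p = rgb + (j * width + i) * 3;
            const int r = p[0], g = p[1], b = p[2];

            const int y = ((66 * r + 129 * g + 25 * b + 128) >> 8) + 16;
            y_plane[j * width + i] = static_cast<uint8_t>(y);

            if ((j % 2 == 0) && (i % 2 == 0)) {            // one VU pair per 2x2 block
                const int u = ((-38 * r - 74 * g + 112 * b + 128) >> 8) + 128;
                const int v = ((112 * r - 94 * g - 18 * b + 128) >> 8) + 128;
                uint8_t *vu = vu_plane + (j / 2) * width + i;
                vu[0] = static_cast<uint8_t>(v);           // NV21 stores V before U
                vu[1] = static_cast<uint8_t>(u);
            }
        }
    }
    return yuv;
}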
I want to find out the list of mobile devices which clicked my ad. Based on those details I want to re-target them.
So I need to capture their identifier (UDID on iOS and Android ID on Android) in order to identify them.
Can anyone suggest a good method or a better way to achieve the above?
For Android, you should check this thread :
Will TelephonyManger.getDeviceId() return device id for Tablets like Galaxy Tab...?
Jorgesys' answer seems clean.
ANDROID:
I am using this and it seems to be working fine.
public static String getDeviceAndroidID(Context context)
{
    String android_id = Secure.getString(context.getContentResolver(), Secure.ANDROID_ID);
    if(android_id != null)
        return android_id;
    else
        return "";
}
iOS:
For iOS versions below 7 I am using the MAC address of the device. From iOS 7 onwards Apple provides a unique ID for this purpose.
- (NSString *)getMacAddress
{
    if(SYSTEM_VERSION_GREATER_THAN_OR_EQUAL_TO(@"7.0"))
    {
        NSString *strUID = nil;
        if(strUID == nil) strUID = [[[UIDevice currentDevice] identifierForVendor] UUIDString];
        return strUID;
    }

    int mgmtInfoBase[6];
    char *msgBuffer = NULL;
    size_t length;
    unsigned char macAddress[6];
    struct if_msghdr *interfaceMsgStruct;
    struct sockaddr_dl *socketStruct;
    NSString *errorFlag = nil;

    // Setup the management Information Base (mib)
    mgmtInfoBase[0] = CTL_NET;        // Request network subsystem
    mgmtInfoBase[1] = AF_ROUTE;       // Routing table info
    mgmtInfoBase[2] = 0;
    mgmtInfoBase[3] = AF_LINK;        // Request link layer information
    mgmtInfoBase[4] = NET_RT_IFLIST;  // Request all configured interfaces

    // With all configured interfaces requested, get handle index
    if ((mgmtInfoBase[5] = if_nametoindex("en0")) == 0)
        errorFlag = @"if_nametoindex failure";
    else
    {
        // Get the size of the data available (store in len)
        if (sysctl(mgmtInfoBase, 6, NULL, &length, NULL, 0) < 0)
            errorFlag = @"sysctl mgmtInfoBase failure";
        else
        {
            // Alloc memory based on above call
            if ((msgBuffer = malloc(length)) == NULL)
                errorFlag = @"buffer allocation failure";
            else
            {
                // Get system information, store in buffer
                if (sysctl(mgmtInfoBase, 6, msgBuffer, &length, NULL, 0) < 0)
                    errorFlag = @"sysctl msgBuffer failure";
            }
        }
    }

    // Before going any further...
    if (errorFlag != nil)
    {
        free(msgBuffer);
        if(ENABLE_LOG) DLog(@"Error: %@", errorFlag);
        return errorFlag;
    }

    // Map msgbuffer to interface message structure
    interfaceMsgStruct = (struct if_msghdr *) msgBuffer;
    // Map to link-level socket structure
    socketStruct = (struct sockaddr_dl *) (interfaceMsgStruct + 1);
    // Copy link layer address data in socket structure to an array
    memcpy(&macAddress, socketStruct->sdl_data + socketStruct->sdl_nlen, 6);
    // Read from char array into a string object, in traditional MAC address format
    NSString *macAddressString = [NSString stringWithFormat:@"%02X:%02X:%02X:%02X:%02X:%02X",
                                  macAddress[0], macAddress[1], macAddress[2],
                                  macAddress[3], macAddress[4], macAddress[5]];
    //if(ENABLE_LOG) DLog(@"Mac Address: %@", macAddressString);

    // Release the buffer memory
    free(msgBuffer);
    return macAddressString;
}
I've successfully cross-compiled the SDL library for the Android platform. Now I want to display my SDL forms, such as SDL_Surface and SDL_Rect, on the Android screen.
How is that possible?
Here is my first try:
SDLRenderer::SDLRenderer () :
    bmp (NULL),
    screen (NULL),
    imgConvertCtx (NULL),
    isInit (false),
    quitKeyPressed (false)
{
}

SDLRenderer::~SDLRenderer ()
{
}

bool SDLRenderer::init (int width, int height)
{
    LOGI("sdlrenderer init");
    this->screen = SDL_SetVideoMode(width, height, 0, 0);
    if(!screen){
        LOGI("!screen");
        return false;
    }
    this->bmp = SDL_CreateYUVOverlay(width, height, SDL_YV12_OVERLAY, this->screen);
    LOGI("SDL_CreateYUVOverlay passed");
    return true;
}
bool SDLRenderer::processEvents ()
{
    SDL_Event sdlEvent;
    while(SDL_PollEvent(&sdlEvent))
    {
        switch(sdlEvent.type)
        {
        case SDL_KEYDOWN:
            if(sdlEvent.key.keysym.sym == SDLK_ESCAPE)
                this->quitKeyPressed = true;
            break;
        case SDL_QUIT:
            this->quitKeyPressed = true;
            break;
        }
    }
    return true;
}

bool SDLRenderer::isQuitKeyPressed ()
{
    return this->quitKeyPressed;
}
void SDLRenderer::onVideoDataAvailable (const uint8_t **data, videoFrameProperties* props)
{
    LOGI("sdlrenderer data available");
    if(!this->isInit){
        this->isInit = this->init(props->width, props->height);
        LOGI("sdlrenderer data available calling render init");
    }

    LOGI("before SDL_LockYUVOverlay(bmp);");
    SDL_LockYUVOverlay(bmp);
    LOGI("after SDL_LockYUVOverlay(bmp);");

    AVPicture pict;
    LOGI("after AVPicture pict;");
    pict.data[0] = bmp->pixels[0];
    pict.data[1] = bmp->pixels[2];
    pict.data[2] = bmp->pixels[1];
    pict.linesize[0] = bmp->pitches[0];
    pict.linesize[1] = bmp->pitches[2];
    pict.linesize[2] = bmp->pitches[1];
    LOGI("after creating avpicture");

    // Convert the image into the YUV format that SDL uses
    if(imgConvertCtx == NULL)
    {
        int w = props->width;
        int h = props->height;
        imgConvertCtx = sws_getContext(props->width, props->height, (PixelFormat)props->pxlFmt,
                                       w, h, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
        if(imgConvertCtx == NULL)
        {
            LOGI("imgConvertCtx == NULL");
            fprintf(stderr, "Cannot initialize the conversion context!\n");
            exit(1);
        }
    }

    sws_scale(imgConvertCtx, data, props->linesize, 0, props->height, pict.data, pict.linesize);
    LOGI("calling SDL_UnlockYUVOverlay(bmp);");
    SDL_UnlockYUVOverlay(bmp);

    rect.x = 0;
    rect.y = 0;
    rect.w = props->width;
    rect.h = props->height;
    LOGI("sdlrenderer displaying");
    SDL_DisplayYUVOverlay(bmp, &rect);
}
Here is my main:
int main(int argc, char *argv[])
{
    SDLRenderer *renderer = new SDLRenderer();
    DASHReceiver *receiver = new DASHReceiver(30);
    receiver->Init("http://www----custom url here");

    LibavDecoder *decoder = new LibavDecoder(receiver);
    decoder->attachVideoObserver(renderer);
    decoder->setFrameRate(24);
    decoder->init();

    bool eos = false;
    while(!renderer->isQuitKeyPressed() && !eos)
    {
        eos = !decoder->decode();
        renderer->processEvents();
    }
    decoder->stop();
    return 0;
}
Thanks in advance!
You are missing an SDL_Flip or SDL_UpdateRect call on your main SDL_Surface, which is what pushes it to the screen.
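With the SDL 1.2-style API used in the question, that is roughly the following (a sketch against the question's member names):

// At the end of SDLRenderer::onVideoDataAvailable(), right after SDL_DisplayYUVOverlay(bmp, &rect):
SDL_Flip(this->screen);                                        // present the whole surface
// or, to refresh only the overlay region:
// SDL_UpdateRect(this->screen, rect.x, rect.y, rect.w, rect.h);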
As far as I can see you are trying to port the bitmovin open-source DASH player.
I have already done that, and once SDL was ported to Android, all the other parts of the software were working.
I've got exactly the same code as you and this part is working well.
Be sure to define the surface in the Java part.
Try googling SDLActivity and use the Java code provided from here.
Then take a careful look at http://lists.libsdl.org/pipermail/sdl-libsdl.org/2011-July/081481.html to make some small modifications to the Java code:
// The Unimplemented OpenGL ES API notices *always* indicate you have
// the incorrect context version, which has to be fixed in SDLActivity.java .
int EGL_CONTEXT_CLIENT_VERSION = 0x3098;
int contextAttrs[] = new int[] {
    EGL_CONTEXT_CLIENT_VERSION, majorVersion,
    EGL10.EGL_NONE
};

EGLContext ctx = egl.eglCreateContext(dpy, config, EGL10.EGL_NO_CONTEXT, contextAttrs);
if (ctx == EGL10.EGL_NO_CONTEXT) {
    Log.e("SDL", "Couldn't create context");
    return false;
}

/*
EGLContext ctx = egl.eglCreateContext(dpy, config, EGL10.EGL_NO_CONTEXT, null);
if (ctx == EGL10.EGL_NO_CONTEXT) {
    Log.e("SDL", "Couldn't create context");
    return false;
}
*/