Writing directly to the framebuffer on Android

I have a rooted phone with Android 4.2.2. I'd like to use the minui API (source here) that the bootloader/recovery code uses to draw on the screen. minui is much simpler than native OpenGL, and I don't need any of the complex functionality OpenGL exposes.
The problem is that I can't write directly to the fb0 device: FBIOPUT_VSCREENINFO fails for an unknown reason.
How can I draw directly to fb0 on Android, or how can I use minui outside of bootloader mode?

The device node is /dev/graphics/fb0. You can build in the Android tree or with arm-linux-gcc:
arm-linux-gcc -D__ANDROID__ fb-test.c -static
It works like a normal embedded Linux framebuffer. Here is a sample:
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <linux/fb.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

struct fb_fix_screeninfo FixedInfo;
struct fb_var_screeninfo OrigVarInfo;
static int FrameBufferFD = -1;
void *FrameBuffer = (void *) -1;

#ifndef __ANDROID__
#define FRAMEBUFFER "/dev/fb0"
#else
#define FRAMEBUFFER "/dev/graphics/fb0"
#endif /* __ANDROID__ */

void openFBDEV(void)
{
    /* open the framebuffer device */
    FrameBufferFD = open(FRAMEBUFFER, O_RDWR);
    if (FrameBufferFD < 0) {
        fprintf(stderr, "Error opening %s\n", FRAMEBUFFER);
        exit(1);
    }
    /* get the fixed screen info */
    if (ioctl(FrameBufferFD, FBIOGET_FSCREENINFO, &FixedInfo)) {
        fprintf(stderr, "error: ioctl(FBIOGET_FSCREENINFO) failed\n");
        exit(1);
    }
    /* get the variable screen info */
    if (ioctl(FrameBufferFD, FBIOGET_VSCREENINFO, &OrigVarInfo)) {
        fprintf(stderr, "error: ioctl(FBIOGET_VSCREENINFO) failed\n");
        exit(1);
    }
    if (FixedInfo.visual != FB_VISUAL_TRUECOLOR
        && FixedInfo.visual != FB_VISUAL_DIRECTCOLOR) {
        fprintf(stderr,
                "non-TRUE/DIRECT-COLOR visuals (0x%x) not supported by this demo.\n",
                FixedInfo.visual);
        exit(1);
    }
    /*
     * fbdev says the frame buffer is at offset zero, and the mmio region
     * is immediately after.
     */
    /* mmap the framebuffer into our address space */
    FrameBuffer = mmap(0,                      /* start */
                       FixedInfo.smem_len,     /* bytes */
                       PROT_READ | PROT_WRITE, /* prot */
                       MAP_SHARED,             /* flags */
                       FrameBufferFD,          /* fd */
                       0);                     /* offset */
    if (FrameBuffer == (void *) -1) {
        fprintf(stderr, "error: unable to mmap framebuffer\n");
        exit(1);
    }
}

void closeFBDEV(void)
{
    munmap(FrameBuffer, FixedInfo.smem_len);
    close(FrameBufferFD);
}

int main()
{
    openFBDEV();
    fprintf(stderr, "openFBDEV finish\n");
    memset(FrameBuffer, 128, FixedInfo.smem_len);
    sleep(5);
    closeFBDEV();
    fprintf(stderr, "closeFBDEV finish\n");
    return 0;
}
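To try it on the device, push the static binary over adb and run it as root (the default output name a.out and the /data/local/tmp path are assumptions; adjust as needed):
adb push a.out /data/local/tmp/fb-test
adb shell chmod 755 /data/local/tmp/fb-test
adb shell su -c /data/local/tmp/fb-test
If everything works, the screen turns a uniform gray (every byte set to 128) for five seconds.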

Source code, tested and working on the Qualcomm MSM89xx platform:
panel_test.c
#include <unistd.h>
#include <stdio.h>
#include <fcntl.h>
#include <linux/fb.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <stdlib.h>
#include "yellow_face.zif"

int main()
{
    int fbfd = 0;
    struct fb_var_screeninfo vinfo;
    struct fb_fix_screeninfo finfo;
    struct fb_cmap cmapinfo;
    long int screensize = 0;
    char *fbp = 0;
    int x = 0, y = 0;
    long int location = 0;
    int b, g, r;

    // Open the framebuffer device for reading and writing
    fbfd = open("/dev/graphics/fb0", O_RDWR, 0);
    if (fbfd < 0) {
        printf("Error: cannot open framebuffer device.%d\n", fbfd);
        exit(1);
    }
    printf("The framebuffer device was opened successfully.\n");

    // Get fixed screen information
    if (ioctl(fbfd, FBIOGET_FSCREENINFO, &finfo)) {
        printf("Error reading fixed information.\n");
        exit(2);
    }
    printf("\ntype:0x%x\n", finfo.type);           // framebuffer type, e.g. 0 = packed pixels
    printf("visual:%d\n", finfo.visual);           // visual type, e.g. 2 = truecolor, 3 = pseudocolor
    printf("line_length:%d\n", finfo.line_length); // length of one row in bytes
    printf("\nsmem_start:0x%lx,smem_len:%u\n", finfo.smem_start, finfo.smem_len); // mapped video RAM parameters
    printf("mmio_start:0x%lx ,mmio_len:%u\n", finfo.mmio_start, finfo.mmio_len);

    // Get variable screen information
    if (ioctl(fbfd, FBIOGET_VSCREENINFO, &vinfo)) {
        printf("Error reading variable information.\n");
        exit(3);
    }
    printf("%dx%d, %dbpp,xres_virtual=%d,yres_virtual=%dvinfo.xoffset=%d,vinfo.yoffset=%d\n",
           vinfo.xres, vinfo.yres, vinfo.bits_per_pixel,
           vinfo.xres_virtual, vinfo.yres_virtual, vinfo.xoffset, vinfo.yoffset);

    screensize = finfo.line_length * vinfo.yres_virtual;

    // Map the framebuffer into user space with mmap and get the start address
    fbp = (char *)mmap(0, screensize, PROT_READ | PROT_WRITE, MAP_SHARED, fbfd, 0);
    if (fbp == MAP_FAILED) { // was: (int)fbp == -1, which is unsafe on 64-bit
        printf("Error: failed to map framebuffer device to memory.\n");
        exit(4);
    }
    printf("The framebuffer device was mapped to memory successfully.\n");

    /***************** example 1 *****************/
    b = 10;
    g = 100;
    r = 100;
    for (y = 0; y < 340; y++)
        for (x = 0; x < 420; x++) {
            location = (x + 100) * (vinfo.bits_per_pixel / 8) +
                       (y + 100) * finfo.line_length;
            if (vinfo.bits_per_pixel == 32) {
                *(fbp + location) = b;     // Some blue
                *(fbp + location + 1) = g; // A little green
                *(fbp + location + 2) = r; // A lot of red
                *(fbp + location + 3) = 0; // No transparency
            }
        }
    /***************** example 1 *****************/

    /***************** example 2 *****************/
    unsigned char *pTemp = (unsigned char *)fbp;
    int i, j;
    // start at (x, y), end at (right, bottom)
    x = 400;
    y = 400;
    int right = 700;   // vinfo.xres;
    int bottom = 1000; // vinfo.yres;
    for (i = y; i < bottom; i++) {
        for (j = x; j < right; j++) {
            unsigned short data = yellow_face_data[(((i - y) % 128) * 128) + ((j - x) % 128)];
            pTemp[i * finfo.line_length + (j * 4) + 2] = (unsigned char)((data & 0xF800) >> 11 << 3);
            pTemp[i * finfo.line_length + (j * 4) + 1] = (unsigned char)((data & 0x7E0) >> 5 << 2);
            pTemp[i * finfo.line_length + (j * 4) + 0] = (unsigned char)((data & 0x1F) << 3);
        }
    }
    /***************** example 2 *****************/

    // note: vinfo.xoffset and vinfo.yoffset must be 0, otherwise FBIOPAN_DISPLAY fails
    if (ioctl(fbfd, FBIOPAN_DISPLAY, &vinfo)) {
        printf("Error FBIOPAN_DISPLAY information.\n");
        exit(5);
    }
    sleep(10);
    munmap(fbp, finfo.smem_len); // finfo.smem_len == screensize == finfo.line_length * vinfo.yres_virtual
    close(fbfd);
    return 0;
}
Android.mk
# Copyright 2006-2014 The Android Open Source Project
LOCAL_PATH:= $(call my-dir)
include $(CLEAR_VARS)
LOCAL_SRC_FILES:= panel_test.c
LOCAL_SHARED_LIBRARIES := $(common_libs) libqdutils libdl liblog libbase libcutils
LOCAL_C_INCLUDES := $(common_includes) $(kernel_includes)
LOCAL_ADDITIONAL_DEPENDENCIES := $(common_deps) $(kernel_deps)
LOCAL_MODULE := panel_test
LOCAL_CFLAGS := -Werror
include $(BUILD_EXECUTABLE)
include $(call first-makefiles-under,$(LOCAL_PATH))
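For reference (standard AOSP workflow, not spelled out above): drop the directory containing panel_test.c and this Android.mk into the source tree, then build just this module with the envsetup helpers:
source build/envsetup.sh
lunch                            # pick your product
mmm <path-to-panel_test-module>  # placeholder path
The executable is installed under out/target/product/<device>/system/bin/ by default, from where you can adb push or adb sync it to the device.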
yellow_face.zif

Related

TensorFlow C++ example for Android

I was looking into the examples provided in the TensorFlow git repository for Android devices. They use Java interfaces as a wrapper for the C++ API. Are there any examples where I can use the C++ API directly for initializing TensorFlow, loading the model, running inference, and so on?
Check out this repo and the following blog for a solution. These links provide step-by-step instructions on how to use the TensorFlow C++ API on Android. The idea is to create a dynamic library (.so file) that is Android-friendly (i.e. does not include the parts of TensorFlow that are only compatible with desktop/GPU environments).
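For illustration, in the TensorFlow 1.x source tree the Android-friendly inference library was built roughly like this (target name and flags come from that era's documentation; verify them against your checkout before relying on them):
bazel build -c opt //tensorflow/contrib/android:libtensorflow_inference.so \
    --crosstool_top=//external:android/crosstool \
    --host_crosstool_top=@bazel_tools//tools/cpp:toolchain \
    --cpu=armeabi-v7a
The resulting .so is what you link against from your NDK code in place of the Java wrapper.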
I wrote this code for the Raspberry Pi, but I believe it must be pretty much the same for Android:
tfbenchmark.h:
#ifndef TENSORFLOW_H
#define TENSORFLOW_H

#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/public/session.h"

class TensorFlowBenchmark {
public:
    TensorFlowBenchmark();
    virtual ~TensorFlowBenchmark();

    bool init();
    bool run();

private:
    std::unique_ptr<tensorflow::Session> session_;
};

#endif /* TENSORFLOW_H */
tfbenchmark.cpp:
#include "tfbenchmark.h"
#include <vector>
#include <fstream>
#include <chrono>
#include <ctime>
#include <cstddef>
#include <jpeglib.h>
#include <setjmp.h>
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/graph/default_device.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/util/command_line_flags.h"
// These are all common classes it's handy to reference with no namespace.
using tensorflow::Flag;
using tensorflow::Tensor;
using tensorflow::TensorShape;
using tensorflow::Status;
using tensorflow::string;
using tensorflow::int32;
const static string root_dir = ".";
const static string image = "../input.jpg";
const static string graph = "models/frozen_graph.pb";
const static int32 input_width = 224;
const static int32 input_height = 224;
const static int32 input_mean = 128;
const static int32 input_std = 128;
const static string input_layer = "x_input_pl";
const static string output_layer = "out/out";
const static int NUM_EVAL = 100;
const static int MAX_BATCH = 256;
template<class T>
void report_metrics(const std::vector<T>& v, int batch_size) {
double sum = std::accumulate(v.begin(), v.end(), 0.0);
double mean = sum / v.size();
LOG(INFO) << "Batch size = " << batch_size << ": "
<< mean/batch_size << "ms per image";
}
// Error handling for JPEG decoding.
void CatchError(j_common_ptr cinfo) {
    (*cinfo->err->output_message)(cinfo);
    jmp_buf* jpeg_jmpbuf = reinterpret_cast<jmp_buf*>(cinfo->client_data);
    jpeg_destroy(cinfo);
    longjmp(*jpeg_jmpbuf, 1);
}

// Decompresses a JPEG file from disk.
Status LoadJpegFile(string file_name, std::vector<tensorflow::uint8>* data,
                    int* width, int* height, int* channels) {
    struct jpeg_decompress_struct cinfo;
    FILE* infile;
    JSAMPARRAY buffer;
    int row_stride;

    if ((infile = fopen(file_name.c_str(), "rb")) == NULL) {
        LOG(ERROR) << "Can't open " << file_name;
        return tensorflow::errors::NotFound("JPEG file ", file_name,
                                            " not found");
    }

    struct jpeg_error_mgr jerr;
    jmp_buf jpeg_jmpbuf;  // recovery point in case of error
    cinfo.err = jpeg_std_error(&jerr);
    cinfo.client_data = &jpeg_jmpbuf;
    jerr.error_exit = CatchError;
    if (setjmp(jpeg_jmpbuf)) {
        return tensorflow::errors::Unknown("JPEG decoding failed");
    }

    jpeg_create_decompress(&cinfo);
    jpeg_stdio_src(&cinfo, infile);
    jpeg_read_header(&cinfo, TRUE);
    jpeg_start_decompress(&cinfo);
    *width = cinfo.output_width;
    *height = cinfo.output_height;
    *channels = cinfo.output_components;
    data->resize((*height) * (*width) * (*channels));

    row_stride = cinfo.output_width * cinfo.output_components;
    buffer = (*cinfo.mem->alloc_sarray)((j_common_ptr)&cinfo, JPOOL_IMAGE,
                                        row_stride, 1);
    while (cinfo.output_scanline < cinfo.output_height) {
        tensorflow::uint8* row_address =
            &((*data)[cinfo.output_scanline * row_stride]);
        jpeg_read_scanlines(&cinfo, buffer, 1);
        memcpy(row_address, buffer[0], row_stride);
    }

    jpeg_finish_decompress(&cinfo);
    jpeg_destroy_decompress(&cinfo);
    fclose(infile);
    return Status::OK();
}
// Given decoded image data, build a float tensor of shape
// [batch_size, height, width, channels]. The eight-bit values are scaled
// numerically to the float range the model expects (given by input_mean
// and input_std).
Status FillTensorFromImageData(std::vector<tensorflow::uint8>& image_data,
                               const int batch_size, const int image_height,
                               const int image_width, const int image_channels,
                               std::vector<Tensor>* out_tensors) {
    tensorflow::Tensor image_tensor(
        tensorflow::DT_FLOAT,
        tensorflow::TensorShape(
            {batch_size, image_height, image_width, image_channels}));
    auto image_tensor_mapped = image_tensor.tensor<float, 4>();
    LOG(INFO) << image_data.size() << " bytes in image_data";
    tensorflow::uint8* in = image_data.data();
    float* out = image_tensor_mapped.data();
    for (int n = 0; n < batch_size; n++) {
        for (int y = 0; y < image_height; ++y) {
            tensorflow::uint8* in_row = in + (y * image_width * image_channels);
            float* out_row = out + (n * image_height * image_width * image_channels) +
                             (y * image_width * image_channels);
            for (int x = 0; x < image_width; ++x) {
                tensorflow::uint8* input_pixel = in_row + (x * image_channels);
                float* out_pixel = out_row + (x * image_channels);
                for (int c = 0; c < image_channels; ++c) {
                    out_pixel[c] =
                        static_cast<float>(input_pixel[c] - input_mean) / input_std;
                }
            }
        }
    }
    out_tensors->push_back(image_tensor);
    return Status::OK();
}
// Reads a model graph definition from disk, and creates a session object you
// can use to run it.
Status LoadGraph(string graph_file_name,
                 std::unique_ptr<tensorflow::Session>* session) {
    tensorflow::GraphDef graph_def;
    Status load_graph_status = ReadBinaryProto(tensorflow::Env::Default(),
                                               graph_file_name, &graph_def);
    if (!load_graph_status.ok()) {
        return tensorflow::errors::NotFound("Failed to load compute graph at '",
                                            graph_file_name, "'");
    }
    session->reset(tensorflow::NewSession(tensorflow::SessionOptions()));
    Status session_create_status = (*session)->Create(graph_def);
    if (!session_create_status.ok()) {
        return session_create_status;
    }
    return Status::OK();
}
TensorFlowBenchmark::TensorFlowBenchmark() {}
TensorFlowBenchmark::~TensorFlowBenchmark() {}

bool TensorFlowBenchmark::init() {
    // We need to call this to set up global state for TensorFlow.
    int argc = 0;          // was uninitialized; pass an empty argument list
    char** argv = nullptr;
    tensorflow::port::InitMain("benchmark", &argc, &argv);

    string graph_path = tensorflow::io::JoinPath(root_dir, graph);
    Status load_graph_status = LoadGraph(graph_path, &session_);
    if (!load_graph_status.ok()) {
        LOG(ERROR) << load_graph_status;
        return false;
    }
    return true;
}

bool TensorFlowBenchmark::run() {
    string image_path = tensorflow::io::JoinPath(root_dir, image);
    std::vector<tensorflow::uint8> image_data;
    int image_width;
    int image_height;
    int image_channels;
    Status load_img_status = LoadJpegFile(image_path, &image_data, &image_width,
                                          &image_height, &image_channels);
    if (!load_img_status.ok()) {
        LOG(ERROR) << load_img_status;
        return false;
    }
    LOG(INFO) << "Loaded JPEG: " << image_width << "x" << image_height << "x"
              << image_channels;

    for (int batch_size = 1; batch_size <= MAX_BATCH; batch_size <<= 1) {
        LOG(INFO) << "Batch size " << batch_size;
        std::vector<Tensor> resized_tensors;
        Status read_tensor_status =
            FillTensorFromImageData(image_data, batch_size, image_height,
                                    image_width, image_channels,
                                    &resized_tensors);
        if (!read_tensor_status.ok()) {
            LOG(ERROR) << read_tensor_status;
            return false;
        }
        const Tensor& resized_tensor = resized_tensors[0];

        // Actually run the image through the model.
        std::vector<Tensor> outputs;
        std::vector<long> timings;
        for (int i = 0; i < NUM_EVAL; ++i) {
            auto start = std::chrono::system_clock::now();
            Status run_status = session_->Run({{input_layer, resized_tensor}},
                                              {output_layer}, {}, &outputs);
            auto end = std::chrono::system_clock::now();
            timings.push_back(
                std::chrono::duration_cast<std::chrono::milliseconds>(end - start)
                    .count());
            if (!run_status.ok()) {
                LOG(ERROR) << "Running model failed: " << run_status;
                return false;
            }
        }
        report_metrics(timings, batch_size);
    }
    return true;
}
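A minimal driver for the class above might look like this (a hypothetical main.cpp; the model and image paths come from the constants at the top of tfbenchmark.cpp):
// main.cpp (sketch): run the benchmark end to end.
#include "tfbenchmark.h"

int main() {
    TensorFlowBenchmark benchmark;
    if (!benchmark.init()) return 1;  // loads models/frozen_graph.pb
    return benchmark.run() ? 0 : 1;   // decodes ../input.jpg and times inference
}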

How to make spidev.c work on Samsung s5p6818/Android 5.1

I am using a development board with the Samsung s5p6818 SoC. After enabling "user mode spi" and "SLSI SPI port0" in menuconfig, and adding this code to plat-s5p6818/drone/device.c:
/*------------------------------------------------------------------------------
 * SSP/SPI
 */
#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
#include <linux/spi/spi.h>
#include <linux/gpio.h>
#include <mach/slsi-spi.h>

static struct s3c64xx_spi_csinfo spi0_csi[] = {
    [0] = {
        .line      = CFG_SPI0_CS,
        .set_level = gpio_set_value,
        .fb_delay  = 0x2,
    },
};

static struct spi_board_info spi_plat_board[] __initdata = {
    [0] = {
        .modalias        = "spidev",     /* fixup */
        .max_speed_hz    = 3125000,      /* max spi clock (SCK) speed in HZ */
        .bus_num         = 0,            /* Note: must be smaller than ARRAY_SIZE(spi_plat_device) */
        .chip_select     = 0,            /* Note: must be smaller than the SPI cs_num */
        .controller_data = &spi0_csi[0], /* spi0_info */
        .mode            = SPI_MODE_3 | SPI_CPOL | SPI_CPHA,
    },
};
#endif

spi_register_board_info(spi_plat_board, ARRAY_SIZE(spi_plat_board));
/*--------------------------------------------------------------------------*/
I can find "spidev0.0" in /dev when Android system is operating on my board. However, this device dosen't work when I use a testing program calling the function read/write, and cause my system to reboot (function "open","ioctl" can work properly). Here is my testing code
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>

#define SPI_IOC_MAGIC 'k'
#define SPI_CPHA   0x01
#define SPI_CPOL   0x02
#define SPI_MODE_0 (0|0)
#define SPI_MODE_1 (0|SPI_CPHA)
#define SPI_MODE_2 (SPI_CPOL|0)
#define SPI_MODE_3 (SPI_CPOL|SPI_CPHA)

typedef unsigned char __u8;
typedef unsigned int __u32;

/* Read / Write of SPI mode (SPI_MODE_0..SPI_MODE_3) */
#define SPI_IOC_RD_MODE          _IOR(SPI_IOC_MAGIC, 1, __u8)
#define SPI_IOC_WR_MODE          _IOW(SPI_IOC_MAGIC, 1, __u8)
/* Read / Write SPI bit justification */
#define SPI_IOC_RD_LSB_FIRST     _IOR(SPI_IOC_MAGIC, 2, __u8)
#define SPI_IOC_WR_LSB_FIRST     _IOW(SPI_IOC_MAGIC, 2, __u8)
/* Read / Write SPI device word length (1..N) */
#define SPI_IOC_RD_BITS_PER_WORD _IOR(SPI_IOC_MAGIC, 3, __u8)
#define SPI_IOC_WR_BITS_PER_WORD _IOW(SPI_IOC_MAGIC, 3, __u8)
/* Read / Write SPI device default max speed hz */
#define SPI_IOC_RD_MAX_SPEED_HZ  _IOR(SPI_IOC_MAGIC, 4, __u32)
#define SPI_IOC_WR_MAX_SPEED_HZ  _IOW(SPI_IOC_MAGIC, 4, __u32)

static int fd = 0;

void spi_init(void); /* forward declaration; it is defined below main() */

int main(void)
{
    int buf[1000];
    int i = 0;

    fd = open("/dev/spidev0.0", O_RDWR);
    if (fd < 0) {
        printf("open /dev/spidev0.0 error!!!\n");
    }
    printf("-------------===============------------fd=%d\n", fd);
    spi_init();
    for (i = 0; i < 1000; i++) {
        buf[i] = 0x33333333;
    }
    for (i = 0; i < 20; i++) {
        write(fd, buf, 1000 * 4);
    }
    return 0;
}

void spi_init(void)
{
    int ret = 0;
    int speed = 2 * 1000 * 1000;
    unsigned char bits = 32;
    unsigned char mode = SPI_MODE_1;

    ret = ioctl(fd, SPI_IOC_WR_MODE, &mode);
    if (ret == -1) {
        printf("can't set spi mode\n");
    } else {
        printf("set spi mode %d\n", ret);
    }
    ret = ioctl(fd, SPI_IOC_RD_MODE, &mode);
    if (ret == -1) {
        printf("can't get spi mode\n");
    } else {
        printf("get spi mode %d\n", ret);
    }
    ret = ioctl(fd, SPI_IOC_WR_BITS_PER_WORD, &bits);
    if (ret == -1) {
        printf("can't set bits per word\n");
    } else {
        printf("set bits per word %d\n", ret);
    }
    ret = ioctl(fd, SPI_IOC_RD_BITS_PER_WORD, &bits);
    if (ret == -1) {
        printf("can't get bits per word\n");
    } else {
        printf("get bits per word %d\n", ret);
    }
    ret = ioctl(fd, SPI_IOC_WR_MAX_SPEED_HZ, &speed);
    if (ret == -1) {
        printf("can't set max speed hz\n");
    } else {
        printf("set max speed hz %d\n", ret);
    }
    ret = ioctl(fd, SPI_IOC_RD_MAX_SPEED_HZ, &speed);
    if (ret == -1) {
        printf("can't get max speed hz\n");
    } else {
        printf("get max speed hz %d\n", ret);
    }
}
Here is my debug log.
I think this is because I didn't configure the GPIOs correctly, but I don't know how and where to configure the GPIO pins. I hope someone can show me how to get this right.
You can set the GPIO functions in plat-s5p6818/drone/include/cfg_gpio.h. For examples, check how the I2C bus interface is defined for your architecture.
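I don't have the s5p6818 tree at hand, so treat this as a hypothetical sketch of the style such entries take: each pad is ORed with an alternate-function selector, mirroring the existing I2C lines in cfg_gpio.h. All macro names and pad numbers below are placeholders; take the real ones from your file and the SoC datasheet.
/* Hypothetical cfg_gpio.h entries routing the SPI0 pads to their ALT
 * function, modeled on the I2C entries. Placeholder names and numbers! */
#define CFG_IO_SPI0_CLK  ((PAD_GPIO_C + 29) | PAD_FUNC_ALT1)  /* SCLK */
#define CFG_IO_SPI0_CS   ((PAD_GPIO_C + 30) | PAD_FUNC_ALT1)  /* CS   */
#define CFG_IO_SPI0_TXD  ((PAD_GPIO_C + 31) | PAD_FUNC_ALT1)  /* MOSI */
#define CFG_IO_SPI0_RXD  ((PAD_GPIO_D + 0)  | PAD_FUNC_ALT1)  /* MISO */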

How to count time in a JNI process?

I want to log the time that each step of my JNI C++ code takes. This is my code:
/*
 * ImageProcessing.cpp
 */
#include <jni.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <string>
#include <android/log.h>
#include <strstream>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc_c.h>
#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <vector>

#define LOG_TAG "TourGuide"
#define LOGD(...) ((void)__android_log_print(ANDROID_LOG_DEBUG, LOG_TAG, __VA_ARGS__))

using namespace std;
using namespace cv;

std::vector<float> parse_delimeted_list_of_numbers(char* line, char delimeter)
{
    std::vector<float> vector_of_numbers;
    std::istrstream input_stream(line);
    std::string text;
    float number;
    while (std::getline(input_stream, text, delimeter)) {
        sscanf(text.c_str(), "%f", &number); // extra text.size() argument removed
        vector_of_numbers.push_back(number);
    }
    return vector_of_numbers;
}

extern "C"
jboolean
Java_com_example_franksyesipangkar_tourguide_CameraPreview_ImageProcessing
(JNIEnv* env, jobject thiz, jint width, jint height, jbyteArray NV21FrameData, jintArray outPixels, jbyteArray b)
{
    LOGD("JNIEnv");
    // convert jbyteArray to char
    jbyte *cmd = env->GetByteArrayElements(b, 0);
    LOGD("JNIEnvFeature");
    char feature[90600]; // [819000] about 800 KB for the file size
    memset(feature, 0, sizeof(feature));
    memcpy(feature, cmd, strlen((char*)cmd));
    LOGD("OutFeature: %s", feature);
    //LOGD("OutCMD: %s", cmd);

    vector<float> vectorHOGSVM;
    vectorHOGSVM = parse_delimeted_list_of_numbers(feature, ' ');
    LOGD("Parsing Vector Success ");

    /* Grab the data pointers */
    jbyte * pNV21FrameData = env->GetByteArrayElements(NV21FrameData, 0);
    jint * poutPixels = env->GetIntArrayElements(outPixels, 0);

    /* Create matrices from the input image frame */
    Mat mGray(height, width, CV_8UC1, (unsigned char *)pNV21FrameData);
    Mat mResult(height, width, CV_8UC4, (unsigned char *)poutPixels);

    /* Convert the matrices back into image frames */
    IplImage GrayImg = mGray;
    IplImage ResultImg = mResult;

    HOGDescriptor hog;
    //hog.winSize = Size(56,40);
    // Set our custom detecting vector
    hog.setSVMDetector(vectorHOGSVM);
    //hog.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector());

    /* Declare vector variables for drawing the detection boxes */
    vector<Rect> found, found_filtered;
    size_t i, j;
    hog.detectMultiScale(mGray, found, 0, Size(8,8), Size(32,32), 1.05, 2);
    double t = (double)getTickCount();
    t = (double)getTickCount() - t;
    LOGD("Detection Time: %gms", t*1000./cv::getTickFrequency());
    LOGD("Animal: %d", (int)found.size());
    for (i = 0; i < found.size(); i++)
    {
        Rect r = found[i];
        for (j = 0; j < found.size(); j++)
            if (j != i && (r & found[j]) == r)
                break;
        if (j == found.size())
            found_filtered.push_back(r);
    }
    if (found.size()) {
        Rect r = found[0];
        r.x += cvRound(r.width*0.1);
        r.width = cvRound(r.width*0.8);
        r.y += cvRound(r.height*0.07);
        r.height = cvRound(r.height*0.8);
        LOGD("c : %d, r : %d", r.height, r.width);
        cvCvtColor(&GrayImg, &ResultImg, CV_GRAY2BGR);
    }
    /* Release the arrays on every path, not only when something was found */
    env->ReleaseByteArrayElements(NV21FrameData, pNV21FrameData, 0);
    env->ReleaseIntArrayElements(outPixels, poutPixels, 0);
    env->ReleaseByteArrayElements(b, cmd, 0);
    return found.size() > 0;
}
For the detection step, I log the time with cv::getTickCount() and cv::getTickFrequency() (OpenCV functions) like this:
double t = (double)getTickCount();
t = (double)getTickCount() - t;
LOGD("Detection Time: %gms", t*1000./cv::getTickFrequency());
But I don't understand how to time the other steps. I want to log the time of every step, e.g. the step around LOGD("OutFeature: %s", feature); and the step around LOGD("Parsing Vector Success ");. Do you have any ideas?
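One straightforward pattern (a sketch, not from the original post): bracket the section you want to measure with two getTickCount() calls and log the difference. Note that the snippet above takes both tick counts back-to-back, so it measures nothing; the work has to sit between them:
// Time one section by bracketing it with tick counts.
double t0 = (double)cv::getTickCount();

vectorHOGSVM = parse_delimeted_list_of_numbers(feature, ' ');  // work to measure

double t1 = (double)cv::getTickCount();
LOGD("Parsing Time: %gms", (t1 - t0) * 1000. / cv::getTickFrequency());
The same two lines can wrap the feature-copy step before LOGD("OutFeature: %s", feature); or any other block you want to profile.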

How to capture screenshot from FrameBuffer?

I want to capture a screenshot from the framebuffer on Android. I use the code below, but I just get a fuzzy image. It has three main steps: first, read the data and info from the framebuffer; second, convert the raw data to 24 bits; third, construct the BITMAP structs and write the bmp file. But I get fuzzy images. Can anyone help? I would appreciate it.
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <stdint.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <linux/fb.h>
#include <sys/ioctl.h>

typedef unsigned char BYTE;
typedef unsigned short WORD;
typedef unsigned int DWORD;
typedef long LONG;

typedef struct tagBITMAPFILEHEADER {
    WORD bfType;
    DWORD bfSize;
    WORD bfReserved1;
    WORD bfReserved2;
    DWORD bfOffBits;
} __attribute__((packed)) BITMAPFILEHEADER, *PBITMAPFILEHEADER;

typedef struct tagBITMAPINFOHEADER {
    DWORD biSize;
    LONG biWidth;
    LONG biHeight;
    WORD biPlanes;
    WORD biBitCount;
    DWORD biCompression;
    DWORD biSizeImage;
    LONG biXPelsPerMeter;
    LONG biYPelsPerMeter;
    DWORD biClrUsed;
    DWORD biClrImportant;
} __attribute__((packed)) BITMAPINFOHEADER, *PBITMAPINFOHEADER;

typedef struct tagRGBQUAD {
    BYTE rgbBlue;
    BYTE rgbGreen;
    BYTE rgbRed;
    BYTE rgbReserved;
} __attribute__((packed)) RGBQUAD;

#define FRAME_BUFFER_PATH "/dev/graphics/fb0"

int take_screenshot(char *path)
{
    int img_fd, fb_fd;
    int data_size;
    char *img_buf;
    struct fb_var_screeninfo var_info;
    struct fb_fix_screeninfo fix_info;
    BITMAPFILEHEADER file_head;
    BITMAPINFOHEADER info_head;
    //RGBQUAD rgb_quad;

    /* open files */
    fb_fd = open(FRAME_BUFFER_PATH, O_RDWR);
    if (fb_fd < 0) { /* was: img_fd, which is uninitialized here */
        perror("open framebuff");
        return -1;
    }
    if (ioctl(fb_fd, FBIOGET_VSCREENINFO, &var_info) < 0) {
        perror("ioctl FBIOGET_VSCREENINFO");
        close(fb_fd);
        return 0;
    }
    printf("xres %d, yres %d\n", var_info.xres, var_info.yres);
    if (ioctl(fb_fd, FBIOGET_FSCREENINFO, &fix_info)) {
        printf("Error reading fixed information\n"); /* was: undefined debug() */
        close(fb_fd);
        return 0;
    }
    img_fd = open(path, O_RDWR | O_CREAT, 0644);
    if (img_fd < 0) {
        perror("open image");
        close(fb_fd);
        return -1;
    }
    data_size = var_info.xres * var_info.yres * (var_info.bits_per_pixel / 8);

    /* initialize bmp structs */
    file_head.bfType = 0x4d42;
    file_head.bfSize = sizeof(file_head) + sizeof(info_head) + data_size;
    file_head.bfReserved1 = 0;
    file_head.bfReserved2 = 0;
    file_head.bfOffBits = sizeof(file_head) + sizeof(info_head);

    info_head.biSize = sizeof(info_head);
    info_head.biWidth = var_info.xres;
    info_head.biHeight = -var_info.yres;
    info_head.biPlanes = 1; /* must be 1 per the BMP spec (was 0) */
    info_head.biBitCount = 24;
    info_head.biCompression = 0;
    info_head.biSizeImage = data_size;
    info_head.biXPelsPerMeter = 3780;
    info_head.biYPelsPerMeter = 3780;
    info_head.biClrUsed = 0;
    info_head.biClrImportant = 0;

    img_buf = (char *)malloc(data_size);
    if (img_buf == NULL) {
        printf("malloc failed!\n");
        close(fb_fd);
        close(img_fd);
        return -1;
    }

    /* read image data and write the headers */
    read(fb_fd, img_buf, data_size);
    write(img_fd, &file_head, sizeof(file_head));
    write(img_fd, &info_head, sizeof(info_head));

    int w, h;
    int depth;
    w = var_info.xres;
    h = var_info.yres;
    depth = var_info.bits_per_pixel;
    uint8_t *rgb24;
    if (depth == 16) {
        rgb24 = (uint8_t *)malloc(w * h * 3);
        int i = 0;
        for (; i < w * h; i++) {
            uint16_t pixel16 = ((uint16_t *)img_buf)[i];
            // RRRRRGGGGGGBBBBB -> RRRRRRRRGGGGGGGGBBBBBBBB
            // in rgb24 the max per channel is 2^8 (*255/32, *255/64, *255/32)
            rgb24[3 * i + 0] = (255 * (pixel16 & 0x001F)) / 32;         // Blue
            rgb24[3 * i + 1] = (255 * ((pixel16 & 0x07E0) >> 5)) / 64;  // Green
            rgb24[3 * i + 2] = (255 * ((pixel16 & 0xF800) >> 11)) / 32; // Red
        }
    } else if (depth == 24) {
        rgb24 = (uint8_t *)img_buf;
    } else if (depth == 32) {
        // skip the transparency channel
        rgb24 = (uint8_t *)malloc(w * h * 3);
        int i = 0;
        for (; i < w * h; i++) {
            uint32_t pixel32 = ((uint32_t *)img_buf)[i];
            // in rgb24 the max per channel is 2^8
            rgb24[3 * i + 2] = pixel32 & 0x000000FF;         // Blue
            rgb24[3 * i + 1] = (pixel32 & 0x0000FF00) >> 8;  // Green
            rgb24[3 * i + 0] = (pixel32 & 0x00FF0000) >> 16; // Red
        }
    } else {
        /* unsupported depth: rgb24 would be uninitialized below */
        free(img_buf);
        close(fb_fd);
        close(img_fd);
        return -1;
    }
    write(img_fd, rgb24, w * h * 3);
    close(fb_fd);
    close(img_fd);
    return 0;
}
Sometimes the framebuffer's row size is larger than the visible resolution, so you shouldn't use xres to determine where the next line begins. The fixed info holds the total memory size (smem_len), which you can divide by yres. There is also a row-stride attribute (line_length), but I'm not sure whether it works on every device; you could try it.
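In the code above that means stepping through img_buf one stride at a time instead of assuming rows are xres pixels wide. A sketch (dst is a hypothetical tightly-packed destination buffer of xres * yres * bytes-per-pixel bytes):
/* Sketch: honor the row stride when copying out the visible pixels. */
int bpp = var_info.bits_per_pixel / 8;
int stride = fix_info.line_length; /* bytes per framebuffer row */
/* fallback if line_length is unreliable:
   int stride = fix_info.smem_len / var_info.yres_virtual; */
int row;
for (row = 0; row < var_info.yres; row++) {
    /* only the first xres pixels of each row are visible data */
    memcpy(dst + row * var_info.xres * bpp,
           img_buf + row * stride,
           var_info.xres * bpp);
}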

How can I get rendered frame statistics (drawn/dropped) from the StageFright media framework?

I'm a newbie in the Android world, and I have to write a streaming-video quality checker application on Android. I have to use the native StageFright media framework to play videos. As far as I understand there is a native API for render statistics, but I need advice on how to get at it. Thank you.
There is an ADB command to print playback framerate.
Procedure
Open a console on Windows (or Linux) on the host. Make sure the required drivers for USB connectivity with the device (Android phone or board) are installed.
Run the following commands
$> adb kill-server
$> adb shell setprop debug.video.showfps 1
Run the video playback. If the video is played through the Android media player stack, you will see prints reporting the frame rate achieved.
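The prints go to the system log, so on the host you can watch them with logcat, for example:
$> adb logcat | grep -i fps
(The exact tag and format of the print depend on the Android version, so treat the grep pattern as an assumption.)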
You're welcome to use this as well; call it at the beginning and end of each frame rendered. It's a slightly altered version of some sample code from the NDK:
stats.c:
#include <sys/time.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <android/log.h>
#include "stats.h"

#define LOG_TAG "[STATS]"
#define LOGI(...) __android_log_print(ANDROID_LOG_INFO,LOG_TAG,__VA_ARGS__)
#define LOGW(...) __android_log_print(ANDROID_LOG_WARN,LOG_TAG,__VA_ARGS__)
#define LOGE(...) __android_log_print(ANDROID_LOG_ERROR,LOG_TAG,__VA_ARGS__)
/* The next line was truncated in the original; reconstructed by analogy
 * with the LOGI/LOGW/LOGE defines above. */
#define STATS_DUMP(...) __android_log_print(ANDROID_LOG_INFO,LOG_TAG,__VA_ARGS__)

double now_ms()
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return tv.tv_sec * 1000. + tv.tv_usec / 1000.;
}

void stats_init(Stats* s)
{
    s->lastTime = now_ms();
    s->firstTime = 0.;
    s->firstFrame = 0;
    s->numFrames = 0;
    s->dump = malloc(128);
    memset(s->dump, 0, 128);
}

void stats_startFrame(Stats* s)
{
    s->frameTime = now_ms();
}

void stats_endFrame(Stats* s)
{
    double now = now_ms();
    double renderTime = now - s->frameTime;
    double frameTime = now - s->lastTime;
    int nn;

    if (now - s->firstTime >= MAX_PERIOD_MS) {
        if (s->numFrames > 0) {
            double minRender, maxRender, avgRender;
            double minFrame, maxFrame, avgFrame;
            int count;

            nn = s->firstFrame;
            minRender = maxRender = avgRender = s->frames[nn].renderTime;
            minFrame = maxFrame = avgFrame = s->frames[nn].frameTime;
            for (count = s->numFrames; count > 0; count--) {
                nn += 1;
                if (nn >= MAX_FRAME_STATS)
                    nn -= MAX_FRAME_STATS;
                double render = s->frames[nn].renderTime;
                if (render < minRender) minRender = render;
                if (render > maxRender) maxRender = render;
                double frame = s->frames[nn].frameTime;
                if (frame < minFrame) minFrame = frame;
                if (frame > maxFrame) maxFrame = frame;
                avgRender += render;
                avgFrame += frame;
            }
            avgRender /= s->numFrames;
            avgFrame /= s->numFrames;

            /* snprintf guards against overflowing the 128-byte dump buffer
             * (the original used sprintf) */
            snprintf(s->dump, 128,
                     "Frames per second - [AVG:%.1f] [MIN:%.1f] [MAX:%.1f] "
                     "Rendering time ms - [AVG:%.1f] [MIN:%.1f] [MAX:%.1f]",
                     1000./avgFrame, 1000./maxFrame, 1000./minFrame,
                     avgRender, minRender, maxRender);
            //LOGI("%s", s->dump);
        }
        s->numFrames = 0;
        s->firstFrame = 0;
        s->firstTime = now;
    }

    nn = s->firstFrame + s->numFrames;
    if (nn >= MAX_FRAME_STATS)
        nn -= MAX_FRAME_STATS;
    s->frames[nn].renderTime = renderTime;
    s->frames[nn].frameTime = frameTime;
    if (s->numFrames < MAX_FRAME_STATS) {
        s->numFrames += 1;
    } else {
        s->firstFrame += 1;
        if (s->firstFrame >= MAX_FRAME_STATS)
            s->firstFrame -= MAX_FRAME_STATS;
    }
    s->lastTime = now;
}
stats.h:
#include <jni.h>

#define MAX_FRAME_STATS 120
#define MAX_PERIOD_MS 5000

typedef struct {
    double renderTime;
    double frameTime;
} FrameStats;

typedef struct {
    double firstTime;
    double lastTime;
    double frameTime;
    int firstFrame;
    int numFrames;
    FrameStats frames[MAX_FRAME_STATS];
    char* dump;
} Stats;

extern double now_ms();
extern void stats_init(Stats *);
extern int stats_dump(Stats *);
extern void stats_startFrame(Stats *);
extern void stats_endFrame(Stats *);
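A minimal usage sketch (render_frame() is a placeholder for your own drawing code): initialize once, then bracket every frame:
// Hypothetical render loop instrumented with the helpers above.
#include "stats.h"

static Stats stats;

void on_surface_created(void) {
    stats_init(&stats);
}

void on_draw_frame(void) {
    stats_startFrame(&stats);
    render_frame();          /* placeholder: your actual rendering */
    stats_endFrame(&stats);  /* refreshes stats.dump every MAX_PERIOD_MS */
}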
