TensorFlow C++ example for Android

I was looking into the examples provided in the TensorFlow git repository for Android devices. They use Java interfaces as a wrapper for the C++ API. Are there any examples where I can use the C++ API directly for initializing TensorFlow, loading the model, running inference, and so on?

Check out this repo and the following blog for a solution. These links provide step-by-step instructions on how to use the TensorFlow C++ API on Android. The idea is to create a dynamic library (.so file) that is Android friendly (i.e. one that does not include the parts of TensorFlow that are only compatible with desktop/GPU builds).

I wrote this code for Raspberry PI, but I believe it must be pretty much the same for Android:
tfbenchmark.h:
#ifndef TENSORFLOW_H
#define TENSORFLOW_H
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/public/session.h"
class TensorFlowBenchmark
{
public:
TensorFlowBenchmark();
virtual ~TensorFlowBenchmark();
bool init();
bool run();
private:
std::unique_ptr<tensorflow::Session> session_;
};
#endif /* TENSORFLOW_H */
tfbenchmark.cpp:
#include "tfbenchmark.h"
#include <vector>
#include <fstream>
#include <chrono>
#include <ctime>
#include <cstddef>
#include <numeric> // for std::accumulate in report_metrics
#include <jpeglib.h>
#include <setjmp.h>
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/graph/default_device.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/util/command_line_flags.h"
// These are all common classes it's handy to reference with no namespace.
using tensorflow::Flag;
using tensorflow::Tensor;
using tensorflow::TensorShape;
using tensorflow::Status;
using tensorflow::string;
using tensorflow::int32;
const static string root_dir = ".";
const static string image = "../input.jpg";
const static string graph = "models/frozen_graph.pb";
const static int32 input_width = 224;
const static int32 input_height = 224;
const static int32 input_mean = 128;
const static int32 input_std = 128;
const static string input_layer = "x_input_pl";
const static string output_layer = "out/out";
const static int NUM_EVAL = 100;
const static int MAX_BATCH = 256;
template<class T>
void report_metrics(const std::vector<T>& v, int batch_size) {
double sum = std::accumulate(v.begin(), v.end(), 0.0);
double mean = sum / v.size();
LOG(INFO) << "Batch size = " << batch_size << ": "
<< mean/batch_size << "ms per image";
}
// Error handling for JPEG decoding.
void CatchError(j_common_ptr cinfo) {
(*cinfo->err->output_message)(cinfo);
jmp_buf* jpeg_jmpbuf = reinterpret_cast<jmp_buf*>(cinfo->client_data);
jpeg_destroy(cinfo);
longjmp(*jpeg_jmpbuf, 1);
}
// Decompresses a JPEG file from disk.
Status LoadJpegFile(string file_name, std::vector<tensorflow::uint8>* data,
int* width, int* height, int* channels) {
struct jpeg_decompress_struct cinfo;
FILE* infile;
JSAMPARRAY buffer;
int row_stride;
if ((infile = fopen(file_name.c_str(), "rb")) == NULL) {
LOG(ERROR) << "Can't open " << file_name;
return tensorflow::errors::NotFound("JPEG file ", file_name,
" not found");
}
struct jpeg_error_mgr jerr;
jmp_buf jpeg_jmpbuf; // recovery point in case of error
cinfo.err = jpeg_std_error(&jerr);
cinfo.client_data = &jpeg_jmpbuf;
jerr.error_exit = CatchError;
if (setjmp(jpeg_jmpbuf)) {
return tensorflow::errors::Unknown("JPEG decoding failed");
}
jpeg_create_decompress(&cinfo);
jpeg_stdio_src(&cinfo, infile);
jpeg_read_header(&cinfo, TRUE);
jpeg_start_decompress(&cinfo);
*width = cinfo.output_width;
*height = cinfo.output_height;
*channels = cinfo.output_components;
data->resize((*height) * (*width) * (*channels));
row_stride = cinfo.output_width * cinfo.output_components;
buffer = (*cinfo.mem->alloc_sarray)((j_common_ptr) & cinfo, JPOOL_IMAGE,
row_stride, 1);
while (cinfo.output_scanline < cinfo.output_height) {
tensorflow::uint8* row_address =
&((*data)[cinfo.output_scanline * row_stride]);
jpeg_read_scanlines(&cinfo, buffer, 1);
memcpy(row_address, buffer[0], row_stride);
}
jpeg_finish_decompress(&cinfo);
jpeg_destroy_decompress(&cinfo);
fclose(infile);
return Status::OK();
}
// Given decoded image data, tile it into a batch of the requested size and
// scale the values as the model expects.
Status FillTensorFromImageData(std::vector<tensorflow::uint8>& image_data,
const int batch_size, const int image_height,
const int image_width, const int image_channels,
std::vector<Tensor>* out_tensors) {
// In these loops, we convert the eight-bit data in the image into float,
// and scale it numerically to the float range that the model expects
// (given by input_mean and input_std).
tensorflow::Tensor image_tensor(
tensorflow::DT_FLOAT,
tensorflow::TensorShape(
{batch_size, image_height, image_width, image_channels}));
auto image_tensor_mapped = image_tensor.tensor<float, 4>();
LOG(INFO) << image_data.size() << " bytes in image_data";
tensorflow::uint8* in = image_data.data();
float* out = image_tensor_mapped.data();
for(int n = 0; n < batch_size; n++) {
for (int y = 0; y < image_height; ++y) {
tensorflow::uint8* in_row = in + (y * image_width * image_channels);
float* out_row = out + (n * image_height * image_width * image_channels) + (y * image_width * image_channels);
for (int x = 0; x < image_width; ++x) {
tensorflow::uint8* input_pixel = in_row + (x * image_channels);
float* out_pixel = out_row + (x * image_channels);
for (int c = 0; c < image_channels; ++c) {
out_pixel[c] =
static_cast<float>(input_pixel[c] - input_mean) / input_std;
}
}
}
}
out_tensors->push_back(image_tensor);
return Status::OK();
}
// Reads a model graph definition from disk, and creates a session object you
// can use to run it.
Status LoadGraph(string graph_file_name,
std::unique_ptr<tensorflow::Session>* session) {
tensorflow::GraphDef graph_def;
Status load_graph_status = ReadBinaryProto(tensorflow::Env::Default(),
graph_file_name, &graph_def);
if (!load_graph_status.ok()) {
return tensorflow::errors::NotFound("Failed to load compute graph at '",
graph_file_name, "'");
}
session->reset(tensorflow::NewSession(tensorflow::SessionOptions()));
Status session_create_status = (*session)->Create(graph_def);
if (!session_create_status.ok()) {
return session_create_status;
}
return Status::OK();
}
TensorFlowBenchmark::TensorFlowBenchmark() {}
TensorFlowBenchmark::~TensorFlowBenchmark() {}
bool TensorFlowBenchmark::init() {
// We need to call this to set up global state for TensorFlow.
int argc = 0;
char** argv = nullptr;
tensorflow::port::InitMain("benchmark", &argc, &argv);
string graph_path = tensorflow::io::JoinPath(root_dir, graph);
Status load_graph_status = LoadGraph(graph_path, &session_);
if (!load_graph_status.ok()) {
LOG(ERROR) << load_graph_status;
return false;
}
return true;
}
bool TensorFlowBenchmark::run() {
string image_path = tensorflow::io::JoinPath(root_dir, image);
std::vector<tensorflow::uint8> image_data;
int image_width;
int image_height;
int image_channels;
Status load_img_status = LoadJpegFile(image_path, &image_data, &image_width, &image_height,
&image_channels);
if(!load_img_status.ok()) {
LOG(ERROR) << load_img_status;
return false;
}
LOG(INFO) << "Loaded JPEG: " << image_width << "x" << image_height << "x"
<< image_channels;
for(int batch_size = 1; batch_size <= MAX_BATCH; batch_size <<= 1) {
LOG(INFO) << "Batch size " << batch_size;
std::vector<Tensor> resized_tensors;
Status read_tensor_status =
FillTensorFromImageData(image_data, batch_size, image_height, image_width,
image_channels, &resized_tensors);
if (!read_tensor_status.ok()) {
LOG(ERROR) << read_tensor_status;
return false;
}
const Tensor& resized_tensor = resized_tensors[0];
// Actually run the image through the model.
std::vector<Tensor> outputs;
std::vector<long> timings;
for (int i = 0; i < NUM_EVAL; ++i) {
auto start = std::chrono::system_clock::now();
Status run_status = session_->Run({{input_layer, resized_tensor}},
{output_layer}, {}, &outputs);
auto end = std::chrono::system_clock::now();
timings.push_back( std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count());
if (!run_status.ok()) {
LOG(ERROR) << "Running model failed: " << run_status;
return false;
}
}
report_metrics(timings, batch_size);
}
return true;
}
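For completeness, here is a minimal sketch of a driver for the class above. It assumes the hard-coded paths in tfbenchmark.cpp, i.e. that models/frozen_graph.pb and ../input.jpg exist relative to the working directory:
main.cpp:
#include "tfbenchmark.h"
int main() {
    TensorFlowBenchmark benchmark;
    // loads models/frozen_graph.pb into a tensorflow::Session
    if (!benchmark.init()) {
        return 1;
    }
    // decodes ../input.jpg and times inference over growing batch sizes
    if (!benchmark.run()) {
        return 1;
    }
    return 0;
}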

Need to increment red pixel values in native c++

In Android with JNI, I have C++ code to change or increment the red pixel values with the help of bitmap data passed from Android:
#include <jni.h>
#include <android/bitmap.h>
extern "C" JNIEXPORT jobject JNICALL
Java_com_journaldev_androidjnibasics_MainActivity_sendMyBitmap(JNIEnv *env, jobject thiz,
jobject bitmap) {
AndroidBitmapInfo info;
int ret;
if ((ret = AndroidBitmap_getInfo(env, bitmap, &info)) < 0) {
return NULL;
}
if (info.format != ANDROID_BITMAP_FORMAT_RGBA_8888) {
return NULL;
}
//
//read pixels of bitmap into native memory :
//
void *bitmapPixels;
if ((ret = AndroidBitmap_lockPixels(env, bitmap, &bitmapPixels)) < 0) {
return NULL;
}
uint32_t *src = (uint32_t *) bitmapPixels;
uint32_t *tempPixels = new uint32_t[info.height * info.width];
int stride = info.stride;
int pixelsCount = info.height * info.width;
int x, y, red, green, blue;
for (y=0;y<info.height;y++) {
uint32_t * line = (uint32_t *)bitmapPixels;
for (x=0;x<info.width;x++) {
blue = (int) ((line[x] & 0xFF0000) >> 16);
green = (int)((line[x] & 0x00FF00) >> 8);
red = (int) (line[x] & 0x0000FF);
//just set it to all be red for testing
red = 255;
green = 0;
blue = 0;
//why is the image totally blue??
line[x] =
((blue<< 16) & 0xFF0000) |
((green << 8) & 0x00FF00) |
(red & 0x0000FF);
}
bitmapPixels = (char *)bitmapPixels + info.stride;
}
memcpy(tempPixels, src, sizeof(uint32_t) * pixelsCount);
AndroidBitmap_unlockPixels(env, bitmap);
//
//recycle bitmap - using bitmap.recycle()
//
jclass bitmapCls = env->GetObjectClass(bitmap);
jmethodID recycleFunction = env->GetMethodID(bitmapCls, "recycle", "()V");
if (recycleFunction == 0) {
return NULL;
}
env->CallVoidMethod(bitmap, recycleFunction);
//
//creating a new bitmap to put the pixels into it - using Bitmap Bitmap.createBitmap (int width, int height, Bitmap.Config config) :
//
jmethodID createBitmapFunction = env->GetStaticMethodID(bitmapCls, "createBitmap",
"(IILandroid/graphics/Bitmap$Config;)Landroid/graphics/Bitmap;");
jstring configName = env->NewStringUTF("ARGB_8888");
jclass bitmapConfigClass = env->FindClass("android/graphics/Bitmap$Config");
jmethodID valueOfBitmapConfigFunction = env->GetStaticMethodID(bitmapConfigClass, "valueOf",
"(Ljava/lang/String;)Landroid/graphics/Bitmap$Config;");
jobject bitmapConfig = env->CallStaticObjectMethod(bitmapConfigClass,
valueOfBitmapConfigFunction, configName);
jobject newBitmap = env->CallStaticObjectMethod(bitmapCls, createBitmapFunction, info.height,
info.width, bitmapConfig);
//
// putting the pixels into the new bitmap:
//
if ((ret = AndroidBitmap_lockPixels(env, newBitmap, &bitmapPixels)) < 0) {
return NULL;
}
uint32_t *newBitmapPixels = (uint32_t *) bitmapPixels;
int whereToPut = 0;
for (int x = info.width - 1; x >= 0; --x)
for (int y = 0; y < info.height; ++y) {
uint32_t pixel = tempPixels[info.width * y + x];
newBitmapPixels[whereToPut++] = pixel;
}
AndroidBitmap_unlockPixels(env, newBitmap);
delete[] tempPixels;
return newBitmap;
}
After this process, the image comes out fully transparent or in white colour. Can anyone help me out with this? My aim is to change the value of the R (red) channel in this bitmap data. Thanks in advance.
//-------------------------------------------------------------------------------------------------
// header file byte_masks.h
// two constexpr functions that will write out the shift and mask functions at compile time
// from_byte<0> will be the same as (value & 0x000000FF) >> 0;
// from_byte<1> will be the same as (value & 0x0000FF00) >> 8;
// from_byte<2> will be the same as (value & 0x00FF0000) >> 16;
// from_byte<3> will be the same as (value & 0xFF000000) >> 24;
#pragma once
#include <cstdint>
template<size_t N>
constexpr auto from_byte(std::uint32_t value)
{
const std::uint32_t shift = 8 * N;
const std::uint32_t mask = std::uint32_t{ 0xFF } << shift;
std::uint32_t retval{ (value & mask) >> shift };
return static_cast<std::uint8_t>(retval);
}
// to_byte<1> will be the same as value << 8 etc...
template<size_t N>
constexpr auto to_byte(std::uint8_t value)
{
const std::uint32_t shift = 8 * N;
return static_cast<std::uint32_t>(value) << shift;
}
//-------------------------------------------------------------------------------------------------
// header file color_t.h
#pragma once
#include <cstdint>
struct color_t
{
static color_t from_argb(std::uint32_t pixel);
static color_t from_bgra(std::uint32_t pixel);
std::uint32_t to_argb_value();
std::uint32_t to_bgra_value();
std::uint8_t alpha = 0;
std::uint8_t red = 0;
std::uint8_t green = 0;
std::uint8_t blue = 0;
};
//-------------------------------------------------------------------------------------------------
// source file color_t.cpp
#include <color_t.h>
#include <byte_masks.h>
// this is basically the logic you used reading the data as ARGB
// to create a color from an integer value
color_t color_t::from_argb(std::uint32_t pixel)
{
color_t color{};
color.alpha = from_byte<3>(pixel);
color.red = from_byte<2>(pixel);
color.green = from_byte<1>(pixel);
color.blue = from_byte<0>(pixel);
return color;
}
// But your bitmap data has a different order for alpha, red, green, blue!!!
// ANDROID_BITMAP_FORMAT_RGBA_8888
color_t color_t::from_bgra(std::uint32_t pixel)
{
color_t color{};
color.blue = from_byte<3>(pixel);
color.green = from_byte<2>(pixel);
color.red = from_byte<1>(pixel);
color.alpha = from_byte<0>(pixel);
return color;
}
std::uint32_t color_t::to_argb_value()
{
return (to_byte<3>(alpha) | to_byte<2>(red) | to_byte<1>(green) | to_byte<0>(blue));
}
std::uint32_t color_t::to_bgra_value()
{
return (to_byte<3>(blue) | to_byte<2>(green) | to_byte<1>(red) | to_byte<0>(alpha));
}
//-------------------------------------------------------------------------------------------------
// my main.cpp, but use the color_t functions from below in your code
#include <cassert>
#include <byte_masks.h>
#include <color_t.h>
int main()
{
// two lines just to simulate a bit of your code
std::uint32_t line[]{ 0x00000, 0x11223344 };
const size_t x = 1;
// now it's easy to get the color and change it.
// this will use the blue, green, red alpha order matching your
// ANDROID_BITMAP_FORMAT_RGBA_8888 format.
auto color = color_t::from_bgra(line[x]);
color.red = 255;
// also note that by splitting the code into smaller functions
// it becomes much easier to read (especially for other people)
line[x] = color.to_bgra_value();
assert(from_byte<1>(line[x]) == 255);
}
I see several mistakes in your code:
You read the bitmap as BGRA_8888 into tempPixels and use tempPixels as the source for a new ARGB_8888 buffer. You should make sure both buffers have the same format OR flip the pixel component order.
The incoming bitmap has a stride (= length of a row in bytes) that may not be equal to 4 * width. This means you should multiply info.height by info.stride instead. Possibly multiplied by 4; I don't know whether the stride is documented to be in pixels or in bytes.
As I said, the input pixel format is BGRA, but you completely ignore the A component. That makes the output fully transparent. I suggest using a struct { uint8_t b,g,r,a; } pixel to disassemble the pixels and manipulate individual elements instead (see the sketch below).
Finally, is there a good reason you cannot manipulate the incoming Bitmap instead of creating a new one and making two copies?
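A minimal sketch of that struct-based approach, assuming the bitmap is already locked with AndroidBitmap_lockPixels as in your code, and that the in-memory byte order of ANDROID_BITMAP_FORMAT_RGBA_8888 is r,g,b,a (verify this on your device):
#include <cstdint>
#include <android/bitmap.h>
// One RGBA_8888 pixel as it lies in memory (assumed order: r,g,b,a).
struct Pixel {
    std::uint8_t r, g, b, a;
};
// Set the red channel of every pixel in place, honouring the row stride.
void boostRed(void *bitmapPixels, const AndroidBitmapInfo &info) {
    auto *row = static_cast<std::uint8_t *>(bitmapPixels);
    for (std::uint32_t y = 0; y < info.height; ++y) {
        auto *pixels = reinterpret_cast<Pixel *>(row);
        for (std::uint32_t x = 0; x < info.width; ++x) {
            pixels[x].r = 255; // leave g, b and, crucially, a untouched
        }
        row += info.stride; // stride is in bytes and may exceed 4 * width
    }
}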

Leaf Disease Detection and Recognition using OpenCV

Can you help me with this?
I was tasked to create an application using OpenCV and C++ that takes in an image of a plant leaf. This application should detect possible symptoms of disease like black/grey/brown spots on the leaf, or blights, lesions, and so on. Each characteristic of a disease, such as the color of the spots, represents a different disease. After detecting the possible symptoms, the application will match them against a collection of template images in the application's database and output the best possible match.
What methods do I have to use for this? I've researched Histogram Matching and Keypoint and Descriptor Matching, but I'm not sure which one will work best.
I have found sample code using SURF and FLANN, but I don't know if this would be enough:
#include <stdio.h>
#include <iostream>
#include <algorithm>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/nonfree/features2d.hpp"
using namespace cv;
void readme();
/**
* @function main
* @brief Main function
*/
int main( int argc, char** argv )
{
if( argc != 3 )
{ readme(); return -1; }
Mat img_1 = imread( argv[1], CV_LOAD_IMAGE_GRAYSCALE );
Mat img_2 = imread( argv[2], CV_LOAD_IMAGE_GRAYSCALE );
if( !img_1.data || !img_2.data )
{ std::cout<< " --(!) Error reading images " << std::endl; return -1; }
//-- Step 1: Detect the keypoints using SURF Detector
int minHessian = 400;
SurfFeatureDetector detector( minHessian );
std::vector<KeyPoint> keypoints_1, keypoints_2;
detector.detect( img_1, keypoints_1 );
detector.detect( img_2, keypoints_2 );
//-- Step 2: Calculate descriptors (feature vectors)
SurfDescriptorExtractor extractor;
Mat descriptors_1, descriptors_2;
extractor.compute( img_1, keypoints_1, descriptors_1 );
extractor.compute( img_2, keypoints_2, descriptors_2 );
//-- Step 3: Matching descriptor vectors using FLANN matcher
FlannBasedMatcher matcher;
std::vector< DMatch > matches;
matcher.match( descriptors_1, descriptors_2, matches );
double max_dist = 0; double min_dist = 100;
//-- Quick calculation of max and min distances between keypoints
for( int i = 0; i < descriptors_1.rows; i++ )
{ double dist = matches[i].distance;
if( dist < min_dist ) min_dist = dist;
if( dist > max_dist ) max_dist = dist;
}
printf("-- Max dist : %f \n", max_dist );
printf("-- Min dist : %f \n", min_dist );
//-- Draw only "good" matches (i.e. whose distance is less than 2*min_dist,
//-- or a small arbitrary value ( 0.02 ) in the event that min_dist is very
//-- small)
//-- PS.- radiusMatch can also be used here.
std::vector< DMatch > good_matches;
for( int i = 0; i < descriptors_1.rows; i++ )
{ if( matches[i].distance <= std::max(2*min_dist, 0.02) )
{ good_matches.push_back( matches[i]); }
}
//-- Draw only "good" matches
Mat img_matches;
drawMatches( img_1, keypoints_1, img_2, keypoints_2,
good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
std::vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
//-- Show detected matches
imshow( "Good Matches", img_matches );
for( int i = 0; i < (int)good_matches.size(); i++ )
{ printf( "-- Good Match [%d] Keypoint 1: %d -- Keypoint 2: %d \n", i, good_matches[i].queryIdx, good_matches[i].trainIdx ); }
waitKey(0);
return 0;
}
/**
* @function readme
*/
void readme()
{ std::cout << " Usage: ./SURF_FlannMatcher <img1> <img2>" << std::endl; }
Here are my questions:
What method do I have to use? Histogram Matching, Keypoint/Descriptor Matching, or something else?
If I use Keypoint/Descriptor Matching, which algorithm is the best alternative to SURF and FLANN, given that I will ALSO be implementing it on an Android platform? Do I still have to perform thresholding or segmentation? Will it not remove important details such as the color, shape, etc.? Please suggest some steps to do this.
I think this approach should give you good results:
Training process:
Extract LBP descriptors for every pixel of the image (this can be computed for color images too).
Compute histograms of LBP descriptors for each training sample.
Train a classifier using the histograms as inputs and the labels as outputs.
Prediction process:
Extract LBP descriptors for every pixel of the new image.
Compute the histogram of LBP descriptors for this image.
Feed the histogram to the classifier -> get results.
I've successfully used a feed-forward neural network as the classifier for solving a similar problem; a sketch of that last step follows the code below.
You may find this book useful: ISBN 978-0-85729-747-1 "Computer Vision Using Local Binary Patterns"
Try this code, which computes LBP descriptors (there is also a function for computing the histogram):
#include <iostream>
#include <cassert>
#include <cstring>
#include <limits>
#include <opencv2/opencv.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/features2d/features2d.hpp>
#include "opencv2/nonfree/nonfree.hpp"
using namespace cv;
class myLBP
{
public:
uchar lut[256];
uchar null;
int radius;
int maxTransitions;
bool rotationInvariant;
myLBP(int _radius=1,int _maxTransitions=8,bool _rotationInvariant=false)
{
radius=_radius;
maxTransitions=_maxTransitions;
rotationInvariant=_rotationInvariant;
bool set[256];
uchar uid = 0;
for (int i=0; i<256; i++)
{
if (numTransitions(i) <= maxTransitions)
{
int id;
if (rotationInvariant)
{
int rie = rotationInvariantEquivalent(i);
if (i == rie)
{
id = uid++;
}
else
{
id = lut[rie];
}
}
else
{
id = uid++;
}
lut[i] = id;
set[i] = true;
}
else
{
set[i] = false;
}
}
null = uid;
for (int i=0; i<256; i++)
if (!set[i])
{
lut[i] = null; // Set to null id
}
}
/* Returns the number of 0->1 or 1->0 transitions in i */
static int numTransitions(int i)
{
int transitions = 0;
int curParity = i%2;
for (int j=1; j<=8; j++)
{
int parity = (i>>(j%8)) % 2;
if (parity != curParity)
{
transitions++;
}
curParity = parity;
}
return transitions;
}
static int rotationInvariantEquivalent(int i)
{
int min = std::numeric_limits<int>::max();
for (int j=0; j<8; j++)
{
bool parity = i % 2;
i = i >> 1;
if (parity)
{
i+=128;
}
min = std::min(min, i);
}
return min;
}
void process(const Mat &src, Mat &dst) const
{
Mat m;
src.convertTo(m, CV_32F);
assert(m.isContinuous() && (m.channels() == 1));
Mat n(m.rows, m.cols, CV_8UC1);
n = null; // Initialize to NULL LBP pattern
const float *p = (const float*)m.ptr();
for (int r=radius; r<m.rows-radius; r++)
{
for (int c=radius; c<m.cols-radius; c++)
{
const float cval = (p[(r+0*radius)*m.cols+c+0*radius]);
n.at<uchar>(r, c) = lut[(p[(r-1*radius)*m.cols+c-1*radius] >= cval ? 128 : 0) |
(p[(r-1*radius)*m.cols+c+0*radius] >= cval ? 64 : 0) |
(p[(r-1*radius)*m.cols+c+1*radius] >= cval ? 32 : 0) |
(p[(r+0*radius)*m.cols+c+1*radius] >= cval ? 16 : 0) |
(p[(r+1*radius)*m.cols+c+1*radius] >= cval ? 8 : 0) |
(p[(r+1*radius)*m.cols+c+0*radius] >= cval ? 4 : 0) |
(p[(r+1*radius)*m.cols+c-1*radius] >= cval ? 2 : 0) |
(p[(r+0*radius)*m.cols+c-1*radius] >= cval ? 1 : 0)];
}
}
dst=n.clone();
}
/* Returns the number of 1 bits in i */
static int bitCount(int i)
{
int count = 0;
for (int j=0; j<8; j++)
{
count += (i>>j)%2;
}
return count;
}
void draw(const Mat &src, Mat &dst) const
{
static Mat hueLUT, saturationLUT, valueLUT;
if (!hueLUT.data)
{
const int NUM_COLORS = 10;
hueLUT.create(1, 256, CV_8UC1);
hueLUT.setTo(0);
uchar uid = 0;
for (int i=0; i<256; i++)
{
const int transitions = numTransitions(i);
int u2;
if (transitions <= 2)
{
u2 = uid++;
}
else
{
u2 = 58;
}
// Assign hue based on bit count
int color = bitCount(i);
if (transitions > 2)
{
color = NUM_COLORS-1;
}
hueLUT.at<uchar>(0, u2) = 255.0*(float)color/(float)NUM_COLORS;
}
saturationLUT.create(1, 256, CV_8UC1);
saturationLUT.setTo(255);
valueLUT.create(1, 256, CV_8UC1);
valueLUT.setTo(255.0*(3.0/4.0));
}
if (src.type() != CV_8UC1)
{
std::cout << "Expected 8UC1 source type.";
}
Mat hue, saturation, value;
LUT(src, hueLUT, hue);
LUT(src, saturationLUT, saturation);
LUT(src, valueLUT, value);
std::vector<Mat> mv;
mv.push_back(hue);
mv.push_back(saturation);
mv.push_back(value);
Mat coloredU2;
merge(mv, coloredU2);
cvtColor(coloredU2, dst, cv::COLOR_HSV2BGR);
}
};
void Hist(const Mat &src, Mat &dst,float max=256, float min=0,int dims=-1)
{
std::vector<Mat> mv;
split(src, mv);
Mat m(mv.size(), dims, CV_32FC1);
for (size_t i=0; i<mv.size(); i++)
{
int channels[] = {0};
int histSize[] = {dims};
float range[] = {min, max};
const float* ranges[] = {range};
Mat hist, chan = mv[i];
// calcHist requires F or U, might as well convert just in case
if (mv[i].depth() != CV_8U && mv[i].depth() != CV_32F)
{
mv[i].convertTo(chan, CV_32F);
}
calcHist(&chan, 1, channels, Mat(), hist, 1, histSize, ranges);
memcpy(m.ptr(i), hist.ptr(), dims * sizeof(float));
}
dst=m.clone();
}
int main(int argc, char* argv[])
{
cv::initModule_nonfree();
cv::namedWindow("result");
cv::Mat bgr_img = cv::imread("D:\\ImagesForTest\\lena.jpg");
if (bgr_img.empty())
{
exit(EXIT_FAILURE);
}
cv::Mat gray_img;
cv::cvtColor(bgr_img, gray_img, cv::COLOR_BGR2GRAY);
cv::normalize(gray_img, gray_img, 0, 255, cv::NORM_MINMAX);
myLBP lbp(1,2);
Mat lbp_img;
lbp.process(gray_img,lbp_img);
lbp.draw(lbp_img,bgr_img);
//for(int i=0;i<lbp_img.rows;++i)
imshow("result",bgr_img);
cv::waitKey();
return 0;
}
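To close the loop on the classifier step from the list above, here is a rough sketch using the OpenCV 2.x CvANN_MLP feed-forward network. The hidden layer size, the training parameters, and the layout of trainData (one flattened LBP histogram per row, with one-hot labels) are illustrative assumptions, not taken from the code above:
#include <opencv2/opencv.hpp>
#include <iostream>
// trainData: one row per sample (flattened LBP histogram, CV_32FC1).
// trainLabels: one row per sample, one column per class (one-hot, CV_32FC1).
void trainAndPredict(const cv::Mat &trainData, const cv::Mat &trainLabels,
                     const cv::Mat &sampleHistogram)
{
    int numInputs = trainData.cols;    // histogram bins
    int numClasses = trainLabels.cols; // disease classes
    cv::Mat layers = (cv::Mat_<int>(3, 1) << numInputs, 32, numClasses);
    CvANN_MLP mlp;
    mlp.create(layers, CvANN_MLP::SIGMOID_SYM);
    CvANN_MLP_TrainParams params(
        cvTermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 300, 0.01),
        CvANN_MLP_TrainParams::BACKPROP, 0.1, 0.1);
    mlp.train(trainData, trainLabels, cv::Mat(), cv::Mat(), params);
    // Predict: the column with the largest response is the predicted class.
    cv::Mat response;
    mlp.predict(sampleHistogram, response);
    cv::Point maxLoc;
    cv::minMaxLoc(response, 0, 0, 0, &maxLoc);
    std::cout << "predicted class: " << maxLoc.x << std::endl;
}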

Writing to framebuffer directly on Android

I have a rooted phone with Android 4.2.2. I'd like to use the minui API (source here) that is used in bootloader code to draw stuff on the screen. minui is much simpler than native OpenGL, and I don't need any complex functionality exposed by OpenGL.
The problem is that I can't write directly to the fb0 device. FBIOPUT_VSCREENINFO fails for an unknown reason.
How can I draw directly to fb0 on Android, or how can I use minui outside of bootloader mode?
The device node is /dev/graphics/fb0.
You can build in the Android tree or use arm-linux-gcc:
arm-linux-gcc -D__ANDROID__ fb-test.c -static
It works like a normal embedded Linux framebuffer; here is a sample:
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <linux/fb.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
struct fb_fix_screeninfo FixedInfo;
struct fb_var_screeninfo OrigVarInfo;
static int FrameBufferFD = -1;
void *FrameBuffer = (void *) -1;
#ifndef __ANDROID__
#define FRAMEBUFFER "/dev/fb0"
#else
#define FRAMEBUFFER "/dev/graphics/fb0"
#endif //__ANDROID__
void openFBDEV(void)
{
/* open the framebuffer device */
FrameBufferFD = open(FRAMEBUFFER, O_RDWR);
if (FrameBufferFD < 0)
{
fprintf(stderr, "Error opening %s\n", FRAMEBUFFER);
exit(1);
}
/* Get the fixed screen info */
if (ioctl(FrameBufferFD, FBIOGET_FSCREENINFO, &FixedInfo))
{
fprintf(stderr, "error: ioctl(FBIOGET_FSCREENINFO) failed\n");
exit(1);
}
/* get the variable screen info */
if (ioctl(FrameBufferFD, FBIOGET_VSCREENINFO, &OrigVarInfo))
{
fprintf(stderr, "error: ioctl(FBIOGET_VSCREENINFO) failed\n");
exit(1);
}
if (FixedInfo.visual != FB_VISUAL_TRUECOLOR
&& FixedInfo.visual != FB_VISUAL_DIRECTCOLOR)
{
fprintf(stderr,
"non-TRUE/DIRECT-COLOR visuals (0x%x) not supported by this demo.\n",
FixedInfo.visual);
exit(1);
}
/*
* fbdev says the frame buffer is at offset zero, and the mmio region
* is immediately after.
*/
/* mmap the framebuffer into our address space */
FrameBuffer = (void *) mmap(0, /* start */
FixedInfo.smem_len, /* bytes */
PROT_READ | PROT_WRITE, /* prot */
MAP_SHARED, /* flags */
FrameBufferFD, /* fd */
0 /* offset */);
if (FrameBuffer == (void *) -1)
{
fprintf(stderr, "error: unable to mmap framebuffer\n");
exit(1);
}
}
void closeFBDEV(void)
{
munmap(FrameBuffer, FixedInfo.smem_len);
close(FrameBufferFD);
}
int main()
{
openFBDEV();
fprintf(stderr, "openFBDEV finish\n");
memset(FrameBuffer, 128, FixedInfo.smem_len);
sleep(5);
closeFBDEV();
fprintf(stderr, "closeFBDEV finish\n");
return 0;
}
The following source code has been tested and works on the Qualcomm MSM89xx platform:
panel_test.c
#include <unistd.h>
#include <stdio.h>
#include <fcntl.h>
#include <linux/fb.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <stdlib.h>
#include "yellow_face.zif"
int main()
{
int fbfd = 0;
struct fb_var_screeninfo vinfo;
struct fb_fix_screeninfo finfo;
struct fb_cmap cmapinfo;
long int screensize = 0;
char *fbp = 0;
int x = 0, y = 0;
long int location = 0;
int b,g,r;
// Open the file for reading and writing
fbfd = open("/dev/graphics/fb0", O_RDWR,0); // open the framebuffer device
if (fbfd < 0) {
printf("Error: cannot open framebuffer device.%x\n",fbfd);
exit(1);
}
printf("The framebuffer device was opened successfully.\n");
// Get fixed screen information
if (ioctl(fbfd, FBIOGET_FSCREENINFO, &finfo)) { // get the fixed device information
printf("Error reading fixed information.\n");
exit(2);
}
printf("\ntype:0x%x\n", finfo.type ); // FrameBuffer 类型,如0为象素
printf("visual:%d\n", finfo.visual ); // 视觉类型:如真彩2,伪彩3
printf("line_length:%d\n", finfo.line_length ); // 每行长度
printf("\nsmem_start:0x%lx,smem_len:%u\n", finfo.smem_start, finfo.smem_len ); // 映象RAM的参数
printf("mmio_start:0x%lx ,mmio_len:%u\n", finfo.mmio_start, finfo.mmio_len );
// Get variable screen information
if (ioctl(fbfd, FBIOGET_VSCREENINFO, &vinfo)) { // get the variable device information
printf("Error reading variable information.\n");
exit(3);
}
printf("%dx%d, %dbpp,xres_virtual=%d,yres_virtual=%dvinfo.xoffset=%d,vinfo.yoffset=%d\n", vinfo.xres, vinfo.yres, vinfo.bits_per_pixel,vinfo.xres_virtual,vinfo.yres_virtual,vinfo.xoffset,vinfo.yoffset);
screensize = finfo.line_length * vinfo.yres_virtual;
// Map the device to memory: mmap the framebuffer into user space via the mmap system call and get the mapped start address
fbp = (char *)mmap(0, screensize, PROT_READ | PROT_WRITE, MAP_SHARED,fbfd, 0);
if (fbp == MAP_FAILED) {
printf("Error: failed to map framebuffer device to memory.\n");
exit(4);
}
printf("The framebuffer device was mapped to memory successfully.\n");
/***************example 1**********************/
b = 10;
g = 100;
r = 100;
for ( y = 0; y < 340; y++ )
for ( x = 0; x < 420; x++ ) {
location = (x+100) * (vinfo.bits_per_pixel/8) +
(y+100) * finfo.line_length;
if ( vinfo.bits_per_pixel == 32 ) { //
*(fbp + location) = b; // Some blue
*(fbp + location + 1) = g; // A little green
*(fbp + location + 2) = r; // A lot of red
*(fbp + location + 3) = 0; // No transparency
}
}
/*****************example 1********************/
/*****************example 2********************/
unsigned char *pTemp = (unsigned char *)fbp;
int i, j;
// start coordinates (x,y), end coordinates (right,bottom)
x = 400;
y = 400;
int right = 700;//vinfo.xres;
int bottom = 1000;//vinfo.yres;
for(i=y; i< bottom; i++)
{
for(j=x; j<right; j++)
{
unsigned short data = yellow_face_data[(((i-y) % 128) * 128) + ((j-x) %128)];
pTemp[i*finfo.line_length + (j*4) + 2] = (unsigned char)((data & 0xF800) >> 11 << 3);
pTemp[i*finfo.line_length + (j*4) + 1] = (unsigned char)((data & 0x7E0) >> 5 << 2);
pTemp[i*finfo.line_length + (j*4) + 0] = (unsigned char)((data & 0x1F) << 3);
}
}
/*****************example 2********************/
// note: vinfo.xoffset and vinfo.yoffset must be 0, otherwise FBIOPAN_DISPLAY does not succeed
if (ioctl(fbfd, FBIOPAN_DISPLAY, &vinfo)) {
printf("Error FBIOPAN_DISPLAY information.\n");
exit(5);
}
sleep(10);
munmap(fbp,finfo.smem_len);//finfo.smem_len == screensize == finfo.line_length * vinfo.yres_virtual
close(fbfd);
return 0;
}
Android.mk
# Copyright 2006-2014 The Android Open Source Project
LOCAL_PATH:= $(call my-dir)
include $(CLEAR_VARS)
LOCAL_SRC_FILES:= panel_test.c
LOCAL_SHARED_LIBRARIES := $(common_libs) libqdutils libdl liblog libbase libcutils
LOCAL_C_INCLUDES := $(common_includes) $(kernel_includes)
LOCAL_ADDITIONAL_DEPENDENCIES := $(common_deps) $(kernel_deps)
LOCAL_MODULE := panel_test
LOCAL_CFLAGS := -Werror
include $(BUILD_EXECUTABLE)
include $(call first-makefiles-under,$(LOCAL_PATH))
yellow_face.zif

How to capture screenshot from FrameBuffer?

I want to capture a screenshot from the FrameBuffer on Android. I use the code below, but I just get a fuzzy image. It contains 3 main steps: first, read the data and info from the FrameBuffer; second, convert the raw data to 24 bits; third, construct the BITMAP structs and write them to a bmp file. But I get fuzzy images; could anyone help? I will appreciate it.
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <linux/fb.h>
#include <sys/ioctl.h>
typedef unsigned char BYTE;
typedef unsigned short WORD;
typedef unsigned int DWORD;
typedef long LONG;
typedef struct tagBITMAPFILEHEADER {
WORD bfType;
DWORD bfSize;
WORD bfReserved1;
WORD bfReserved2;
DWORD bfOffBits;
}__attribute__((packed)) BITMAPFILEHEADER, *PBITMAPFILEHEADER;
typedef struct tagBITMAPINFOHEADER {
DWORD biSize;
LONG biWidth;
LONG biHeight;
WORD biPlanes;
WORD biBitCount;
DWORD biCompression;
DWORD biSizeImage;
LONG biXPelsPerMeter;
LONG biYPelsPerMeter;
DWORD biClrUsed;
DWORD biClrImportant;
}__attribute__((packed)) BITMAPINFOHEADER, *PBITMAPINFOHEADER;
typedef struct tagRGBQUAD {
BYTE rgbBlue;
BYTE rgbGreen;
BYTE rgbRed;
BYTE rgbReserved;
}__attribute__((packed)) RGBQUAD;
#define FRAME_BUFFER_PATH "/dev/graphics/fb0"
int take_screenshot(char *path)
{
int i;
int img_fd, fb_fd;
int data_size;
char *img_buf;
struct fb_var_screeninfo var_info;
struct fb_fix_screeninfo fix_info;
BITMAPFILEHEADER file_head;
BITMAPINFOHEADER info_head;
//RGBQUAD rgb_quad;
/*open files*/
fb_fd = open(FRAME_BUFFER_PATH, O_RDWR);
if (fb_fd < 0) {
perror("open framebuff");
return -1;
}
if (ioctl(fb_fd, FBIOGET_VSCREENINFO, &var_info) < 0) {
perror("ioctl FBIOGET_VSCREENINFO");
close(fb_fd);
return -1;
}
printf("xres %d, yres %d\n", var_info.xres, var_info.yres);
if (ioctl(fb_fd, FBIOGET_FSCREENINFO, &fix_info)){
perror("ioctl FBIOGET_FSCREENINFO");
close(fb_fd);
return -1;
}
img_fd = open(path, O_RDWR | O_CREAT, 0644);
if (img_fd < 0)
{
perror("open image");
close(fb_fd);
return -1;
}
data_size = var_info.xres*var_info.yres*(var_info.bits_per_pixel/8);
/*initialize bmp structs*/
file_head.bfType = 0x4d42;
file_head.bfSize = sizeof(file_head) + sizeof(info_head) + data_size;
file_head.bfReserved1 = 0;
file_head.bfReserved2 = 0;
file_head.bfOffBits = sizeof(file_head) + sizeof(info_head);
info_head.biSize = sizeof(info_head);
info_head.biWidth = var_info.xres;
info_head.biHeight = -var_info.yres;
info_head.biPlanes = 1; // must be 1 according to the BMP spec
info_head.biBitCount = 24;
info_head.biCompression = 0;
info_head.biSizeImage = data_size;
info_head.biXPelsPerMeter = 3780;
info_head.biYPelsPerMeter = 3780;
info_head.biClrUsed = 0;
info_head.biClrImportant = 0;
img_buf = (char *)malloc(data_size);
if (img_buf == NULL)
{
printf("malloc failed!\n");
close(fb_fd);
close(img_fd);
return -1;
}
/*read img data and write the bmp headers*/
read(fb_fd, img_buf, data_size);
write(img_fd, &file_head, sizeof(file_head));
write(img_fd, &info_head, sizeof(info_head));
/*********************/
int w, h;
int depth;
unsigned short *bits;
w = var_info.xres;
h = var_info.yres;
depth = var_info.bits_per_pixel;
uint8_t *rgb24;
if (depth == 16) {
rgb24 = (uint8_t *)malloc(w * h * 3);
int i = 0;
for ( ; i < w*h; i++) {
uint16_t pixel16 = ((uint16_t *)img_buf)[i];
// RRRRRGGGGGGBBBBB -> RRRRRRRRGGGGGGGGBBBBBBBB
// in rgb24 color max is 2^8 per channel (*255/32 *255/64 *255/32)
rgb24[3*i+0] = (255*(pixel16 & 0x001F))/ 32; //Blue
rgb24[3*i+1] = (255*((pixel16 & 0x07E0) >> 5))/64; //Green
rgb24[3*i+2] = (255*((pixel16 & 0xF800) >> 11))/32; //Red
}
} else if (depth == 24) {
rgb24 = (uint8_t *)img_buf;
} else if (depth == 32) {
//skip transparency channel
rgb24 = (uint8_t *) malloc(w * h * 3);
int i=0;
for ( ; i <w*h; i++) {
uint32_t pixel32 = ((uint32_t *)img_buf)[i];
// in rgb24 color max is 2^8 per channel
rgb24[3*i+2] = pixel32 & 0x000000FF; //Blue
rgb24[3*i+1] = (pixel32 & 0x0000FF00) >> 8; //Green
rgb24[3*i+0] = (pixel32 & 0x00FF0000) >> 16; //Red
}
} else {
printf("unsupported color depth %d\n", depth);
free(img_buf);
close(fb_fd);
close(img_fd);
return -1;
}
write(img_fd, rgb24, w*h*3);
close(fb_fd);
close(img_fd);
return 0;
}
Sometimes the framebuffer's row size will be larger than the resolution, so you shouldn't use xres to determine where the next line begins. There is a total memory size attribute saved in the fixed info, so you could divide it by yres. There is also a row-stride attribute (line_length), but I'm not sure whether it works; maybe you could try it. A sketch of a stride-aware copy is below.
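Here is a minimal sketch of that stride-aware copy, assuming the var_info/fix_info structs are filled in as in the code above and that fix_info.line_length is the row stride in bytes (the helper name and the packed destination layout are illustrative):
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <linux/fb.h>
/* Copy only the visible pixels of each framebuffer row into a packed buffer,
 * skipping the padding bytes at the end of every row (line_length may be
 * larger than xres * bytes_per_pixel). */
static int read_packed_rows(int fb_fd, const struct fb_var_screeninfo *var_info,
                            const struct fb_fix_screeninfo *fix_info, char *dst)
{
    int bytes_per_pixel = var_info->bits_per_pixel / 8;
    int visible_row = var_info->xres * bytes_per_pixel;
    char *raw = (char *)malloc((size_t)fix_info->line_length * var_info->yres);
    unsigned int y;
    if (raw == NULL)
        return -1;
    /* read whole rows from the framebuffer, stride included */
    read(fb_fd, raw, (size_t)fix_info->line_length * var_info->yres);
    for (y = 0; y < var_info->yres; y++)
        memcpy(dst + y * visible_row, raw + y * fix_info->line_length, visible_row);
    free(raw);
    return 0;
}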

How can I get rendered frames statistics (drawn/dropped) from StageFright Media Framework?

I'm very new to the Android world and I have to write a streaming video quality checker application on Android. I have to use the native StageFright media framework to play videos. As far as I understand there is a native API for render statistics, but I need advice on how I can get it. Thank you.
There is an ADB command to print the playback frame rate.
Procedure
Open a console on Windows (or Linux) on the host. Make sure that the required drivers have been installed for USB connectivity with the device (Android phone or board).
Run the following commands
$> adb kill-server
$> adb shell setprop debug.video.showfps 1
Run the video playback. If the video is played through the Android media player stack, you will see prints reporting the achieved frame rate.
You're welcome to use this as well; call it at the beginning and end of each frame rendered. It's a slightly altered version of some sample code from the NDK:
stats.c:
#include <sys/time.h>
#include <string.h>
#include <stdlib.h>
#include <android/log.h>
#include <stdio.h>
#include "stats.h"
#define LOG_TAG "[STATS]"
#define LOGI(...) __android_log_print(ANDROID_LOG_INFO,LOG_TAG,__VA_ARGS__)
#define LOGW(...) __android_log_print(ANDROID_LOG_WARN,LOG_TAG,__VA_ARGS__)
#define LOGE(...) __android_log_print(ANDROID_LOG_ERROR,LOG_TAG,__VA_ARGS__)
#define STATS_DUMP(...) __android_log_print(ANDROID_LOG_INFO,LOG_TAG,__VA_ARGS__)
double now_ms()
{
struct timeval tv;
gettimeofday(&tv, NULL);
return tv.tv_sec*1000. + tv.tv_usec/1000.;
}
void stats_init(Stats* s)
{
s->lastTime = now_ms();
s->firstTime = 0.;
s->firstFrame = 0;
s->numFrames = 0;
s->dump = malloc(256);
memset(s->dump,0,256);
}
void stats_startFrame(Stats* s)
{
s->frameTime = now_ms();
}
void stats_endFrame(Stats* s)
{
double now = now_ms();
double renderTime = now - s->frameTime;
double frameTime = now - s->lastTime;
int nn;
if (now - s->firstTime >= MAX_PERIOD_MS) {
if (s->numFrames > 0) {
double minRender, maxRender, avgRender;
double minFrame, maxFrame, avgFrame;
int count;
nn = s->firstFrame;
minRender = maxRender = avgRender = s->frames[nn].renderTime;
minFrame = maxFrame = avgFrame = s->frames[nn].frameTime;
for (count = s->numFrames; count > 0; count-- ) {
nn += 1;
if (nn >= MAX_FRAME_STATS)
nn -= MAX_FRAME_STATS;
double render = s->frames[nn].renderTime;
if (render < minRender) minRender = render;
if (render > maxRender) maxRender = render;
double frame = s->frames[nn].frameTime;
if (frame < minFrame) minFrame = frame;
if (frame > maxFrame) maxFrame = frame;
avgRender += render;
avgFrame += frame;
}
avgRender /= s->numFrames;
avgFrame /= s->numFrames;
snprintf(s->dump, 256, "Frames per second - [AVG:%.1f] [MIN:%.1f] [MAX:%.1f] Rendering time ms - [AVG:%.1f] [MIN:%.1f] [MAX:%.1f]", 1000./avgFrame, 1000./maxFrame, 1000./minFrame, avgRender, minRender, maxRender);
//LOGI("Frames per second - [AVG:%.1f] [MIN:%.1f] [MAX:%.1f]Rendering time ms - [AVG:%.1f] [MIN:%.1f] [MAX:%.1f]", 1000./avgFrame, 1000./maxFrame, 1000./minFrame, avgRender, minRender, maxRender);
}
s->numFrames = 0;
s->firstFrame = 0;
s->firstTime = now;
}
nn = s->firstFrame + s->numFrames;
if (nn >= MAX_FRAME_STATS)
nn -= MAX_FRAME_STATS;
s->frames[nn].renderTime = renderTime;
s->frames[nn].frameTime = frameTime;
if (s->numFrames < MAX_FRAME_STATS) {
s->numFrames += 1;
} else {
s->firstFrame += 1;
if (s->firstFrame >= MAX_FRAME_STATS)
s->firstFrame -= MAX_FRAME_STATS;
}
s->lastTime = now;
}
stats.h:
#include <jni.h>
#define MAX_FRAME_STATS 120
#define MAX_PERIOD_MS 5000
typedef struct{
double renderTime;
double frameTime;
} FrameStats;
typedef struct{
double firstTime;
double lastTime;
double frameTime;
int firstFrame;
int numFrames;
FrameStats frames[ MAX_FRAME_STATS ];
char* dump;
} Stats;
extern double now_ms();
extern void stats_init(Stats *);
extern int stats_dump(Stats *);
extern void stats_startFrame(Stats *);
extern void stats_endFrame(Stats *);
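A minimal usage sketch (render_frame() is a hypothetical placeholder for whatever actually draws your frame):
#include "stats.h"
extern void render_frame(void); /* placeholder: your actual drawing code */
static Stats g_stats;
/* Wrap each frame in startFrame/endFrame so statistics are accumulated and
 * summarized into g_stats.dump every MAX_PERIOD_MS milliseconds. */
void render_loop(void)
{
    stats_init(&g_stats);
    for (;;) {
        stats_startFrame(&g_stats);
        render_frame();
        stats_endFrame(&g_stats);
    }
}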
