I am implementing the following project:
https://github.com/matthill/darwinwallet
I am getting the following error when I am executing an OpenCV Android project in Eclipse:
jni/jni_recognizer.cpp:2:33: fatal error: opencv2/core/core.hpp: No such file or directory
#include <opencv2/core/core.hpp>
^
compilation terminated.
make.exe: *** [obj/local/armeabi-v7a/objs/native_wallet/jni_recognizer.o] Error 1
In my jni/include folder in Eclipse there are no OpenCV files, only NDK files, so maybe that is the issue. Please tell me how to add the OpenCV files to the include folder.
This is my recognizer.cpp file
#include <jni.h>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <vector>
#include "NativeVision/vision.h"
#include <time.h>
#include <android/log.h>
using namespace std;
using namespace cv;
#define LOGD(...) __android_log_print(ANDROID_LOG_DEBUG, "JNI_DEBUGGING", __VA_ARGS__)
double diffclock(clock_t clock1,clock_t clock2)
{
double diffticks=clock2-clock1;
double diffms=(diffticks*1000)/ CLOCKS_PER_SEC;
return diffms;
}
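// Global state shared across the JNI calls below: the feature detector,
// the descriptor matcher, the trained descriptor matrices, and the bill-name labels.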
static Ptr<ORB> detector;
static Ptr<DescriptorMatcher> descriptorMatcher;
static vector<Mat> trainImages;
static bool training_complete = false;
static vector<string> billMapping;
extern "C" {
JNIEXPORT void JNICALL Java_com_ndu_mobile_darwinwallet_Recognizer_nvInitialize(JNIEnv* env, jobject thiz)
{
LOGD( "Started nvInitialize" );
detector = getQueryDetector();
descriptorMatcher = getMatcher();
LOGD( "Finished nvInitialize" );
}
}
extern "C" {
JNIEXPORT void JNICALL Java_com_ndu_mobile_darwinwallet_Recognizer_nvResetTrainedDatabase(JNIEnv* env, jobject thiz)
{
LOGD( "Started nvResetTrainedDatabase" );
training_complete = false;
descriptorMatcher = getMatcher();
trainImages.clear();
billMapping.clear();
LOGD( "Finished nvResetTrainedDatabase" );
}
}
extern "C" {
JNIEXPORT void JNICALL Java_com_ndu_mobile_darwinwallet_Recognizer_nvTrainImage(JNIEnv* env, jobject thiz, jstring billname, jstring billpath)
{
//char* _imgBytes = (char*) env->GetPrimitiveArrayCritical(imgBytes, 0);
const char * _billpath = env->GetStringUTFChars(billpath, 0);
const char * _billname = env->GetStringUTFChars(billname, 0);
LOGD( "Started nvTrainImage" );
std::ostringstream out;
//std::ostringstream out;
//out << " : billname: " << _billname << ": BILLPATH: " << billpath << endl;
LOGD( out.str().c_str() );
//LOGD( "nvTrainImage: 1" );
//Mat mgray(1, bytelength, CV_8U, (unsigned char *)_imgBytes);
//LOGD( "nvTrainImage: 2" );
Mat img = imread(_billpath, 0);
//Mat img = imread("/sdcard/wallet/us/100b/full_pic.jpg", 0);
//LOGD( "nvTrainImage: 3" );
Mat trainData = trainImage( img, detector, descriptorMatcher );
out << "nvTrainImage: " << _billpath << " (" << trainData.rows << " x " << trainData.cols << ")" << endl;
LOGD( out.str().c_str() );
trainImages.push_back(trainData);
string billstr(_billname);
billMapping.push_back(billstr);
LOGD( "Finished nvTrainImage" );
env->ReleaseStringUTFChars(billpath, _billpath);
env->ReleaseStringUTFChars(billname, _billname);
//env->ReleasePrimitiveArrayCritical(imgBytes, _imgBytes, 0);
}
}
extern "C" {
JNIEXPORT void JNICALL Java_com_ndu_mobile_darwinwallet_Recognizer_nvFinalizeTraining(JNIEnv* env, jobject thiz)
{
LOGD( "Started nvFinalizeTraining" );
descriptorMatcher->add(trainImages);
descriptorMatcher->train();
training_complete = true;
LOGD( "Finished nvFinalizeTraining" );
}
}
extern "C" {
JNIEXPORT jstring JNICALL Java_com_ndu_mobile_darwinwallet_Recognizer_nvRecognize(JNIEnv* env, jobject thiz, jint width, jint height, jbyteArray yuv)
{
jbyte* _yuv = env->GetByteArrayElements(yuv, 0);
//jint* _bgra = env->GetIntArrayElements(bgra, 0);
LOGD( "Started nvFindFeatures" );
jstring response = env->NewStringUTF("");
if (training_complete == true)
{
clock_t begin;
clock_t end;
//Mat myuv(height + height/2, width, CV_8UC1, (unsigned char *)_yuv);
//Mat mbgra(height, width, CV_8UC4);
Mat mgray(height, width, CV_8UC1, (unsigned char *)_yuv);
//Mat myuv(width, 1, CV_8U, (unsigned char *)_yuv);
//Mat mgray = imdecode(myuv, 0);
//Pay attention to the BGRA byte order
//ARGB stored in java as int array becomes BGRA at native level
//cvtColor(myuv, mbgra, CV_YUV420sp2BGR, 4);
vector<KeyPoint> v;
//FastFeatureDetector detector(50);
//detector.detect(mgray, v);
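// Downscale so the smaller frame dimension ends up near 240 px, keeping matching fast.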
float divisor = 1;
if (height < width)
{
divisor = (((double) height) / 240);
}
else
{
divisor = (((double) width) / 240);
}
if (divisor == 0)
divisor = 1;
Size idealSize(width/divisor, height/divisor);
Mat mgray_small;
resize(mgray, mgray_small, idealSize);
Mat descriptors;
vector<DMatch> matches;
begin=clock();
//detector->detect(mgray_small, v);
//descriptorExtractor->compute( mgray_small, v, descriptors );
//surfStyleMatching( descriptorMatcher, descriptors, matches );
Mat* dummy;
//imwrite("/sdcard/wallet_debug.jpg", mgray_small );
bool debug_on = true;
int debug_matches[billMapping.size()];
RecognitionResult result = recognize( mgray_small, false, dummy, detector, descriptorMatcher, billMapping,
debug_on, debug_matches);
end=clock();
std::ostringstream out;
out << "time: " << diffclock(begin, end) << " ms | matches: " << matches.size() << endl;
if (debug_on)
{
for (int k = 0; k < billMapping.size(); k++)
out << " --" << billMapping[k] << " : " << debug_matches[k] << endl;
}
out << "orig_width: " << width << "orig_height: " << height << endl;
out << "divisor: " << divisor << endl;
//LOGD( (char*) out.str().c_str());
if (result.haswinner == false)
out << "No winner frown emoticon" << endl;
else
{
out << "Big Winner! " << result.winner << " : " << result.confidence << endl;
std::ostringstream responsetext;
responsetext << result.winner << "," << result.confidence;
response = env->NewStringUTF(responsetext.str().c_str());
}
LOGD( (char*) out.str().c_str());
//for( size_t i = 0; i < v.size(); i++ )
// circle(mbgra, Point(v[i].pt.x, v[i].pt.y), 10, Scalar(0,0,255,255));
}
LOGD( "Finished nvFindFeatures" );
//env->ReleaseIntArrayElements(bgra, _bgra, 0);
env->ReleaseByteArrayElements(yuv, _yuv, 0);
return response;
}
}
And this is my Android.mk file:
LOCAL_PATH:= $(call my-dir)
include $(CLEAR_VARS)
OPENCV_CAMERA_MODULES:= off
#OPENCV_MK_PATH:D:\FYP\darwinwallet-master\OpenCV-2.4.9-android-sdk\sdk\native\jni
OPENCV_MK_PATH:D:\FYP\darwinwallet-master\OpenCV-2.4.9-android-sdk\sdk\native\jni\OpenCV.mk
OPENCV_LIB_TYPE:=STATIC
OPENCV_INSTALL_MODULES:=on
include $(OPENCV_MK_PATH)
#Profiler
#-include android-ndk-profiler.mk
#include ../includeOpenCV.mk
#ifeq ("$(wildcard $(OPENCV_MK_PATH))","")
#try to load OpenCV.mk from default install location
#include $(TOOLCHAIN_PREBUILT_ROOT)/user/share/OpenCV/OpenCV.mk
#else
#include $(OPENCV_MK_PATH)
#endif
#LOCAL_C_INCLUDES:=D:\FYP\darwinwallet-master\OpenCV-2.4.9-android-sdk\sdk\native\jni\include
LOCAL_MODULE:=native_wallet
LOCAL_SRC_FILES:=jni_recognizer.cpp NativeVision\vision.cpp
LOCAL_CFLAGS=-ffast-math -O3 -funroll-loops
#LOCAL_CFLAGS=-O3 -funroll-loops
LOCAL_LDLIBS+=-llog -ldl
#Profiling
#LOCAL_CFLAGS:=-pg
#LOCAL_STATIC_LIBRARIES:=andprof
include $(BUILD_SHARED_LIBRARY)
There is a character missing
OPENCV_MK_PATH:D…
should read
OPENCV_MK_PATH:=D…
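With the fix applied, the relevant lines of Android.mk read (path as given in the question; adjust it to your own OpenCV SDK location):
OPENCV_MK_PATH:=D:\FYP\darwinwallet-master\OpenCV-2.4.9-android-sdk\sdk\native\jni\OpenCV.mk
OPENCV_LIB_TYPE:=STATIC
OPENCV_INSTALL_MODULES:=on
include $(OPENCV_MK_PATH)
Including OpenCV.mk is what puts the OpenCV headers on the include path, so the jni/include folder does not need to be populated by hand.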
Related
I'm trying to use the OpenSSL RAND library with a DRBG in an Android app; the library is called from native NDK code. At startup OpenSSL works fine, but the app often crashes without showing any thrown message. Here is the only error message that appears:
A/libc: Fatal signal 7 (SIGBUS), code 1 (BUS_ADRALN), fault addr 0xb1aedca6f2d64adf in tid 22101 (RenderThread), pid 22075 (android.example)
My code is the following:
libnative.cpp
#include <jni.h>
#include <string>
#include <cstdio>
#include <cstdlib>
#include <cstring>  // memset, memcpy
#include <csignal>  // struct sigevent, SIGEV_THREAD
#include <ctime>    // timer_create, timer_t
#include <cassert>
#include <openssl/rand.h>
#include <openssl/rand_drbg.h>
#include <android/log.h>
#include <future>
#ifndef TAG
#define TAG "OpenSslApi"
#endif
#ifndef DEFAULT_VALUE_ERROR
#define DEFAULT_VALUE_ERROR 0
#endif
void thread_handler(union sigval sv) {}
extern "C"
JNIEXPORT void JNICALL
Java_com_android_random_OpenSslApi_initDrbgRandom(
JNIEnv * env,
jclass clazz) {
RAND_DRBG * randDrbgInstance = RAND_DRBG_new(NID_aes_256_ctr, RAND_DRBG_FLAG_CTR_NO_DF, nullptr);
RAND_DRBG_instantiate(randDrbgInstance, nullptr, 0);
RAND_DRBG_set_reseed_time_interval(randDrbgInstance, 0);
RAND_DRBG_set_reseed_interval(randDrbgInstance, 0);
}
std::pair<jint*, jint> generateRandomIntDrbg(jint* secureRandom, jint sizeKey) {
jint myStatus = RAND_DRBG_bytes(
RAND_DRBG_get0_public(),
(unsigned char*) secureRandom,
sizeKey * sizeof(int)
);
return std::make_pair(secureRandom, myStatus);
}
extern "C"
JNIEXPORT jint JNICALL
Java_com_android_random_OpenSslApi_intDrbgGenerateSecureRandom(
JNIEnv* env,
jclass clazz,
jintArray empty_array,
jint size_key) {
struct sigevent sev{};
timer_t timerid;
memset(&sev, 0, sizeof(sev));
sev.sigev_notify = SIGEV_THREAD;
sev.sigev_notify_function = &thread_handler;
sev.sigev_value.sival_ptr = &timerid;
timer_create(CLOCK_MONOTONIC, &sev, &timerid);
JavaVM* javaVm = nullptr;
env->GetJavaVM(&javaVm);
javaVm->AttachCurrentThread(&env, (void**) &env);
jintArray array = env->NewIntArray(size_key);
if (array == nullptr) {
return DEFAULT_VALUE_ERROR;
}
jint* secureRandomIntArray = env->GetIntArrayElements(array, nullptr);
if (secureRandomIntArray == nullptr) {
return DEFAULT_VALUE_ERROR;
}
std::future<std::pair<jint*, jint>> futureIntRandom = std::async(generateRandomIntDrbg, secureRandomIntArray, size_key);
std::pair<jint*, jint> result = futureIntRandom.get();
jint* resultSecureRandom = std::get<jint*>(result);
if (resultSecureRandom == nullptr) {
return DEFAULT_VALUE_ERROR;
}
memcpy(secureRandomIntArray, empty_array, size_key);
env->ReleaseIntArrayElements(empty_array, secureRandomIntArray, 0);
return std::get<jint>(result);
}
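For comparison, the conventional JNI pattern for filling a caller-supplied jintArray pins and releases the same array handle and sizes the copy in bytes. A minimal sketch (my illustration, not the poster's API; the function name is hypothetical):
extern "C"
JNIEXPORT jint JNICALL
Java_com_android_random_OpenSslApi_fillSecureRandom(JNIEnv* env, jclass clazz, jintArray out, jint size_key) {
// Pin (or copy) the caller's array so native code can write into it.
jint* buf = env->GetIntArrayElements(out, nullptr);
if (buf == nullptr) return DEFAULT_VALUE_ERROR;
// RAND_DRBG_bytes takes a length in bytes, not in jint elements.
jint status = RAND_DRBG_bytes(RAND_DRBG_get0_public(), (unsigned char*) buf, size_key * sizeof(jint));
// Release the SAME array that was pinned, copying the data back to the Java side.
env->ReleaseIntArrayElements(out, buf, 0);
return status;
}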
OpenSslApi.java
static {
System.loadLibrary("libnative");
}
public OpenSslApi() {
initDrbgRandom();
}
public static native void initDrbgRandom();
public static native int intDrbgGenerateSecureRandom(
int[] emptyArray,
final int sizeKey
);
Thanks for any suggestions about the solution to this error.
I was looking into the examples provided in the TensorFlow Git repository for Android devices. They use Java interfaces as a wrapper for the C++ API. Are there any examples where I can use the C++ API directly to initialize TensorFlow, load the model, run inference, etc.?
Check out this repo and the following blog for a solution. These links provide step-by-step instructions on how to use the TensorFlow C++ API on Android. The idea is to create a dynamic library (.so file) that is Android-friendly (i.e. does not include the parts of TensorFlow that are desktop/GPU-only).
I wrote this code for a Raspberry Pi, but I believe it must be pretty much the same for Android:
tfbenchmark.h:
#ifndef TENSORFLOW_H
#define TENSORFLOW_H
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/public/session.h"
class TensorFlowBenchmark
{
public:
TensorFlowBenchmark();
virtual ~TensorFlowBenchmark();
bool init();
bool run();
private:
std::unique_ptr<tensorflow::Session> session_;
};
#endif /* TENSORFLOW_H */
tfbenchmark.cpp:
#include "tfbenchmark.h"
#include <vector>
#include <fstream>
#include <chrono>
#include <ctime>
#include <cstddef>
#include <cstring>   // memcpy in LoadJpegFile
#include <numeric>   // std::accumulate in report_metrics
#include <jpeglib.h>
#include <setjmp.h>
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/graph/default_device.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/util/command_line_flags.h"
// These are all common classes it's handy to reference with no namespace.
using tensorflow::Flag;
using tensorflow::Tensor;
using tensorflow::TensorShape;
using tensorflow::Status;
using tensorflow::string;
using tensorflow::int32;
const static string root_dir = ".";
const static string image = "../input.jpg";
const static string graph = "models/frozen_graph.pb";
const static int32 input_width = 224;
const static int32 input_height = 224;
const static int32 input_mean = 128;
const static int32 input_std = 128;
const static string input_layer = "x_input_pl";
const static string output_layer = "out/out";
const static int NUM_EVAL = 100;
const static int MAX_BATCH = 256;
template<class T>
void report_metrics(const std::vector<T>& v, int batch_size) {
double sum = std::accumulate(v.begin(), v.end(), 0.0);
double mean = sum / v.size();
LOG(INFO) << "Batch size = " << batch_size << ": "
<< mean/batch_size << "ms per image";
}
// Error handling for JPEG decoding.
void CatchError(j_common_ptr cinfo) {
(*cinfo->err->output_message)(cinfo);
jmp_buf* jpeg_jmpbuf = reinterpret_cast<jmp_buf*>(cinfo->client_data);
jpeg_destroy(cinfo);
longjmp(*jpeg_jmpbuf, 1);
}
// Decompresses a JPEG file from disk.
Status LoadJpegFile(string file_name, std::vector<tensorflow::uint8>* data,
int* width, int* height, int* channels) {
struct jpeg_decompress_struct cinfo;
FILE* infile;
JSAMPARRAY buffer;
int row_stride;
if ((infile = fopen(file_name.c_str(), "rb")) == NULL) {
LOG(ERROR) << "Can't open " << file_name;
return tensorflow::errors::NotFound("JPEG file ", file_name,
" not found");
}
struct jpeg_error_mgr jerr;
jmp_buf jpeg_jmpbuf; // recovery point in case of error
cinfo.err = jpeg_std_error(&jerr);
cinfo.client_data = &jpeg_jmpbuf;
jerr.error_exit = CatchError;
if (setjmp(jpeg_jmpbuf)) {
return tensorflow::errors::Unknown("JPEG decoding failed");
}
jpeg_create_decompress(&cinfo);
jpeg_stdio_src(&cinfo, infile);
jpeg_read_header(&cinfo, TRUE);
jpeg_start_decompress(&cinfo);
*width = cinfo.output_width;
*height = cinfo.output_height;
*channels = cinfo.output_components;
data->resize((*height) * (*width) * (*channels));
row_stride = cinfo.output_width * cinfo.output_components;
buffer = (*cinfo.mem->alloc_sarray)((j_common_ptr) & cinfo, JPOOL_IMAGE,
row_stride, 1);
while (cinfo.output_scanline < cinfo.output_height) {
tensorflow::uint8* row_address =
&((*data)[cinfo.output_scanline * row_stride]);
jpeg_read_scanlines(&cinfo, buffer, 1);
memcpy(row_address, buffer[0], row_stride);
}
jpeg_finish_decompress(&cinfo);
jpeg_destroy_decompress(&cinfo);
fclose(infile);
return Status::OK();
}
// Given an image file name, read in the data, try to decode it as an image,
// resize it to the requested size, and then scale the values as desired.
Status FillTensorFromImageData(std::vector<tensorflow::uint8>& image_data,
const int batch_size, const int image_height,
const int image_width, const int image_channels,
std::vector<Tensor>* out_tensors) {
// In these loops, we convert the eight-bit data in the image into float,
// resize it using bilinear filtering, and scale it numerically to the float
// range that the model expects (given by input_mean and input_std).
tensorflow::Tensor image_tensor(
tensorflow::DT_FLOAT,
tensorflow::TensorShape(
{batch_size, image_height, image_width, image_channels}));
auto image_tensor_mapped = image_tensor.tensor<float, 4>();
LOG(INFO) << image_data.size() << " bytes in image_data";
tensorflow::uint8* in = image_data.data();
float* out = image_tensor_mapped.data();
for(int n = 0; n < batch_size; n++) {
for (int y = 0; y < image_height; ++y) {
tensorflow::uint8* in_row = in + (y * image_width * image_channels);
float* out_row = out + (n * image_height * image_width * image_channels) + (y * image_width * image_channels);
for (int x = 0; x < image_width; ++x) {
tensorflow::uint8* input_pixel = in_row + (x * image_channels);
float* out_pixel = out_row + (x * image_channels);
for (int c = 0; c < image_channels; ++c) {
out_pixel[c] =
static_cast<float>(input_pixel[c] - input_mean) / input_std;
}
}
}
}
out_tensors->push_back(image_tensor);
return Status::OK();
}
// Reads a model graph definition from disk, and creates a session object you
// can use to run it.
Status LoadGraph(string graph_file_name,
std::unique_ptr<tensorflow::Session>* session) {
tensorflow::GraphDef graph_def;
Status load_graph_status = ReadBinaryProto(tensorflow::Env::Default(),
graph_file_name, &graph_def);
if (!load_graph_status.ok()) {
return tensorflow::errors::NotFound("Failed to load compute graph at '",
graph_file_name, "'");
}
session->reset(tensorflow::NewSession(tensorflow::SessionOptions()));
Status session_create_status = (*session)->Create(graph_def);
if (!session_create_status.ok()) {
return session_create_status;
}
return Status::OK();
}
TensorFlowBenchmark::TensorFlowBenchmark() {}
TensorFlowBenchmark::~TensorFlowBenchmark() {}
bool TensorFlowBenchmark::init() {
// We need to call this to set up global state for TensorFlow.
int argc = 0;
char** argv = nullptr;
tensorflow::port::InitMain("benchmark", &argc, &argv);
string graph_path = tensorflow::io::JoinPath(root_dir, graph);
Status load_graph_status = LoadGraph(graph_path, &session_);
if (!load_graph_status.ok()) {
LOG(ERROR) << load_graph_status;
return false;
}
return true;
}
bool TensorFlowBenchmark::run() {
string image_path = tensorflow::io::JoinPath(root_dir, image);
std::vector<tensorflow::uint8> image_data;
int image_width;
int image_height;
int image_channels;
Status load_img_status = LoadJpegFile(image_path, &image_data, &image_width, &image_height,
&image_channels);
if(!load_img_status.ok()) {
LOG(ERROR) << load_img_status;
return false;
}
LOG(INFO) << "Loaded JPEG: " << image_width << "x" << image_height << "x"
<< image_channels;
for(int batch_size = 1; batch_size <= MAX_BATCH; batch_size <<= 1) {
LOG(INFO) << "Batch size " << batch_size;
std::vector<Tensor> resized_tensors;
Status read_tensor_status =
FillTensorFromImageData(image_data, batch_size, image_height, image_width,
image_channels, &resized_tensors);
if (!read_tensor_status.ok()) {
LOG(ERROR) << read_tensor_status;
return false;
}
const Tensor& resized_tensor = resized_tensors[0];
// Actually run the image through the model.
std::vector<Tensor> outputs;
std::vector<long> timings;
for (int i = 0; i < NUM_EVAL; ++i) {
auto start = std::chrono::system_clock::now();
Status run_status = session_->Run({{input_layer, resized_tensor}},
{output_layer}, {}, &outputs);
auto end = std::chrono::system_clock::now();
timings.push_back( std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count());
if (!run_status.ok()) {
LOG(ERROR) << "Running model failed: " << run_status;
return false;
}
}
report_metrics(timings, batch_size);
}
return true;
}
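A minimal driver for the class above might look like this (a sketch; the graph and image paths are the constants hard-coded at the top of tfbenchmark.cpp):
#include "tfbenchmark.h"
int main() {
TensorFlowBenchmark benchmark;
// init() loads models/frozen_graph.pb and creates the session.
if (!benchmark.init()) return 1;
// run() decodes ../input.jpg and times inference for each batch size.
if (!benchmark.run()) return 2;
return 0;
}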
I want to log the time that each step of a JNI C++ routine needs. This is my code:
/*
* ImageProcessing.cpp
*/
#include <jni.h>
#include <stdlib.h>
#include <stdio.h>
#include <string>
#include <android/log.h>
#include <strstream>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc_c.h>
#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <vector>
#define LOG_TAG "TourGuide"
#define LOGD(...) ((void)__android_log_print(ANDROID_LOG_DEBUG, LOG_TAG, __VA_ARGS__))
using namespace std;
using namespace cv;
std::vector<float> parse_delimeted_list_of_numbers(char* line, char delimeter)
{
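// Split `line` on `delimeter` and parse each token as a float.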
std::vector<float> vector_of_numbers;
std::istrstream input_stream(line);
std::string text;
float number;
while (std::getline(input_stream, text, delimeter)) {
sscanf(text.c_str(), "%f", &number);
vector_of_numbers.push_back(number);
}
return vector_of_numbers;
}
extern "C"
jboolean
Java_com_example_franksyesipangkar_tourguide_CameraPreview_ImageProcessing
(JNIEnv* env, jobject thiz, jint width, jint height, jbyteArray NV21FrameData, jintArray outPixels, jbyteArray b)
{
LOGD("JNIEnv");
//convert jbyteArray to char
jbyte *cmd = env->GetByteArrayElements(b, 0);
LOGD("JNIEnvFeature");
char feature[90600];//[819000] about 800 KB for the file contents
memset(feature,0, sizeof(feature));
memcpy(feature, cmd, strlen((char*)cmd));
LOGD("OutFeature: %s", feature);
//LOGD("OutCMD: %s", cmd);
vector<float> vectorHOGSVM;
vectorHOGSVM = parse_delimeted_list_of_numbers(feature, ' ');
LOGD("Parsing Vector Success ");
/* Get the data pointers */
jbyte * pNV21FrameData = env->GetByteArrayElements(NV21FrameData, 0);
jint * poutPixels = env->GetIntArrayElements(outPixels, 0);
/* Create matrices from the input image frame */
Mat mGray(height, width, CV_8UC1, (unsigned char *)pNV21FrameData);
Mat mResult(height, width, CV_8UC4, (unsigned char *)poutPixels);
/* Convert the matrices back into image frames */
IplImage GrayImg = mGray;
IplImage ResultImg = mResult;
HOGDescriptor hog;
//hog.winSize = Size(56,40);
// Set our custom detecting vector
hog.setSVMDetector(vectorHOGSVM);
//hog.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector());
/* Declare vectors used to draw the detection boxes */
vector<Rect> found, found_filtered;
size_t i, j;
double t = (double)getTickCount();
hog.detectMultiScale(mGray, found, 0, Size(8,8), Size(32,32), 1.05, 2);
t = (double)getTickCount() - t;
LOGD("Detection Time: %gms", t*1000./cv::getTickFrequency());
LOGD("Animal: %d", found.size());
for( i = 0; i < found.size(); i++ )
{
Rect r = found[i];
for( j = 0; j < found.size(); j++ )
if( j != i && (r & found[j]) == r)
break;
if( j == found.size() )
found_filtered.push_back(r);
}
if(found.size()) {
Rect r = found[0];
r.x += cvRound(r.width*0.1);
r.width = cvRound(r.width*0.8);
r.y += cvRound(r.height*0.07);
r.height = cvRound(r.height*0.8);
LOGD("c : %d, r : %d",r.height,r.width);
cvCvtColor(&GrayImg, &ResultImg, CV_GRAY2BGR);
}
/* Release the JNI arrays on every path, not only when something was detected */
env->ReleaseByteArrayElements(NV21FrameData, pNV21FrameData, 0);
env->ReleaseIntArrayElements(outPixels, poutPixels, 0);
env->ReleaseByteArrayElements(b, cmd, 0);
return found.size() > 0;
}
For the detection step, I log the time with cv::getTickCount() and cv::getTickFrequency() (OpenCV functions) like this:
double t = (double)getTickCount();
hog.detectMultiScale(mGray, found, 0, Size(8,8), Size(32,32), 1.05, 2);
t = (double)getTickCount() - t;
LOGD("Detection Time: %gms", t*1000./cv::getTickFrequency());
But I don't know how to log the time of the other steps. I want to log the time of every step, e.g. the step ending at LOGD("OutFeature: %s", feature); and the step ending at LOGD("Parsing Vector Success ");. Do you have an idea for it?
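One way to do this (a sketch of mine, reusing the same OpenCV tick counter) is to wrap each step in a small pair of helpers:
// Minimal timing helpers (sketch): call tic() before a step and toc("label") after it.
static double g_tickStart;
static inline void tic() { g_tickStart = (double)cv::getTickCount(); }
static inline void toc(const char* label) {
double ms = ((double)cv::getTickCount() - g_tickStart) * 1000.0 / cv::getTickFrequency();
LOGD("%s: %gms", label, ms);
}
For example, tic(); before the memcpy into feature and toc("OutFeature"); right before LOGD("OutFeature: %s", feature); would time that step, and the same pair around parse_delimeted_list_of_numbers would time the parsing step.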
I have a rooted phone with Android 4.2.2. I'd like to use the minui API (source here) used in bootloader code to draw stuff on the screen. minui is much simpler than native OpenGL, and I don't need any of the complex functionality exposed by OpenGL.
The problem is that I can't write stuff directly to fb0 device. FBIOPUT_VSCREENINFO fails for unknown reason.
How can I draw directly to fb0 on Android, or how can I use minui outside the bootloader mode?
The device node is /dev/graphics/fb0.
You can build in the Android tree or use arm-linux-gcc:
arm-linux-gcc -D__ANDROID__ fb-test.c -static
It works like a normal embedded Linux framebuffer; here is a sample:
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>    /* close, sleep */
#include <fcntl.h>
#include <sys/ioctl.h> /* ioctl */
#include <linux/fb.h>
#include <sys/mman.h>
struct fb_fix_screeninfo FixedInfo;
struct fb_var_screeninfo OrigVarInfo;
static int FrameBufferFD = -1;
void *FrameBuffer = (void *) -1;
#ifndef __ANDROID__
#define FRAMEBUFFER "/dev/fb0"
#else
#define FRAMEBUFFER "/dev/graphics/fb0"
#endif //__ANDROID__
void openFBDEV(void)
{
/* open the framebuffer device */
FrameBufferFD = open(FRAMEBUFFER, O_RDWR);
if (FrameBufferFD < 0)
{
fprintf(stderr, "Error opening %s\n", FRAMEBUFFER);
exit(1);
}
/* Get the fixed screen info */
if (ioctl(FrameBufferFD, FBIOGET_FSCREENINFO, &FixedInfo))
{
fprintf(stderr, "error: ioctl(FBIOGET_FSCREENINFO) failed\n");
exit(1);
}
/* get the variable screen info */
if (ioctl(FrameBufferFD, FBIOGET_VSCREENINFO, &OrigVarInfo))
{
fprintf(stderr, "error: ioctl(FBIOGET_VSCREENINFO) failed\n");
exit(1);
}
if (FixedInfo.visual != FB_VISUAL_TRUECOLOR
&& FixedInfo.visual != FB_VISUAL_DIRECTCOLOR)
{
fprintf(stderr,
"non-TRUE/DIRECT-COLOR visuals (0x%x) not supported by this demo.\n",
FixedInfo.visual);
exit(1);
}
/*
* fbdev says the frame buffer is at offset zero, and the mmio region
* is immediately after.
*/
/* mmap the framebuffer into our address space */
FrameBuffer = (void *) mmap(0, /* start */
FixedInfo.smem_len, /* bytes */
PROT_READ | PROT_WRITE, /* prot */
MAP_SHARED, /* flags */
FrameBufferFD, /* fd */
0 /* offset */);
if (FrameBuffer == (void *) -1)
{
fprintf(stderr, "error: unable to mmap framebuffer\n");
exit(1);
}
}
void closeFBDEV(void)
{
munmap(FrameBuffer, FixedInfo.smem_len);
close(FrameBufferFD);
}
int main()
{
openFBDEV();
fprintf(stderr, "openFBDEV finish\n");
memset(FrameBuffer, 128, FixedInfo.smem_len);
sleep(5);
closeFBDEV();
fprintf(stderr, "closeFBDEV finish\n");
return 0;
}
Source code, tested working on the Qualcomm MSM89xx platform:
panel_test.c
#include <unistd.h>
#include <stdio.h>
#include <fcntl.h>
#include <sys/ioctl.h> /* ioctl */
#include <linux/fb.h>
#include <sys/mman.h>
#include <stdlib.h>
#include "yellow_face.zif"
int main()
{
int fbfd = 0;
struct fb_var_screeninfo vinfo;
struct fb_fix_screeninfo finfo;
struct fb_cmap cmapinfo;
long int screensize = 0;
char *fbp = 0;
int x = 0, y = 0;
long int location = 0;
int b,g,r;
// Open the file for reading and writing
fbfd = open("/dev/graphics/fb0", O_RDWR,0); // 打开Frame Buffer设备
if (fbfd < 0) {
printf("Error: cannot open framebuffer device.%x\n",fbfd);
exit(1);
}
printf("The framebuffer device was opened successfully.\n");
// Get fixed screen information
if (ioctl(fbfd, FBIOGET_FSCREENINFO, &finfo)) { // get the fixed device information
printf("Error reading fixed information.\n");
exit(2);
}
printf("\ntype:0x%x\n", finfo.type ); // FrameBuffer 类型,如0为象素
printf("visual:%d\n", finfo.visual ); // 视觉类型:如真彩2,伪彩3
printf("line_length:%d\n", finfo.line_length ); // 每行长度
printf("\nsmem_start:0x%lx,smem_len:%u\n", finfo.smem_start, finfo.smem_len ); // 映象RAM的参数
printf("mmio_start:0x%lx ,mmio_len:%u\n", finfo.mmio_start, finfo.mmio_len );
// Get variable screen information
if (ioctl(fbfd, FBIOGET_VSCREENINFO, &vinfo)) { // get the variable screen information
printf("Error reading variable information.\n");
exit(3);
}
printf("%dx%d, %dbpp,xres_virtual=%d,yres_virtual=%dvinfo.xoffset=%d,vinfo.yoffset=%d\n", vinfo.xres, vinfo.yres, vinfo.bits_per_pixel,vinfo.xres_virtual,vinfo.yres_virtual,vinfo.xoffset,vinfo.yoffset);
screensize = finfo.line_length * vinfo.yres_virtual;
// Map the device to memory: mmap the framebuffer into user space and get the mapped start address
fbp = (char *)mmap(0, screensize, PROT_READ | PROT_WRITE, MAP_SHARED,fbfd, 0);
if ((int)fbp == -1) {
printf("Error: failed to map framebuffer device to memory.\n");
exit(4);
}
printf("The framebuffer device was mapped to memory successfully.\n");
/*************** example 1 **********************/
b = 10;
g = 100;
r = 100;
for ( y = 0; y < 340; y++ )
for ( x = 0; x < 420; x++ ) {
location = (x+100) * (vinfo.bits_per_pixel/8) +
(y+100) * finfo.line_length;
if ( vinfo.bits_per_pixel == 32 ) { //
*(fbp + location) = b; // Some blue
*(fbp + location + 1) = g; // A little green
*(fbp + location + 2) = r; // A lot of red
*(fbp + location + 3) = 0; // No transparency
}
}
/***************** example 1 ********************/
/***************** example 2 ********************/
unsigned char *pTemp = (unsigned char *)fbp;
int i, j;
// start coordinates (x,y), end coordinates (right,bottom)
x = 400;
y = 400;
int right = 700;//vinfo.xres;
int bottom = 1000;//vinfo.yres;
for(i=y; i< bottom; i++)
{
for(j=x; j<right; j++)
{
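// yellow_face_data holds RGB565 pixels: unpack the 5/6/5 bit fields and
// expand each channel to 8 bits before writing into the 32bpp framebuffer.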
unsigned short data = yellow_face_data[(((i-y) % 128) * 128) + ((j-x) %128)];
pTemp[i*finfo.line_length + (j*4) + 2] = (unsigned char)((data & 0xF800) >> 11 << 3);
pTemp[i*finfo.line_length + (j*4) + 1] = (unsigned char)((data & 0x7E0) >> 5 << 2);
pTemp[i*finfo.line_length + (j*4) + 0] = (unsigned char)((data & 0x1F) << 3);
}
}
/***************** example 2 ********************/
// note: vinfo.xoffset and vinfo.yoffset must be 0, otherwise FBIOPAN_DISPLAY fails
if (ioctl(fbfd, FBIOPAN_DISPLAY, &vinfo)) {
printf("Error FBIOPAN_DISPLAY information.\n");
exit(5);
}
sleep(10);
munmap(fbp,finfo.smem_len);//finfo.smem_len == screensize == finfo.line_length * vinfo.yres_virtual
close(fbfd);
return 0;
}
Android.mk
# Copyright 2006-2014 The Android Open Source Project
LOCAL_PATH:= $(call my-dir)
include $(CLEAR_VARS)
LOCAL_SRC_FILES:= panel_test.c
LOCAL_SHARED_LIBRARIES := $(common_libs) libqdutils libdl liblog libbase libcutils
LOCAL_C_INCLUDES := $(common_includes) $(kernel_includes)
LOCAL_ADDITIONAL_DEPENDENCIES := $(common_deps) $(kernel_deps)
LOCAL_MODULE := panel_test
LOCAL_CFLAGS := -Werror
include $(BUILD_EXECUTABLE)
include $(call first-makefiles-under,$(LOCAL_PATH))
yellow_face.zif
:)
I'm programming an algorithm to detect the radial symmetry center of an image, in order to detect eye positions in a face frame picture. I know a public-domain project already exists that does this work, but I would like to base my work on a different line of study.
This is the scenario:
Doing this manipulation by hand frame by frame, I have seen that writing code at the Java layer, like this:
private Mat mGray = new Mat(height,width,CvType.CV_8U);
private Mat mOut = new Mat(height,width,CvType.CV_8U);
private Mat mIntermediateMat = Mat.zeros(height,width,CvType.CV_32F);
[...common methods of opencv app...]
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
switch (ImageManipulationsActivity.viewMode) {
case ImageManipulationsActivity.VIEW_MODE_RGBA:
mOut = inputFrame.rgba();
break;
case ImageManipulationsActivity.VIEW_MODE_MODIFY:
mGray = inputFrame.gray();
int h = mGray.rows();
int w = mGray.cols();
int sobxVal,sobyVal;
/**
* Manually apply Sobel filtering to compute the dx and dy images,
* and also compute the magnitude matrix and the cosValue and sinValue
* matrices, using LUT techniques.
*/
for(int i = 1; i < h-1; i++)
for(int j = 1; j < w-1; j++) {
sobxVal = (int) (
((int)mGray.get(i-1,j)[0] << 1) +
mGray.get(i-1,j-1)[0] +
mGray.get(i-1,j+1)[0] - (
((int)mGray.get(i+1,j)[0] << 1) +
mGray.get(i+1,j-1)[0] +
mGray.get(i+1,j+1)[0] ) );
sobyVal = (int) (
((int)mGray.get(i,j-1)[0] << 1) +
mGray.get(i-1,j-1)[0] +
mGray.get(i+1,j-1)[0] - (
((int)mGray.get(i,j+1)[0] << 1) +
mGray.get(i-1,j+1)[0] +
mGray.get(i+1,j+1)[0] ) );
// compute magnitude and atan2
}
// ...other calculations...
Core.convertScaleAbs(mIntermediateMat, mOut);
}
return mOut;
}
is not at all efficient! So I decided to write a native C++ function to manipulate the matrices this way:
C++ side code
#include <jni.h>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <math.h>
#include <vector>
#include <android/log.h>
#define LOG_TAG "Example Filter"
#define LOGD(...) ((void)__android_log_print(ANDROID_LOG_DEBUG, LOG_TAG, __VA_ARGS__))
#define RADIUS 10
#define _K 9.9
#define _A 2 //radial strictness parameter, found experimentally
using namespace std;
using namespace cv;
extern "C" {
JNIEXPORT void JNICALL Java_org_opencv_samples_tutorial2_Tutorial2Activity_FindFeatures(
JNIEnv* env,
jobject,
jlong addrGray,
jlong addrRgba,
jlong addrlutCosSin,
jlong addrlutM );
JNIEXPORT void JNICALL Java_org_opencv_samples_tutorial2_Tutorial2Activity_FindFeatures(
JNIEnv* env,
jobject,
jlong addrGray,
jlong addrOut,
jlong addrlutCosSin,
jlong addrlutM )
{
Mat& mGr = *(Mat*)addrGray;
Mat& mOut = *(Mat*)addrOut;
Mat& lutCosSin = *(Mat*)addrlutCosSin;
Mat& lutM = *(Mat*)addrlutM;
int w = mGr.cols;
int h = mGr.rows;
double sobelxVal,sobelyVal,angle;
Mat magnitudo(h,w,CV_32F,Scalar(0));
Mat xMat(h,w,CV_8S,Scalar(0));
Mat yMat(h,w,CV_8S,Scalar(0));
Mat oMat(h,w,CV_32F,Scalar(0));
Mat mMat(h,w,CV_32F,Scalar(0));
/*
* Convolves Matrix with Sobel ky kernel and Sobel kx kernel
*ky = [ 1 2 1 ;
* 0 0 0 ;
* -1 -2 -1 ]
*
*kx = [ 1 0 -1 ;
* 2 0 -2 ;
* 1 0 -1 ]
*
* doing dedicated computation
*/
for( int i = 1; i < h-1; i++ )
{
for (int j = 1; j < w-1; j++ )
{
sobelxVal = (mGr.at<int>(i-1,j) << 1) +
mGr.at<int>(i-1,j-1) +
mGr.at<int>(i-1,j+1) - (
(mGr.at<int>(i+1,j) << 1) +
mGr.at<int>(i+1,j-1) +
mGr.at<int>(i+1,j+1) );
sobelyVal = (mGr.at<int>(i,j-1) << 1) +
mGr.at<int>(i-1,j-1) +
mGr.at<int>(i+1,j-1) - (
(mGr.at<int>(i,j+1) << 1) +
mGr.at<int>(i-1,j+1) +
mGr.at<int>(i+1,j+1) );
magnitudo.at<double>(i,j) = lutM.at<double>((int)sobelxVal+255/4,(int)sobelxVal+255/4);
angle = floor(atan2(sobelyVal,sobelxVal)*180/M_PI);
xMat.at<double>(i,j) = lutCosSin.at<double>(0,angle);
yMat.at<double>(i,j) = lutCosSin.at<double>(1,angle);
}
}
// other code calculation to determine mOut matrix values
}
}
Java side code
private Mat mRgba;
private Mat mGray;
/*
* Matrix of 360 cols and 2 rows
* row[0] cos values
* row[1] sin values
*/
private static Mat lutCosSin;
/*
* Matrix 510 x 510
* where lutM(i,j) = atan2(i,j)
*/
private static Mat lutMagnitudo;
// common methods and declarations...
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
final int viewMode = mViewMode;
switch (viewMode) {
case VIEW_MODE_RGBA:
// input frame has RBGA format
mRgba = inputFrame.rgba();
break;
case VIEW_MODE_FEATURES:
// input frame has RGBA format
mGray = inputFrame.gray();
mRgba = Mat.zeros(mGray.rows(), mGray.cols(), mGray.type());
FindFeatures(
mGray.getNativeObjAddr(),
mRgba.getNativeObjAddr(),
lutCosSin.getNativeObjAddr(),
lutMagnitudo.getNativeObjAddr()
);
//Core.convertScaleAbs(mRgba, mRgba);
// Log.d(TAG, "Called native function :"+mRgba.submat(new Range(0,5), new Range(0,5)).toString()+
// "\nAngles matrix:"+mGray);
break;
}
return mRgba;
}
public native void FindFeatures(long matAddrGr, long matAddrRgba, long matAddrlutCS, long matAddrlutM);
The first important issue with this snippet is that when accessing the mGr cells with the "at" method this way:
mGr.at<int>(i,j)
(having previously checked that mGr's type is int), the returned values are not the actual gray-level pixels of the gray frame (I saw this in the log).
I suspect there is a bug in how the matrix is linked from the Java code to the C++ code, but I'm not sure about this.
I hope someone can help me solve this issue XD !!
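For reference: the gray frame delivered by CvCameraViewFrame.gray() is a CV_8UC1 Mat, one unsigned byte per pixel, so at<int> reinterprets four neighboring bytes as a single int. A sketch of the conventional access:
// CV_8UC1 stores one uchar per pixel, so index with uchar and widen afterwards.
int gray = mGr.at<uchar>(i, j); // correct for a CV_8UC1 gray frame
// mGr.at<int>(i, j) would read 4 adjacent bytes as one int, giving meaningless values.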