The above image is my output. I am using the OpenCV edge detection C++ code written below:
JNIEXPORT jfloatArray JNICALL Java_com_test_getPoints
(JNIEnv *env, jobject thiz,jobject bitmap)
{
__android_log_print(ANDROID_LOG_VERBOSE, APPNAME, "Scaning getPoints");
int ret;
AndroidBitmapInfo info;
void* pixels = 0;
if ((ret = AndroidBitmap_getInfo(env, bitmap, &info)) < 0) {
__android_log_print(ANDROID_LOG_VERBOSE, APPNAME,"AndroidBitmap_getInfo() failed ! error=%d", ret);
return 0;
}
if (info.format != ANDROID_BITMAP_FORMAT_RGBA_8888) {
__android_log_print(ANDROID_LOG_VERBOSE, APPNAME,"Bitmap format is not RGBA_8888!");
return 0;
}
if ((ret = AndroidBitmap_lockPixels(env, bitmap, &pixels)) < 0) {
__android_log_print(ANDROID_LOG_VERBOSE, APPNAME,"AndroidBitmap_lockPixels() failed ! error=%d", ret);
return 0; // pixels is not valid if the lock failed
}
Mat mbgra(info.height, info.width, CV_8UC4, pixels);
vector<Point> img_pts = getPoints(mbgra); // <-- the call in question
jfloatArray jArray = env->NewFloatArray(8);
if (jArray != NULL)
{
jfloat *ptr = env->GetFloatArrayElements(jArray, NULL);
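// pack the result as [x0, x1, x2, x3, y0, y1, y2, y3]: i walks the x slots, j = i + 4 the matching y slots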
for (int i=0,j=i+4; j<8; i++,j++)
{
ptr[i] = img_pts[i].x;
ptr[j] = img_pts[i].y;
}
env->ReleaseFloatArrayElements(jArray, ptr, NULL);
}
AndroidBitmap_unlockPixels(env, bitmap);
return jArray;
}
vector<Point> getPoints(Mat image)
{
int width = image.size().width;
int height = image.size().height;
Mat image_proc = image.clone();
vector<vector<Point> > squares;
Mat blurred; // output of the median blur (allocated by medianBlur below)
medianBlur(image_proc, blurred, 9);
Mat gray0(blurred.size(), CV_8U), gray;
vector<vector<Point> > contours;
for (int c = 0; c < 3; c++)
{
int ch[] = {c, 0};
mixChannels(&blurred, 1, &gray0, 1, ch, 1);
const int threshold_level = 2;
for (int l = 0; l < threshold_level; l++)
{
if (l == 0)
{
Canny(gray0, gray, 10, 20, 3); // low/high thresholds 10/20, aperture size 3
dilate(gray, gray, Mat(), Point(-1,-1));
}
else
{
gray = gray0 >= (l+1) * 255 / threshold_level;
}
findContours(gray, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
vector<Point> approx;
for (size_t i = 0; i < contours.size(); i++)
{
approxPolyDP(Mat(contours[i]), approx, arcLength(Mat(contours[i]), true)*0.02, true);
if (approx.size() == 4 &&
fabs(contourArea(Mat(approx))) > 1000 &&
isContourConvex(Mat(approx)))
{
double maxCosine = 0;
for (int j = 2; j < 5; j++)
{
double cosine = fabs(angle(approx[j%4], approx[j-2], approx[j-1]));
maxCosine = MAX(maxCosine, cosine);
}
if (maxCosine < 0.3)
squares.push_back(approx);
}
}
}
} // close the channel loop, so the largest square is selected once, over all channels
double largest_area = -1;
int largest_contour_index = 0;
for(int i=0;i<squares.size();i++)
{
double a =contourArea(squares[i],false);
if(a>largest_area)
{
largest_area = a;
largest_contour_index = i;
}
}
__android_log_print(ANDROID_LOG_VERBOSE, APPNAME, "Scaning size() %d",squares.size());
vector<Point> points;
if(squares.size() > 0)
{
points = squares[largest_contour_index];
}
else
{
points.push_back(Point(0, 0));
points.push_back(Point(width, 0));
points.push_back(Point(0, height));
points.push_back(Point(width, height));
}
return points;
}
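Note: getPoints() calls an angle() helper that is not shown in this snippet. It is presumably the standard max-cosine helper from OpenCV's squares.cpp sample; the same function appears verbatim in a later snippet on this page:
static double angle(Point pt1, Point pt2, Point pt0)
{
    double dx1 = pt1.x - pt0.x;
    double dy1 = pt1.y - pt0.y;
    double dx2 = pt2.x - pt0.x;
    double dy2 = pt2.y - pt0.y;
    return (dx1*dx2 + dy1*dy2) / sqrt((dx1*dx1 + dy1*dy1) * (dx2*dx2 + dy2*dy2) + 1e-10);
}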
How can I find accurate edges, as in the screenshot above?
I am new to OpenCV; can anyone guide me? Thanks for any response.
The output image
required output
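One practical refinement: the JNI packing above assumes the four points arrive in a fixed order, but approxPolyDP gives no such guarantee. A minimal sketch of a corner-ordering helper (orderCorners is hypothetical, not part of the code above), using the common sum/difference trick:
static vector<Point> orderCorners(const vector<Point>& pts)
{
    // top-left has the smallest x+y, bottom-right the largest;
    // top-right has the smallest y-x, bottom-left the largest
    int tl = 0, tr = 0, br = 0, bl = 0;
    for (int i = 1; i < 4; i++) {
        if (pts[i].x + pts[i].y < pts[tl].x + pts[tl].y) tl = i;
        if (pts[i].x + pts[i].y > pts[br].x + pts[br].y) br = i;
        if (pts[i].y - pts[i].x < pts[tr].y - pts[tr].x) tr = i;
        if (pts[i].y - pts[i].x > pts[bl].y - pts[bl].x) bl = i;
    }
    vector<Point> out(4);
    out[0] = pts[tl]; out[1] = pts[tr]; out[2] = pts[br]; out[3] = pts[bl];
    return out;
}
Calling points = orderCorners(points) just before the return in getPoints() would make the Java-side unpacking deterministic.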
Related
I'm working on a project about document shadow removal in an Android application. I'm trying to adapt a C++ algorithm to Android, using a button to process the input image and remove its shadow, but there are still some errors and I have no idea how to solve them because I'm not good at coding. Here is the code of my project:
MainActivity.java
public class MainActivity extends AppCompatActivity {
// Used to load the 'native-lib' library on application startup.
static {
System.loadLibrary("native-lib");
System.loadLibrary("opencv_java3");
}
public static String TAG ="MainActivity";
ImageView imageView;
Button loadImage;
Button process;
private int REQUEST_CODE = 1;
Mat image = new Mat();
Bitmap bitmap;
Bitmap bmp;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
imageView = (ImageView) findViewById(R.id.imageView);
loadImage = (Button) findViewById(R.id.button);
process = (Button) findViewById(R.id.button2);
loadImage.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View view) {
Intent intent = new Intent();
intent.setType("image/*");
intent.setAction(Intent.ACTION_GET_CONTENT);
startActivityForResult(Intent.createChooser(intent,"Select Image"),REQUEST_CODE);
}
});
process.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View view) {
image = new Mat(bitmap.getHeight(), bitmap.getWidth(), CvType.CV_8UC4); // Mat takes (rows, cols); bitmapToMat fills it as RGBA
Utils.bitmapToMat(bitmap, image);
Native.removeShadow(image.getNativeObjAddr());
Utils.matToBitmap(image,bitmap);
imageView.setImageBitmap(bitmap);
}
});
}
@Override
protected void onActivityResult(int request_code, int result_code, Intent data){
super.onActivityResult(request_code,result_code,data);
if(request_code == REQUEST_CODE && result_code == RESULT_OK && data != null && data.getData() != null ){
Uri uri = data.getData();
try{
bitmap = MediaStore.Images.Media.getBitmap(getContentResolver(), uri);
imageView.setImageBitmap(bitmap);
}catch (IOException e){
e.printStackTrace();
}
}
}
}
Native.java
public class Native {
public native static void removeShadow (long addrRgba);
}
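One thing to check with this setup: the exported symbol Java_com_example_shadowdoctest_Native_removeShadow encodes the fully qualified class name, so Native.java must live in the package com.example.shadowdoctest. If the class sits in any other package, the VM cannot resolve the method and the call fails with an UnsatisfiedLinkError.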
shadowRemove.h
#include "jni.h"
#include <stdio.h>
#include <iostream>
#include <omp.h>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/ml/ml.hpp>
#include <opencv2/photo/photo.hpp>
#include <cmath>
#include <random>
#ifndef SHADOWDOCTEST_SHADOWREMOVE_H
#define SHADOWDOCTEST_SHADOWREMOVE_H
#define BUFFER_SIZE 1000
#define USE_SAMPLING 1
using namespace cv;
using namespace std;
using namespace cv::ml;
extern "C" {
JNIEXPORT void JNICALL Java_com_example_shadowdoctest_Native_removeShadow(JNIEnv *, jclass, jlong);
void removeShadow(Mat& img);
// Returns the block centered around (x,y).
bool GetBlock(int x, int y, Mat &block, Mat &dsImage);
void ClusterBlock(Mat &block, Mat &clusterMu, int *randInd);
// Identifies the local cluster belonging to the background and saves it in the shadow map
// Currently uses max cluster mean.
void CalculatePaperStatistics(int x, int y, Mat &clusterMu);
// Finds the block that best represents the background region. Used for constructing the
// shadow map (gain map)
void FindReferenceIndex(int &refInd, Mat &dsImage, Vec3f &ref);
void NormalizeShadowMap(int refIndex, Vec3f &ref);
void UpsampleShadowMap();
void ApplyShadowMap();
// Converts x and y index to access downsampled images (xhat = x and
// yhat = y when stride is 1)
void ConvertIndex(int x, int y, int &xHat, int &yHat);
// Input image, shadow map (gain map), and mask of image regions
Mat *image;
Mat *shadowMap;
// Full width, height, and number of channels
int width;
int height;
int channels;
// Number of pixels to skip when performing local analysis
int stride;
// Size of overlapping blocks in local analysis
int blockSize;
// Number of clusters used for local analysis (i.e., 2)
int numOfClusters;
// Number of clusters used for global analysis (i.e., 3)
int numOfClustersRef;
// Maximum number of iterations and epsilon threshold used as stopping condition for GMM clustering
int maxIters;
float emEps;
// Amount of downsampling to be used on the original image (for speedup)
float dsFactor;
// Number of local and global samples in the block and image, respectively (Default is 150 and 1000)
int numOfLocalSamples;
int numOfGlobalSamples;
}
#endif //SHADOWDOCTEST_SHADOWREMOVE_H
shadowRemove.cpp
#include "shadowRemove.h"
#include <cstdlib>
#include <iostream>
#include <iomanip>
#include <cmath>
#include <cstring>
JNIEXPORT void JNICALL Java_com_example_shadowdoctest_Native_removeShadow(JNIEnv *, jclass, jlong addrRgba){
Mat& image = *(Mat*)addrRgba;
removeShadow(image);
}
void removeShadow(Mat& img){
image = new Mat(img);
// image.convertTo(image, CV_32FC3);
width = image->cols;
height = image->rows;
channels = image->channels();
stride = 20; // Number of pixels to skip when performing local analysis
blockSize = 21; // Size of overlapping blocks in local analysis
numOfClusters = 3; // Number of clusters used for local analysis
numOfClustersRef = 3; // Number of clusters used for global analysis
maxIters = 100; // Maximum number of iterations used as stopping condition for GMM clustering.
emEps = 0.1f; // Epsilon threshold used as stopping condition for GMM clustering.
dsFactor = 1.0f; // No downsampling is done
numOfLocalSamples = 150; // Number of samples to take in each block (for local statistics)
numOfGlobalSamples = 1000; // Number of samples to take throughout entire image (for global statistics)
int sHeight, sWidth;
shadowMap = new Mat(image->rows, image->cols, CV_32FC3, CV_RGB(-1, -1, -1)); // Mat takes (rows, cols); CV_32FC3 is a three-channel matrix of 32-bit floats
resize(*shadowMap, *shadowMap, Size(0, 0), dsFactor, dsFactor, INTER_LANCZOS4);
ConvertIndex(shadowMap->cols, shadowMap->rows, sWidth, sHeight);
resize(*shadowMap, *shadowMap, Size(sWidth, sHeight));
int threadCount = omp_get_max_threads();
Mat* blockList = new Mat[threadCount];
for (int i = 0; i < threadCount; i++) {
blockList[i] = Mat(height, width, CV_32FC3, CV_RGB(0, 0, 0));
}
Mat dsMask = Mat(shadowMap->rows, shadowMap->cols, CV_8UC1, Scalar(0));
int* randInd = new int[numOfLocalSamples];
int size = blockSize * blockSize; // TODO: use size_t
vector<int> freeIndexes;
for (int i = 0; i < size; i++) {
freeIndexes.push_back(i);
}
int count = 0;
std::default_random_engine generator;
std::uniform_int_distribution<int> distribution(0, size - 1); // bounds are inclusive; 'size' itself would be out of range
while (count < numOfLocalSamples) {
int indexCandidate = distribution(generator);
vector<int>::iterator it = std::find(freeIndexes.begin(), freeIndexes.end(), indexCandidate);
if (it != freeIndexes.end()) {
randInd[count] = indexCandidate;
freeIndexes.erase(it);
count++;
}
}
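// Editorial note: the std::find over freeIndexes makes this rejection sampling
// quadratic in the pool size. An equivalent way to draw unique indices is a
// single shuffle (a sketch, assuming <algorithm> and <numeric> are included):
//   std::vector<int> pool(size);
//   std::iota(pool.begin(), pool.end(), 0);            // 0, 1, ..., size-1
//   std::shuffle(pool.begin(), pool.end(), generator); // permute once
//   std::copy(pool.begin(), pool.begin() + numOfLocalSamples, randInd);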
Mat dsImage;
resize(*image, dsImage, Size(0, 0), dsFactor, dsFactor, INTER_NEAREST);
width = dsImage.cols;
height = dsImage.rows;
#pragma omp parallel
{
#pragma omp for schedule(dynamic) nowait
for (int i = 0; i < height; i += stride) {
for (int j = 0; j < width; j += stride) {
// Get current block
int threadNum = omp_get_thread_num();
Mat& curBlock = blockList[threadNum];
if (GetBlock(j, i, curBlock, dsImage)) {
// Cluster pixel intensities
Mat curMu;
vector<Mat> listOfCovs;
ClusterBlock(curBlock, curMu, randInd);
// Find paper mu of current block and update global matrix
CalculatePaperStatistics(j, i, curMu);
}
}
}
}
delete[] randInd;
delete[] blockList;
int refIndex = -1;
Vec3f ref;
FindReferenceIndex(refIndex, dsImage, ref);
width = image->cols;
height = image->rows;
medianBlur(*shadowMap, *shadowMap, 3);
GaussianBlur(*shadowMap, *shadowMap, Size(3, 3), 2.5f);
Mat dsShadowMap = *shadowMap;
UpsampleShadowMap();
NormalizeShadowMap(refIndex, ref);
ApplyShadowMap();
}
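// Editorial note: Utils.bitmapToMat on the Java side delivers a CV_8UC4 Mat,
// while the functions below read pixels with at<Vec3f>() (CV_32FC3) -- note the
// commented-out convertTo above. A minimal pre-conversion sketch (an assumption
// about the crash, not the original author's code):
//   cv::Mat work;
//   cv::cvtColor(img, work, cv::COLOR_RGBA2RGB); // drop the alpha channel
//   work.convertTo(work, CV_32FC3);              // match the at<Vec3f>() accesses
//   ... run the pipeline on 'work', then convert back before matToBitmap ...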
bool GetBlock (int x, int y, Mat& block, Mat& dsImage) {
int halfBlock = (int) floorf(float(blockSize) / 2.0f);
int minX = max(0, x - halfBlock);
int maxX = min(width - 1, x + halfBlock);
int minY = max(0, y - halfBlock);
int maxY = min(height - 1, y + halfBlock);
int deltaY = maxY - minY + 1;
int deltaX = maxX - minX + 1;
if (block.rows != deltaY || block.cols != deltaX) {
block = Mat(deltaY, deltaX, CV_32FC3, CV_RGB(0, 0, 0));
}
// Copy intensities to block
int bX = 0;
int bY = 0;
for (int i = minY; i <= maxY; i++) {
for (int j = minX; j <= maxX; j++) {
for (int k = 0; k < channels; k++) {
block.at<Vec3f>(bY, bX)[k] = dsImage.at<Vec3f>(i, j)[k];
}
bX++;
}
bX = 0;
bY++;
}
return true;
}
void ClusterBlock (Mat& block, Mat& clusterMu, int* randInd) {
// Set up expectation maximization model
Ptr<EM> emModel = EM::create();
emModel->setClustersNumber(numOfClusters);
emModel->setCovarianceMatrixType(EM::COV_MAT_DIAGONAL);
emModel->setTermCriteria(TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, maxIters, emEps));
// Cluster block with k means initializer
Mat samples;
if (block.rows * block.cols == blockSize * blockSize) {
Mat tmp(numOfLocalSamples, 1, CV_32FC3, CV_RGB(-1, -1, -1));
for (int i = 0; i < numOfLocalSamples; i++) {
assert(randInd[i] >= 0 && randInd[i] < block.rows * block.cols);
tmp.at<Vec3f>(i) = block.at<Vec3f>(randInd[i]);
}
samples = tmp.reshape(1);
}
else {
samples = block.reshape(0, block.rows * block.cols);
samples = samples.reshape(1);
}
emModel->trainEM(samples);
clusterMu = emModel->getMeans();
clusterMu = clusterMu.reshape(channels);
clusterMu.convertTo(clusterMu, CV_32FC3);
}
void CalculatePaperStatistics(int x, int y, Mat& clusterMu) {
int sX, sY;
ConvertIndex(x, y, sX, sY);
Vec3f& shadowVec = shadowMap->at<Vec3f>(sY, sX);
double maxSum = 0;
for (int i = 0; i < numOfClusters; i++) {
double muSum = 0;
for (int k = 0; k < channels; k++) {
muSum += clusterMu.at<Vec3f>(i)[k];
}
if (muSum > maxSum) {
maxSum = muSum;
for (int k = 0; k < channels; k++) {
shadowVec[k] = clusterMu.at<Vec3f>(i)[k];
}
}
}
}
void FindReferenceIndex(int& refIndex, Mat& dsImage, Vec3f& ref) {
// Set up expectation maximization model
Ptr<EM> emModel = EM::create();
emModel->setClustersNumber(numOfClustersRef);
emModel->setCovarianceMatrixType(EM::COV_MAT_DIAGONAL);
emModel->setTermCriteria(TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, maxIters, emEps));
// Cluster block with k means initializer
Mat samples;
#if USE_SAMPLING
Mat tmp(numOfGlobalSamples, 1, CV_32FC3, CV_RGB(-1, -1, -1));
int* randInd = new int[numOfGlobalSamples];
int size = width * height; // TODO: Use size_t
vector<int> freeIndexes;
for (int i = 0; i < size; i++) {
freeIndexes.push_back(i);
}
int count = 0;
int maxIndexCandidiate = -1;
int delta = size / numOfGlobalSamples;
std::default_random_engine generator;
std::uniform_int_distribution<int> distribution(0, size - 1); // bounds are inclusive; 'size' itself would be out of range
while (count < numOfGlobalSamples) {
int indexCandidate = distribution(generator);
vector<int>::iterator it = std::find(freeIndexes.begin(), freeIndexes.end(), indexCandidate);
if (it != freeIndexes.end()) {
randInd[count] = indexCandidate;
freeIndexes.erase(it);
count++;
}
}
for (int i = 0; i < numOfGlobalSamples; i++) {
tmp.at<Vec3f>(i) = image->at<Vec3f>(randInd[i]);
}
delete[] randInd;
samples = tmp.reshape(1);
#else
samples = dsImage.reshape(0, width * height);
samples = samples.reshape(1);
#endif
emModel->trainEM(samples);
// Get the cluster means
Mat clusterMu = emModel->getMeans();
clusterMu = clusterMu.reshape(channels);
clusterMu.convertTo(clusterMu, CV_32FC3);
// Get cluster variances
int maxInd = -1;
double curMax = -1;
for (int i = 0; i < numOfClustersRef; i++) {
double muMag = 0;
for (int k = 0; k < channels; k++) {
muMag += clusterMu.at<Vec3f>(i)[k];
}
if (muMag > curMax) {
curMax = muMag;
maxInd = i;
}
}
assert(maxInd != -1 && maxInd < numOfClustersRef);
// Find the closest actual value to the cluster to choose as reference
// TODO: stop earlier once threshold is met?
ref = clusterMu.at<Vec3f>(maxInd);
float curMin = std::numeric_limits<float>::max();
refIndex = -1;
for (int i = 0; i < height; i++) {
for (int j = 0; j < width; j++) {
Vec3f curVal = dsImage.at<Vec3f>(i, j);
float curMag = 0;
for (int k = 0; k < channels; k++) {
float diff = curVal[k] - ref[k];
curMag += diff * diff;
}
if (curMag < curMin) {
curMin = curMag;
refIndex = j + i * width;
}
}
}
}
void UpsampleShadowMap() {
resize(*shadowMap, *shadowMap, Size(width, height), 0, 0, INTER_LANCZOS4);
}
void NormalizeShadowMap(int refIndex, Vec3f& ref) {
assert(shadowMap->rows == height && shadowMap->cols == width);
assert(refIndex >= 0 && refIndex < width * height);
ref = shadowMap->at<Vec3f>(refIndex);
// Divide each local paper intensity by the global reference
for (int i = 0; i < height; i++) {
for (int j = 0; j < width; j++) {
Vec3f& curShadowVec = shadowMap->at<Vec3f>(i, j);
for (int k = 0; k < channels; k++) {
curShadowVec[k] /= ref[k];
// Clamp negative and zero values to a small number
if (curShadowVec[k] <= 0) {
curShadowVec[k] = 1.0e-6f;
}
}
}
}
}
void ApplyShadowMap() {
// Loop through all the pixels and divide by inverse gain
for (int i = 0; i < height; i++) {
for (int j = 0; j < width; j++) {
Vec3f invGain = shadowMap->at<Vec3f>(i, j);
Vec3f& color = image->at<Vec3f>(i, j);
for (int k = 0; k < channels; k++) {
color[k] /= invGain[k];
}
}
}
}
void ConvertIndex(int x, int y, int& xHat, int& yHat) {
// Convert from original resolution to downsampled size (downsampled based on stride)
xHat = (int)floor((x - 1) / float(stride)) + 1;
yHat = (int)floor((y - 1) / float(stride)) + 1;
}
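Conceptually, NormalizeShadowMap and ApplyShadowMap implement a per-pixel gain model: the shadow map holds the local background (paper) estimate, which is divided by a global reference to yield a gain, and each input pixel is divided by that gain:

$$g(x,y) = \frac{\mathrm{paper}(x,y)}{\mathrm{ref}}, \qquad I_{\mathrm{out}}(x,y) = \frac{I_{\mathrm{in}}(x,y)}{g(x,y)}$$

Shadowed regions have g < 1 per channel, so dividing by the gain brightens them toward the reference paper color.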
When I click the button to process an image, the application force-closes, and Logcat shows this:
Logcat
I've also already set up OpenCV for both Java and the NDK.
Here is the original source code of the shadow removal for document images.
I am currently working on a zip animation to unlock an Android mobile screen. Changing background images is an expensive task and does not give a smooth effect. I want a smooth effect. Any help please? Thanks.
Try this:
The smooth effect makes use of a Convolution Matrix:
Some image effects, like Gaussian Blur, Sharpening, Embossing, and Smooth, are better implemented using the Convolution Matrix method.
Check that link to learn more about the Convolution Matrix, or another one.
To implement the Convolution Matrix:
import android.graphics.Bitmap;
import android.graphics.Color;
public class ConvolutionMatrix
{
public static final int SIZE = 3;
public double[][] Matrix;
public double Factor = 1;
public double Offset = 1;
public ConvolutionMatrix(int size) {
Matrix = new double[size][size];
}
public void setAll(double value) {
for (int x = 0; x < SIZE; ++x) {
for (int y = 0; y < SIZE; ++y) {
Matrix[x][y] = value;
}
}
}
public void applyConfig(double[][] config) {
for(int x = 0; x < SIZE; ++x) {
for(int y = 0; y < SIZE; ++y) {
Matrix[x][y] = config[x][y];
}
}
}
public static Bitmap computeConvolution3x3(Bitmap src, ConvolutionMatrix matrix) {
int width = src.getWidth();
int height = src.getHeight();
Bitmap result = Bitmap.createBitmap(width, height, src.getConfig());
int A, R, G, B;
int sumR, sumG, sumB;
int[][] pixels = new int[SIZE][SIZE];
for(int y = 0; y < height - 2; ++y) {
for(int x = 0; x < width - 2; ++x) {
// get pixel matrix
for(int i = 0; i < SIZE; ++i) {
for(int j = 0; j < SIZE; ++j) {
pixels[i][j] = src.getPixel(x + i, y + j);
}
}
// get alpha of center pixel
A = Color.alpha(pixels[1][1]);
// init color sum
sumR = sumG = sumB = 0;
// get sum of RGB on matrix
for(int i = 0; i < SIZE; ++i) {
for(int j = 0; j < SIZE; ++j) {
sumR += (Color.red(pixels[i][j]) * matrix.Matrix[i][j]);
sumG += (Color.green(pixels[i][j]) * matrix.Matrix[i][j]);
sumB += (Color.blue(pixels[i][j]) * matrix.Matrix[i][j]);
}
}
// get final Red
R = (int)(sumR / matrix.Factor + matrix.Offset);
if(R < 0) { R = 0; }
else if(R > 255) { R = 255; }
// get final Green
G = (int)(sumG / matrix.Factor + matrix.Offset);
if(G < 0) { G = 0; }
else if(G > 255) { G = 255; }
// get final Blue
B = (int)(sumB / matrix.Factor + matrix.Offset);
if(B < 0) { B = 0; }
else if(B > 255) { B = 255; }
// apply new pixel
result.setPixel(x + 1, y + 1, Color.argb(A, R, G, B));
}
}
// final image
return result;
}
}
Then, to apply the smooth effect:
public static Bitmap smooth(Bitmap src, double value) {
ConvolutionMatrix convMatrix = new ConvolutionMatrix(3);
convMatrix.setAll(1);
convMatrix.Matrix[1][1] = value;
convMatrix.Factor = value + 8;
convMatrix.Offset = 1;
return ConvolutionMatrix.computeConvolution3x3(src, convMatrix);
}
You can change the values to get the smooth effect you want.
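For example, smooth(src, 1) reduces to a plain 3×3 box blur: all nine kernel entries are 1 and Factor becomes 9, so each output pixel is the average of its 3×3 neighborhood (shifted by the Offset of 1). Larger center values weight the original pixel more heavily, giving a milder blur.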
The tutorial can be found HERE.
I have a big problem with drawing functions in my Android JNI app using OpenCV. I track objects in my native function and forward the rectangles vector to Java through a Mat; drawing the rectangles on the RGBA Mat in Java works fine. But now I have to draw these rectangles in native code, in my function, before returning to Java, and it doesn't work. I've tried to draw anything at all from the JNI side, but there is still no effect: the drawing functions don't change the Mat that is returned to Java. I would be very grateful for help.
This is my native code:
JNIEXPORT void JNICALL Java_org_opencv_samples_facedetect_DetectionBasedTracker_nativeDetect
(JNIEnv * jenv, jclass, jlong thiz, jlong imageGray, jlong faces)
{
LOGD("Java_org_opencv_samples_facedetect_DetectionBasedTracker_nativeDetect enter");
try
{
vector<Rect> RectFaces;
((DetectionBasedTracker*)thiz)->process(*((Mat*)imageGray));
((DetectionBasedTracker*)thiz)->getObjects(RectFaces);
for(int i=0; i<RectFaces.size(); i++)
{
Point p,k;
p.x = ((Rect)RectFaces[i]).x;
p.y = ((Rect)RectFaces[i]).y;
k.x = ((Rect)RectFaces[i]).x + ((Rect)RectFaces[i]).width;
k.y = ((Rect)RectFaces[i]).y + ((Rect)RectFaces[i]).height;
rectangle(*((Mat*)imageGray), p, k,Scalar(0,255,255, 255), -1, 8);
}
vector_Rect_to_Mat(RectFaces, *((Mat*)faces));
}
catch(cv::Exception& e)
{
LOGD("nativeCreateObject caught cv::Exception: %s", e.what());
jclass je = jenv->FindClass("org/opencv/core/CvException");
if(!je)
je = jenv->FindClass("java/lang/Exception");
jenv->ThrowNew(je, e.what());
}
catch (...)
{
LOGD("nativeDetect caught unknown exception");
jclass je = jenv->FindClass("java/lang/Exception");
jenv->ThrowNew(je, "Unknown exception in JNI code {highgui::VideoCapture_n_1VideoCapture__()}");
}
LOGD("Java_org_opencv_samples_facedetect_DetectionBasedTracker_nativeDetect exit");
}
and the Java code which returns the RGBA Mat:
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
mRgba = inputFrame.rgba();
mGray = inputFrame.gray();
//Imgproc.Canny(mGray, mCanny, CANNY_MIN_TRESHOLD, CANNY_MAX_TRESHOLD);
Point center = new Point(mRgba.width() / 2, mRgba.height() / 2);
//must be 1 channels 8 bit!
/*//do Hough transform to find lines
double rho = 1;
double theta = Math.PI/180;
Imgproc.HoughLinesP(mCanny, mLines, rho, theta, HOUGH_TRESHOLD, HOUGH_MIN_LINE_LENGTH, HOUGH_MAX_LINE_GAP);*/
if (mAbsoluteFaceSize == 0) {
int height = mGray.rows();
if (Math.round(height * mRelativeFaceSize) > 0) {
mAbsoluteFaceSize = Math.round(height * mRelativeFaceSize);
}
mNativeDetector.setMinFaceSize(mAbsoluteFaceSize);
}
MatOfRect faces = new MatOfRect();
if (mDetectorType == JAVA_DETECTOR) {
if (mJavaDetector != null)
mJavaDetector.detectMultiScale(mGray, faces, 1.1, 2,
2, // TODO: objdetect.CV_HAAR_SCALE_IMAGE
new Size(mAbsoluteFaceSize, mAbsoluteFaceSize),
new Size());
} else if (mDetectorType == NATIVE_DETECTOR) {
if (mNativeDetector != null)
mNativeDetector.detect(mGray, faces);
//mNativeDetector.findLines(mGray, mCanny);
} else {
Log.e(TAG, "Detection method is not selected!");
}
/*Rect[] facesArray = faces.toArray();
for (int i = 0; i < facesArray.length; i++) {
if (center.x > facesArray[i].tl().x
&& center.x < facesArray[i].br().x) {
if (center.y > facesArray[i].tl().y
&& center.y < facesArray[i].br().y) {
Core.rectangle(mRgba, facesArray[i].tl(),
facesArray[i].br(), CAR_RECT_COLOR_RED, 3);
} else {
Core.rectangle(mRgba, facesArray[i].tl(),
facesArray[i].br(), CAR_RECT_COLOR_YELLOW, 3);
}
} else {
Core.rectangle(mRgba, facesArray[i].tl(), facesArray[i].br(),
CAR_RECT_COLOR_YELLOW, 3);
}
}*/
Point p1 = new Point(mRgba.width() / 2, 0);
Point p2 = new Point(mRgba.width() / 2, mRgba.height());
Point p3 = new Point(0, mRgba.height() / 2);
Point p4 = new Point(mRgba.width(), mRgba.height() / 2);
Core.line(mRgba, p3, p4, AXIS_COLOR);
Core.line(mRgba, p1, p2, AXIS_COLOR);
return mRgba;
//return mLines;
}
Why are you passing a grayscale image to the function?
If you want things to show up on the screen, you should pass RGBA data.
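Concretely, one way to make the native drawing visible (a sketch, not the poster's code): draw on a color Mat instead of the single-channel gray one. On a CV_8UC1 image, OpenCV uses only the first component of Scalar(0, 255, 255, 255), so the rectangles above come out black, and the -1 thickness fills them solid:
cv::Mat rgba;
cv::cvtColor(*(cv::Mat*)imageGray, rgba, cv::COLOR_GRAY2RGBA); // 1 -> 4 channels
// outline (thickness 3) instead of filled (-1)
cv::rectangle(rgba, p, k, cv::Scalar(0, 255, 255, 255), 3, 8);
For the result to reach the screen, the Java side must pass down the RGBA Mat (inputFrame.rgba()) rather than the gray one.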
I'm a beginner with JNI and I'm trying to load a library, but I keep getting an UnsatisfiedLinkError in the log. I've checked all of my files multiple times, but I still get the same error.
Android.mk
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)
LOCAL_MODULE := native_sample
LOCAL_SRC_FILES := myFirstApp.cpp
LOCAL_LDLIBS += -llog -ldl
include $(BUILD_SHARED_LIBRARY)
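One detail worth checking here: myFirstApp.cpp includes OpenCV headers, but the Android.mk above never pulls in OpenCV. With the OpenCV4Android NDK workflow this usually means the module either fails to link or produces a .so that cannot be loaded, surfacing as an UnsatisfiedLinkError. A sketch of the usual setup (the SDK path is an assumption and depends on where the OpenCV Android SDK is unpacked):
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)
# pull in the prebuilt OpenCV modules before defining the local module
OPENCV_INSTALL_MODULES := on
include /path/to/OpenCV-android-sdk/sdk/native/jni/OpenCV.mk
LOCAL_MODULE := native_sample
LOCAL_SRC_FILES := myFirstApp.cpp
LOCAL_LDLIBS += -llog -ldl
include $(BUILD_SHARED_LIBRARY)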
myFirstApp.cpp
#include <jni.h>
#include <opencv2/core/core.hpp>
#include "opencv2/highgui/highgui.hpp"
#include <vector>
#include <math.h>
#include <android/log.h>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/legacy/legacy.hpp>
#include "opencv2/video/tracking.hpp"
#include <time.h>
#include <math.h>
#include <string.h>
#define APPNAME "myFirstApp"
using namespace std;
using namespace cv;
extern "C" {
static int numTemplate = 24;
static int tWidth = 256;
static int tHight = 256;
static vector<Mat> tmplts;
static vector<vector<KeyPoint> > keyPointsTmplts;
static vector<Mat> descriptorsTmplts;
static vector<Mat> trainDescriptors;
// find squares vars
static vector<vector<Point> > squares;
static vector<Point2f> squaresCenters;
static vector<int> squaresAbsAreas;
static vector<int> clustersAreas;
static double scaleFactor = 1.5;
static double MARKER_RATIO = 0.03;
//clustering vars
static vector<Point2f> clusterCenters;
static vector<vector<Point> > clusterBoundaries;
static int CLUSTERTHRESHOLD = 25;
//tracking variables
static Mat prevFrame;
static vector<Point2f> oldPoints;
static TermCriteria termcrit(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03);
static Size winSize(31, 31);
static Size subPixWinSize(10, 10);
//Recognition
static std::vector<int> matchingResults;
static vector<Mat> hos;
static Mat warpedImg;
static Mat cropped;
static vector<vector<KeyPoint> > candidatesKeypoints;
vector<Mat> candidates;
// DETECTION
static FastFeatureDetector detector(16);
//static int MAX_KEYPOINTS_COUNT=100;
//static GoodFeaturesToTrackDetector detector(MAX_KEYPOINTS_COUNT, 0.01, 10, 3, false, 0.04);
// DESCRIPTOR
static FREAK extractor;
// MATCHER
static BFMatcher matcher(NORM_HAMMING, true);
double diffclock(clock_t clock1, clock_t clock2);
// for int arrays
static Mat points2Mat(const Point* p) {
Mat srcP(4, 2, CV_32FC1);
for (int i = 0; i < 4; i++) {
srcP.at<float>(i, 0) = p[i].x;
srcP.at<float>(i, 1) = p[i].y;
}
return srcP;
}
// for float arrays
static Mat points2MatF(const Point2f* p) {
Mat srcP(4, 2, CV_32FC1);
for (int i = 0; i < 4; i++) {
srcP.at<float>(i, 0) = p[i].x;
srcP.at<float>(i, 1) = p[i].y;
}
return srcP;
}
static Mat prepareWarpDstMat(const Point* p) {
Mat dstP(4, 2, CV_32FC1); // construct the cv::Mat directly (cvCreateMat returns a C-API CvMat* and would leak its header)
dstP.at<float>(0, 0) = p[0].x;
dstP.at<float>(0, 1) = p[0].y;
dstP.at<float>(1, 0) = p[0].x + tWidth;
dstP.at<float>(1, 1) = p[0].y;
dstP.at<float>(2, 0) = p[0].x + tWidth;
dstP.at<float>(2, 1) = p[0].y + tHight;
dstP.at<float>(3, 0) = p[0].x;
dstP.at<float>(3, 1) = p[0].y + tHight;
return dstP;
}
//-----------------------------Find Squares-------------------------------------------
// helper function:
// finds a cosine of angle between vectors
// from pt0->pt1 and from pt0->pt2
static double angle(Point pt1, Point pt2, Point pt0) {
double dx1 = pt1.x - pt0.x;
double dy1 = pt1.y - pt0.y;
double dx2 = pt2.x - pt0.x;
double dy2 = pt2.y - pt0.y;
return (dx1 * dx2 + dy1 * dy2)
/ sqrt((dx1 * dx1 + dy1 * dy1) * (dx2 * dx2 + dy2 * dy2) + 1e-10);
}
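// In other words, angle() returns cos(theta) via the dot-product identity
//   cos(theta) = (v1 . v2) / (|v1| |v2|), with v1 = pt1 - pt0 and v2 = pt2 - pt0;
// the 1e-10 term guards the division against degenerate (zero-length) vectors.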
static void clearVectors() {
// clear all vectors from data
squares.clear();
matchingResults.clear();
squaresCenters.clear();
squaresAbsAreas.clear();
clusterCenters.clear();
clusterBoundaries.clear();
clustersAreas.clear();
candidatesKeypoints.clear();
candidates.clear();
}
// the sequence is stored in the specified memory storage
static void findSquares(const Mat& grayImg) {
clock_t begin = clock();
Mat timg, gray, scaledImg;
resize(grayImg, scaledImg, Size(0, 0), 1 / scaleFactor, 1 / scaleFactor,
CV_INTER_CUBIC);
__android_log_print(ANDROID_LOG_VERBOSE, APPNAME, "scaledImage %i %i",
scaledImg.cols,scaledImg.rows);
// Gaussian blurring better than pyr up and down
GaussianBlur(scaledImg, timg, Size(5, 5), 0, 0, BORDER_DEFAULT); // t11
vector<vector<Point> > contours;
// find squares in every color plane of the image
Canny(timg, gray, 50, 200, 5); //t3
dilate(gray, gray, Mat(), Point(-1, -1));
// find contours and store them all as a list
findContours(gray, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
vector<Point> approx;
// test each contour
for (int i = 0; i < contours.size(); i++) {
// approximate contour with accuracy proportional
// to the contour perimeter
approxPolyDP(Mat(contours[i]), approx,
arcLength(Mat(contours[i]), true) * 0.02, true);
// square contours should have 4 vertices after approximation
// relatively large area (to filter out noisy contours)
// and be convex.
// Note: absolute value of an area is used because
// area may be positive or negative - in accordance with the
// contour orientation
int imgArea = gray.cols * gray.rows;
int absArea = fabs(contourArea(Mat(approx)));
if (approx.size() == 4 && absArea > 1000 && isContourConvex(Mat(approx))
&& absArea < 0.8 * imgArea) {
double maxCosine = 0;
Point a, b, c, d;
for (int j = 2; j < 5; j++) {
// find the maximum cosine of the angle between joint edges
a = approx[j % 4];
b = approx[j - 2];
c = approx[j - 1];
double cosine = fabs(angle(a, b, c));
maxCosine = MAX(maxCosine, cosine);
}
// if cosines of all angles are small
// (all angles are ~90 degree) then write quandrange
// vertices to resultant sequence
if (maxCosine < 0.3) {
// restore scaling
Point* p0 = (Point*) &approx[0];
Point* p1 = (Point*) &approx[1];
Point* p2 = (Point*) &approx[2];
Point* p3 = (Point*) &approx[3];
p0->x = p0->x * scaleFactor;
p0->y = p0->y * scaleFactor;
p1->x = p1->x * scaleFactor;
p1->y = p1->y * scaleFactor;
p2->x = p2->x * scaleFactor;
p2->y = p2->y * scaleFactor;
p3->x = p3->x * scaleFactor;
p3->y = p3->y * scaleFactor;
Point2f center = (*p0 + *p1 + *p2 + *p3) * (0.25);
// //__android_log_print(ANDROID_LOG_VERBOSE, APPNAME, "squareCenter %G,%G",center.x,center.y);
squares.push_back(approx);
squaresCenters.push_back(center);
squaresAbsAreas.push_back(absArea);
}
}
}
}
//--------------------------------Cluster Rectangles-------------------------------------
static void updateCluster(int pNum, int* clusters, int n) {
for (int i = 0; i < n; i++) {
if (clusters[pNum] != clusters[i]) {
Point2f p0 = (Point2f) squaresCenters[pNum];
Point2f p1 = (Point2f) squaresCenters[i];
// //__android_log_print(ANDROID_LOG_VERBOSE, APPNAME, "norm %G",
// norm(p0-p1));
if (norm(p0 - p1) < CLUSTERTHRESHOLD) {
clusters[i] = clusters[pNum];
updateCluster(i, clusters, n);
}
}
}
}
static int TRACKED_THRESHOLD = 100;
static bool inTrackingList(Point2f clusterCenter) {
// int tracklistSize = trackedMarkersCenters.size();
// Point2f trackedCenter;
// for (int i = 0; i < tracklistSize; i++) {
// trackedCenter = (Point2f) trackedMarkersCenters[i][0];
//// //__android_log_print(ANDROID_LOG_VERBOSE, APPNAME, "dist %G",
//// norm(clusterCenter - trackedCenter));
// if (norm(clusterCenter - trackedCenter) < TRACKED_THRESHOLD)
// return true;
// }
return false;
}
static void mergeRectangles() {
int n = squaresCenters.size();
int clusters[n];
int clusterCounter = 0;
for (int i = 0; i < n; i++)
clusters[i] = -1;
for (int i = 0; i < n; i++)
if (clusters[i] == -1) {
clusters[i] = clusterCounter;
clusterCounter++;
updateCluster(i, clusters, n);
}
//__android_log_print(ANDROID_LOG_VERBOSE, APPNAME, "clusters %i",
//clusterCounter);
int members, maxArea;
for (int i = 0; i < clusterCounter; i++) {
members = 0;
Point2f center(0, 0);
maxArea = -1;
vector<Point> maxSquare;
for (int j = 0; j < n; j++) {
if (clusters[j] == i) {
center += (Point2f) squaresCenters[j];
members++;
if (maxArea < (int) squaresAbsAreas[j]) {
maxArea = (int) squaresAbsAreas[j];
maxSquare = squares[j];
}
}
}
center *= (1.0 / members);
if (!inTrackingList(center)) {
clusterCenters.push_back(center);
clusterBoundaries.push_back(maxSquare);
clustersAreas.push_back(maxArea);
matchingResults.push_back(-3);
const Point* floPoin = &maxSquare[0];
Mat scene = points2Mat(floPoin);
}
}
}
//------------------------------Process Filtered squares--------------------------------------
static int imageArea;
// crop squares
static void cropAndWarpCandidate(Mat& grayImg, const Point* p, int i) {
//__android_log_print(ANDROID_LOG_VERBOSE, APPNAME, "Area %i %i %G %G", i,
// clustersAreas[i], imageArea, clustersAreas[i] * 1.0 / imageArea);
//
// if (clustersAreas[i] < MARKER_RATIO * imageArea) {
//
// matchingResults[i] = -2;
// return;
// }
Mat srcPMat = points2Mat(p);
Mat dstPMat = prepareWarpDstMat(p);
Mat ho = findHomography(srcPMat, dstPMat, 0);
warpPerspective(grayImg, warpedImg, ho,
Size(grayImg.cols + tWidth, grayImg.rows + tHight));
cropped = Mat(warpedImg, Rect(p[0].x, p[0].y, tWidth, tHight));
// int templateIndex = matchCandidate(cropped);
//
// matchingResults[i] = templateIndex;
candidates.push_back(cropped);
srcPMat.release();
dstPMat.release();
warpedImg.release();
cropped.release();
}
// the function draws all the squares in the image
static void processFilteredSquares(Mat& grayImg) {
imageArea = grayImg.cols * grayImg.rows;
int squaresSize = clusterBoundaries.size();
//__android_log_print(ANDROID_LOG_VERBOSE, APPNAME, "filtered %d",
// squaresSize);
for (int i = 0; i < squaresSize; i++) {
const Point* p = &clusterBoundaries[i][0];
cropAndWarpCandidate(grayImg, p, i);
}
}
//------------------Drawing ---------------------------------------
static void drawFilteredSquaresWithoutMarker(Mat& rgbImg) {
int squaresSize = clusterBoundaries.size();
__android_log_print(ANDROID_LOG_VERBOSE, APPNAME, "squares %i",
squaresSize);
int n = 4;
for (int i = 0; i < squaresSize; i++) {
const Point* p = &clusterBoundaries[i][0];
Point center = clusterCenters[i];
polylines(rgbImg, &p, &n, 1, true, Scalar(0, 255, 0, 255), 3, CV_AA);
circle(rgbImg, center, 10, Scalar(0, 255, 0, 255));
}
}
// calc time helper
double diffclock(clock_t clock1, clock_t clock2) {
double diffticks = clock1 - clock2;
double diffms = (diffticks * 1000) / CLOCKS_PER_SEC;
return diffms;
}
JNIEXPORT jint JNICALL Java_com_example_myfirstapp_RegisterMarkerMain_findMarkersNative(
JNIEnv* env, jobject, jlong addrRgba) {
//clock_t begin = clock();
Mat& mRgb = *(Mat*) addrRgba;
Mat mgray(mRgb.rows, mRgb.cols, CV_8UC1);
cvtColor(mRgb, mgray, CV_RGBA2GRAY, 1); // the working one
clearVectors();
findSquares(mgray);
mergeRectangles();
processFilteredSquares(mgray);
drawFilteredSquaresWithoutMarker(mRgb);
__android_log_print(ANDROID_LOG_VERBOSE, APPNAME, "Candidates %i",candidates.size());
return clusterBoundaries.size();
// clock_t end = clock();
// mgray.release();
}
JNIEXPORT void JNICALL Java_com_example_myfirstapp_RegisterMarkerMain_loadCand(
JNIEnv* env, jobject, jlong addrRgba, jlong descriptorAdd, jint i) {
vector<KeyPoint> keyPoints;
Mat nativeM = candidates[i];
Mat& mRgb = *(Mat*) addrRgba;
Mat& descriptor = *(Mat*) descriptorAdd;
nativeM.copyTo(mRgb);
Mat descriptorUnFiltered;
detector.detect(nativeM, keyPoints);
if(keyPoints.size()==0)
return;
extractor.compute(nativeM, keyPoints, descriptorUnFiltered);
vector<vector<DMatch> > matches;
if(descriptorUnFiltered.rows==0)
return;
matcher.radiusMatch(descriptorUnFiltered, descriptorUnFiltered, matches,
50);
descriptor = descriptorUnFiltered.row(0);
std::vector<DMatch> mat;
for (int j = 1; j < matches.size(); j++) {
mat = matches[j];
// if no matches neglect
if (mat.size() >= 2) {
DMatch m = mat[1];
if (m.trainIdx < m.queryIdx)
continue;
else
vconcat(descriptor, descriptorUnFiltered.row(m.queryIdx),
descriptor);
} else {
DMatch m0 = mat[0];
vconcat(descriptor, descriptorUnFiltered.row(m0.queryIdx),
descriptor);
}
}
}
}
This is where I call loadLibrary in the activity:
private BaseLoaderCallback mLoaderCallback = new BaseLoaderCallback(this) {
@Override
public void onManagerConnected(int status) {
Log.i("loading libs", "OpenCV loading status " + status);
switch (status) {
case LoaderCallbackInterface.SUCCESS: {
Log.i("loading libs", "OpenCV loaded successfully");
// Load native library after(!) OpenCV initialization
System.loadLibrary("native_sample");
}
break;
default: {
super.onManagerConnected(status);
}
break;
}
}
};
Any help would be really appreciated.
The UnsatisfiedLinkError is thrown when an application attempts to load a native library (.so on Linux, .dll on Windows, .dylib on macOS) and that library does not exist.
It threw this to the console:
java.lang.UnsatisfiedLinkError: dalvik.system.PathClassLoader[DexPathList[[zip file "/system/framework/org.apache.http.legacy.boot.jar", zip file "/data/app/com.imaniac.myo-QS9EJbxzOjKhre3FebKwoA==/base.apk"], nativeLibraryDirectories=[/data/app/com.imaniac.myo-QS9EJbxzOjKhre3FebKwoA==/lib/arm64, /system/lib64]]] could not find "libgesture-classifier.so"
at java.lang.Runtime.loadLibrary0(Runtime.java:1012)
at java.lang.System.loadLibrary(System.java:1669)
Well, it worked for me by adding [this][1] in projectfolder\src\main
[1]: https://mega.nz/#!HsVijIxa!CLbeM1BhpEd5sUrErFglP7R8BaHPKaYTG3CkCkaoXpk
Try adding that library to the path I mentioned earlier (projectfolder\src\main).
#include <fcntl.h>   // open()
#include <stdlib.h>  // atoi()
#include <unistd.h>  // read(), close()
#include "jni.h"

static int readFromFile(const char* path, char* buf, size_t size); // defined below; needed before first use (LOGE is assumed defined elsewhere)

static void setIntField(JNIEnv* env, jobject obj, const char* path,
        jfieldID fieldID) {
const int SIZE = 128;
char buf[SIZE] = "\0";
jint value = 0;
if (readFromFile(path, buf, SIZE) > 0) {
value = atoi(buf);
}
env->SetIntField(obj, fieldID, value);
}
static int readFromFile(const char* path, char* buf, size_t size) {
if (!path)
return -1;
int fd = open(path, O_RDONLY, 0);
if (fd == -1) {
LOGE("Could not open '%s'", path);
return -1;
}
ssize_t count = read(fd, buf, size); // read() returns ssize_t; a size_t here would turn the -1 error into a huge positive value
if (count > 0) {
count = (count < size) ? count : size - 1;
while (count > 0 && buf[count - 1] == '\n')
count--;
buf[count] = '\0';
} else {
buf[0] = '\0';
}
close(fd);
return count;
}
This is my Android JNI code. Why do I read the same path twice but get two different values? I am reading the path /sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq.
How can I fix it?
The scaling_min_freq value is 51000. The first time I read it I get 51000; the second time I get 150000.