JNI UnsatisfiedLinkError - Android

I'm a beginner with JNI and I'm trying to load a library, but I keep getting an UnsatisfiedLinkError in the log. I've checked all of my files multiple times, but I still get the same error.
Android.mk
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)
LOCAL_MODULE := native_sample
LOCAL_SRC_FILES := myFirstApp.cpp
LOCAL_LDLIBS += -llog -ldl
include $(BUILD_SHARED_LIBRARY)
myFirstApp.cpp
#include <jni.h>
#include <opencv2/core/core.hpp>
#include "opencv2/highgui/highgui.hpp"
#include <vector>
#include <math.h>
#include <android/log.h>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/legacy/legacy.hpp>
#include "opencv2/video/tracking.hpp"
#include <time.h>
#include <math.h>
#include <string.h>
#define APPNAME "myFirstApp"
using namespace std;
using namespace cv;
extern "C" {
static int numTemplate = 24;
static int tWidth = 256;
static int tHight = 256;
static vector<Mat> tmplts;
static vector<vector<KeyPoint> > keyPointsTmplts;
static vector<Mat> descriptorsTmplts;
static vector<Mat> trainDescriptors;
// find squares vars
static vector<vector<Point> > squares;
static vector<Point2f> squaresCenters;
static vector<int> squaresAbsAreas;
static vector<int> clustersAreas;
static double scaleFactor = 1.5;
static double MARKER_RATIO = 0.03;
//clustering vars
static vector<Point2f> clusterCenters;
static vector<vector<Point> > clusterBoundaries;
static int CLUSTERTHRESHOLD = 25;
//tracking variables
static Mat prevFrame;
static vector<Point2f> oldPoints;
static TermCriteria termcrit(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03);
static Size winSize(31, 31);
static Size subPixWinSize(10, 10);
//Recognition
static std::vector<int> matchingResults;
static vector<Mat> hos;
static Mat warpedImg;
static Mat cropped;
static vector<vector<KeyPoint> > candidatesKeypoints;
vector<Mat> candidates;
// DETECTION
static FastFeatureDetector detector(16);
//static int MAX_KEYPOINTS_COUNT=100;
//static GoodFeaturesToTrackDetector detector(MAX_KEYPOINTS_COUNT, 0.01, 10, 3, false, 0.04);
// DESCRIPTOR
static FREAK extractor;
// MATCHER
static BFMatcher matcher(NORM_HAMMING, true);
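// (Hamming distance with cross-check enabled suits binary descriptors such as FREAK.)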
double diffclock(clock_t clock1, clock_t clock2);
// for int arrays
static Mat points2Mat(const Point* p) {
Mat srcP(4, 2, CV_32FC1);
for (int i = 0; i < 4; i++) {
srcP.at<float>(i, 0) = p[i].x;
srcP.at<float>(i, 1) = p[i].y;
}
return srcP;
}
// for float arrays
static Mat points2MatF(const Point2f* p) {
Mat srcP(4, 2, CV_32FC1);
for (int i = 0; i < 4; i++) {
srcP.at<float>(i, 0) = p[i].x;
srcP.at<float>(i, 1) = p[i].y;
}
return srcP;
}
static Mat prepareWarpDstMat(const Point* p) {
Mat dstP(4, 2, CV_32FC1);
dstP.at<float>(0, 0) = p[0].x;
dstP.at<float>(0, 1) = p[0].y;
dstP.at<float>(1, 0) = p[0].x + tWidth;
dstP.at<float>(1, 1) = p[0].y;
dstP.at<float>(2, 0) = p[0].x + tWidth;
dstP.at<float>(2, 1) = p[0].y + tHight;
dstP.at<float>(3, 0) = p[0].x;
dstP.at<float>(3, 1) = p[0].y + tHight;
return dstP;
}
//-----------------------------Find Squares-------------------------------------------
// helper function:
// finds a cosine of angle between vectors
// from pt0->pt1 and from pt0->pt2
static double angle(Point pt1, Point pt2, Point pt0) {
double dx1 = pt1.x - pt0.x;
double dy1 = pt1.y - pt0.y;
double dx2 = pt2.x - pt0.x;
double dy2 = pt2.y - pt0.y;
return (dx1 * dx2 + dy1 * dy2)
/ sqrt((dx1 * dx1 + dy1 * dy1) * (dx2 * dx2 + dy2 * dy2) + 1e-10);
}
static void clearVectors() {
// clear all vectors from data
squares.clear();
matchingResults.clear();
squaresCenters.clear();
squaresAbsAreas.clear();
clusterCenters.clear();
clusterBoundaries.clear();
clustersAreas.clear();
candidatesKeypoints.clear();
candidates.clear();
}
// the sequence is stored in the specified memory storage
static void findSquares(const Mat& grayImg) {
clock_t begin = clock();
Mat timg, gray, scaledImg;
resize(grayImg, scaledImg, Size(0, 0), 1 / scaleFactor, 1 / scaleFactor,
CV_INTER_CUBIC);
__android_log_print(ANDROID_LOG_VERBOSE, APPNAME, "scaledImage %i %i",
scaledImg.cols,scaledImg.rows);
// Gaussian blurring better than pyr up and down
GaussianBlur(scaledImg, timg, Size(5, 5), 0, 0, BORDER_DEFAULT); // t11
vector<vector<Point> > contours;
// find squares in every color plane of the image
Canny(timg, gray, 50, 200, 5); //t3
dilate(gray, gray, Mat(), Point(-1, -1));
// find contours and store them all as a list
findContours(gray, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
vector<Point> approx;
// test each contour
for (int i = 0; i < contours.size(); i++) {
// approximate contour with accuracy proportional
// to the contour perimeter
approxPolyDP(Mat(contours[i]), approx,
arcLength(Mat(contours[i]), true) * 0.02, true);
// square contours should have 4 vertices after approximation
// relatively large area (to filter out noisy contours)
// and be convex.
// Note: absolute value of an area is used because
// area may be positive or negative - in accordance with the
// contour orientation
int imgArea = gray.cols * gray.rows;
int absArea = fabs(contourArea(Mat(approx)));
if (approx.size() == 4 && absArea > 1000 && isContourConvex(Mat(approx))
&& absArea < 0.8 * imgArea) {
double maxCosine = 0;
Point a, b, c, d;
for (int j = 2; j < 5; j++) {
// find the maximum cosine of the angle between joint edges
a = approx[j % 4];
b = approx[j - 2];
c = approx[j - 1];
double cosine = fabs(angle(a, b, c));
maxCosine = MAX(maxCosine, cosine);
}
// if cosines of all angles are small
// (all angles are ~90 degree) then write quandrange
// vertices to resultant sequence
if (maxCosine < 0.3) {
// restore scaling
Point* p0 = (Point*) &approx[0];
Point* p1 = (Point*) &approx[1];
Point* p2 = (Point*) &approx[2];
Point* p3 = (Point*) &approx[3];
p0->x = p0->x * scaleFactor;
p0->y = p0->y * scaleFactor;
p1->x = p1->x * scaleFactor;
p1->y = p1->y * scaleFactor;
p2->x = p2->x * scaleFactor;
p2->y = p2->y * scaleFactor;
p3->x = p3->x * scaleFactor;
p3->y = p3->y * scaleFactor;
Point2f center = (*p0 + *p1 + *p2 + *p3) * (0.25);
// //__android_log_print(ANDROID_LOG_VERBOSE, APPNAME, "squareCenter %G,%G",center.x,center.y);
squares.push_back(approx);
squaresCenters.push_back(center);
squaresAbsAreas.push_back(absArea);
}
}
}
}
//--------------------------------Cluster Rectangles-------------------------------------
static void updateCluster(int pNum, int* clusters, int n) {
for (int i = 0; i < n; i++) {
if (clusters[pNum] != clusters[i]) {
Point2f p0 = (Point2f) squaresCenters[pNum];
Point2f p1 = (Point2f) squaresCenters[i];
// //__android_log_print(ANDROID_LOG_VERBOSE, APPNAME, "norm %G",
// norm(p0-p1));
if (norm(p0 - p1) < CLUSTERTHRESHOLD) {
clusters[i] = clusters[pNum];
updateCluster(i, clusters, n);
}
}
}
}
static int TRACKED_THRESHOLD = 100;
static bool inTrackingList(Point2f clusterCenter) {
// int tracklistSize = trackedMarkersCenters.size();
// Point2f trackedCenter;
// for (int i = 0; i < tracklistSize; i++) {
// trackedCenter = (Point2f) trackedMarkersCenters[i][0];
//// //__android_log_print(ANDROID_LOG_VERBOSE, APPNAME, "dist %G",
//// norm(clusterCenter - trackedCenter));
// if (norm(clusterCenter - trackedCenter) < TRACKED_THRESHOLD)
// return true;
// }
return false;
}
static void mergeRectangles() {
int n = squaresCenters.size();
int clusters[n];
int clusterCounter = 0;
for (int i = 0; i < n; i++)
clusters[i] = -1;
for (int i = 0; i < n; i++)
if (clusters[i] == -1) {
clusters[i] = clusterCounter;
clusterCounter++;
updateCluster(i, clusters, n);
}
//__android_log_print(ANDROID_LOG_VERBOSE, APPNAME, "clusters %i",
//clusterCounter);
int members, maxArea;
for (int i = 0; i < clusterCounter; i++) {
members = 0;
Point2f center(0, 0);
maxArea = -1;
vector<Point> maxSquare;
for (int j = 0; j < n; j++) {
if (clusters[j] == i) {
center += (Point2f) squaresCenters[j];
members++;
if (maxArea < (int) squaresAbsAreas[j]) {
maxArea = (int) squaresAbsAreas[j];
maxSquare = squares[j];
}
}
}
center *= (1.0 / members);
if (!inTrackingList(center)) {
clusterCenters.push_back(center);
clusterBoundaries.push_back(maxSquare);
clustersAreas.push_back(maxArea);
matchingResults.push_back(-3);
const Point* floPoin = &maxSquare[0];
Mat scene = points2Mat(floPoin);
}
}
}
//------------------------------Process Filtered squares--------------------------------------
static int imageArea;
// crop squares
static void cropAndWarpCandidate(Mat& grayImg, const Point* p, int i) {
//__android_log_print(ANDROID_LOG_VERBOSE, APPNAME, "Area %i %i %G %G", i,
// clustersAreas[i], imageArea, clustersAreas[i] * 1.0 / imageArea);
//
// if (clustersAreas[i] < MARKER_RATIO * imageArea) {
//
// matchingResults[i] = -2;
// return;
// }
Mat srcPMat = points2Mat(p);
Mat dstPMat = prepareWarpDstMat(p);
Mat ho = findHomography(srcPMat, dstPMat, 0);
warpPerspective(grayImg, warpedImg, ho,
Size(grayImg.cols + tWidth, grayImg.rows + tHight));
cropped = Mat(warpedImg, Rect(p[0].x, p[0].y, tWidth, tHight));
// int templateIndex = matchCandidate(cropped);
//
// matchingResults[i] = templateIndex;
candidates.push_back(cropped);
srcPMat.release();
dstPMat.release();
warpedImg.release();
cropped.release();
}
// the function draws all the squares in the image
static void processFilteredSquares(Mat& grayImg) {
imageArea = grayImg.cols * grayImg.rows;
int squaresSize = clusterBoundaries.size();
//__android_log_print(ANDROID_LOG_VERBOSE, APPNAME, "filtered %d",
// squaresSize);
for (int i = 0; i < squaresSize; i++) {
const Point* p = &clusterBoundaries[i][0];
cropAndWarpCandidate(grayImg, p, i);
}
}
//------------------Drawing ---------------------------------------
static void drawFilteredSquaresWithoutMarker(Mat& rgbImg) {
int squaresSize = clusterBoundaries.size();
__android_log_print(ANDROID_LOG_VERBOSE, APPNAME, "squares %i",
squaresSize);
int n = 4;
for (int i = 0; i < squaresSize; i++) {
const Point* p = &clusterBoundaries[i][0];
Point center = clusterCenters[i];
polylines(rgbImg, &p, &n, 1, true, Scalar(0, 255, 0, 255), 3, CV_AA);
circle(rgbImg, center, 10, Scalar(0, 255, 0, 255));
}
}
// calc time helper
double diffclock(clock_t clock1, clock_t clock2) {
double diffticks = clock1 - clock2;
double diffms = (diffticks * 1000) / CLOCKS_PER_SEC;
return diffms;
}
JNIEXPORT jint JNICALL Java_com_example_myfirstapp_RegisterMarkerMain_findMarkersNative(
JNIEnv* env, jobject, jlong addrRgba) {
//clock_t begin = clock();
Mat& mRgb = *(Mat*) addrRgba;
Mat mgray(mRgb.rows, mRgb.cols, CV_8UC1);
cvtColor(mRgb, mgray, CV_RGBA2GRAY, 1); // the working one
clearVectors();
findSquares(mgray);
mergeRectangles();
processFilteredSquares(mgray);
drawFilteredSquaresWithoutMarker(mRgb);
__android_log_print(ANDROID_LOG_VERBOSE, APPNAME, "Candidates %i",candidates.size());
return clusterBoundaries.size();
// clock_t end = clock();
// mgray.release();
}
JNIEXPORT void JNICALL Java_com_example_myfirstapp_RegisterMarkerMain_loadCand(
JNIEnv* env, jobject, jlong addrRgba, jlong descriptorAdd, jint i) {
vector<KeyPoint> keyPoints;
Mat nativeM = candidates[i];
Mat& mRgb = *(Mat*) addrRgba;
Mat& descriptor = *(Mat*) descriptorAdd;
nativeM.copyTo(mRgb);
Mat descriptorUnFiltered;
detector.detect(nativeM, keyPoints);
if(keyPoints.size()==0)
return;
extractor.compute(nativeM, keyPoints, descriptorUnFiltered);
vector<vector<DMatch> > matches;
if(descriptorUnFiltered.rows==0)
return;
matcher.radiusMatch(descriptorUnFiltered, descriptorUnFiltered, matches,
50);
descriptor = descriptorUnFiltered.row(0);
std::vector<DMatch> mat;
for (int j = 1; j < matches.size(); j++) {
mat = matches[j];
// if no matches neglect
if (mat.size() >= 2) {
DMatch m = mat[1];
if (m.trainIdx < m.queryIdx)
continue;
else
vconcat(descriptor, descriptorUnFiltered.row(m.queryIdx),
descriptor);
} else {
DMatch m0 = mat[0];
vconcat(descriptor, descriptorUnFiltered.row(m0.queryIdx),
descriptor);
}
}
}
}
This is where I call loadLibrary in the activity:
private BaseLoaderCallback mLoaderCallback = new BaseLoaderCallback(this) {
@Override
public void onManagerConnected(int status) {
Log.i("loading libs", "OpenCV loading status " + status);
switch (status) {
case LoaderCallbackInterface.SUCCESS: {
Log.i("loading libs", "OpenCV loaded successfully");
// Load native library after(!) OpenCV initialization
System.loadLibrary("native_sample");
}
break;
default: {
super.onManagerConnected(status);
}
break;
}
}
};
Any help would be really appreciated.

An UnsatisfiedLinkError is thrown when an application attempts to load a native library (a .so on Linux, a .dll on Windows, or a .dylib on macOS) and that library does not exist.
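For reference, a correctly exported native method looks like the sketch below (the class and method names here are hypothetical). Note that the same UnsatisfiedLinkError is also thrown at call time when the .so loads fine but the symbol cannot be resolved, for example because the Java_... name doesn't match the Java class or the function was name-mangled by C++:
#include <jni.h>
// The exported name must be Java_<package, with _ instead of .>_<Class>_<method>.
// In C++, extern "C" is required to prevent name mangling; without it the
// library loads, but calling the method throws UnsatisfiedLinkError.
extern "C" JNIEXPORT jint JNICALL
Java_com_example_myfirstapp_MainActivity_nativeAdd(JNIEnv* env, jobject thiz,
                                                   jint a, jint b) {
    return a + b;
}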
It threw this in the console:
  java.lang.UnsatisfiedLinkError: dalvik.system.PathClassLoader[DexPathList[[zip file "/system/framework/org.apache.http.legacy.boot.jar", zip file "/data/app/com.imaniac.myo-QS9EJbxzOjKhre3FebKwoA==/base.apk"], nativeLibraryDirectories=[/data/app/com.imaniac.myo-QS9EJbxzOjKhre3FebKwoA==/lib/arm64, /system/lib64]]] could not find "libgesture-classifier.so"
         at java.lang.Runtime.loadLibrary0(Runtime.java:1012)
         at java.lang.System.loadLibrary(System.java:1669)
Well, it worked for me after adding [this][1] in projectfolder\src\main.
   [1]: https://mega.nz/#!HsVijIxa!CLbeM1BhpEd5sUrErFglP7R8BaHPKaYTG3CkCkaoXpk
Try adding that library to the path I mentioned earlier (projectfolder\src\main).
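In case it helps, the conventional Gradle location for prebuilt .so files is a jniLibs folder under src/main with one subdirectory per ABI (a sketch; the library name is taken from the log above):
projectfolder/src/main/jniLibs/
    arm64-v8a/libgesture-classifier.so
    armeabi-v7a/libgesture-classifier.so
    x86/libgesture-classifier.so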

Related

Can't pass Mat object in OpenCV JNI Android Studio

I'm working on a project about document shadow removal in an Android application. I'm trying to port the C++ algorithm to Android, using a button to process the input image and remove the shadow, but there are still some errors and I have no idea how to solve them because I'm not good at coding. Here is the code of my project.
MainActivity.java
public class MainActivity extends AppCompatActivity {
// Used to load the 'native-lib' library on application startup.
static {
System.loadLibrary("native-lib");
System.loadLibrary("opencv_java3");
}
public static String TAG ="MainActivity";
ImageView imageView;
Button loadImage;
Button process;
private int REQUEST_CODE = 1;
Mat image = new Mat();
Bitmap bitmap;
Bitmap bmp;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
imageView = (ImageView) findViewById(R.id.imageView);
loadImage = (Button) findViewById(R.id.button);
process = (Button) findViewById(R.id.button2);
loadImage.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View view) {
Intent intent = new Intent();
intent.setType("image/");
intent.setAction(Intent.ACTION_GET_CONTENT);
startActivityForResult(Intent.createChooser(intent,"Select Image"),REQUEST_CODE);
}
});
process.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View view) {
image = new Mat (bitmap.getWidth(), bitmap.getHeight(), CvType.CV_8UC1);
Utils.bitmapToMat(bitmap, image);
Native.removeShadow(image.getNativeObjAddr());
Utils.matToBitmap(image,bitmap);
imageView.setImageBitmap(bitmap);
}
});
}
@Override
protected void onActivityResult(int request_code, int result_code, Intent data){
super.onActivityResult(request_code,result_code,data);
if(request_code == REQUEST_CODE && result_code == RESULT_OK && data != null && data.getData() != null ){
Uri uri = data.getData();
try{
bitmap = MediaStore.Images.Media.getBitmap(getContentResolver(), uri);
imageView.setImageBitmap(bitmap);
}catch (IOException e){
e.printStackTrace();
}
}
}
}
Native.java
public class Native {
public native static void removeShadow (long addrRgba);
}
shadowRemove.h
#include "jni.h"
#include <stdio.h>
#include <iostream>
#include <omp.h>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/ml/ml.hpp>
#include <opencv2/photo/photo.hpp>
#include <cmath>
#include <random>
#ifndef SHADOWDOCTEST_SHADOWREMOVE_H
#define SHADOWDOCTEST_SHADOWREMOVE_H
#define BUFFER_SIZE 1000
#define USE_SAMPLING 1
using namespace cv;
using namespace std;
using namespace cv::ml;
extern "C" {
JNIEXPORT void JNICALL Java_com_example_shadowdoctest_Native_removeShadow(JNIEnv *, jclass, jlong);
void removeShadow(Mat& img);
// Returns the block centered around (x,y).
bool GetBlock(int x, int y, Mat &block, Mat &dsImage);
void ClusterBlock(Mat &block, Mat &clusterMu, int *randInd);
// Identifies the local cluster belonging to the background and saves it in the shadow map
// Currently uses max cluster mean.
void CalculatePaperStatistics(int x, int y, Mat &clusterMu);
// Finds the block that best represents the background region. Used for constructing the
// shadow map (gain map)
void FindReferenceIndex(int &refInd, Mat &dsImage, Vec3f &ref);
void NormalizeShadowMap(int refIndex, Vec3f &ref);
void UpsampleShadowMap();
void ApplyShadowMap();
// Converts x and y index to access downsampled images (xhat = x and
// yhat = y when stride is 1)
void ConvertIndex(int x, int y, int &xHat, int &yHat);
// Input image, shadow map (gain map), and mask of image regions
Mat *image;
Mat *shadowMap;
// Full width, height, and number of channels
int width;
int height;
int channels;
// Number of pixels to skip when performing local analysis
int stride;
// Size of overlapping blocks in local analysis
int blockSize;
// Number of clusters used for local analysis (i.e., 2)
int numOfClusters;
// Number of clusters used for global analysis (i.e., 3)
int numOfClustersRef;
// Maximum number of iterations and epsilon threshold used as stopping condition for GMM clustering
int maxIters;
float emEps;
// Amount of downsampling to be used on the original image (for speedup)
float dsFactor;
// Number of local and global samples in the block and image, respectively (Default is 150 and 1000)
int numOfLocalSamples;
int numOfGlobalSamples;
}
#endif //SHADOWDOCTEST_SHADOWREMOVE_H
shadowRemove.cpp
#include "shadowRemove.h"
#include <cstdlib>
#include <iostream>
#include <iomanip>
#include <cmath>
#include <cstring>
JNIEXPORT void JNICALL Java_com_example_shadowdoctest_Native_removeShadow(JNIEnv *, jclass, jlong addrRgba){
Mat& image = *(Mat*)addrRgba;
removeShadow(image);
}
void removeShadow(Mat& img){
image = new Mat(img);
// image.convertTo(image, CV_32FC3);
width = image->cols;
height = image->rows;
channels = image->channels();
stride = 20; // Number of pixels to skip when performing local analysis
blockSize = 21; // Size of overlapping blocks in local analysis
numOfClusters = 3; // Number of clusters used for local analysis
numOfClustersRef = 3; // Number of clusters used for global analysis
maxIters = 100; // Maximum number of iterations used as stopping condition for GMM clustering.
emEps = 0.1f; // Epsilon threshold used as stopping condition for GMM clustering.
dsFactor = 1.0f; // No downsampling is done
numOfLocalSamples = 150; // Number of samples to take in each block (for local statistics)
numOfGlobalSamples = 1000; // Number of samples to take throughout entire image (for global statistics)
int sHeight, sWidth;
shadowMap = new Mat(image->cols, image->rows, CV_32FC3, CV_RGB(-1, -1, -1));//CV_32FC3 is a three channel matrix of 32-bit floats
resize(*shadowMap, *shadowMap, Size(0, 0), dsFactor, dsFactor, INTER_LANCZOS4);
ConvertIndex(shadowMap->cols, shadowMap->rows, sWidth, sHeight);
resize(*shadowMap, *shadowMap, Size(sWidth, sHeight));
int threadCount = omp_get_max_threads();
Mat* blockList = new Mat[threadCount];
for (int i = 0; i < threadCount; i++) {
blockList[i] = Mat(height, width, CV_32FC3, CV_RGB(0, 0, 0));
}
Mat dsMask = Mat(shadowMap->rows, shadowMap->cols, CV_8UC1, Scalar(0));
int* randInd = new int[numOfLocalSamples];
int size = blockSize * blockSize; // TODO: use size_t
vector<int> freeIndexes;
for (int i = 0; i < size; i++) {
freeIndexes.push_back(i);
}
int count = 0;
std::default_random_engine generator;
std::uniform_int_distribution<int> distribution(0, size);
while (count < numOfLocalSamples) {
int indexCandidate = distribution(generator);
vector<int>::iterator it = std::find(freeIndexes.begin(), freeIndexes.end(), indexCandidate);
if (it != freeIndexes.end()) {
randInd[count] = indexCandidate;
freeIndexes.erase(it);
count++;
}
}
Mat dsImage;
resize(*image, dsImage, Size(0, 0), dsFactor, dsFactor, INTER_NEAREST);
width = dsImage.cols;
height = dsImage.rows;
#pragma omp parallel
{
#pragma omp for schedule(dynamic) nowait
for (int i = 0; i < height; i += stride) {
for (int j = 0; j < width; j += stride) {
// Get current block
int threadNum = omp_get_thread_num();
Mat& curBlock = blockList[threadNum];
if (GetBlock(j, i, curBlock, dsImage)) {
// Cluster pixel intensities
Mat curMu;
vector<Mat> listOfCovs;
ClusterBlock(curBlock, curMu, randInd);
// Find paper mu of current block and update global matrix
CalculatePaperStatistics(j, i, curMu);
}
}
}
}
delete[] randInd;
delete[] blockList;
int refIndex = -1;
Vec3f ref;
FindReferenceIndex(refIndex, dsImage, ref);
width = image->cols;
height = image->rows;
medianBlur(*shadowMap, *shadowMap, 3);
GaussianBlur(*shadowMap, *shadowMap, Size(3, 3), 2.5f);
Mat dsShadowMap = *shadowMap;
UpsampleShadowMap();
NormalizeShadowMap(refIndex, ref);
ApplyShadowMap();
}
bool GetBlock (int x, int y, Mat& block, Mat& dsImage) {
int halfBlock = (int) floorf(float(blockSize) / 2.0f);
int minX = max(0, x - halfBlock);
int maxX = min(width - 1, x + halfBlock);
int minY = max(0, y - halfBlock);
int maxY = min(height - 1, y + halfBlock);
int deltaY = maxY - minY + 1;
int deltaX = maxX - minX + 1;
if (block.rows != deltaY || block.cols != deltaX) {
block = Mat(deltaY, deltaX, CV_32FC3, CV_RGB(0, 0, 0));
}
// Copy intensities to block
int bX = 0;
int bY = 0;
for (int i = minY; i <= maxY; i++) {
for (int j = minX; j <= maxX; j++) {
for (int k = 0; k < channels; k++) {
block.at<Vec3f>(bY, bX)[k] = dsImage.at<Vec3f>(i, j)[k];
}
bX++;
}
bX = 0;
bY++;
}
return true;
}
void ClusterBlock (Mat& block, Mat& clusterMu, int* randInd) {
// Set up expectation maximization model
Ptr<EM> emModel = EM::create();
emModel->setClustersNumber(numOfClusters);
emModel->setCovarianceMatrixType(EM::COV_MAT_DIAGONAL);
emModel->setTermCriteria(TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, maxIters, emEps));
// Cluster block with k means initializer
Mat samples;
if (block.rows * block.cols == blockSize * blockSize) {
Mat tmp(numOfLocalSamples, 1, CV_32FC3, CV_RGB(-1, -1, -1));
for (int i = 0; i < numOfLocalSamples; i++) {
assert(randInd[i] >= 0 && randInd[i] < block.rows * block.cols);
tmp.at<Vec3f>(i) = block.at<Vec3f>(randInd[i]);
}
samples = tmp.reshape(1);
}
else {
samples = block.reshape(0, block.rows * block.cols);
samples = samples.reshape(1);
}
emModel->trainEM(samples);
clusterMu = emModel->getMeans();
clusterMu = clusterMu.reshape(channels);
clusterMu.convertTo(clusterMu, CV_32FC3);
}
void CalculatePaperStatistics(int x, int y, Mat& clusterMu) {
int sX, sY;
ConvertIndex(x, y, sX, sY);
Vec3f& shadowVec = shadowMap->at<Vec3f>(sY, sX);
double maxSum = 0;
for (int i = 0; i < numOfClusters; i++) {
double muSum = 0;
for (int k = 0; k < channels; k++) {
muSum += clusterMu.at<Vec3f>(i)[k];
}
if (muSum > maxSum) {
maxSum = muSum;
for (int k = 0; k < channels; k++) {
shadowVec[k] = clusterMu.at<Vec3f>(i)[k];
}
}
}
}
void FindReferenceIndex(int& refIndex, Mat& dsImage, Vec3f& ref) {
// Set up expectation maximization model
Ptr<EM> emModel = EM::create();
emModel->setClustersNumber(numOfClustersRef);
emModel->setCovarianceMatrixType(EM::COV_MAT_DIAGONAL);
emModel->setTermCriteria(TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, maxIters, emEps));
// Cluster block with k means initializer
Mat samples;
#if USE_SAMPLING
Mat tmp(numOfGlobalSamples, 1, CV_32FC3, CV_RGB(-1, -1, -1));
int* randInd = new int[numOfGlobalSamples];
int size = width * height; // TODO: Use size_t
vector<int> freeIndexes;
for (int i = 0; i < size; i++) {
freeIndexes.push_back(i);
}
int count = 0;
int maxIndexCandidiate = -1;
int delta = size / numOfGlobalSamples;
std::default_random_engine generator;
std::uniform_int_distribution<int> distribution(0, size);
while (count < numOfGlobalSamples) {
int indexCandidate = distribution(generator);
vector<int>::iterator it = std::find(freeIndexes.begin(), freeIndexes.end(), indexCandidate);
if (it != freeIndexes.end()) {
randInd[count] = indexCandidate;
freeIndexes.erase(it);
count++;
}
}
for (int i = 0; i < numOfGlobalSamples; i++) {
tmp.at<Vec3f>(i) = image->at<Vec3f>(randInd[i]);
}
delete[] randInd;
samples = tmp.reshape(1);
#else
samples = dsImage.reshape(0, width * height);
samples = samples.reshape(1);
#endif
emModel->trainEM(samples);
// Get the cluster means
Mat clusterMu = emModel->getMeans();
clusterMu = clusterMu.reshape(channels);
clusterMu.convertTo(clusterMu, CV_32FC3);
// Get cluster variances
int maxInd = -1;
double curMax = -1;
for (int i = 0; i < numOfClustersRef; i++) {
double muMag = 0;
for (int k = 0; k < channels; k++) {
muMag += clusterMu.at<Vec3f>(i)[k];
}
if (muMag > curMax) {
curMax = muMag;
maxInd = i;
}
}
assert(maxInd != -1 && maxInd < numOfClustersRef);
// Find the closest actual value to the cluster to choose as reference
// TODO: stop earlier once threshold is met?
ref = clusterMu.at<Vec3f>(maxInd);
float curMin = std::numeric_limits<float>::max();
refIndex = -1;
for (int i = 0; i < height; i++) {
for (int j = 0; j < width; j++) {
Vec3f curVal = dsImage.at<Vec3f>(i, j);
float curMag = 0;
for (int k = 0; k < channels; k++) {
float diff = curVal[k] - ref[k];
curMag += diff * diff;
}
if (curMag < curMin) {
curMin = curMag;
refIndex = j + i * width;
}
}
}
}
void UpsampleShadowMap() {
resize(*shadowMap, *shadowMap, Size(width, height), 0, 0, INTER_LANCZOS4);
}
void NormalizeShadowMap(int refIndex, Vec3f& ref) {
assert(shadowMap->rows == height && shadowMap->cols == width);
assert(refIndex >= 0 && refIndex < width * height);
ref = shadowMap->at<Vec3f>(refIndex);
// Divide each local paper intensity by the global reference
for (int i = 0; i < height; i++) {
for (int j = 0; j < width; j++) {
Vec3f& curShadowVec = shadowMap->at<Vec3f>(i, j);
for (int k = 0; k < channels; k++) {
curShadowVec[k] /= ref[k];
// Clamp negative and zero values to a small number
if (curShadowVec[k] <= 0) {
curShadowVec[k] = 1.0e-6f;
}
}
}
}
}
void ApplyShadowMap() {
// Loop through all the pixels and divide by inverse gain
for (int i = 0; i < height; i++) {
for (int j = 0; j < width; j++) {
Vec3f invGain = shadowMap->at<Vec3f>(i, j);
Vec3f& color = image->at<Vec3f>(i, j);
for (int k = 0; k < channels; k++) {
color[k] /= invGain[k];
}
}
}
}
void ConvertIndex(int x, int y, int& xHat, int& yHat) {
// Convert from original resolution to downsampled size (downsampled based on stride)
xHat = (int)floor((x - 1) / float(stride)) + 1;
yHat = (int)floor((y - 1) / float(stride)) + 1;
}
When I click the button to process an image, the application force closes, and the Logcat shows this:
Logcat
I have also already set up OpenCV for both Java and the NDK.
Here is the original source code for removing shadows from document images.

NDK code crashing on a few HTC Lollipop devices and a few others with Fatal signal 11 (SIGSEGV), code 2

The code below is used to scale the image border by dragging the edges. It works fine on a Moto X, a Nexus 5X, a few other devices, and emulators, but it crashes on an HTC One (Lollipop 5.0.2) and a few other devices. I think this is due to heavy graphics processing. Please help me find a fix.
This is the C code.
#include <stdio.h>
#include <stdlib.h>
#include <jni.h>
#include <math.h>
#include <malloc.h>
#include <android/log.h>
#include <assert.h>
#include <android/bitmap.h>
//#include <asm/io.h>
#define LOG_TAG "System.out"
#define LOGD(...) __android_log_print(ANDROID_LOG_DEBUG, LOG_TAG, __VA_ARGS__)
#define LOGI(...) __android_log_print(ANDROID_LOG_INFO, LOG_TAG, __VA_ARGS__)
int *colorRed;
int *colorGreen;
int *colorBlue;
int *colorsP;
int *colorAlpha;
int _width,_height;
jintArray imagecolors;
static long SinXDivX_Table_8[(2<<8)+1];
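// MySin below is the cubic convolution (bicubic) interpolation kernel with
// a = -1: 1 - 2x^2 + |x|^3 for |x| < 1, 4 - 8|x| + 5x^2 - |x|^3 for
// 1 <= |x| < 2, and 0 otherwise. SinXDivX_Table_8 above stores it in 8.8
// fixed point, sampled in steps of 1/256 over [0, 2], for the bicubic
// weights used in mapping().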
double MySin(double x){
double absx;
absx = x;
if(x<0) absx = -x;
double x2 = absx*absx;
double x3 = x2*absx;
if(absx<1) return (1-2*x2+x3);
else if(absx<2) return (4-8*absx+5*x2-x3);
else return 0;
}
int getAlpha(int i, int j) {
return (*(colorsP+j*_width+i) >> 24)&0xFF;
}
int getRedColor(int i,int j){
//return *(colorRed+j*_width+i);
return (*(colorsP+j*_width+i) >> 16)&0xFF;
}
int getGreenColor(int i,int j){
//return *(colorGreen+j*_width+i);
return (*(colorsP+j*_width+i) >> 8)&0xFF;
}
int getBlueColor(int i,int j){
//return *(colorBlue+j*_width+i);
return (*(colorsP+j*_width+i))&0xFF;
}
/**
 * Returns x*x + y*y.
 * @param x
 * @param y
 * @return the value of x*x + y*y
 */
double hypotsq(double x,double y){
return x*x+y*y;
}
void mapping(JNIEnv *env,jintArray image,int x,int y,double max_dist,
double orig_x,double orig_y,double cur_x,double cur_y){
// if(mode == 120){
float u = 0,v = 0;
float fu = x,fv = y;
double dx = x - orig_x;
double dy = y - orig_y;
long i,j;
int i1,j1,_i,_j,i2,j2;
float du;
float dv;
int colorpix = 0;
int red,green,blue,alpha;
double max_dist_sq = max_dist*max_dist;
double mou_dx = cur_x - orig_x;
// damp the horizontal drag delta
mou_dx = mou_dx/4;
double mou_dy = cur_y - orig_y;
// damp the vertical drag delta
mou_dy = mou_dy/4;
if(dx > -max_dist&&dx<max_dist&&dy>-max_dist&&dy<max_dist){
double rsq = hypotsq(dx, dy);
if(rsq < max_dist_sq){
double msq = hypotsq(dx - mou_dx,dy - mou_dy);
double edge_dist = max_dist_sq - rsq;
double a = edge_dist/(edge_dist + msq);
a *=a;
fu -= a*mou_dx; // warped source x coordinate
fv -= a*mou_dy; // warped source y coordinate
u = fu;
v = fv;
{
u = u<(_width-1)?u:(_width-1);
v = v<(_height-1)?v:(_height-1);
u = u>0?u:0;
v = v>0?v:0;
}
// i = (int)u; // integer x coordinate in the source image
// j = (int)v; // integer y coordinate in the source image
// du = u-i; // fractional part
// dv = v-j; // fractional part
long intu = (long)(u*(1 << 16)); // scale up to 16.16 fixed point
long intv = (long)(v*(1 << 16)); // scale up to 16.16 fixed point
i = intu >> 16; // drop the fraction to get the integer coordinate
j = intv >> 16; // drop the fraction to get the integer coordinate
long idu = (intu & 0xFFFF)>> 8; // fractional part, 8 bits
long idv = (intv & 0xFFFF)>> 8; // fractional part, 8 bits
long _idu = (256-idu); // (1-du) in 8-bit fixed point
long _idv = (256-idv);
i1 = (i+1)<(_width-1)?(i+1):(_width-1);//Math.min(i+1, width-1);
j1 = (j+1)<(_height-1)?(j+1):(_height-1);//Math.min(j+1, height-1);
//----------------------------------------- bilinear interpolation -----------------------------------------//
alpha = (_idu*_idv*getAlpha(i, j)+_idu*idv*getAlpha(i, j1)+
idu*_idv*getAlpha(i1, j)+idu*idv*getAlpha(i1,j1))>>16;
red = (_idu*_idv*getRedColor(i, j)+_idu*idv*getRedColor(i, j1)+
idu*_idv*getRedColor(i1, j)+idu*idv*getRedColor(i1,j1))>>16;
green = (_idu*_idv*getGreenColor(i, j)+_idu*idv*getGreenColor(i, j1)+
idu*_idv*getGreenColor(i1, j)+idu*idv*getGreenColor(i1,j1))>>16;
blue = (_idu*_idv*getBlueColor(i, j)+_idu*idv*getBlueColor(i, j1)+
idu*_idv*getBlueColor(i1, j)+idu*idv*getBlueColor(i1,j1))>>16;
//---------------------------------- bicubic convolution interpolation ----------------------------------//
int red3, green3, blue3, alpha3;
i2 = (i+2)<(_width-1)?(i+2):(_width-1);
j2 = (j+2)<(_height-1)?(j+2):(_height-1);
_i = (i-1)>0?(i-1):0;
_j = (j-1)>0?(j-1):0;
long A[4] = {SinXDivX_Table_8[(1<<8)+idu],SinXDivX_Table_8[idu+0],SinXDivX_Table_8[(1<<8)-idu],SinXDivX_Table_8[(2<<8)-idu]};
long C[4] = {SinXDivX_Table_8[(1<<8)+idv],
SinXDivX_Table_8[idv+0],
SinXDivX_Table_8[(1<<8)-idv],
SinXDivX_Table_8[(2<<8)-idv]};
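// A and C hold the four horizontal and vertical bicubic tap weights: the
// kernel sampled at distances 1+du, du, 1-du and 2-du (in 8.8 fixed point),
// which is why each weighted sum below ends with a >>16 to renormalize.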
int alphaB[16] = {getAlpha(_i, _j), getAlpha(_i,j), getAlpha(_i,j1), getAlpha(_i,j2),
getAlpha(i,_j), getAlpha(i,j), getAlpha(i,j1), getAlpha(i,j2),
getAlpha(i1,_j), getAlpha(i1,j), getAlpha(i1,j1), getAlpha(i1,j2),
getAlpha(i2,_j), getAlpha(i2,j), getAlpha(i2,j1), getAlpha(i2,j2)};
double aA_B[4] = {(A[0]*alphaB[0]+A[1]*alphaB[4]+A[2]*alphaB[8]+A[3]*alphaB[12]),
(A[0]*alphaB[1]+A[1]*alphaB[5]+A[2]*alphaB[9]+A[3]*alphaB[13]),
(A[0]*alphaB[2]+A[1]*alphaB[6]+A[2]*alphaB[10]+A[3]*alphaB[14]),
(A[0]*alphaB[3]+A[1]*alphaB[7]+A[2]*alphaB[11]+A[3]*alphaB[15])};
alpha3 = (int)(aA_B[0]*C[0]+aA_B[1]*C[1]+aA_B[2]*C[2]+aA_B[3]*C[3])>>16;
// red
int redB[16] = {getRedColor(_i, _j), getRedColor(_i,j), getRedColor(_i,j1), getRedColor(_i,j2),
getRedColor(i,_j), getRedColor(i,j), getRedColor(i,j1), getRedColor(i,j2),
getRedColor(i1,_j), getRedColor(i1,j), getRedColor(i1,j1), getRedColor(i1,j2),
getRedColor(i2,_j), getRedColor(i2,j), getRedColor(i2,j1), getRedColor(i2,j2)};
double A_B[4] = {(A[0]*redB[0]+A[1]*redB[4]+A[2]*redB[8]+A[3]*redB[12]),
(A[0]*redB[1]+A[1]*redB[5]+A[2]*redB[9]+A[3]*redB[13]),
(A[0]*redB[2]+A[1]*redB[6]+A[2]*redB[10]+A[3]*redB[14]),
(A[0]*redB[3]+A[1]*redB[7]+A[2]*redB[11]+A[3]*redB[15])};
red3 = (int)(A_B[0]*C[0]+A_B[1]*C[1]+A_B[2]*C[2]+A_B[3]*C[3])>>16;
// green
int greenB[16] = {getGreenColor(_i, _j), getGreenColor(_i,j), getGreenColor(_i,j1), getGreenColor(_i,j2),
getGreenColor(i,_j), getGreenColor(i,j), getGreenColor(i,j1), getGreenColor(i,j2),
getGreenColor(i1,_j), getGreenColor(i1,j), getGreenColor(i1,j1), getGreenColor(i1,j2),
getGreenColor(i2,_j), getGreenColor(i2,j), getGreenColor(i2,j1), getGreenColor(i2,j2)};
double gA_B[4] = {(A[0]*greenB[0]+A[1]*greenB[4]+A[2]*greenB[8]+A[3]*greenB[12]),
(A[0]*greenB[1]+A[1]*greenB[5]+A[2]*greenB[9]+A[3]*greenB[13]),
(A[0]*greenB[2]+A[1]*greenB[6]+A[2]*greenB[10]+A[3]*greenB[14]),
(A[0]*greenB[3]+A[1]*greenB[7]+A[2]*greenB[11]+A[3]*greenB[15])};
green3 = (int)(gA_B[0]*C[0]+gA_B[1]*C[1]+gA_B[2]*C[2]+gA_B[3]*C[3])>>16;
// blue
int blueB[16] = {getBlueColor(_i, _j), getBlueColor(_i,j), getBlueColor(_i,j1), getBlueColor(_i,j2),
getBlueColor(i,_j), getBlueColor(i,j), getBlueColor(i,j1), getBlueColor(i,j2),
getBlueColor(i1,_j), getBlueColor(i1,j), getBlueColor(i1,j1), getBlueColor(i1,j2),
getBlueColor(i2,_j), getBlueColor(i2,j), getBlueColor(i2,j1), getBlueColor(i2,j2)};
double bA_B[4] = {(A[0]*blueB[0]+A[1]*blueB[4]+A[2]*blueB[8]+A[3]*blueB[12]),
(A[0]*blueB[1]+A[1]*blueB[5]+A[2]*blueB[9]+A[3]*blueB[13]),
(A[0]*blueB[2]+A[1]*blueB[6]+A[2]*blueB[10]+A[3]*blueB[14]),
(A[0]*blueB[3]+A[1]*blueB[7]+A[2]*blueB[11]+A[3]*blueB[15])};
blue3 = (int)(bA_B[0]*C[0]+bA_B[1]*C[1]+bA_B[2]*C[2]+bA_B[3]*C[3])>>16;
//**************************************************************************************************************//
//**************************************************************************************************************//
// If any channel of the bicubic result is outside 0..255, fall back to the bilinear result; otherwise use the bicubic one.
if(alpha3<0||red3<0||green3<0||blue3<0||alpha3>255||red3>255||green3>255||blue3>255){
colorpix = (alpha << 24)|(red << 16)|(green << 8)|blue;
}else{
colorpix = (alpha3 << 24)|(red3 << 16)|(green3 << 8)|blue3;
}
//LOGI("c---> ca = %d",);
(*env)->SetIntArrayRegion(env,image,(int) (y*_width + x),1,&colorpix);
}
}
}
JNIEXPORT jintArray JNICALL Java_com_test_mypicture_ui_WarpView_warpPhotoFromC
(JNIEnv * env, jobject obj, jintArray imagearr, jint height, jint width, jdouble r,
jdouble orig_x, jdouble orig_y, jdouble cur_x, jdouble cur_y){
// initialize the color arrays
int len = (*env)->GetArrayLength(env,imagearr);
int* color = (*env)->GetIntArrayElements(env,imagearr,0);
int colors[len];
int colorsOr[len];
_width = width;
_height = height;
int i = 0;
for(;i<len;i++){
int colorpix = *(color+i); // current pixel value
colors[i] = colorpix;
}
colorsP = &colors[0];
// apply the warp mapping to the image
int or_x = (orig_x-r)>0?(orig_x-r):0;//(int) Math.max((orig_x-r), 0);
int or_y = (orig_y-r)>0?(orig_y-r):0;//(int) Math.max((orig_y-r), 0);
int max_x = (orig_x+r)<width?(orig_x+r):width;//(int) Math.min((orig_x+r), width);
int max_y = (orig_y+r)<height?(orig_y+r):height;//(int) Math.min((orig_y+r), height);
int m = or_y;
for(;m<max_y;m++){
int n = or_x;
for(;n<max_x;n++){
mapping(env,imagearr,n, m,r,orig_x,orig_y,cur_x,cur_y);
}
}
return imagecolors;
}
// initialize the lookup table
JNIEXPORT jint JNICALL Java_com_test_mypicture_MainActivity_initArray
(JNIEnv * env, jobject obj){
long k;
for (k = 0;k<=(2<<8);++k){
SinXDivX_Table_8[k]=(long)(0.5+256*MySin(k*(1.0/(256))))*1;
}
return 0;
}
This is the Java code from which I invoke the C method.
package com.test.mypicture.ui;
import com.test.mypicture.R;
import android.content.Context;
import android.graphics.Bitmap;
import android.graphics.Canvas;
import android.graphics.Paint;
import android.graphics.RectF;
import android.util.AttributeSet;
import android.view.MotionEvent;
import android.view.View;
public class WarpView extends View {
static{
System.loadLibrary("Warp");
}
public native int[] warpPhotoFromC(int[] image,int height,int width,double max_dist,
double orig_x,double orig_y,double cur_x,double cur_y);
private Bitmap mBmp; // original bitmap
private Bitmap newBmp; // ARGB_8888 bitmap
private int[] image; // array of pixels
private int[] colorR;
private int[] colorG;
private int[] colorB;
private boolean fg = true;
private static final int DEFAULT_PAINT_FLAGS =
Paint.FILTER_BITMAP_FLAG | Paint.DITHER_FLAG;
Paint mPaint = new Paint(DEFAULT_PAINT_FLAGS);
public static int HWPPQ = 110;
public static int MMQFJ = 120;
private int MODE = MMQFJ;
private double orig_x, orig_y;
private double mou_dx,mou_dy;
private double max_dist;
private int width; // image width
private int height; // image height
private float scale;
private RectF dest;
private double move_x,move_y;
private int dist = (int) getResources().getDimension(R.dimen.max_dist);
public WarpView(Context context) {
super(context);
}
public WarpView(Context context, AttributeSet attrs) {
super(context, attrs);
//setLayerType(View.LAYER_TYPE_SOFTWARE, null);
dest = new RectF(0,0,0,0);
}
@Override
protected void onDraw(Canvas canvas) {
//super.onDraw(canvas);
if(fg){
int viewWidth = getWidth();
int viewHeight = getHeight();
float scale1 = (float)width/(float)viewWidth;
float scale2 = (float)height/(float)viewHeight;
scale = scale1>scale2?scale1:scale2;
float wscale = width / scale;
float hscale = height / scale;
int xoffset = (viewWidth-(int)wscale)/2;
int yoffset = (viewHeight-(int)hscale)/2;
dest.set(xoffset, yoffset, (int)wscale+xoffset, (int)hscale+yoffset);// = new RectF(xoffset, yoffset, (int) (width/scale)+xoffset, (int) (height/scale)+yoffset);
canvas.drawBitmap(mBmp, null, dest, mPaint);
}else{
canvas.drawBitmap(newBmp,null, dest, mPaint);
}
}
@Override
public boolean onTouchEvent(MotionEvent event) {
switch(event.getAction()){
case MotionEvent.ACTION_DOWN:
orig_x = event.getX();
orig_y = event.getY();
orig_x = (orig_x-dest.left)*scale;
orig_y = (orig_y-dest.top)*scale;
break;
case MotionEvent.ACTION_MOVE:
max_dist = dist*scale;//Math.hypot(mou_dx, mou_dy);
if(event.getAction() != 1){
//int m = event.getHistorySize();
move_x = event.getX();
move_y = event.getY();
move_x = (move_x-dest.left)*scale;
move_y = (move_y-dest.top)*scale;
// if(m > 0){
// int i2 = m + -1;
// orig_x = (event.getHistoricalX(i2) - dest.left)*scale;
// orig_y = (event.getHistoricalY(i2) - dest.top)*scale;
// }
if(move_x >=0 && move_y >= 0){
warpPhotoFromC(image,height,width,max_dist,orig_x,orig_y,move_x,move_y);
newBmp.setPixels(image, 0, width, 0, 0, width, height);
fg = false;
}
}
orig_x = move_x;
orig_y = move_y;
break;
case MotionEvent.ACTION_UP:
break;
}
invalidate();
return true;
}
public void setWarpBitmap(Bitmap bmp){
fg = true;
mBmp = bmp;
width = bmp.getWidth();
height = bmp.getHeight();
newBmp = Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_8888/*Bitmap.Config.RGB_565*/);
image = new int[width*height];
mBmp.getPixels(image, 0, width, 0, 0, width, height);
newBmp.setPixels(image, 0, width, 0, 0, width, height);
//super.setImageBitmap(mBmp);
}
public void setMode(int mode){
this.MODE = mode;
}
public Bitmap getWrapBitmap(){
return newBmp;
}
}
I just updated the ndk configuration in my build.gradle and it started to work on every device:
ndk {
moduleName "native"
toolchain "clang"
toolchainVersion "3.5"
CFlags.add("-DCUSTOM_DEFINE")
cppFlags.add("-DCUSTOM_DEFINE")
ldFlags.add("-L/custom/lib/path")
ldLibs.add("log")
stl "stlport_static"
}

How to find accurate edges of the image?

The image above is my output.
I am using the OpenCV edge detection C++ code written below:
JNIEXPORT jfloatArray JNICALL Java_com_test_getPoints
(JNIEnv *env, jobject thiz,jobject bitmap)
{
__android_log_print(ANDROID_LOG_VERBOSE, APPNAME, "Scaning getPoints");
int ret;
AndroidBitmapInfo info;
void* pixels = 0;
if ((ret = AndroidBitmap_getInfo(env, bitmap, &info)) < 0) {
__android_log_print(ANDROID_LOG_VERBOSE, APPNAME,"AndroidBitmap_getInfo() failed ! error=%d", ret);
return 0;
}
if (info.format != ANDROID_BITMAP_FORMAT_RGBA_8888 )
{ __android_log_print(ANDROID_LOG_VERBOSE, APPNAME,"Bitmap format is not RGBA_8888!");
return 0;
}
if ((ret = AndroidBitmap_lockPixels(env, bitmap, &pixels)) < 0) {
__android_log_print(ANDROID_LOG_VERBOSE, APPNAME,"AndroidBitmap_lockPixels() failed ! error=%d", ret);
}
Mat mbgra(info.height, info.width, CV_8UC4, pixels);
vector<Point> img_pts = getPoints(mbgra);
jfloatArray jArray = env->NewFloatArray(8);
if (jArray != NULL)
{
jfloat *ptr = env->GetFloatArrayElements(jArray, NULL);
for (int i=0,j=i+4; j<8; i++,j++)
{
ptr[i] = img_pts[i].x;
ptr[j] = img_pts[i].y;
}
env->ReleaseFloatArrayElements(jArray, ptr, NULL);
}
AndroidBitmap_unlockPixels(env, bitmap);
return jArray;
}
vector<Point> getPoints(Mat image)
{
int width = image.size().width;
int height = image.size().height;
Mat image_proc = image.clone();
vector<vector<Point> > squares;
Mat blurred(image_proc);
medianBlur(image_proc, blurred, 9);
Mat gray0(blurred.size(), CV_8U), gray;
vector<vector<Point> > contours;
for (int c = 0; c < 3; c++)
{
int ch[] = {c, 0};
mixChannels(&blurred, 1, &gray0, 1, ch, 1);
const int threshold_level = 2;
for (int l = 0; l < threshold_level; l++)
{
if (l == 0)
{
Canny(gray0, gray, 10, 20, 3); //
dilate(gray, gray, Mat(), Point(-1,-1));
}
else
{
gray = gray0 >= (l+1) * 255 / threshold_level;
}
findContours(gray, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
vector<Point> approx;
for (size_t i = 0; i < contours.size(); i++)
{
approxPolyDP(Mat(contours[i]), approx, arcLength(Mat(contours[i]), true)*0.02, true);
if (approx.size() == 4 &&
fabs(contourArea(Mat(approx))) > 1000 &&
isContourConvex(Mat(approx)))
{
double maxCosine = 0;
for (int j = 2; j < 5; j++)
{
double cosine = fabs(angle(approx[j%4], approx[j-2], approx[j-1]));
maxCosine = MAX(maxCosine, cosine);
}
if (maxCosine < 0.3)
squares.push_back(approx);
}
}
}
double largest_area = -1;
int largest_contour_index = 0;
for(int i=0;i<squares.size();i++)
{
double a =contourArea(squares[i],false);
if(a>largest_area)
{
largest_area = a;
largest_contour_index = i;
}
}
__android_log_print(ANDROID_LOG_VERBOSE, APPNAME, "Scaning size() %d",squares.size());
vector<Point> points;
if(squares.size() > 0)
{
points = squares[largest_contour_index];
}
else
{
points.push_back(Point(0, 0));
points.push_back(Point(width, 0));
points.push_back(Point(0, height));
points.push_back(Point(width, height));
}
return points;
}
}
How can I find accurate edges as in the screenshot above? I am new to OpenCV; can anyone guide me? Thanks for any response.
The output image
required output

Coloring a bitmap with Java

Hi, I am creating an app for Android and I ran into a problem with coloring bitmaps.
I am using the following simple code:
for(int i=0;i<pixels.length;i++){
if(pixels[i] == COLOR.WHITE){
pixels[i]=Color.RED;
}
}
Here pixels is the bitmap's pixel array.
However, the problem is that at the edges of the colored area I get a thin layer of pixels that weren't colored. I understand this stems from that layer of white being somewhat shadowy (not entirely white, partially black). How do I get over this?
I hope I made my question clear enough.
Right now you are matching and replacing one specific integer value for white. However, in your original bitmap that white color bleeds into other colors, so around the edges of these white patches you have white color values that are slightly different.
You need to change your algorithm to take a color-matching tolerance into account. For that you'll have to split your pixel and your key color into their three color channels and check individually whether the differences between them are within a certain tolerance value.
That way you can match these whitish pixels around the edges. But even with added tolerance you cannot simply replace the matching pixels with red; you would get hard, aliased red edges, and it wouldn't look pretty. I wrote a similar algorithm a while ago and got around that aliasing issue by doing some color blending in HSV color space:
public Bitmap changeColor(Bitmap src, int keyColor,
int replColor, int tolerance) {
Bitmap copy = src.copy(Bitmap.Config.ARGB_8888, true);
int width = copy.getWidth();
int height = copy.getHeight();
int[] pixels = new int[width * height];
src.getPixels(pixels, 0, width, 0, 0, width, height);
int sR = Color.red(keyColor);
int sG = Color.green(keyColor);
int sB = Color.blue(keyColor);
int tR = Color.red(replColor);
int tG = Color.green(replColor);
int tB = Color.blue(replColor);
float[] hsv = new float[3];
Color.RGBToHSV(tR, tG, tB, hsv);
float targetHue = hsv[0];
float targetSat = hsv[1];
float targetVal = hsv[2];
for(int i = 0; i < pixels.length; ++i) {
int pixel = pixels[i];
if(pixel == keyColor) {
pixels[i] = replColor;
} else {
int pR = Color.red(pixel);
int pG = Color.green(pixel);
int pB = Color.blue(pixel);
int deltaR = Math.abs(pR - sR);
int deltaG = Math.abs(pG - sG);
int deltaB = Math.abs(pB - sB);
if(deltaR <= tolerance && deltaG <= tolerance
&& deltaB <= tolerance) {
Color.RGBToHSV(pR, pG, pB, hsv);
hsv[0] = targetHue;
hsv[1] = targetSat;
hsv[2] *= targetVal;
int mixTrgColor = Color.HSVToColor(Color.alpha(pixel),
hsv);
pixels[i] = mixTrgColor;
}
}
}
copy.setPixels(pixels, 0, width, 0, 0, width, height);
return copy;
}
keyColor and replColor are ARGB encoded integer values such as Color.WHITE and Color.RED. tolerance is a value from 0 to 255 that specifies the key color matching tolerance per color channel. I had to rewrite that snippet a bit to remove framework specifics. I hope I didn't make any mistakes.
As a word of warning: Java (on Android) is pretty slow with image processing. If it's not fast enough for you, you should rewrite the algorithm in C for example and use the NDK.
UPDATE: Color replace algorithm in C
Here is the implementation of the same algorithm written in C. The last function is the actual algorithm which takes the bitmap's pixel array as argument. You need to create a header file with that function declaration and set up some NDK compilation boilerplate and create an additional Java class with the following method declaration:
native static void changeColor(int[] pixels, int width, int height, int keyColor, int replColor, int tolerance);
C implementation:
#include <jni.h>    // JNIEXPORT, jint, jintArray
#include <stdint.h> // uint8_t, uint16_t
#include <stdlib.h> // abs
#include <math.h>
#define MIN(x,y) ((x < y) ? x : y)
#define MAX(x,y) ((x > y) ? x : y)
int clamp_byte(int val) {
if(val > 255) {
return 255;
} else if(val < 0) {
return 0;
} else {
return val;
}
}
int encode_argb(uint8_t r, uint8_t g, uint8_t b, uint8_t a) {
return ((a & 0xFF) << 24) | ((r & 0xFF) << 16) | ((g & 0xFF) << 8) | (b & 0xFF);
}
int alpha(int c) {
return (c >> 24) & 0xFF;
}
int red(int c) {
return (c >> 16) & 0xFF;
}
int green(int c) {
return (c >> 8) & 0xFF;
}
int blue(int c) {
return c & 0xFF;
}
typedef struct struct_hsv {
uint16_t h;
uint8_t s;
uint8_t v;
} hsv;
// http://www.ruinelli.ch/rgb-to-hsv
hsv rgb255_to_hsv(uint8_t r, uint8_t g, uint8_t b) {
uint8_t min, max, delta;
hsv result;
int h;
min = MIN(r, MIN(g, b));
max = MAX(r, MAX(g, b));
result.v = max; // v, 0..255
delta = max - min; // 0..255, < v
if(delta != 0 && max != 0) {
result.s = ((int) delta) * 255 / max; // s, 0..255
if(r == max) {
h = (g - b) * 60 / delta; // between yellow & magenta
} else if(g == max) {
h = 120 + (b - r) * 60 / delta; // between cyan & yellow
} else {
h = 240 + (r - g) * 60 / delta; // between magenta & cyan
}
if(h < 0) h += 360;
result.h = h;
} else {
// r = g = b = 0
result.h = 0;
result.s = 0;
}
return result;
}
int hsv_to_argb(hsv color, uint8_t alpha) {
int i;
uint8_t r,g,b;
float f, p, q, t, h, s, v;
h = (float) color.h;
s = (float) color.s;
v = (float) color.v;
s /= 255;
if(s == 0) {
// achromatic (grey)
return encode_argb(color.v, color.v, color.v, alpha);
}
h /= 60; // sector 0 to 5
i = floor(h);
f = h - i; // factorial part of h
p = (unsigned char) (v * (1 - s));
q = (unsigned char) (v * (1 - s * f));
t = (unsigned char) (v * (1 - s * (1 - f)));
switch(i) {
case 0:
r = v;
g = t;
b = p;
break;
case 1:
r = q;
g = v;
b = p;
break;
case 2:
r = p;
g = v;
b = t;
break;
case 3:
r = p;
g = q;
b = v;
break;
case 4:
r = t;
g = p;
b = v;
break;
default: // case 5:
r = v;
g = p;
b = q;
break;
}
return encode_argb(r, g, b, alpha);
}
JNIEXPORT void JNICALL Java_my_package_name_ClassName_changeColor(
JNIEnv* env, jclass clazz, jintArray bitmapArray, jint width, jint height,
jint keyColor, jint replColor, jint tolerance) {
jint* pixels = (*env)->GetPrimitiveArrayCritical(env, bitmapArray, 0);
int sR = red(keyColor);
int sG = green(keyColor);
int sB = blue(keyColor);
int tR = red(replColor);
int tG = green(replColor);
int tB = blue(replColor);
hsv cHsv = rgb255_to_hsv(tR, tG, tB);
int targetHue = cHsv.h;
int targetSat = cHsv.s;
int targetVal = cHsv.v;
int i;
int max = width * height;
for(i = 0; i < max; ++i) {
int pixel = pixels[i];
if(pixel == keyColor) {
pixels[i] = replColor;
} else {
int pR = red(pixel);
int pG = green(pixel);
int pB = blue(pixel);
int deltaR = abs(pR - sR);
int deltaG = abs(pG - sG);
int deltaB = abs(pB - sB);
if(deltaR <= tolerance && deltaG <= tolerance
&& deltaB <= tolerance) {
cHsv = rgb255_to_hsv(pR, pG, pB);
cHsv.h = targetHue;
cHsv.s = targetSat;
int newValue = ((int) cHsv.v * targetVal) / 255;
cHsv.v = newValue;
int mixTrgColor = hsv_to_argb(cHsv, alpha(pixel));
pixels[i] = mixTrgColor;
}
}
}
(*env)->ReleasePrimitiveArrayCritical(env, bitmapArray, pixels, 0);
}

How to convert RGB565 to YUV420SP faster on Android?

I need to display a JPEG picture and convert it to YUV420SP. First I use SkBitmap to parse the JPEG and display it; then I use the code below to convert RGB565 to YUV420SP on Android, but it takes 75 ms to convert a 640*480 RGB565 picture. Does anybody know a faster way to convert RGB565 to YUV420SP on Android, or a faster way to convert a JPEG file to YUV420SP?
// Convert from RGB to YUV420
int RGB2YUV_YR[256], RGB2YUV_YG[256], RGB2YUV_YB[256];
int RGB2YUV_UR[256], RGB2YUV_UG[256], RGB2YUV_UBVR[256];
int RGB2YUV_VG[256], RGB2YUV_VB[256];
//
// Table used for RGB to YUV420 conversion
//
void InitLookupTable()
{
static bool hasInited = false;
if(hasInited)
return ;
hasInited = true;
int i;
for (i = 0; i < 256; i++)
RGB2YUV_YR[i] = (float) 65.481 * (i << 8);
for (i = 0; i < 256; i++)
RGB2YUV_YG[i] = (float) 128.553 * (i << 8);
for (i = 0; i < 256; i++)
RGB2YUV_YB[i] = (float) 24.966 * (i << 8);
for (i = 0; i < 256; i++)
RGB2YUV_UR[i] = (float) 37.797 * (i << 8);
for (i = 0; i < 256; i++)
RGB2YUV_UG[i] = (float) 74.203 * (i << 8);
for (i = 0; i < 256; i++)
RGB2YUV_VG[i] = (float) 93.786 * (i << 8);
for (i = 0; i < 256; i++)
RGB2YUV_VB[i] = (float) 18.214 * (i << 8);
for (i = 0; i < 256; i++)
RGB2YUV_UBVR[i] = (float) 112 * (i << 8);
}
int ConvertRGB5652YUV420SP(int w, int h, unsigned char *bmp, unsigned char *yuv)
{
unsigned char *u, *v, *y, *uu, *vv;
unsigned char *pu1, *pu2, *pu3, *pu4;
unsigned char *pv1, *pv2, *pv3, *pv4;
unsigned char rValue = 0, gValue = 0, bValue = 0;
uint16_t* bmpPtr;
int i, j;
printf("ConvertRGB5652YUV420SP begin,w=%d,h=%d,bmp=%p,yuv=%p\n", w, h, bmp, yuv);
struct timeval tpstart,tpend;
gettimeofday(&tpstart,NULL);
InitLookupTable();
gettimeofday(&tpend,NULL);
float timeuse=1000000*(tpend.tv_sec-tpstart.tv_sec)+tpend.tv_usec-tpstart.tv_usec;
timeuse/=1000;
printf("InitLookupTable used time=%f\n", timeuse);
gettimeofday(&tpstart,NULL);
uu = new unsigned char[w * h];
vv = new unsigned char[w * h];
if (uu == NULL || vv == NULL || yuv == NULL)
return 0;
y = yuv;
u = uu;
v = vv;
// Get r,g,b pointers from bmp image data....
bmpPtr = (uint16_t*)bmp;
//Get YUV values for rgb values...
for (i = 0; i < h; i++) {
for (j = 0; j < w; j++) {
uint16_t color = *bmpPtr;
unsigned int r = (color>>11) & 0x1f;
unsigned int g = (color>> 5) & 0x3f;
unsigned int b = (color ) & 0x1f;
rValue = (r<<3) | (r>>2);
gValue = (g<<2) | (g>>4);
bValue = (b<<3) | (b>>2);
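// The shifts above expand the 5/6-bit channels to 8 bits by replicating
// the top bits into the low bits, e.g. (r<<3)|(r>>2) maps 0..31 to 0..255.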
*y++ = (RGB2YUV_YR[rValue] + RGB2YUV_YG[gValue] + RGB2YUV_YB[bValue] +
1048576) >> 16;
*u++ = (-RGB2YUV_UR[rValue] - RGB2YUV_UG[gValue] + RGB2YUV_UBVR[bValue] +
8388608) >> 16;
*v++ = (RGB2YUV_UBVR[rValue] - RGB2YUV_VG[gValue] - RGB2YUV_VB[bValue] +
8388608) >> 16;
bmpPtr++;
}
}
gettimeofday(&tpend,NULL);
timeuse=1000000*(tpend.tv_sec-tpstart.tv_sec)+tpend.tv_usec-tpstart.tv_usec;
timeuse/=1000;
printf("Get YUV values used time=%f\n", timeuse);
gettimeofday(&tpstart,NULL);
// Now sample the U & V to obtain YUV 4:2:0 format
// Get the right pointers...
u = yuv + w * h;
v = u + 1;
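// Semi-planar output: the chroma samples live interleaved after the Y
// plane, so u and v both step by 2 through the same region.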
// For U
pu1 = uu;
pu2 = pu1 + 1;
pu3 = pu1 + w;
pu4 = pu3 + 1;
// For V
pv1 = vv;
pv2 = pv1 + 1;
pv3 = pv1 + w;
pv4 = pv3 + 1;
// Do sampling....
for (i = 0; i < h; i += 2) {
for (j = 0; j < w; j += 2) {
*u = (*pu1 + *pu2 + *pu3 + *pu4) >> 2;
u += 2;
*v = (*pv1 + *pv2 + *pv3 + *pv4) >> 2;
v += 2;
pu1 += 2;
pu2 += 2;
pu3 += 2;
pu4 += 2;
pv1 += 2;
pv2 += 2;
pv3 += 2;
pv4 += 2;
}
pu1 += w;
pu2 += w;
pu3 += w;
pu4 += w;
pv1 += w;
pv2 += w;
pv3 += w;
pv4 += w;
}
gettimeofday(&tpend,NULL);
timeuse=1000000*(tpend.tv_sec-tpstart.tv_sec)+tpend.tv_usec-tpstart.tv_usec;
timeuse/=1000;
printf("Do sampling used time=%f\n", timeuse);
gettimeofday(&tpstart,NULL);
delete[] uu;
delete[] vv;
return 1;
}
int main(int argc, char **argv) {
unsigned char bmp[640*480*2] = {0};
unsigned char yuv[(640*480*3)/2] = {0};
struct timeval tpstart,tpend;
gettimeofday(&tpstart,NULL);
ConvertRGB5652YUV420SP(640, 480, bmp, yuv);
gettimeofday(&tpend,NULL);
float timeuse=1000000*(tpend.tv_sec-tpstart.tv_sec)+tpend.tv_usec-tpstart.tv_usec;
timeuse/=1000;
printf("ConvertARGB2YUV420SP used time=%f\n", timeuse);
return 0;
}
Output on Android (ARMv6):
ConvertRGB5652YUV420SP begin,w=640,h=480,bmp=0xbe7314fc,yuv=0xbe7c74fc
InitLookupTable used time=0.383000
Get YUV values used time=61.394001
Do sampling used time=11.918000
ConvertARGB2YUV420SP used time=74.596001
cpu info:
$ cat /proc/cpuinfo
cat /proc/cpuinfo
Processor : ARMv6-compatible processor rev 5 (v6l)
BogoMIPS : 791.34
Features : swp half thumb fastmult vfp edsp java
CPU implementer : 0x41
CPU architecture: 6TEJ
CPU variant : 0x1
CPU part : 0xb36
CPU revision : 5
Hardware : IMAPX200
Revision : 0000
Serial : 0000000000000000
On ARMv7, use NEON. It will do the job in less than 1 ms (VGA).
If you are stuck with ARMv6, optimize it in ARM assembly (about 8 ms for VGA).
Use fixed-point arithmetic instead of the lookup tables. Get rid of them.
Make two masks:
0x001f001f : mask1
0x003f003f : mask2
Then load two pixels at once into a 32-bit register (which is a lot faster than a 16-bit read):
and red, mask1, pixel, lsr #11
and grn, mask2, pixel, lsr #5
and blu, mask1, pixel
Now you have three registers, each containing two values: one in the lower and one in the upper 16 bits.
The smulxy instructions (16-bit multiplies) will do some miracles from here on.
Good luck.
PS: your lookup tables aren't that good either. Why are they all 256 entries long? You could reduce them to 32 (the r- and b-related ones) and 64 (the g-related ones), which will increase the cache hit rate.
Probably that alone will hit the targeted 40 ms without resorting to assembly.
Yes, cache-misses are THAT painful.
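To make the mask idea concrete, here is a rough portable-C++ sketch of the two-pixels-per-load luma computation (an illustration only, not the NEON or assembly version; the weights are BT.601 coefficients that I pre-scaled for the 5/6-bit inputs and rounded):
#include <cstdint>
// Compute two Y values from two RGB565 pixels packed into one 32-bit word.
// Masking with 0x001f001f / 0x003f003f extracts both pixels' channels in
// one operation; the per-half multiplies are what smulxy exploits on ARM.
static inline void rgb565x2_to_y(uint32_t pix2, uint8_t y[2]) {
    uint32_t r = (pix2 >> 11) & 0x001f001fu;  // two 5-bit red values
    uint32_t g = (pix2 >> 5)  & 0x003f003fu;  // two 6-bit green values
    uint32_t b =  pix2        & 0x001f001fu;  // two 5-bit blue values
    for (int i = 0; i < 2; ++i) {
        uint32_t r5 = (r >> (16 * i)) & 0xffffu;
        uint32_t g6 = (g >> (16 * i)) & 0xffffu;
        uint32_t b5 = (b >> (16 * i)) & 0xffffu;
        // Y = 0.299R + 0.587G + 0.114B in 8.8 fixed point, with the
        // 5->8 and 6->8 bit expansion folded into the weights.
        y[i] = (uint8_t)((r5 * 630 + g6 * 609 + b5 * 240) >> 8);
    }
}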
I have found a faster way using Skia; it runs in about 40 ms.
#include "SkColorPriv.h"
#include "SkBitmap.h"
#include "SkCanvas.h"
#include "SkStream.h"
using namespace android;
// taken from jcolor.c in libjpeg
#if 0 // 16bit - precise but slow
#define CYR 19595 // 0.299
#define CYG 38470 // 0.587
#define CYB 7471 // 0.114
#define CUR -11059 // -0.16874
#define CUG -21709 // -0.33126
#define CUB 32768 // 0.5
#define CVR 32768 // 0.5
#define CVG -27439 // -0.41869
#define CVB -5329 // -0.08131
#define CSHIFT 16
#else // 8bit - fast, slightly less precise
#define CYR 77 // 0.299
#define CYG 150 // 0.587
#define CYB 29 // 0.114
#define CUR -43 // -0.16874
#define CUG -85 // -0.33126
#define CUB 128 // 0.5
#define CVR 128 // 0.5
#define CVG -107 // -0.41869
#define CVB -21 // -0.08131
#define CSHIFT 8
#endif
static void rgb2yuv_32(uint8_t dst[], SkPMColor c) {
int r = SkGetPackedR32(c);
int g = SkGetPackedG32(c);
int b = SkGetPackedB32(c);
int y = ( CYR*r + CYG*g + CYB*b ) >> CSHIFT;
int u = ( CUR*r + CUG*g + CUB*b ) >> CSHIFT;
int v = ( CVR*r + CVG*g + CVB*b ) >> CSHIFT;
dst[0] = SkToU8(y);
dst[1] = SkToU8(u + 128);
dst[2] = SkToU8(v + 128);
}
static void rgb2yuv_32_x(uint8_t *py, uint8_t *pu, uint8_t *pv, SkPMColor c) {
int r = SkGetPackedR32(c);
int g = SkGetPackedG32(c);
int b = SkGetPackedB32(c);
if(py != NULL){
int y = ( CYR*r + CYG*g + CYB*b ) >> CSHIFT;
*py = SkToU8(y);
}
if(pu != NULL){
int u = ( CUR*r + CUG*g + CUB*b ) >> CSHIFT;
*pu = SkToU8(u + 128);
}
if(pv != NULL){
int v = ( CVR*r + CVG*g + CVB*b ) >> CSHIFT;
*pv = SkToU8(v + 128);
}
}
static void rgb2yuv_4444(uint8_t dst[], U16CPU c) {
int r = SkGetPackedR4444(c);
int g = SkGetPackedG4444(c);
int b = SkGetPackedB4444(c);
int y = ( CYR*r + CYG*g + CYB*b ) >> (CSHIFT - 4);
int u = ( CUR*r + CUG*g + CUB*b ) >> (CSHIFT - 4);
int v = ( CVR*r + CVG*g + CVB*b ) >> (CSHIFT - 4);
dst[0] = SkToU8(y);
dst[1] = SkToU8(u + 128);
dst[2] = SkToU8(v + 128);
}
static void rgb2yuv_4444_x(uint8_t *py, uint8_t *pu, uint8_t *pv, U16CPU c) {
int r = SkGetPackedR4444(c);
int g = SkGetPackedG4444(c);
int b = SkGetPackedB4444(c);
if(py != NULL){
int y = ( CYR*r + CYG*g + CYB*b ) >> (CSHIFT - 4);
*py = SkToU8(y);
}
if(pu != NULL){
int u = ( CUR*r + CUG*g + CUB*b ) >> (CSHIFT - 4);
*pu = SkToU8(u + 128);
}
if(pv != NULL){
int v = ( CVR*r + CVG*g + CVB*b ) >> (CSHIFT - 4);
*pv = SkToU8(v + 128);
}
}
static void rgb2yuv_16(uint8_t dst[], U16CPU c) {
int r = SkGetPackedR16(c);
int g = SkGetPackedG16(c);
int b = SkGetPackedB16(c);
int y = ( 2*CYR*r + CYG*g + 2*CYB*b ) >> (CSHIFT - 2);
int u = ( 2*CUR*r + CUG*g + 2*CUB*b ) >> (CSHIFT - 2);
int v = ( 2*CVR*r + CVG*g + 2*CVB*b ) >> (CSHIFT - 2);
dst[0] = SkToU8(y);
dst[1] = SkToU8(u + 128);
dst[2] = SkToU8(v + 128);
}
static void rgb2yuv_16_x(uint8_t *py, uint8_t *pu, uint8_t *pv, U16CPU c) {
int r = SkGetPackedR16(c);
int g = SkGetPackedG16(c);
int b = SkGetPackedB16(c);
if(py != NULL){
int y = ( 2*CYR*r + CYG*g + 2*CYB*b ) >> (CSHIFT - 2);
*py = SkToU8(y);
}
if(pu != NULL){
int u = ( 2*CUR*r + CUG*g + 2*CUB*b ) >> (CSHIFT - 2);
*pu = SkToU8(u + 128);
}
if(pv != NULL){
int v = ( 2*CVR*r + CVG*g + 2*CVB*b ) >> (CSHIFT - 2);
*pv = SkToU8(v + 128);
}
}
int ConvertRGB5652YUV420SPBySkia(SkBitmap* bmp, unsigned char* dst) {
if(!bmp || !dst || bmp->getConfig() != SkBitmap::kRGB_565_Config)
return -1;
int width = bmp->width();
int height = bmp->height();
void *src = bmp->getPixels();
int src_rowbytes = bmp->rowBytes();
int stride = width;
int dstheight = height;
int i, j;
uint8_t *y_base = (uint8_t *)dst;
uint8_t *cb_base = (uint8_t *)((unsigned int)y_base + stride * dstheight);
uint8_t *cr_base = cb_base + 1;
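// Semi-planar 4:2:0 layout: the Y plane is followed by one interleaved
// chroma plane, so cb and cr both advance by 2 through it; chroma is
// written only on even rows and even columns. (The order written here is
// U first, then V.)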
uint8_t yuv[3];
uint8_t *y = NULL, *cb = NULL, *cr = NULL;
uint16_t *rgb = (uint16_t *)src;
for(i=0; i<height; i++){
rgb = (uint16_t *)((unsigned int)src + i * src_rowbytes);
y = (uint8_t *)((unsigned int)y_base + i * stride);
if((i & 0x1) == 0){
cb = (uint8_t *)((unsigned int)cb_base + ((i>>1) * stride));
cr = cb + 1;
}
for(j=0; j<width; j++){
if(i & 0x1){// valid y and cr
if(j & 0x01){ // only y
rgb2yuv_16_x(y++, NULL, NULL, *rgb++);
}else{ // both y and cr
rgb2yuv_16_x(y++, NULL, cr++, *rgb++);
cr++;
}
}else{// valid y and cb
if(j & 0x01){ // only y
rgb2yuv_16_x(y++, NULL, NULL, *rgb++);
}else{ // both y and cb
rgb2yuv_16_x(y++, cb++, NULL, *rgb++);
cb++;
}
}
}
}
return 0;
}
