Can't pass a Mat object in OpenCV JNI (Android Studio) - android

I'm working on a project about document shadow removal in an Android application. I'm trying to port the C++ algorithm to Android, using a button to process the input image and remove the shadow, but there are still some errors and I have no idea how to solve them because I'm not good at coding. Here is the code of my project:
MainActivity.java
public class MainActivity extends AppCompatActivity {

    // Used to load the 'native-lib' library on application startup.
    static {
        System.loadLibrary("native-lib");
        System.loadLibrary("opencv_java3");
    }

    public static String TAG = "MainActivity";
    ImageView imageView;
    Button loadImage;
    Button process;
    private int REQUEST_CODE = 1;
    Mat image = new Mat();
    Bitmap bitmap;
    Bitmap bmp;

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_main);
        imageView = (ImageView) findViewById(R.id.imageView);
        loadImage = (Button) findViewById(R.id.button);
        process = (Button) findViewById(R.id.button2);
        loadImage.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View view) {
                Intent intent = new Intent();
                intent.setType("image/*");
                intent.setAction(Intent.ACTION_GET_CONTENT);
                startActivityForResult(Intent.createChooser(intent, "Select Image"), REQUEST_CODE);
            }
        });
        process.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View view) {
                // Mat takes (rows, cols), i.e. (height, width); Utils.bitmapToMat
                // reallocates it as CV_8UC4 (RGBA) anyway.
                image = new Mat(bitmap.getHeight(), bitmap.getWidth(), CvType.CV_8UC1);
                Utils.bitmapToMat(bitmap, image);
                Native.removeShadow(image.getNativeObjAddr());
                Utils.matToBitmap(image, bitmap);
                imageView.setImageBitmap(bitmap);
            }
        });
    }

    @Override
    protected void onActivityResult(int request_code, int result_code, Intent data) {
        super.onActivityResult(request_code, result_code, data);
        if (request_code == REQUEST_CODE && result_code == RESULT_OK && data != null && data.getData() != null) {
            Uri uri = data.getData();
            try {
                bitmap = MediaStore.Images.Media.getBitmap(getContentResolver(), uri);
                imageView.setImageBitmap(bitmap);
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }
}
Native.java
public class Native {
public native static void removeShadow (long addrRgba);
}
shadowRemove.h
#include "jni.h"
#include <stdio.h>
#include <iostream>
#include <omp.h>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/ml/ml.hpp>
#include <opencv2/photo/photo.hpp>
#include <cmath>
#include <random>
#ifndef SHADOWDOCTEST_SHADOWREMOVE_H
#define SHADOWDOCTEST_SHADOWREMOVE_H
#define BUFFER_SIZE 1000
#define USE_SAMPLING 1
using namespace cv;
using namespace std;
using namespace cv::ml;
extern "C" {
JNIEXPORT void JNICALL Java_com_example_shadowdoctest_Native_removeShadow(JNIEnv *, jclass, jlong);
void removeShadow(Mat& img);
// Returns the block centered around (x,y).
bool GetBlock(int x, int y, Mat &block, Mat &dsImage);
void ClusterBlock(Mat &block, Mat &clusterMu, int *randInd);
// Identifies the local cluster belonging to the background and saves it in the shadow map
// Currently uses max cluster mean.
void CalculatePaperStatistics(int x, int y, Mat &clusterMu);
// Finds the block that best represents the background region. Used for constructing the
// shadow map (gain map)
void FindReferenceIndex(int &refInd, Mat &dsImage, Vec3f &ref);
void NormalizeShadowMap(int refIndex, Vec3f &ref);
void UpsampleShadowMap();
void ApplyShadowMap();
// Converts x and y index to access downsampled images (xhat = x and
// yhat = y when stride is 1)
void ConvertIndex(int x, int y, int &xHat, int &yHat);
// Input image, shadow map (gain map), and mask of image regions
Mat *image;
Mat *shadowMap;
// Full width, height, and number of channels
int width;
int height;
int channels;
// Number of pixels to skip when performing local analysis
int stride;
// Size of overlapping blocks in local analysis
int blockSize;
// Number of clusters used for local analysis (i.e., 2)
int numOfClusters;
// Number of clusters used for global analysis (i.e., 3)
int numOfClustersRef;
// Maximum number of iterations and epsilon threshold used as stopping condition for GMM clustering
int maxIters;
float emEps;
// Amount of downsampling to be used on the original image (for speedup)
float dsFactor;
// Number of local and global samples in the block and image, respectively (Default is 150 and 1000)
int numOfLocalSamples;
int numOfGlobalSamples;
}
#endif //SHADOWDOCTEST_SHADOWREMOVE_H
shadowRemove.cpp
#include "shadowRemove.h"
#include <cstdlib>
#include <iostream>
#include <iomanip>
#include <cmath>
#include <cstring>
JNIEXPORT void JNICALL Java_com_example_shadowdoctest_Native_removeShadow(JNIEnv *, jclass, jlong addrRgba){
Mat& image = *(Mat*)addrRgba;
removeShadow(image);
}
void removeShadow(Mat& img){
image = new Mat(img);
// image.convertTo(image, CV_32FC3);
width = image->cols;
height = image->rows;
channels = image->channels();
stride = 20; // Number of pixels to skip when performing local analysis
blockSize = 21; // Size of overlapping blocks in local analysis
numOfClusters = 3; // Number of clusters used for local analysis
numOfClustersRef = 3; // Number of clusters used for global analysis
maxIters = 100; // Maximum number of iterations used as stopping condition for GMM clustering.
emEps = 0.1f; // Epsilon threshold used as stopping condition for GMM clustering.
dsFactor = 1.0f; // No downsampling is done
numOfLocalSamples = 150; // Number of samples to take in each block (for local statistics)
numOfGlobalSamples = 1000; // Number of samples to take throughout entire image (for global statistics)
int sHeight, sWidth;
shadowMap = new Mat(image->cols, image->rows, CV_32FC3, CV_RGB(-1, -1, -1));//CV_32FC3 is a three channel matrix of 32-bit floats
resize(*shadowMap, *shadowMap, Size(0, 0), dsFactor, dsFactor, INTER_LANCZOS4);
ConvertIndex(shadowMap->cols, shadowMap->rows, sWidth, sHeight);
resize(*shadowMap, *shadowMap, Size(sWidth, sHeight));
int threadCount = omp_get_max_threads();
Mat* blockList = new Mat[threadCount];
for (int i = 0; i < threadCount; i++) {
blockList[i] = Mat(height, width, CV_32FC3, CV_RGB(0, 0, 0));
}
Mat dsMask = Mat(shadowMap->rows, shadowMap->cols, CV_8UC1, Scalar(0));
int* randInd = new int[numOfLocalSamples];
int size = blockSize * blockSize; // TODO: use size_t
vector<int> freeIndexes;
for (int i = 0; i < size; i++) {
freeIndexes.push_back(i);
}
int count = 0;
std::default_random_engine generator;
std::uniform_int_distribution<int> distribution(0, size);
while (count < numOfLocalSamples) {
int indexCandidate = distribution(generator);
vector<int>::iterator it = std::find(freeIndexes.begin(), freeIndexes.end(), indexCandidate);
if (it != freeIndexes.end()) {
randInd[count] = indexCandidate;
freeIndexes.erase(it);
count++;
}
}
Mat dsImage;
resize(*image, dsImage, Size(0, 0), dsFactor, dsFactor, INTER_NEAREST);
width = dsImage.cols;
height = dsImage.rows;
#pragma omp parallel
{
#pragma omp for schedule(dynamic) nowait
for (int i = 0; i < height; i += stride) {
for (int j = 0; j < width; j += stride) {
// Get current block
int threadNum = omp_get_thread_num();
Mat& curBlock = blockList[threadNum];
if (GetBlock(j, i, curBlock, dsImage)) {
// Cluster pixel intensities
Mat curMu;
vector<Mat> listOfCovs;
ClusterBlock(curBlock, curMu, randInd);
// Find paper mu of current block and update global matrix
CalculatePaperStatistics(j, i, curMu);
}
}
}
}
delete[] randInd;
delete[] blockList;
int refIndex = -1;
Vec3f ref;
FindReferenceIndex(refIndex, dsImage, ref);
width = image->cols;
height = image->rows;
medianBlur(*shadowMap, *shadowMap, 3);
GaussianBlur(*shadowMap, *shadowMap, Size(3, 3), 2.5f);
Mat dsShadowMap = *shadowMap;
UpsampleShadowMap();
NormalizeShadowMap(refIndex, ref);
ApplyShadowMap();
}
bool GetBlock (int x, int y, Mat& block, Mat& dsImage) {
int halfBlock = (int) floorf(float(blockSize) / 2.0f);
int minX = max(0, x - halfBlock);
int maxX = min(width - 1, x + halfBlock);
int minY = max(0, y - halfBlock);
int maxY = min(height - 1, y + halfBlock);
int deltaY = maxY - minY + 1;
int deltaX = maxX - minX + 1;
if (block.rows != deltaY || block.cols != deltaX) {
block = Mat(deltaY, deltaX, CV_32FC3, CV_RGB(0, 0, 0));
}
// Copy intensities to block
int bX = 0;
int bY = 0;
for (int i = minY; i <= maxY; i++) {
for (int j = minX; j <= maxX; j++) {
for (int k = 0; k < channels; k++) {
block.at<Vec3f>(bY, bX)[k] = dsImage.at<Vec3f>(i, j)[k];
}
bX++;
}
bX = 0;
bY++;
}
return true;
}
void ClusterBlock (Mat& block, Mat& clusterMu, int* randInd) {
// Set up expectation maximization model
Ptr<EM> emModel = EM::create();
emModel->setClustersNumber(numOfClusters);
emModel->setCovarianceMatrixType(EM::COV_MAT_DIAGONAL);
emModel->setTermCriteria(TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, maxIters, emEps));
// Cluster block with k means initializer
Mat samples;
if (block.rows * block.cols == blockSize * blockSize) {
Mat tmp(numOfLocalSamples, 1, CV_32FC3, CV_RGB(-1, -1, -1));
for (int i = 0; i < numOfLocalSamples; i++) {
assert(randInd[i] >= 0 && randInd[i] < block.rows * block.cols);
tmp.at<Vec3f>(i) = block.at<Vec3f>(randInd[i]);
}
samples = tmp.reshape(1);
}
else {
samples = block.reshape(0, block.rows * block.cols);
samples = samples.reshape(1);
}
emModel->trainEM(samples);
clusterMu = emModel->getMeans();
clusterMu = clusterMu.reshape(channels);
clusterMu.convertTo(clusterMu, CV_32FC3);
}
void CalculatePaperStatistics(int x, int y, Mat& clusterMu) {
int sX, sY;
ConvertIndex(x, y, sX, sY);
Vec3f& shadowVec = shadowMap->at<Vec3f>(sY, sX);
double maxSum = 0;
for (int i = 0; i < numOfClusters; i++) {
double muSum = 0;
for (int k = 0; k < channels; k++) {
muSum += clusterMu.at<Vec3f>(i)[k];
}
if (muSum > maxSum) {
maxSum = muSum;
for (int k = 0; k < channels; k++) {
shadowVec[k] = clusterMu.at<Vec3f>(i)[k];
}
}
}
}
void FindReferenceIndex(int& refIndex, Mat& dsImage, Vec3f& ref) {
// Set up expectation maximization model
Ptr<EM> emModel = EM::create();
emModel->setClustersNumber(numOfClustersRef);
emModel->setCovarianceMatrixType(EM::COV_MAT_DIAGONAL);
emModel->setTermCriteria(TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, maxIters, emEps));
// Cluster block with k means initializer
Mat samples;
#if USE_SAMPLING
Mat tmp(numOfGlobalSamples, 1, CV_32FC3, CV_RGB(-1, -1, -1));
int* randInd = new int[numOfGlobalSamples];
int size = width * height; // TODO: Use size_t
vector<int> freeIndexes;
for (int i = 0; i < size; i++) {
freeIndexes.push_back(i);
}
int count = 0;
int maxIndexCandidiate = -1;
int delta = size / numOfGlobalSamples;
std::default_random_engine generator;
std::uniform_int_distribution<int> distribution(0, size);
while (count < numOfGlobalSamples) {
int indexCandidate = distribution(generator);
vector<int>::iterator it = std::find(freeIndexes.begin(), freeIndexes.end(), indexCandidate);
if (it != freeIndexes.end()) {
randInd[count] = indexCandidate;
freeIndexes.erase(it);
count++;
}
}
for (int i = 0; i < numOfGlobalSamples; i++) {
tmp.at<Vec3f>(i) = image->at<Vec3f>(randInd[i]);
}
delete[] randInd;
samples = tmp.reshape(1);
#else
samples = dsImage.reshape(0, width * height);
samples = samples.reshape(1);
#endif
emModel->trainEM(samples);
// Get the cluster means
Mat clusterMu = emModel->getMeans();
clusterMu = clusterMu.reshape(channels);
clusterMu.convertTo(clusterMu, CV_32FC3);
// Get cluster variances
int maxInd = -1;
double curMax = -1;
for (int i = 0; i < numOfClustersRef; i++) {
double muMag = 0;
for (int k = 0; k < channels; k++) {
muMag += clusterMu.at<Vec3f>(i)[k];
}
if (muMag > curMax) {
curMax = muMag;
maxInd = i;
}
}
assert(maxInd != -1 && maxInd < numOfClustersRef);
// Find the closest actual value to the cluster to choose as reference
// TODO: stop earlier once threshold is met?
ref = clusterMu.at<Vec3f>(maxInd);
float curMin = std::numeric_limits<float>::max();
refIndex = -1;
for (int i = 0; i < height; i++) {
for (int j = 0; j < width; j++) {
Vec3f curVal = dsImage.at<Vec3f>(i, j);
float curMag = 0;
for (int k = 0; k < channels; k++) {
float diff = curVal[k] - ref[k];
curMag += diff * diff;
}
if (curMag < curMin) {
curMin = curMag;
refIndex = j + i * width;
}
}
}
}
void UpsampleShadowMap() {
resize(*shadowMap, *shadowMap, Size(width, height), 0, 0, INTER_LANCZOS4);
}
void NormalizeShadowMap(int refIndex, Vec3f& ref) {
assert(shadowMap->rows == height && shadowMap->cols == width);
assert(refIndex >= 0 && refIndex < width * height);
ref = shadowMap->at<Vec3f>(refIndex);
// Divide each local paper intensity by the global reference
for (int i = 0; i < height; i++) {
for (int j = 0; j < width; j++) {
Vec3f& curShadowVec = shadowMap->at<Vec3f>(i, j);
for (int k = 0; k < channels; k++) {
curShadowVec[k] /= ref[k];
// Clamp negative and zero values to a small number
if (curShadowVec[k] <= 0) {
curShadowVec[k] = 1.0e-6f;
}
}
}
}
}
void ApplyShadowMap() {
// Loop through all the pixels and divide by inverse gain
for (int i = 0; i < height; i++) {
for (int j = 0; j < width; j++) {
Vec3f invGain = shadowMap->at<Vec3f>(i, j);
Vec3f& color = image->at<Vec3f>(i, j);
for (int k = 0; k < channels; k++) {
color[k] /= invGain[k];
}
}
}
}
void ConvertIndex(int x, int y, int& xHat, int& yHat) {
// Convert from original resolution to downsampled size (downsampled based on stride)
xHat = (int)floor((x - 1) / float(stride)) + 1;
yHat = (int)floor((y - 1) / float(stride)) + 1;
}
When I click the button to process an image, the application force closes, and the Logcat shows this:
(Logcat screenshot)
I have also already set up OpenCV for both Java and the NDK.
Here is the original source code for removing shadows in document images.
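As an editorial aside, one mismatch stands out in the posted code (an observation, not a confirmed fix): removeShadow reads pixels via image->at<Vec3f>(...), i.e. it assumes CV_32FC3 data, while Utils.bitmapToMat hands the native side a CV_8UC4 (RGBA) Mat; the commented-out convertTo line hints at this. A minimal sketch of a conversion guard in the JNI entry point could look like:

JNIEXPORT void JNICALL Java_com_example_shadowdoctest_Native_removeShadow(JNIEnv *, jclass, jlong addrRgba) {
    Mat &input = *(Mat *) addrRgba;       // CV_8UC4 coming from Utils.bitmapToMat
    Mat rgb;
    cvtColor(input, rgb, COLOR_RGBA2RGB); // drop alpha: 4 channels -> 3
    rgb.convertTo(rgb, CV_32FC3);         // match the Vec3f accesses inside removeShadow
    removeShadow(rgb);
    rgb.convertTo(rgb, CV_8UC3);          // back to 8-bit for display
    cvtColor(rgb, input, COLOR_RGB2RGBA); // write the result back through the shared address
}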

Related

Otsu histogram self implementation

I tried to make my own implementation of Otsu. I have already read some Java source code and some sites that explain the formula, and I tried to implement it. I want to share this to ask if anyone can help me, or at least tell me what I can do or improve.
I have already coded getting the width and height, and the background and foreground weight, mean, variance, and within-class variance.
Note that I have not implemented how to set or find the exact threshold, or how to change the picture to black and white (binarize) using the within-class variance. If you can help me, feel welcome to. I have also seen some Java code that has threshold = i or threshold = t, but I can't see how they turned the image black and white.
Here is my code:
Otsu.java
Bitmap tempImg = (Bitmap) original;
Bitmap OImg = Bitmap.createBitmap(tempImg.getWidth(), tempImg.getHeight(), tempImg.getConfig());
int width = tempImg.getWidth();
int height = tempImg.getHeight();
int A, R, G, B,colorPixel;
for (int x = 0; x < width; x++) { //original image to grayscale
for (int y = 0; y < height; y++) {
colorPixel = tempImg.getPixel(x, y);
A = Color.alpha(colorPixel);
R = Color.red(colorPixel);
G = Color.green(colorPixel);
B = Color.blue(colorPixel);
R = (R + G + B) / 3;
G = R;
B = R;
OImg.setPixel(x, y, Color.argb(A, R, G,B ));
}
}
return OImg;
}
public static Bitmap Botsu(Bitmap gImg){
Bitmap tempImg = (Bitmap) gImg;
Bitmap BWimg = Bitmap.createBitmap(tempImg.getWidth(), tempImg.getHeight(), tempImg.getConfig());
int width = tempImg.getWidth();
int height = tempImg.getHeight();
int A, R, G, B, colorPixel;
// histo-thresh
double Wcv = 0;
int[] Bx = new int[256];
int[] By = new int[256];
int[] Fx = new int[256];
int[] Fy = new int[256];
double Bw =0, Bm =0, Bv =0, Bp = 0;
double Fw =0, Fm =0, Fv =0, Fp = 0;
int c = 0, ImgPix = 0, ImgPixB = 0, ImgPixF = 0, newPixel = 0;
// pixel check for histogram
for (int x = 0; x < width; x++) {
for (int y = 0; y < height; y++) {
colorPixel = tempImg.getPixel(x, y);
A = Color.alpha(colorPixel);
R = Color.red(colorPixel);
G = Color.green(colorPixel);
B = Color.blue(colorPixel);
int gray = (int) (0.2989 * R + 0.5870 * G + 0.1140 * B);
if (gray > 128) { // white - foreground
for (int z=0; z<Fx.length; z++){
if (Fx[z] == gray){
c++;
}
}
if (c==1){
Fy[gray] = Fy[gray]+1; //y axis - counter for pixels for each x
}
else{
Fx[x] = gray; //x axis - 0-255
Fy[gray] = Fy[gray]+1;
}
}//By[Bx[x]]
else{ // black - background
for (int z=0; z<Bx.length; z++){
if (Bx[z] == gray){
c++;
}
}
if (c==1){
By[gray] = By[gray]+1; //y axis - counter for pixels for each x
}
else{
Bx[x] = gray; //x axis - 0-255
By[gray] = By[gray]+1;
}
}
}
}
for (int b=0; b<By.length; b++){
ImgPixB = ImgPixB + By[b];
}
for (int f=0; f<Fy.length; f++){
ImgPixF = ImgPixF + Fy[f];
}
ImgPix = ImgPixB + ImgPixF;
//bg part hist
for (int i=0; i<By.length; i++){ //weight
Bw = Bw + By[i];
}
Bw = Bw/ImgPix;
for (int i=0; i<By.length; i++){ //pixel sum
Bp = Bp + By[i];
}
for (int i = 0; i<Bx.length; i++){ //mean
Bm = Bm + (Bx[i]*By[Bx[i]]);
}
Bm = Bm/Bp;
for (int i=0; i<Bx.length; i++){ //variance
Bv = Bv + (Math.pow((Bx[i]-Bm),2)*By[Bx[i]]); // (Bx[i]-Bm) * (Bx[i]-Bm)
}
Bv = Bv/Bp;
//fg part hist
for (int i=0; i<Fy.length; i++){ //weight
Fw = Fw + Fy[i];
}
Fw = Fw/ImgPix;
for (int i=0; i<Fy.length; i++){ //pixel sum
Fp = Fp + Fy[i];
}
for (int i = 0; i<Fx.length; i++){ //mean
Fm = Fm + (Fx[i]*Fy[Fx[i]]);
}
Fm = Fm/Fp;
for (int i=0; i<Fx.length; i++){ //variance
Fv = Fv + (Math.pow((Fx[i]-Fm),2)*Fy[Fx[i]]); // (Bx[i]-Bm) * (Bx[i]-Bm)
}
Fv = Fv/Fp;
// within class variance
Wcv = (Bw * Bv) + (Fw * Fv);
for (int x = 0; x < width; x++) {
for (int y = 0; y < height; y++) {
colorPixel = tempImg.getPixel(x, y);
A = Color.alpha(colorPixel);
R = Color.red(colorPixel);
G = Color.green(colorPixel);
B = Color.blue(colorPixel);
//int gray = (int) (0.2989 * R + 0.5870 * G + 0.1140 * B);
int gray2 = (int) (Wcv * R + Wcv * G + Wcv * B);
if (gray2 > 128) {
gray2 = 255;
}
else if (gray2 <129){
gray2 = 0;
}
BWimg.setPixel(x, y, Color.argb(A, gray2, gray2, gray2));
}
}
return BWimg;
x[z] is for the x-axis and y[gray] is for the y-axis. I based this on the graph in my lab book.
x = 0-255
y = how many pixels are at a certain color shade
Feel free to send more samples that can help me.
OUTPUT: (I added two threshold variants that actually produce an output. Other values will only return a few black dots or a plain white image.)
if (gray2 > 128) {
gray2 = 255;
}
else if (gray2 < 129){
gray2 = 0;
}
if (gray2 > 64 && gray2 < 129) {
gray2 = 255;
}
else if (gray2 < 65){
gray2 = 0;
}
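For what it's worth, here is a sketch of the standard Otsu threshold search, which may clarify the missing step (this is my own illustration, not the poster's code; otsuThreshold and the single 256-bin histogram layout are assumptions). Instead of testing every gray level against the within-class variance directly, it is equivalent and cheaper to maximize the between-class variance, updated incrementally:

// Standard Otsu: pick the threshold that maximizes between-class variance.
public static int otsuThreshold(int[] histogram, int totalPixels) {
    long sumAll = 0;
    for (int i = 0; i < 256; i++) sumAll += (long) i * histogram[i];
    long sumB = 0;      // running sum of gray*count for the background class
    int wB = 0;         // background weight (pixel count so far)
    double bestVar = -1;
    int bestT = 0;
    for (int t = 0; t < 256; t++) {
        wB += histogram[t];
        if (wB == 0) continue;
        int wF = totalPixels - wB;
        if (wF == 0) break;
        sumB += (long) t * histogram[t];
        double mB = (double) sumB / wB;              // background mean
        double mF = (double) (sumAll - sumB) / wF;   // foreground mean
        double betweenVar = (double) wB * wF * (mB - mF) * (mB - mF);
        if (betweenVar > bestVar) { bestVar = betweenVar; bestT = t; }
    }
    return bestT;
}

Binarizing is then a single pass: set each pixel to 255 if its gray value is above the returned threshold, and 0 otherwise.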

Android Zipper Animation for unlock screen

I am currently working on a zip animation to unlock an Android mobile screen. Changing background images is an expensive task and does not give a smooth effect. I want a smooth effect. Any help please? Thanks
Try this:
The smooth effect makes use of a convolution matrix:
Some image effects are better implemented using the convolution matrix method, like Gaussian blur, sharpening, embossing, smoothing…
Check that link to learn more about the convolution matrix, or another one.
To do the convolution matrix:
import android.graphics.Bitmap;
import android.graphics.Color;
public class ConvolutionMatrix
{
public static final int SIZE = 3;
public double[][] Matrix;
public double Factor = 1;
public double Offset = 1;
public ConvolutionMatrix(int size) {
Matrix = new double[size][size];
}
public void setAll(double value) {
for (int x = 0; x < SIZE; ++x) {
for (int y = 0; y < SIZE; ++y) {
Matrix[x][y] = value;
}
}
}
public void applyConfig(double[][] config) {
for(int x = 0; x < SIZE; ++x) {
for(int y = 0; y < SIZE; ++y) {
Matrix[x][y] = config[x][y];
}
}
}
public static Bitmap computeConvolution3x3(Bitmap src, ConvolutionMatrix matrix) {
int width = src.getWidth();
int height = src.getHeight();
Bitmap result = Bitmap.createBitmap(width, height, src.getConfig());
int A, R, G, B;
int sumR, sumG, sumB;
int[][] pixels = new int[SIZE][SIZE];
for(int y = 0; y < height - 2; ++y) {
for(int x = 0; x < width - 2; ++x) {
// get pixel matrix
for(int i = 0; i < SIZE; ++i) {
for(int j = 0; j < SIZE; ++j) {
pixels[i][j] = src.getPixel(x + i, y + j);
}
}
// get alpha of center pixel
A = Color.alpha(pixels[1][1]);
// init color sum
sumR = sumG = sumB = 0;
// get sum of RGB on matrix
for(int i = 0; i < SIZE; ++i) {
for(int j = 0; j < SIZE; ++j) {
sumR += (Color.red(pixels[i][j]) * matrix.Matrix[i][j]);
sumG += (Color.green(pixels[i][j]) * matrix.Matrix[i][j]);
sumB += (Color.blue(pixels[i][j]) * matrix.Matrix[i][j]);
}
}
// get final Red
R = (int)(sumR / matrix.Factor + matrix.Offset);
if(R < 0) { R = 0; }
else if(R > 255) { R = 255; }
// get final Green
G = (int)(sumG / matrix.Factor + matrix.Offset);
if(G < 0) { G = 0; }
else if(G > 255) { G = 255; }
// get final Blue
B = (int)(sumB / matrix.Factor + matrix.Offset);
if(B < 0) { B = 0; }
else if(B > 255) { B = 255; }
// apply new pixel
result.setPixel(x + 1, y + 1, Color.argb(A, R, G, B));
}
}
// final image
return result;
}
}
Then to do Smooth effect
public static Bitmap smooth(Bitmap src, double value) {
ConvolutionMatrix convMatrix = new ConvolutionMatrix(3);
convMatrix.setAll(1);
convMatrix.Matrix[1][1] = value;
convMatrix.Factor = value + 8;
convMatrix.Offset = 1;
return ConvolutionMatrix.computeConvolution3x3(src, convMatrix);
}
You can change values and get the smooth effect as you want.
That tutorial it's found HERE

JNI UnsatisfiedLinkError - Android

I'm a beginner in JNI and I'm trying to load a library, but I keep getting an UnsatisfiedLinkError in the log. I've checked all of my files multiple times, but I still get the same error.
Android.mk
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)
LOCAL_MODULE := native_sample
LOCAL_SRC_FILES := myFirstApp.cpp
LOCAL_LDLIBS += -llog -ldl
include $(BUILD_SHARED_LIBRARY)
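One thing worth noting (my observation, not part of the original question): myFirstApp.cpp below depends on OpenCV, yet this Android.mk never links it. With the OpenCV4Android SDK, the makefile typically also includes OpenCV.mk before the module definition, roughly like this (the SDK path is a placeholder):

LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)
OPENCV_INSTALL_MODULES := on
include $(OPENCV_SDK)/sdk/native/jni/OpenCV.mk   # OPENCV_SDK is a placeholder path
LOCAL_MODULE := native_sample
LOCAL_SRC_FILES := myFirstApp.cpp
LOCAL_LDLIBS += -llog -ldl
include $(BUILD_SHARED_LIBRARY)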
myFirstApp.cpp
#include <jni.h>
#include <opencv2/core/core.hpp>
#include "opencv2/highgui/highgui.hpp"
#include <vector>
#include <math.h>
#include <android/log.h>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/legacy/legacy.hpp>
#include "opencv2/video/tracking.hpp"
#include <time.h>
#include <math.h>
#include <string.h>
#define APPNAME "myFirstApp"
using namespace std;
using namespace cv;
extern "C" {
static int numTemplate = 24;
static int tWidth = 256;
static int tHight = 256;
static vector<Mat> tmplts;
static vector<vector<KeyPoint> > keyPointsTmplts;
static vector<Mat> descriptorsTmplts;
static vector<Mat> trainDescriptors;
// find squares vars
static vector<vector<Point> > squares;
static vector<Point2f> squaresCenters;
static vector<int> squaresAbsAreas;
static vector<int> clustersAreas;
static double scaleFactor = 1.5;
static double MARKER_RATIO = 0.03;
//clustering vars
static vector<Point2f> clusterCenters;
static vector<vector<Point> > clusterBoundaries;
static int CLUSTERTHRESHOLD = 25;
//tracking variables
static Mat prevFrame;
static vector<Point2f> oldPoints;
static TermCriteria termcrit(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03);
static Size winSize(31, 31);
static Size subPixWinSize(10, 10);
//Recognition
static std::vector<int> matchingResults;
static vector<Mat> hos;
static Mat warpedImg;
static Mat cropped;
static vector<vector<KeyPoint> > candidatesKeypoints;
vector<Mat> candidates;
// DETECTION
static FastFeatureDetector detector(16);
//static int MAX_KEYPOINTS_COUNT=100;
//static GoodFeaturesToTrackDetector detector(MAX_KEYPOINTS_COUNT, 0.01, 10, 3, false, 0.04);
// DESCRIPTOR
static FREAK extractor;
// MATCHER
static BFMatcher matcher(NORM_HAMMING, true);
double diffclock(clock_t clock1, clock_t clock2);
// for int arrays
static Mat points2Mat(const Point* p) {
Mat srcP(4, 2, CV_32FC1);
for (int i = 0; i < 4; i++) {
srcP.at<float>(i, 0) = p[i].x;
srcP.at<float>(i, 1) = p[i].y;
}
return srcP;
}
// for float arrays
static Mat points2MatF(const Point2f* p) {
Mat srcP(4, 2, CV_32FC1);
for (int i = 0; i < 4; i++) {
srcP.at<float>(i, 0) = p[i].x;
srcP.at<float>(i, 1) = p[i].y;
}
return srcP;
}
static Mat prepareWarpDstMat(const Point* p) {
Mat dstP = cvCreateMat(4, 2, CV_32FC1);
dstP.at<float>(0, 0) = p[0].x;
dstP.at<float>(0, 1) = p[0].y;
dstP.at<float>(1, 0) = p[0].x + tWidth;
dstP.at<float>(1, 1) = p[0].y;
dstP.at<float>(2, 0) = p[0].x + tWidth;
dstP.at<float>(2, 1) = p[0].y + tHight;
dstP.at<float>(3, 0) = p[0].x;
dstP.at<float>(3, 1) = p[0].y + tHight;
return dstP;
}
//-----------------------------Find Squares-------------------------------------------
// helper function:
// finds a cosine of angle between vectors
// from pt0->pt1 and from pt0->pt2
static double angle(Point pt1, Point pt2, Point pt0) {
double dx1 = pt1.x - pt0.x;
double dy1 = pt1.y - pt0.y;
double dx2 = pt2.x - pt0.x;
double dy2 = pt2.y - pt0.y;
return (dx1 * dx2 + dy1 * dy2)
/ sqrt((dx1 * dx1 + dy1 * dy1) * (dx2 * dx2 + dy2 * dy2) + 1e-10);
}
static void clearVectors() {
// clear all vectors from data
squares.clear();
matchingResults.clear();
squaresCenters.clear();
squaresAbsAreas.clear();
clusterCenters.clear();
clusterBoundaries.clear();
clustersAreas.clear();
candidatesKeypoints.clear();
candidates.clear();
}
// the sequence is stored in the specified memory storage
static void findSquares(const Mat& grayImg) {
clock_t begin = clock();
Mat timg, gray, scaledImg;
resize(grayImg, scaledImg, Size(0, 0), 1 / scaleFactor, 1 / scaleFactor,
CV_INTER_CUBIC);
__android_log_print(ANDROID_LOG_VERBOSE, APPNAME, "scaledImage %i %i",
scaledImg.cols,scaledImg.rows);
// Gaussian blurring better than pyr up and down
GaussianBlur(scaledImg, timg, Size(5, 5), 0, 0, BORDER_DEFAULT); // t11
vector<vector<Point> > contours;
// find squares in every color plane of the image
Canny(timg, gray, 50, 200, 5); //t3
dilate(gray, gray, Mat(), Point(-1, -1));
// find contours and store them all as a list
findContours(gray, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
vector<Point> approx;
// test each contour
for (int i = 0; i < contours.size(); i++) {
// approximate contour with accuracy proportional
// to the contour perimeter
approxPolyDP(Mat(contours[i]), approx,
arcLength(Mat(contours[i]), true) * 0.02, true);
// square contours should have 4 vertices after approximation
// relatively large area (to filter out noisy contours)
// and be convex.
// Note: absolute value of an area is used because
// area may be positive or negative - in accordance with the
// contour orientation
int imgArea = gray.cols * gray.rows;
int absArea = fabs(contourArea(Mat(approx)));
if (approx.size() == 4 && absArea > 1000 && isContourConvex(Mat(approx))
&& absArea < 0.8 * imgArea) {
double maxCosine = 0;
Point a, b, c, d;
for (int j = 2; j < 5; j++) {
// find the maximum cosine of the angle between joint edges
a = approx[j % 4];
b = approx[j - 2];
c = approx[j - 1];
double cosine = fabs(angle(a, b, c));
maxCosine = MAX(maxCosine, cosine);
}
// if cosines of all angles are small
// (all angles are ~90 degree) then write quandrange
// vertices to resultant sequence
if (maxCosine < 0.3) {
// restore scaling
Point* p0 = (Point*) &approx[0];
Point* p1 = (Point*) &approx[1];
Point* p2 = (Point*) &approx[2];
Point* p3 = (Point*) &approx[3];
p0->x = p0->x * scaleFactor;
p0->y = p0->y * scaleFactor;
p1->x = p1->x * scaleFactor;
p1->y = p1->y * scaleFactor;
p2->x = p2->x * scaleFactor;
p2->y = p2->y * scaleFactor;
p3->x = p3->x * scaleFactor;
p3->y = p3->y * scaleFactor;
Point2f center = (*p0 + *p1 + *p2 + *p3) * (0.25);
// //__android_log_print(ANDROID_LOG_VERBOSE, APPNAME, "squareCenter %G,%G",center.x,center.y);
squares.push_back(approx);
squaresCenters.push_back(center);
squaresAbsAreas.push_back(absArea);
}
}
}
}
//--------------------------------Cluster Rectangles-------------------------------------
static void updateCluster(int pNum, int* clusters, int n) {
for (int i = 0; i < n; i++) {
if (clusters[pNum] != clusters[i]) {
Point2f p0 = (Point2f) squaresCenters[pNum];
Point2f p1 = (Point2f) squaresCenters[i];
// //__android_log_print(ANDROID_LOG_VERBOSE, APPNAME, "norm %G",
// norm(p0-p1));
if (norm(p0 - p1) < CLUSTERTHRESHOLD) {
clusters[i] = clusters[pNum];
updateCluster(i, clusters, n);
}
}
}
}
static int TRACKED_THRESHOLD = 100;
static bool inTrackingList(Point2f clusterCenter) {
// int tracklistSize = trackedMarkersCenters.size();
// Point2f trackedCenter;
// for (int i = 0; i < tracklistSize; i++) {
// trackedCenter = (Point2f) trackedMarkersCenters[i][0];
//// //__android_log_print(ANDROID_LOG_VERBOSE, APPNAME, "dist %G",
//// norm(clusterCenter - trackedCenter));
// if (norm(clusterCenter - trackedCenter) < TRACKED_THRESHOLD)
// return true;
// }
return false;
}
static void mergeRectangles() {
int n = squaresCenters.size();
int clusters[n];
int clusterCounter = 0;
for (int i = 0; i < n; i++)
clusters[i] = -1;
for (int i = 0; i < n; i++)
if (clusters[i] == -1) {
clusters[i] = clusterCounter;
clusterCounter++;
updateCluster(i, clusters, n);
}
//__android_log_print(ANDROID_LOG_VERBOSE, APPNAME, "clusters %i",
//clusterCounter);
int members, maxArea;
for (int i = 0; i < clusterCounter; i++) {
members = 0;
Point2f center(0, 0);
maxArea = -1;
vector<Point> maxSquare;
for (int j = 0; j < n; j++) {
if (clusters[j] == i) {
center += (Point2f) squaresCenters[j];
members++;
if (maxArea < (int) squaresAbsAreas[j]) {
maxArea = (int) squaresAbsAreas[j];
maxSquare = squares[j];
}
}
}
center *= (1.0 / members);
if (!inTrackingList(center)) {
clusterCenters.push_back(center);
clusterBoundaries.push_back(maxSquare);
clustersAreas.push_back(maxArea);
matchingResults.push_back(-3);
const Point* floPoin = &maxSquare[0];
Mat scene = points2Mat(floPoin);
}
}
}
//------------------------------Process Filtered squares--------------------------------------
static int imageArea;
// crop squares
static void cropAndWarpCandidate(Mat& grayImg, const Point* p, int i) {
//__android_log_print(ANDROID_LOG_VERBOSE, APPNAME, "Area %i %i %G %G", i,
// clustersAreas[i], imageArea, clustersAreas[i] * 1.0 / imageArea);
//
// if (clustersAreas[i] < MARKER_RATIO * imageArea) {
//
// matchingResults[i] = -2;
// return;
// }
Mat srcPMat = points2Mat(p);
Mat dstPMat = prepareWarpDstMat(p);
Mat ho = findHomography(srcPMat, dstPMat, 0);
warpPerspective(grayImg, warpedImg, ho,
Size(grayImg.cols + tWidth, grayImg.rows + tHight));
cropped = Mat(warpedImg, Rect(p[0].x, p[0].y, tWidth, tHight));
// int templateIndex = matchCandidate(cropped);
//
// matchingResults[i] = templateIndex;
candidates.push_back(cropped);
srcPMat.release();
dstPMat.release();
warpedImg.release();
cropped.release();
}
// the function draws all the squares in the image
static void processFilteredSquares(Mat& grayImg) {
imageArea = grayImg.cols * grayImg.rows;
int squaresSize = clusterBoundaries.size();
//__android_log_print(ANDROID_LOG_VERBOSE, APPNAME, "filtered %d",
// squaresSize);
for (int i = 0; i < squaresSize; i++) {
const Point* p = &clusterBoundaries[i][0];
cropAndWarpCandidate(grayImg, p, i);
}
}
//------------------Drawing ---------------------------------------
static void drawFilteredSquaresWithoutMarker(Mat& rgbImg) {
int squaresSize = clusterBoundaries.size();
__android_log_print(ANDROID_LOG_VERBOSE, APPNAME, "squares %i",
squaresSize);
int n = 4;
for (int i = 0; i < squaresSize; i++) {
const Point* p = &clusterBoundaries[i][0];
Point center = clusterCenters[i];
polylines(rgbImg, &p, &n, 1, true, Scalar(0, 255, 0, 255), 3, CV_AA);
circle(rgbImg, center, 10, Scalar(0, 255, 0, 255));
}
}
// calc time helper
double diffclock(clock_t clock1, clock_t clock2) {
double diffticks = clock1 - clock2;
double diffms = (diffticks * 1000) / CLOCKS_PER_SEC;
return diffms;
}
JNIEXPORT jint JNICALL Java_com_example_myfirstapp_RegisterMarkerMain_findMarkersNative(
JNIEnv* env, jobject, jlong addrRgba) {
//clock_t begin = clock();
Mat& mRgb = *(Mat*) addrRgba;
Mat mgray(mRgb.rows, mRgb.cols, CV_8UC1);
cvtColor(mRgb, mgray, CV_RGBA2GRAY, 1); // the working one
clearVectors();
findSquares(mgray);
mergeRectangles();
processFilteredSquares(mgray);
drawFilteredSquaresWithoutMarker(mRgb);
__android_log_print(ANDROID_LOG_VERBOSE, APPNAME, "Candidates %i",candidates.size());
return clusterBoundaries.size();
// clock_t end = clock();
// mgray.release();
}
JNIEXPORT void JNICALL Java_com_example_myfirstapp_RegisterMarkerMain_loadCand(
JNIEnv* env, jobject, jlong addrRgba, jlong descriptorAdd, jint i) {
vector<KeyPoint> keyPoints;
Mat nativeM = candidates[i];
Mat& mRgb = *(Mat*) addrRgba;
Mat& descriptor = *(Mat*) descriptorAdd;
nativeM.copyTo(mRgb);
Mat descriptorUnFiltered;
detector.detect(nativeM, keyPoints);
if(keyPoints.size()==0)
return;
extractor.compute(nativeM, keyPoints, descriptorUnFiltered);
vector<vector<DMatch> > matches;
if(descriptorUnFiltered.rows==0)
return;
matcher.radiusMatch(descriptorUnFiltered, descriptorUnFiltered, matches,
50);
descriptor = descriptorUnFiltered.row(0);
std::vector<DMatch> mat;
for (int j = 1; j < matches.size(); j++) {
mat = matches[j];
// if no matches neglect
if (mat.size() >= 2) {
DMatch m = mat[1];
if (m.trainIdx < m.queryIdx)
continue;
else
vconcat(descriptor, descriptorUnFiltered.row(m.queryIdx),
descriptor);
} else {
DMatch m0 = mat[0];
vconcat(descriptor, descriptorUnFiltered.row(m0.queryIdx),
descriptor);
}
}
}
}
This is where I call loadLibrary in the activity:
private BaseLoaderCallback mLoaderCallback = new BaseLoaderCallback(this) {
@Override
public void onManagerConnected(int status) {
Log.i("loading libs", "OpenCV loading status " + status);
switch (status) {
case LoaderCallbackInterface.SUCCESS: {
Log.i("loading libs", "OpenCV loaded successfully");
// Load native library after(!) OpenCV initialization
System.loadLibrary("native_sample");
}
break;
default: {
super.onManagerConnected(status);
}
break;
}
}
};
Any help would be really appreciated.
The UnsatisfiedLinkError is thrown when an application attempts to load a native library, like .so on Linux, .dll on Windows, or .dylib on macOS, and that library does not exist.
This is what it threw in the console:
  java.lang.UnsatisfiedLinkError: dalvik.system.PathClassLoader[DexPathList[[zip file "/system/framework/org.apache.http.legacy.boot.jar", zip file "/data/app/com.imaniac.myo-QS9EJbxzOjKhre3FebKwoA==/base.apk"], nativeLibraryDirectories=[/data/app/com.imaniac.myo-QS9EJbxzOjKhre3FebKwoA==/lib/arm64, /system/lib64]]] could not find "libgesture-classifier.so"
         at java.lang.Runtime.loadLibrary0(Runtime.java:1012)
         at java.lang.System.loadLibrary(System.java:1669)
Well, it worked for me by adding this in projectfolder\src\main:
   [1]: https://mega.nz/#!HsVijIxa!CLbeM1BhpEd5sUrErFglP7R8BaHPKaYTG3CkCkaoXpk
Try adding that library to the path I mentioned earlier (projectfolder\src\main).
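As a general note (an assumption about the usual Gradle layout, not something stated in the original answer): prebuilt .so files are normally placed under src/main/jniLibs/<abi>/, where the Android Gradle plugin packages them automatically, for example:

app/src/main/jniLibs/armeabi-v7a/libnative_sample.so
app/src/main/jniLibs/arm64-v8a/libnative_sample.so
app/src/main/jniLibs/x86/libnative_sample.so

If no .so for the ABI the device actually runs (arm64 in the log above) ends up in the APK, System.loadLibrary fails with exactly this error.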

pixelate image in code

I've searched for how to pixelate an image in Android via code; the results are varied.
I've found libraries and tutorials on how to apply other effects here: http://xjaphx.wordpress.com/learning/tutorials/
Can someone clear things up for me: what is the simplest way of pixelating an image on the fly in Android?
It would also be handy if it was a function where I could control how many rounds, or how much, I wanted the image pixelated.
Thanks in advance.
The simplest way to pixelate the image would be to scale the image down using a "nearest neighbour" algorithm, and then scale it back up using the same algorithm.
Filtering over the image trying to find an average takes much more time, but does not actually give any improvement in result quality; after all, you intentionally want your image distorted.
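To make that concrete, here is a minimal sketch (my addition; Bitmap.createScaledBitmap with filter set to false uses nearest-neighbour scaling, and blockSize is a hypothetical parameter controlling how coarse the result looks):

// Pixelate by downscaling without filtering, then upscaling back.
public static Bitmap pixelate(Bitmap src, int blockSize) {
    int w = Math.max(1, src.getWidth() / blockSize);
    int h = Math.max(1, src.getHeight() / blockSize);
    Bitmap small = Bitmap.createScaledBitmap(src, w, h, false);   // nearest neighbour
    return Bitmap.createScaledBitmap(small, src.getWidth(), src.getHeight(), false);
}

A larger blockSize gives a blockier image, which also answers the "how much pixelation" part of the question.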
I have done this before in VB.NET, and it is easily made into a function whose parameter controls how pixelated you want it.
The basic idea is to scan the image in blocks of x width and y height. For each block, you find the average RGB value and set all of those pixels to that color; the smaller the block size, the less pixelated the result. A rough sketch:
int avR, avG, avB; // store the average of r, g and b
int pixel;
Bitmap bmOut = Bitmap.createBitmap(width, height, src.getConfig());
for (int x = 0; x < width; x += pixelationAmount) { // do the whole image
    for (int y = 0; y < height; y += pixelationAmount) {
        avR = 0; avG = 0; avB = 0;
        for (int xx = x; xx < x + pixelationAmount; xx++) { // YOU WILL WANT TO PUT SOME OUT OF BOUNDS CHECKING HERE
            for (int yy = y; yy < y + pixelationAmount; yy++) { // this is scanning the colors
                pixel = src.getPixel(xx, yy);
                avR += Color.red(pixel);
                avG += Color.green(pixel);
                avB += Color.blue(pixel);
            }
        }
        avR /= pixelationAmount * pixelationAmount; // divide by the number of samples taken to get an average
        avG /= pixelationAmount * pixelationAmount;
        avB /= pixelationAmount * pixelationAmount;
        for (int xx = x; xx < x + pixelationAmount; xx++) { // YOU WILL WANT TO PUT SOME OUT OF BOUNDS CHECKING HERE
            for (int yy = y; yy < y + pixelationAmount; yy++) { // this is going back over the block
                bmOut.setPixel(xx, yy, Color.argb(255, avR, avG, avB)); // sets the block to the average color
            }
        }
    }
}
Sorry about the bad formatting (I wrote it in Notepad quickly), but I thought it might give you a framework to make your own pixelate function.
This is a corrected version of the above algorithm that works:
Bitmap bmOut = Bitmap.createBitmap(OriginalBitmap.getWidth(),OriginalBitmap.getHeight(),OriginalBitmap.getConfig());
int pixelationAmount = 50; //you can change it!!
int width = OriginalBitmap.getWidth();
int height = OriginalBitmap.getHeight();
int avR,avB,avG; // store average of rgb
int pixel;
for(int x = 0; x < width; x+= pixelationAmount) { // do the whole image
for(int y = 0; y < height; y+= pixelationAmount) {
avR = 0; avG = 0; avB =0;
int bx = x + pixelationAmount;
int by = y + pixelationAmount;
if(by >= height) by = height;
if(bx >= width)bx = width;
for(int xx =x; xx < bx;xx++){// YOU WILL WANT TO PUT SOME OUT OF BOUNDS CHECKING HERE
for(int yy= y; yy < by;yy++){ // this is scanning the colors
pixel = OriginalBitmap.getPixel(xx, yy);
avR += (int) (Color.red(pixel));
avG+= (int) (Color.green(pixel));
avB += (int) (Color.blue(pixel));
}
}
avR /= (bx - x) * (by - y); // divide by the number of samples actually taken (note: ^ is XOR in Java, not power)
avG /= (bx - x) * (by - y);
avB /= (bx - x) * (by - y);
for(int xx =x; xx < bx;xx++)// YOU WILL WANT TO PUT SOME OUT OF BOUNDS CHECKING HERE
for(int yy= y; yy <by;yy++){ // this is going back over the block
bmOut.setPixel(xx, yy, Color.argb(255, avR, avG,avB)); //sets the block to the average color
}
}
}
iv.setImageBitmap(bmOut);
Anyway, it was not what I was looking for.
I have changed the previous algorithm completely, and it really does something like a mosaic filter!
The idea is to replace each block's pixels with the pixels of the block below it.
Simply use this function:
public void filter(){
Bitmap bmOut = Bitmap.createBitmap(OriginalBitmap.getWidth(),OriginalBitmap.getHeight(),OriginalBitmap.getConfig());
int pixelationAmount = 10;
Bitmap a = Bitmap.createBitmap(pixelationAmount,pixelationAmount,OriginalBitmap.getConfig());
Bitmap b = Bitmap.createBitmap(pixelationAmount,pixelationAmount,OriginalBitmap.getConfig());
int width = OriginalBitmap.getWidth();
int height = OriginalBitmap.getHeight();
int pixel;
int counter = 1;
int px = 0;int py = 0;int pbx=0;int pby=0;
for(int x = 0; x < width; x+= pixelationAmount) { // do the whole image
for(int y = 0; y < height; y+= pixelationAmount) {
int bx = x + pixelationAmount;
int by = y + pixelationAmount;
if(by >= height) by = height;
if(bx >= width)bx = width;
int xxx = -1;
int yyy = -1;
for(int xx =x; xx < bx;xx++){// YOU WILL WANT TO PUYT SOME OUT OF BOUNDS CHECKING HERE
xxx++;
yyy = -1;
for(int yy= y; yy < by;yy++){ // this is scanning the colors
yyy++;
pixel = OriginalBitmap.getPixel(xx, yy);
if(counter == 1)
{
a.setPixel(xxx, yyy, pixel);
px = x;//previous x
py = y;//previous y
pbx = bx;
pby = by;
}
else
b.setPixel(xxx, yyy, pixel);
}
}
counter++;
if(counter == 3)
{
int xxxx = -1;
int yyyy = -1;
for(int xx =x; xx < bx;xx++)
{
xxxx++;
yyyy = -1;
for(int yy= y; yy <by;yy++){
yyyy++;
bmOut.setPixel(xx, yy, b.getPixel(xxxx, yyyy));
}
}
for(int xx =px; xx < pbx;xx++)
{
for(int yy= py; yy <pby;yy++){
bmOut.setPixel(xx, yy, a.getPixel(xxxx, yyyy)); //sets the block to the average color
}
}
counter = 1;
}
}
}
image_view.setImageBitmap(bmOut);
}
This is the code I used:
ImageFilter is the parent class:
public abstract class ImageFilter {
protected int [] pixels;
protected int width;
protected int height;
public ImageFilter (int [] _pixels, int _width,int _height){
setPixels(_pixels,_width,_height);
}
public void setPixels(int [] _pixels, int _width,int _height){
pixels = _pixels;
width = _width;
height = _height;
}
/**
* a weighted Euclidean distance in RGB space
* @param c1
* @param c2
* @return
*/
public double colorDistance(int c1, int c2)
{
int red1 = Color.red(c1);
int red2 = Color.red(c2);
int rmean = (red1 + red2) >> 1;
int r = red1 - red2;
int g = Color.green(c1) - Color.green(c2);
int b = Color.blue(c1) - Color.blue(c2);
return Math.sqrt((((512+rmean)*r*r)>>8) + 4*g*g + (((767-rmean)*b*b)>>8));
}
public abstract int[] procImage();
}
public class PixelateFilter extends ImageFilter {
int pixelSize;
int[] colors;
/**
* @param _pixels
* @param _width
* @param _height
*/
public PixelateFilter(int[] _pixels, int _width, int _height) {
this(_pixels, _width, _height, 10);
}
public PixelateFilter(int[] _pixels, int _width, int _height, int _pixelSize) {
this(_pixels, _width, _height, _pixelSize, null);
}
public PixelateFilter(int[] _pixels, int _width, int _height, int _pixelSize, int[] _colors) {
super(_pixels, _width, _height);
pixelSize = _pixelSize;
colors = _colors;
}
/* (non-Javadoc)
* @see imageProcessing.ImageFilter#procImage()
*/
@Override
public int[] procImage() {
for (int i = 0; i < width; i += pixelSize) {
for (int j = 0; j < height; j += pixelSize) {
int rectColor = getRectColor(i, j);
fillRectColor(rectColor, i, j);
}
}
return pixels;
}
private int getRectColor(int col, int row) {
int r = 0, g = 0, b = 0;
int sum = 0;
for (int x = col; x < col + pixelSize; x++) {
for (int y = row; y < row + pixelSize; y++) {
int index = x + y * width;
if (index < width * height) {
int color = pixels[x + y * width];
r += Color.red(color);
g += Color.green(color);
b += Color.blue(color);
}
}
}
sum = pixelSize * pixelSize;
int newColor = Color.rgb(r / sum, g / sum, b / sum);
if (colors != null)
newColor = getBestMatch(newColor);
return newColor;
}
private int getBestMatch(int color) {
double diff = Double.MAX_VALUE;
int res = color;
for (int c : colors) {
double currDiff = colorDistance(color, c);
if (currDiff < diff) {
diff = currDiff;
res = c;
}
}
return res;
}
private void fillRectColor(int color, int col, int row) {
for (int x = col; x < col + pixelSize; x++) {
for (int y = row; y < row + pixelSize; y++) {
int index = x + y * width;
if (x < width && y < height && index < width * height) {
pixels[x + y * width] = color;
}
}
}
}
public static final Bitmap changeToPixelate(Bitmap bitmap, int pixelSize, int [] colors) {
int width = bitmap.getWidth();
int height = bitmap.getHeight();
int[] pixels = new int[width * height];
bitmap.getPixels(pixels, 0, width, 0, 0, width, height);
PixelateFilter pixelateFilter = new PixelateFilter(pixels, width, height, pixelSize, colors);
int[] returnPixels = pixelateFilter.procImage();
Bitmap returnBitmap = Bitmap.createBitmap(returnPixels, width, height, Bitmap.Config.ARGB_8888);
return returnBitmap;
}
}
Here is how you use it:
int [] colors = new int [] { Color.BLACK,Color.WHITE,Color.BLUE,Color.CYAN,Color.RED};
final Bitmap bmOut = PixelateFilter.changeToPixelate(OriginalBitmap, pixelSize,colors);

implement water color effect on image using JNI

I implemented some code for a watercolor effect on an image in Android, but it was too slow (it takes more than 2 minutes), so now I'm trying to implement it in JNI for better speed.
Here is my Java code.
inPixels holds the pixels of the Bitmap.
protected int[] filterPixels( int width, int height, int[] inPixels )
{
int levels = 256;
int index = 0;
int[] rHistogram = new int[levels];
int[] gHistogram = new int[levels];
int[] bHistogram = new int[levels];
int[] rTotal = new int[levels];
int[] gTotal = new int[levels];
int[] bTotal = new int[levels];
int[] outPixels = new int[width * height];
for (int y = 0; y < height; y++)
{
for (int x = 0; x < width; x++)
{
for (int i = 0; i < levels; i++)
rHistogram[i] = gHistogram[i] = bHistogram[i] = rTotal[i] = gTotal[i] = bTotal[i] = 0;
for (int row = -range; row <= range; row++)
{
int iy = y+row;
int ioffset;
if (0 <= iy && iy < height)
{
ioffset = iy*width;
for (int col = -range; col <= range; col++)
{
int ix = x+col;
if (0 <= ix && ix < width) {
int rgb = inPixels[ioffset+ix];
int r = (rgb >> 16) & 0xff;
int g = (rgb >> 8) & 0xff;
int b = rgb & 0xff;
int ri = r*levels/256;
int gi = g*levels/256;
int bi = b*levels/256;
rTotal[ri] += r;
gTotal[gi] += g;
bTotal[bi] += b;
rHistogram[ri]++;
gHistogram[gi]++;
bHistogram[bi]++;
}
}
}
}
int r = 0, g = 0, b = 0;
for (int i = 1; i < levels; i++)
{
if (rHistogram[i] > rHistogram[r])
r = i;
if (gHistogram[i] > gHistogram[g])
g = i;
if (bHistogram[i] > bHistogram[b])
b = i;
}
r = rTotal[r] / rHistogram[r];
g = gTotal[g] / gHistogram[g];
b = bTotal[b] / bHistogram[b];
outPixels[index] = (inPixels[index] & 0xff000000) | ( r << 16 ) | ( g << 8 ) | b;
index++;
}
}
return outPixels;
}
(Output image)
I tried to convert this Java code to C code, but I don't know what is wrong.
Here is the C code:
void filterPixels( int width, int height, int inPixels[] )
{
int levels = 256;
int index = 0;
int rHistogram [levels];
int gHistogram [levels];
int bHistogram [levels];
int rTotal [levels];
int gTotal [levels];
int bTotal [levels];
int outPixels [width * height];
//Loop Variables
int y ;
int x ;
int i ;
int row ;
int col ;
int j ;
int range = 5 ;
for ( y = 0; y < height; y++)
{
for ( x = 0; x < width; x++)
{
for ( i = 0; i < levels; i++)
rHistogram[i] = gHistogram[i] = bHistogram[i] = rTotal[i] = gTotal[i] = bTotal[i] = 0;
for ( row = -range; row <= range; row++)
{
int iy = y+row;
int ioffset;
if (0 <= iy && iy < height)
{
ioffset = iy*width;
for ( col = -range; col <= range; col++)
{
int ix = x+col;
if (0 <= ix && ix < width) {
int rgb = inPixels[ioffset+ix];
int r = (rgb >> 16) & 0xff;
int g = (rgb >> 8) & 0xff;
int b = rgb & 0xff;
int ri = r*levels/256;
int gi = g*levels/256;
int bi = b*levels/256;
rTotal[ri] += r;
gTotal[gi] += g;
bTotal[bi] += b;
rHistogram[ri]++;
gHistogram[gi]++;
bHistogram[bi]++;
}
}
}
}
int r = 0, g = 0, b = 0;
for ( j = 1; j < levels; j++)
{
if (rHistogram[j] > rHistogram[r])
r = j;
if (gHistogram[j] > gHistogram[g])
g = j;
if (bHistogram[j] > bHistogram[b])
b = j;
}
r = rTotal[r] / rHistogram[r];
g = gTotal[g] / gHistogram[g];
b = bTotal[b] / bHistogram[b];
outPixels[index] = (inPixels[index] & 0xff000000) | ( r << 16 ) | ( g << 8 ) | b;
index++;
}
}
}
I checked the pixel values from the Java code and the C code; both are the same (for the same image).
Here is the code that calls the native function from my Android activity:
int[] pix = new int[oraginal.getWidth() * oraginal.getHeight()];
Bitmap bitmap = oraginal.copy(oraginal.getConfig(), true);
bitmap.getPixels(pix, 0, bitmap.getWidth(), 0, 0,bitmap.getWidth(), bitmap.getHeight());
filterPixelsJNI(bitmap.getWidth(), bitmap.getHeight(), pix);
bitmap.setPixels(pix, 0, bitmap.getWidth(), 0, 0,bitmap.getWidth(), bitmap.getHeight());
myView.setImageBitmap(bitmap);
This is my first try at JNI, so please help me with this.
UPDATE
public native void filterPixelsJNI( int width, int height, int inPixels[] );
JNI
JNIEXPORT void JNICALL Java_com_testndk_HelloWorldActivity_filterPixelsJNI (JNIEnv * env, jobject obj , jint width,jint height,jint inPixels[]){
filterPixels( width, height, inPixels);
}
filterPixels is the method called from the C code.
There are several problems with your JNI code. The algorithmic part is probably correct, but you're not dealing with the Java array to C array conversion correctly.
First of all, the last argument of Java_com_testndk_HelloWorldActivity_filterPixelsJNI should be of type jintArray, and not jint []. This is how you pass a Java array to C code.
Once you get this array, you can't process it directly; you'll have to convert it to a C array:
JNIEXPORT void JNICALL Java_com_testndk_HelloWorldActivity_filterPixelsJNI (JNIEnv * env, jobject obj , jint width, jint height, jintArray inPixels) {
int *c_inPixels = (*env)->GetIntArrayElements(env, inPixels, NULL);
filterPixels( width, height, c_inPixels);
// passing 0 as the last argument should copy native array to Java array
(*env)->ReleaseIntArrayElements(env, inPixels, c_inPixels, 0);
}
I advise you to look at the JNI documentation, which explains how to deal with arrays: http://docs.oracle.com/javase/1.5.0/docs/guide/jni/spec/functions.html
Note that there are now easier ways of processing Java Bitmap objects using the Android NDK. See another of my answers here for details.
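Regarding that last point, here is a minimal sketch of the bitmap-locking route (my illustration: the function name filterBitmapJNI is hypothetical, and it assumes filterPixels is changed to write its result back into the buffer it receives, since as posted it fills a local outPixels and then discards it):

#include <jni.h>
#include <android/bitmap.h>   /* link with -ljnigraphics */

JNIEXPORT void JNICALL Java_com_testndk_HelloWorldActivity_filterBitmapJNI(JNIEnv *env, jobject obj, jobject bitmap) {
    AndroidBitmapInfo info;
    void *pixels;
    /* query format and dimensions of the Java Bitmap */
    if (AndroidBitmap_getInfo(env, bitmap, &info) < 0) return;
    if (info.format != ANDROID_BITMAP_FORMAT_RGBA_8888) return;   /* expect 32-bit pixels */
    /* lock the pixel buffer and process it in place; no int[] copy needed */
    if (AndroidBitmap_lockPixels(env, bitmap, &pixels) < 0) return;
    /* note: the in-memory byte order here is R,G,B,A, not the A,R,G,B ints that
       Bitmap.getPixels returns, but this filter treats the channels symmetrically,
       so the visual result is unaffected */
    filterPixels(info.width, info.height, (int *) pixels);
    AndroidBitmap_unlockPixels(env, bitmap);
}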
