Android: Adaptive Thresholding

I'm trying to implement Derek Bradley's adaptive thresholding algorithm on Android, but it returns black pixels all the time. Here is my code snippet. Please suggest what I should do. Thanks in advance.
public static Bitmap GrayscaleToBin(Bitmap bm2)
{
    Bitmap bm;
    bm = bm2.copy(Config.ARGB_8888, true);
    final int width = bm.getWidth();
    final int height = bm.getHeight();
    int[] pixels;
    pixels = new int[width * height];
    bm.getPixels(pixels, 0, width, 0, 0, width, height);
    // Bradley adaptive thresholding
    int[] intImg = new int[width * height];
    int sum = 0;
    for (int i = 0; i < width; ++i) {
        sum = 0;
        for (int j = 0; j < height; ++j)
        {
            sum = sum + pixels[i + j * width];
            if (i == 0) {
                intImg[i + j * width] = sum;
            }
            else
            {
                intImg[i + j * width] = intImg[i - 1 + j * width] + sum;
            }
        }
    }
    int x1, x2, y1, y2 = 0, count = 0;
    int s = width >> 3;
    int t = 15;
    for (int i = 0; i < width; ++i)
    {
        for (int j = 0; j < height; ++j)
        {
            x1 = i - s / 2;
            x2 = i + s / 2;
            y1 = j - s / 2;
            y2 = j + s / 2;
            if (x1 < 0) x1 = 0;
            if (x2 >= width) x2 = width - 1;
            if (y1 < 0) y1 = 0;
            if (y2 >= height) y2 = height - 1;
            count = (x2 - x1) * (y2 - y1);
            sum = intImg[y2 * width + x2] -
                  intImg[y1 * width + x2] -
                  intImg[y2 * width + x1] +
                  intImg[y1 * width + x1];
            if ((pixels[i + j * width] * count) <= (sum * (100 - t) / 100))
            {
                pixels[i + j * width] = 0;
            }
            else
            {
                pixels[i + j * width] = 255;
            }
        }
    }
    /*---------------------------------------------------------------------------*/
    bm.setPixels(pixels, 0, width, 0, 0, width, height);
    // Log.d("cdsfss","afterloop");
    return bm;
}
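One likely reason for the all-black result (an assumption, inferred from the working version below, which masks every pixel with & 0xFF): getPixels() returns full ARGB ints such as 0xFF808080, which are negative values in Java, so both the integral image and the Bradley comparison are computed on negative numbers. A minimal sketch of the idea, as a hypothetical helper rather than the poster's code:

// Hypothetical helper: read one 8-bit channel instead of the raw ARGB int,
// which is negative in Java (e.g. 0xFF808080). Valid when R == G == B in a grayscale bitmap.
static int grayOf(int argbPixel) {
    return argbPixel & 0xFF;
}

Using grayOf(pixels[i + j * width]) both when building intImg and in the final comparison keeps the sums and the Bradley test in the 0-255 range.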

After a long struggle I have solved the issue with the following code.
public static Bitmap GrayscaleToBin(Bitmap bm2)
{
    Bitmap bm;
    bm = bm2.copy(Config.RGB_565, true);
    final int width = bm.getWidth();
    final int height = bm.getHeight();
    int pixel1, pixel2, pixel3, pixel4, A, R;
    int[] pixels;
    pixels = new int[width * height];
    bm.getPixels(pixels, 0, width, 0, 0, width, height);
    int size = width * height;
    int s = width / 8;
    int s2 = s >> 1;
    double t = 0.15;
    double it = 1.0 - t;
    int[] integral = new int[size];
    int[] threshold = new int[size];
    int i, j, diff, x1, y1, x2, y2, ind1, ind2, ind3;
    int sum = 0;
    int ind = 0;
    while (ind < size)
    {
        sum += pixels[ind] & 0xFF;
        integral[ind] = sum;
        ind += width;
    }
    x1 = 0;
    for (i = 1; i < width; ++i)
    {
        sum = 0;
        ind = i;
        ind3 = ind - s2;
        if (i > s)
        {
            x1 = i - s;
        }
        diff = i - x1;
        for (j = 0; j < height; ++j)
        {
            sum += pixels[ind] & 0xFF;
            integral[ind] = integral[(int)(ind - 1)] + sum;
            ind += width;
            if (i < s2) continue;
            if (j < s2) continue;
            y1 = (j < s ? 0 : j - s);
            ind1 = y1 * width;
            ind2 = j * width;
            if (((pixels[ind3] & 0xFF) * (diff * (j - y1))) < ((integral[(int)(ind2 + i)] - integral[(int)(ind1 + i)] - integral[(int)(ind2 + x1)] + integral[(int)(ind1 + x1)]) * it)) {
                threshold[ind3] = 0x00;
            } else {
                threshold[ind3] = 0xFFFFFF;
            }
            ind3 += width;
        }
    }
    y1 = 0;
    for (j = 0; j < height; ++j)
    {
        i = 0;
        y2 = height - 1;
        if (j < height - s2)
        {
            i = width - s2;
            y2 = j + s2;
        }
        ind = j * width + i;
        if (j > s2) y1 = j - s2;
        ind1 = y1 * width;
        ind2 = y2 * width;
        diff = y2 - y1;
        for (; i < width; ++i, ++ind)
        {
            x1 = (i < s2 ? 0 : i - s2);
            x2 = i + s2;
            // check the border
            if (x2 >= width) x2 = width - 1;
            if (((pixels[ind] & 0xFF) * ((x2 - x1) * diff)) < ((integral[(int)(ind2 + x2)] - integral[(int)(ind1 + x2)] - integral[(int)(ind2 + x1)] + integral[(int)(ind1 + x1)]) * it)) {
                threshold[ind] = 0x00;
            } else {
                threshold[ind] = 0xFFFFFF;
            }
        }
    }
    /*---------------------------------------------------------------------------*/
    bm.setPixels(threshold, 0, width, 0, 0, width, height);
    return bm;
}
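For completeness, a hypothetical call site (an assumption: the input bitmap is already grayscale, since only the low byte of each pixel is read):

Bitmap binary = GrayscaleToBin(grayscaleBitmap); // grayscaleBitmap: an assumed, already-grayscale source bitmap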

You can use the Catalano Framework. There's an example using Bradley for Android in the samples folder.
FastBitmap fb = new FastBitmap(bitmap);
fb.toGrayscale();
BradleyLocalThreshold bradley = new BradleyLocalThreshold();
bradley.applyInPlace(fb);
bitmap = fb.toBitmap();

Related

interpolate a given array to a new length

In order to interpolate 2 values, I can use
int lerp(int a, int b) {
return (a + b) / 2;
}
Now imagine I have an array (1, 30, 100, 300) and I want to interpolate it to an array of size N (N = 10, for example).
If N == 7, then:
1,15,30,65,100,200,300
I have no idea how to interpolate 4 values into 10. I need a method that looks like:
int[] interpolate(final int[] input, final int newSize) {
int[] res = new int[newSize];
...
return res;
}
that works even on my example above with a newSize of 7, 10 or whatever.
Any idea how to implement it?
SOLVED.
public static double[] interpolate(double[] x, int newLength) {
double[] y = null;
if (newLength > 0) {
int N = x.length;
if (N == 1) {
y = new double[1];
y[0] = x[0];
return y;
} else if (newLength == 1) {
y = new double[1];
int ind = (int) Math.floor(N * 0.5 + 0.5);
ind = Math.max(1, ind);
ind = Math.min(ind, N);
y[0] = x[ind - 1];
return y;
} else {
y = new double[newLength];
double Beta = ((double) newLength) / N;
double newBeta = 1.0;
if (newLength > 2)
newBeta = (N - 2.0) / (newLength - 2.0);
y[0] = x[0];
y[1] = x[1];
y[newLength - 1] = x[N - 1];
double tmp, alpha;
int i, j;
for (i = 2; i <= newLength - 2; i++) {
tmp = 1.0 + (i - 1) * newBeta;
j = (int) Math.floor(tmp);
alpha = tmp - j;
y[i] = (1.0 - alpha) * x[Math.max(0, j)] + alpha * x[Math.min(N - 1, j + 1)];
}
}
}
return y;
}
/**
 * Find the maximum of all elements in the array, ignoring elements that are NaN.
 * @param data
 * @return
 */
public static double max(double[] data) {
double max = Double.NaN;
for (int i = 0; i < data.length; i++) {
if (Double.isNaN(data[i]))
continue;
if (Double.isNaN(max) || data[i] > max)
max = data[i];
}
return max;
}
public static int max(int[] data) {
int max = data[0];
for (int i = 1; i < data.length; i++) {
if (data[i] > max)
max = data[i];
}
return max;
}
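For illustration, a minimal call using the array from the question (note that the exact output values depend on this routine's resampling scheme and will not necessarily match the hand-written sequence 1,15,30,65,100,200,300 above):

double[] src = {1, 30, 100, 300};
double[] seven = interpolate(src, 7);  // length 7, endpoints preserved, interior linearly resampled
double[] ten = interpolate(src, 10);   // length 10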

rotate image frame while video recording in android

I want to rotate the image frame while video recording. I found one solution for rotating the image byte array, shown in the code below.
private byte[] rotateYUV420Degree90(byte[] data, int imageWidth, int imageHeight) {
byte[] yuv = new byte[imageWidth * imageHeight * 3 / 2];
// Rotate the Y luma
int i = 0;
for (int x = 0; x < imageWidth; x++) {
for (int y = imageHeight - 1; y >= 0; y--) {
yuv[i] = data[y * imageWidth + x];
i++;
}
}
// Rotate the U and V color components
i = imageWidth * imageHeight * 3 / 2 - 1;
for (int x = imageWidth - 1; x > 0; x = x - 2) {
for (int y = 0; y < imageHeight / 2; y++) {
yuv[i] = data[(imageWidth * imageHeight) + (y * imageWidth) + x];
i--;
yuv[i] = data[(imageWidth * imageHeight) + (y * imageWidth) + (x - 1)];
i--;
}
}
return yuv;
}
This code returns a distorted image.
Please help me rotate the byte array image if anyone has an idea of how to achieve this.
Thank you.
I have found a solution with the NV21 method; through it I achieved proper output. If anyone is facing the same issue, try the code below.
public byte[] rotateNV21(byte[] input, int width, int height, int rotation) {
byte[] output = new byte[input.length];
boolean swap = (rotation == 90 || rotation == 270);
// **EDIT:** in portrait mode & front cam this needs to be set to true:
boolean yflip = true;// (rotation == 90 || rotation == 180);
boolean xflip = (rotation == 270 || rotation == 180);
for (int x = 0; x < width; x++) {
for (int y = 0; y < height; y++) {
int xo = x, yo = y;
int w = width, h = height;
int xi = xo, yi = yo;
if (swap) {
xi = w * yo / h;
yi = h * xo / w;
}
if (yflip) {
yi = h - yi - 1;
}
if (xflip) {
xi = w - xi - 1;
}
output[w * yo + xo] = input[w * yi + xi];
int fs = w * h;
int qs = (fs >> 2);
xi = (xi >> 1);
yi = (yi >> 1);
xo = (xo >> 1);
yo = (yo >> 1);
w = (w >> 1);
h = (h >> 1);
// adjust for interleave here
int ui = fs + (w * yi + xi) * 2;
int uo = fs + (w * yo + xo) * 2;
// and here
int vi = ui + 1;
int vo = uo + 1;
output[uo] = input[ui];
output[vo] = input[vi];
}
}
return output;
}
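A hypothetical call site, assuming the frame comes from Camera.PreviewCallback#onPreviewFrame with the default NV21 preview format:

byte[] rotated = rotateNV21(data, previewWidth, previewHeight, 90); // previewWidth/previewHeight are the camera preview dimensions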

Otsu histogram self implementation

I tried to make my own implementation of Otsu. I have already read some Java source code and some sites that explain the formula, and tried to implement it. I want to share this to ask if anyone can help me, or at least tell me what I can do or improve.
I have already coded getting the width and height, and the background and foreground weight, mean, variance, and within-class variance.
Note that I have not implemented how to set or find the exact threshold, or how to change the picture to black-and-white (binarize) using the within-class variance. If you can help me, feel welcome to. I have also seen some Java code that has threshold = i or threshold = t, but I can't see how it turns the image black-and-white.
Here is my code:
Otsu.java
Bitmap tempImg = (Bitmap) original;
Bitmap OImg = Bitmap.createBitmap(tempImg.getWidth(), tempImg.getHeight(), tempImg.getConfig());
int width = tempImg.getWidth();
int height = tempImg.getHeight();
int A, R, G, B,colorPixel;
for (int x = 0; x < width; x++) { //original image to grayscale
for (int y = 0; y < height; y++) {
colorPixel = tempImg.getPixel(x, y);
A = Color.alpha(colorPixel);
R = Color.red(colorPixel);
G = Color.green(colorPixel);
B = Color.blue(colorPixel);
R = (R + G + B) / 3;
G = R;
B = R;
OImg.setPixel(x, y, Color.argb(A, R, G,B ));
}
}
return OImg;
}
public static Bitmap Botsu(Bitmap gImg){
Bitmap tempImg = (Bitmap) gImg;
Bitmap BWimg = Bitmap.createBitmap(tempImg.getWidth(), tempImg.getHeight(), tempImg.getConfig());
int width = tempImg.getWidth();
int height = tempImg.getHeight();
int A, R, G, B, colorPixel;
// histo-thresh
double Wcv = 0;
int[] Bx = new int[256];
int[] By = new int[256];
int[] Fx = new int[256];
int[] Fy = new int[256];
double Bw =0, Bm =0, Bv =0, Bp = 0;
double Fw =0, Fm =0, Fv =0, Fp = 0;
int c = 0, ImgPix = 0, ImgPixB = 0, ImgPixF = 0, newPixel = 0;
// pixel check for histogram
for (int x = 0; x < width; x++) {
for (int y = 0; y < height; y++) {
colorPixel = tempImg.getPixel(x, y);
A = Color.alpha(colorPixel);
R = Color.red(colorPixel);
G = Color.green(colorPixel);
B = Color.blue(colorPixel);
int gray = (int) (0.2989 * R + 0.5870 * G + 0.1140 * B);
if (gray > 128) { // white - foreground
for (int z=0; z<Fx.length; z++){
if (Fx[z] == gray){
c++;
}
}
if (c==1){
Fy[gray] = Fy[gray]+1; //y axis - counter for pixels for each x
}
else{
Fx[x] = gray; //x axis - 0-255
Fy[gray] = Fy[gray]+1;
}
}//By[Bx[x]]
else{ // black - background
for (int z=0; z<Bx.length; z++){
if (Bx[z] == gray){
c++;
}
}
if (c==1){
By[gray] = By[gray]+1; //y axis - counter for pixels for each x
}
else{
Bx[x] = gray; //x axis - 0-255
By[gray] = By[gray]+1;
}
}
}
}
for (int b=0; b<By.length; b++){
ImgPixB = ImgPixB + By[b];
}
for (int f=0; f<Fy.length; f++){
ImgPixF = ImgPixF + Fy[f];
}
ImgPix = ImgPixB + ImgPixF;
//bg part hist
for (int i=0; i<By.length; i++){ //weight
Bw = Bw + By[i];
}
Bw = Bw/ImgPix;
for (int i=0; i<By.length; i++){ //pixel sum
Bp = Bp + By[i];
}
for (int i = 0; i<Bx.length; i++){ //mean
Bm = Bm + (Bx[i]*By[Bx[i]]);
}
Bm = Bm/Bp;
for (int i=0; i<Bx.length; i++){ //variance
Bv = Bv + (Math.pow((Bx[i]-Bm),2)*By[Bx[i]]); // (Bx[i]-Bm) * (Bx[i]-Bm)
}
Bv = Bv/Bp;
//fg part hist
for (int i=0; i<Fy.length; i++){ //weight
Fw = Fw + Fy[i];
}
Fw = Fw/ImgPix;
for (int i=0; i<Fy.length; i++){ //pixel sum
Fp = Fp + Fy[i];
}
for (int i = 0; i<Fx.length; i++){ //mean
Fm = Fm + (Fx[i]*Fy[Fx[i]]);
}
Fm = Fm/Fp;
for (int i=0; i<Fx.length; i++){ //variance
Fv = Fv + (Math.pow((Fx[i]-Fm),2)*Fy[Fx[i]]); // (Bx[i]-Bm) * (Bx[i]-Bm)
}
Fv = Fv/Fp;
// within class variance
Wcv = (Bw * Bv) + (Fw * Fv);
for (int x = 0; x < width; x++) {
for (int y = 0; y < height; y++) {
colorPixel = tempImg.getPixel(x, y);
A = Color.alpha(colorPixel);
R = Color.red(colorPixel);
G = Color.green(colorPixel);
B = Color.blue(colorPixel);
//int gray = (int) (0.2989 * R + 0.5870 * G + 0.1140 * B);
int gray2 = (int) (Wcv * R + Wcv * G + Wcv * B);
if (gray2 > 128) {
gray2 = 255;
}
else if (gray2 <129){
gray2 = 0;
}
BWimg.setPixel(x, y, Color.argb(A, gray2, gray2, gray2));
}
}
return BWimg;
x[z] is for the x-axis and y[gray] is for the y-axis. I based this on the graph in the lab book:
x = 0-255
y = how many pixels are of a certain color shade
Feel free to send more samples that can help me.
OUTPUT: (I tried the two threshold checks below, which do produce an output. Other values will only return a few black dots or a plain white image.)
if (gray2 > 128) {
gray2 = 255;
}
else if (gray2 < 129){
gray2 = 0;
}
if (gray2 > 64 && gray2 < 129) {
gray2 = 255;
}
else if (gray2 < 65){
gray2 = 0;
}
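For reference (not the poster's code): a minimal sketch of how the Otsu threshold is usually selected once a 256-bin grayscale histogram has been built. Every candidate threshold t splits the histogram into background and foreground; the t that maximizes the between-class variance (equivalently, minimizes the within-class variance) is kept, and each pixel is then binarized against that t. The names histogram (the 256-entry pixel-count array) and total (the number of pixels) are assumptions.

static int otsuThreshold(int[] histogram, int total) {
    float sumAll = 0;
    for (int i = 0; i < 256; i++) sumAll += i * histogram[i];
    float sumB = 0;   // running weighted sum of the background bins
    int wB = 0;       // background pixel count
    float maxBetween = 0;
    int threshold = 0;
    for (int t = 0; t < 256; t++) {
        wB += histogram[t];               // background weight grows as t rises
        if (wB == 0) continue;
        int wF = total - wB;              // foreground weight
        if (wF == 0) break;
        sumB += t * histogram[t];
        float mB = sumB / wB;             // background mean
        float mF = (sumAll - sumB) / wF;  // foreground mean
        float between = (float) wB * wF * (mB - mF) * (mB - mF); // between-class variance
        if (between > maxBetween) {       // keep the best separating threshold
            maxBetween = between;
            threshold = t;
        }
    }
    return threshold;
}
// Each pixel is then binarized with: gray > threshold ? 255 : 0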

Android Bitmap remove white margin

I've got a question regarding Bitmaps in Android: I've got a Bitmap with white margins of unknown size around it. Is it possible to create a new Bitmap with all the white margins removed (rectangular shape)?
Bitmap bmp = Bitmap.createBitmap(width, bmpheigth, Config.ARGB_8888);
Canvas canvas = new Canvas(bmp);
canvas.setBitmap(bmp);
canvas.drawColor(Color.WHITE);
// draw here things!
It is assumed to be unknown where things are painted.
What is a good way to do that?
Thanks!
Thanks @Maxim Efimov & @StackOverflowException.
Just in case someone needs a snippet for this kind of problem:
This method returns a smaller, cut-out Bitmap with the margins removed. Passing the pixels to an int array first and then working with the array is a bit faster than the Bitmap.getPixel method.
Just call the method, passing the source Bitmap and the background color.
Bitmap bmp2 = removeMargins(bmp, Color.WHITE);
private static Bitmap removeMargins(Bitmap bmp, int color) {
long dtMili = System.currentTimeMillis();
int MTop = 0, MBot = 0, MLeft = 0, MRight = 0;
boolean found1 = false, found2 = false;
int[] bmpIn = new int[bmp.getWidth() * bmp.getHeight()];
int[][] bmpInt = new int[bmp.getWidth()][bmp.getHeight()];
bmp.getPixels(bmpIn, 0, bmp.getWidth(), 0, 0, bmp.getWidth(),
bmp.getHeight());
for (int ii = 0, contX = 0, contY = 0; ii < bmpIn.length; ii++) {
bmpInt[contX][contY] = bmpIn[ii];
contX++;
if (contX >= bmp.getWidth()) {
contX = 0;
contY++;
if (contY >= bmp.getHeight()) {
break;
}
}
}
for (int hP = 0; hP < bmpInt[0].length && !found2; hP++) {
// looking for MTop
for (int wP = 0; wP < bmpInt.length && !found2; wP++) {
if (bmpInt[wP][hP] != color) {
Log.e("MTop 2", "Pixel found #" + hP);
MTop = hP;
found2 = true;
break;
}
}
}
found2 = false;
for (int hP = bmpInt[0].length - 1; hP >= 0 && !found2; hP--) {
// looking for MBot
for (int wP = 0; wP < bmpInt.length && !found2; wP++) {
if (bmpInt[wP][hP] != color) {
Log.e("MBot 2", "Pixel found #" + hP);
MBot = bmp.getHeight() - hP;
found2 = true;
break;
}
}
}
found2 = false;
for (int wP = 0; wP < bmpInt.length && !found2; wP++) {
// looking for MLeft
for (int hP = 0; hP < bmpInt[0].length && !found2; hP++) {
if (bmpInt[wP][hP] != color) {
Log.e("MLeft 2", "Pixel found #" + wP);
MLeft = wP;
found2 = true;
break;
}
}
}
found2 = false;
for (int wP = bmpInt.length - 1; wP >= 0 && !found2; wP--) {
// looking for MRight
for (int hP = 0; hP < bmpInt[0].length && !found2; hP++) {
if (bmpInt[wP][hP] != color) {
Log.e("MRight 2", "Pixel found #" + wP);
MRight = bmp.getWidth() - wP;
found2 = true;
break;
}
}
}
found2 = false;
int sizeY = bmp.getHeight() - MBot - MTop, sizeX = bmp.getWidth()
- MRight - MLeft;
Bitmap bmp2 = Bitmap.createBitmap(bmp, MLeft, MTop, sizeX, sizeY);
dtMili = (System.currentTimeMillis() - dtMili);
Log.e("Margin 2",
"Time needed " + dtMili + "mSec\nh:" + bmp.getWidth() + "w:"
+ bmp.getHeight() + "\narray x:" + bmpInt.length + "y:"
+ bmpInt[0].length);
return bmp2;
}
Use Bitmap.createBitmap(source, x, y, width, height); knowing the white margin size, you can crop out exactly what you want.
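For example, assuming source is the original bitmap and the margin is known to be 10 px on each side:

Bitmap cropped = Bitmap.createBitmap(source, 10, 10, source.getWidth() - 20, source.getHeight() - 20);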
My solution:
private Bitmap trim(Bitmap bitmap, int trimColor){
int minX = Integer.MAX_VALUE;
int maxX = 0;
int minY = Integer.MAX_VALUE;
int maxY = 0;
for(int x = 0; x < bitmap.getWidth(); x++){
for(int y = 0; y < bitmap.getHeight(); y++){
if(bitmap.getPixel(x, y) != trimColor){
if(x < minX){
minX = x;
}
if(x > maxX){
maxX = x;
}
if(y < minY){
minY = y;
}
if(y > maxY){
maxY = y;
}
}
}
}
return Bitmap.createBitmap(bitmap, minX, minY, maxX - minX + 1, maxY - minY + 1);
}
It isn't very fast; for a 1280 x 576 px bitmap, execution took 2965 ms on a Xiaomi Redmi 3S.
If possible, scale the image down before trimming:
private Bitmap scaleDown(Bitmap bitmap, float maxImageSize, boolean filter) {
float ratio = Math.min(maxImageSize / bitmap.getWidth(), maxImageSize / bitmap.getHeight());
int width = Math.round(ratio * bitmap.getWidth());
int height = Math.round(ratio * bitmap.getHeight());
return Bitmap.createScaledBitmap(bitmap, width, height, filter);
}
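For example, a hypothetical combination of the two helpers (the 256 px target size is arbitrary):

Bitmap small = scaleDown(bitmap, 256f, true);
Bitmap trimmed = trim(small, Color.WHITE);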
Late to the party, but this variation is a bit faster and perhaps easier to read:
public static Bitmap imageWithMargin(Bitmap bitmap, int color, int maxMargin) {
int maxTop = 0, maxBottom = 0, maxLeft = 0, maxRight = 0;
int width = bitmap.getWidth();
int height = bitmap.getHeight();
int[] bitmapArray = new int[width * height];
bitmap.getPixels(bitmapArray, 0, width, 0, 0, width, height);
// Find first non-color pixel from top of bitmap
searchTopMargin:
for (int y = 0; y < height; y++) {
for (int x = 0; x < width; x++) {
if (bitmapArray[width * y + x] != color) {
maxTop = y > maxMargin ? y - maxMargin : 0;
break searchTopMargin;
}
}
}
// Find first non-color pixel from bottom of bitmap
searchBottomMargin:
for (int y = height - 1; y >= 0; y--) {
for (int x = width - 1; x >= 0; x--) {
if (bitmapArray[width * y + x] != color) {
maxBottom = y < height - maxMargin ? y + maxMargin : height;
break searchBottomMargin;
}
}
}
// Find first non-color pixel from left of bitmap
searchLeftMargin:
for (int x = 0; x < width; x++) {
for (int y = 0; y < height; y++) {
if (bitmapArray[width * y + x] != color) {
maxLeft = x > maxMargin ? x - maxMargin : 0;
break searchLeftMargin;
}
}
}
// Find first non-color pixel from right of bitmap
searchRightMargin:
for (int x = width - 1; x >= 0; x--) {
for (int y = height - 1; y >= 0; y--) {
if (bitmapArray[width * y + x] != color) {
maxRight = x < width - maxMargin ? x + maxMargin : width;
break searchRightMargin;
}
}
}
return Bitmap.createBitmap(bitmap, maxLeft, maxTop, maxRight - maxLeft, maxBottom - maxTop);
}

pixelate image in code

I've searched for how to pixelate an image in Android via code, and the results are varied.
I've found libraries and tutorials on how to apply other effects here: http://xjaphx.wordpress.com/learning/tutorials/
Can someone clear things up for me: what is the simplest way of pixelating an image on the fly in Android?
Also, it would be handy if it was a function where I could control how many rounds or how much I wanted the image pixelated.
Thanks in advance.
The simplest way to pixelate the image would be to scale the image down using a "nearest neighbour" algorithm, and then scale it back up using the same algorithm.
Filtering over the image trying to find an average takes much more time, but does not actually give any improvement in result quality; after all, you intentionally want your image distorted.
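A minimal sketch of that approach (blockSize is an assumed parameter for the pixel-block size; filter = false requests nearest-neighbour scaling):

Bitmap pixelate(Bitmap src, int blockSize) {
    int w = Math.max(1, src.getWidth() / blockSize);
    int h = Math.max(1, src.getHeight() / blockSize);
    Bitmap small = Bitmap.createScaledBitmap(src, w, h, false);   // shrink with nearest neighbour
    return Bitmap.createScaledBitmap(small, src.getWidth(), src.getHeight(), false); // scale back up, keeping the blocks
}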
I have done this before in VB.NET and it's easily made into a function whose parameter controls how pixelated you want it.
The basic idea is to scan the image in blocks of X width and Y height. For each block you find the average RGB value and set all those pixels to that color. The smaller the block size, the less pixelated the result.
int avR, avB, avG; // store average of rgb
int pixel;
Bitmap bmOut = Bitmap.createBitmap(width, height, src.getConfig());
for(int x = 0; x < width; x += pixelationAmount) { // do the whole image
for(int y = 0; y < height; y += pixelationAmount) {
avR = 0; avG = 0; avB = 0;
for(int xx = x; xx < x + pixelationAmount; xx++){ // YOU WILL WANT TO PUT SOME OUT OF BOUNDS CHECKING HERE
for(int yy = y; yy < y + pixelationAmount; yy++){ // this is scanning the colors
pixel = src.getPixel(xx, yy);
avR += Color.red(pixel);
avG += Color.green(pixel);
avB += Color.blue(pixel);
}
}
avR /= pixelationAmount * pixelationAmount; // divide by the number of samples taken to get an average
avG /= pixelationAmount * pixelationAmount;
avB /= pixelationAmount * pixelationAmount;
for(int xx = x; xx < x + pixelationAmount; xx++){ // YOU WILL WANT TO PUT SOME OUT OF BOUNDS CHECKING HERE
for(int yy = y; yy < y + pixelationAmount; yy++){ // this is going back over the block
bmOut.setPixel(xx, yy, Color.argb(255, avR, avG, avB)); // sets the block to the average color
}
}
}
}
Sorry about the bad formatting (I wrote it in Notepad quickly), but I thought it might give you a framework to make your own pixelate function.
This is a corrected version of the above algorithm that works:
Bitmap bmOut = Bitmap.createBitmap(OriginalBitmap.getWidth(),OriginalBitmap.getHeight(),OriginalBitmap.getConfig());
int pixelationAmount = 50; //you can change it!!
int width = OriginalBitmap.getWidth();
int height = OriginalBitmap.getHeight();
int avR,avB,avG; // store average of rgb
int pixel;
for(int x = 0; x < width; x+= pixelationAmount) { // do the whole image
for(int y = 0; y < height; y+= pixelationAmount) {
avR = 0; avG = 0; avB =0;
int bx = x + pixelationAmount;
int by = y + pixelationAmount;
if(by >= height) by = height;
if(bx >= width)bx = width;
for(int xx =x; xx < bx;xx++){ // bx/by above already clamp to the image bounds
for(int yy= y; yy < by;yy++){ // this is scanning the colors
pixel = OriginalBitmap.getPixel(xx, yy);
avR += (int) (Color.red(pixel));
avG+= (int) (Color.green(pixel));
avB += (int) (Color.blue(pixel));
}
}
avR /= (bx - x) * (by - y); // divide by the actual number of sampled pixels (note: '^' is XOR in Java, not power)
avG /= (bx - x) * (by - y);
avB /= (bx - x) * (by - y);
for(int xx =x; xx < bx;xx++) // bx/by above already clamp to the image bounds
for(int yy= y; yy <by;yy++){ // this is going back over the block
bmOut.setPixel(xx, yy, Color.argb(255, avR, avG,avB)); //sets the block to the average color
}
}
}
iv.setImageBitmap(bmOut);
Anyway, it was not what I was looking for.
I have changed the previous algorithm completely and it really does something like a mosaic filter!
The idea is to replace each block's pixels with the pixels of the block below it.
Simply use this function:
public void filter(){
Bitmap bmOut = Bitmap.createBitmap(OriginalBitmap.getWidth(),OriginalBitmap.getHeight(),OriginalBitmap.getConfig());
int pixelationAmount = 10;
Bitmap a = Bitmap.createBitmap(pixelationAmount,pixelationAmount,OriginalBitmap.getConfig());
Bitmap b = Bitmap.createBitmap(pixelationAmount,pixelationAmount,OriginalBitmap.getConfig());
int width = OriginalBitmap.getWidth();
int height = OriginalBitmap.getHeight();
int pixel;
int counter = 1;
int px = 0;int py = 0;int pbx=0;int pby=0;
for(int x = 0; x < width; x+= pixelationAmount) { // do the whole image
for(int y = 0; y < height; y+= pixelationAmount) {
int bx = x + pixelationAmount;
int by = y + pixelationAmount;
if(by >= height) by = height;
if(bx >= width)bx = width;
int xxx = -1;
int yyy = -1;
for(int xx =x; xx < bx;xx++){ // bx/by above already clamp to the image bounds
xxx++;
yyy = -1;
for(int yy= y; yy < by;yy++){ // this is scanning the colors
yyy++;
pixel = OriginalBitmap.getPixel(xx, yy);
if(counter == 1)
{
a.setPixel(xxx, yyy, pixel);
px = x;//previous x
py = y;//previous y
pbx = bx;
pby = by;
}
else
b.setPixel(xxx, yyy, pixel);
}
}
counter++;
if(counter == 3)
{
int xxxx = -1;
int yyyy = -1;
for(int xx =x; xx < bx;xx++)
{
xxxx++;
yyyy = -1;
for(int yy= y; yy <by;yy++){
yyyy++;
bmOut.setPixel(xx, yy, b.getPixel(xxxx, yyyy));
}
}
xxxx = -1;
for(int xx = px; xx < pbx; xx++)
{
xxxx++;
yyyy = -1;
for(int yy = py; yy < pby; yy++){
yyyy++;
bmOut.setPixel(xx, yy, a.getPixel(xxxx, yyyy)); // copy block a into the previous block's position
}
}
counter = 1;
}
}
}
image_view.setImageBitmap(bmOut);
}
This is the code I used:
ImageFilter is the parent class:
public abstract class ImageFilter {
protected int [] pixels;
protected int width;
protected int height;
public ImageFilter (int [] _pixels, int _width,int _height){
setPixels(_pixels,_width,_height);
}
public void setPixels(int [] _pixels, int _width,int _height){
pixels = _pixels;
width = _width;
height = _height;
}
/**
 * a weighted Euclidean distance in RGB space
 * @param c1
 * @param c2
 * @return
 */
public double colorDistance(int c1, int c2)
{
int red1 = Color.red(c1);
int red2 = Color.red(c2);
int rmean = (red1 + red2) >> 1;
int r = red1 - red2;
int g = Color.green(c1) - Color.green(c2);
int b = Color.blue(c1) - Color.blue(c2);
return Math.sqrt((((512+rmean)*r*r)>>8) + 4*g*g + (((767-rmean)*b*b)>>8));
}
public abstract int[] procImage();
}
public class PixelateFilter extends ImageFilter {
int pixelSize;
int[] colors;
/**
 * @param _pixels
 * @param _width
 * @param _height
 */
public PixelateFilter(int[] _pixels, int _width, int _height) {
this(_pixels, _width, _height, 10);
}
public PixelateFilter(int[] _pixels, int _width, int _height, int _pixelSize) {
this(_pixels, _width, _height, _pixelSize, null);
}
public PixelateFilter(int[] _pixels, int _width, int _height, int _pixelSize, int[] _colors) {
super(_pixels, _width, _height);
pixelSize = _pixelSize;
colors = _colors;
}
/* (non-Javadoc)
 * @see imageProcessing.ImageFilter#procImage()
 */
@Override
public int[] procImage() {
for (int i = 0; i < width; i += pixelSize) {
for (int j = 0; j < height; j += pixelSize) {
int rectColor = getRectColor(i, j);
fillRectColor(rectColor, i, j);
}
}
return pixels;
}
private int getRectColor(int col, int row) {
int r = 0, g = 0, b = 0;
int sum = 0;
for (int x = col; x < col + pixelSize; x++) {
for (int y = row; y < row + pixelSize; y++) {
int index = x + y * width;
if (index < width * height) {
int color = pixels[x + y * width];
r += Color.red(color);
g += Color.green(color);
b += Color.blue(color);
}
}
}
sum = pixelSize * pixelSize;
int newColor = Color.rgb(r / sum, g / sum, b / sum);
if (colors != null)
newColor = getBestMatch(newColor);
return newColor;
}
private int getBestMatch(int color) {
double diff = Double.MAX_VALUE;
int res = color;
for (int c : colors) {
double currDiff = colorDistance(color, c);
if (currDiff < diff) {
diff = currDiff;
res = c;
}
}
return res;
}
private void fillRectColor(int color, int col, int row) {
for (int x = col; x < col + pixelSize; x++) {
for (int y = row; y < row + pixelSize; y++) {
int index = x + y * width;
if (x < width && y < height && index < width * height) {
pixels[x + y * width] = color;
}
}
}
}
public static final Bitmap changeToPixelate(Bitmap bitmap, int pixelSize, int [] colors) {
int width = bitmap.getWidth();
int height = bitmap.getHeight();
int[] pixels = new int[width * height];
bitmap.getPixels(pixels, 0, width, 0, 0, width, height);
PixelateFilter pixelateFilter = new PixelateFilter(pixels, width, height, pixelSize, colors);
int[] returnPixels = pixelateFilter.procImage();
Bitmap returnBitmap = Bitmap.createBitmap(returnPixels, width, height, Bitmap.Config.ARGB_8888);
return returnBitmap;
}
}
Here is how you use it:
int [] colors = new int [] { Color.BLACK,Color.WHITE,Color.BLUE,Color.CYAN,Color.RED};
final Bitmap bmOut = PixelateFilter.changeToPixelate(OriginalBitmap, pixelSize,colors);
