Facial recognition using OpenCV in Android

I am trying to develop facial recognition using OpenCV in Android. I have successfully integrated OpenCV with JavaCV and JavaCPP. I train on images from the SD card and then try to recognize one of the same images, but I get '0' confidence each time. Even after changing my data, it still shows '0' confidence when using similar images.
private void imagePrediction() {
    String trainingDir = Environment.getExternalStorageDirectory()
            + File.separator + "SpySignage_filter";
    File root = new File(trainingDir);
    FilenameFilter jpgFilter = new FilenameFilter() {
        public boolean accept(File dir, String name) {
            return name.toLowerCase().endsWith(".jpg");
        }
    };
    File[] imageFiles = root.listFiles(jpgFilter);
    opencv_core.MatVector images = new opencv_core.MatVector(imageFiles.length);
    int[] labels = new int[imageFiles.length];
    int counter = 0;
    int label;
    opencv_core.IplImage img;
    opencv_core.IplImage grayImg;
    for (int i = 0; i < imageFiles.length; i++) {
        // Get image and label:
        img = cvLoadImage(imageFiles[i].getAbsolutePath());
        label = Integer.parseInt(imageFiles[i].getName().split("\\-")[0]);
        // Convert image to grayscale:
        grayImg = opencv_core.IplImage.create(img.width(), img.height(), IPL_DEPTH_8U, 1);
        cvCvtColor(img, grayImg, CV_BGR2GRAY);
        // Append it in the image list:
        images.put(counter, grayImg);
        // And in the labels list:
        labels[counter] = label;
        // Increase counter for next image:
        counter++;
    }
    // opencv_contrib.FaceRecognizer faceRecognizer = createFisherFaceRecognizer();
    // opencv_contrib.FaceRecognizer faceRecognizer = createEigenFaceRecognizer();
    // FaceRecognizer faceRecognizer = createLBPHFaceRecognizer();
    opencv_contrib.FaceRecognizer faceRecognizer = com.googlecode.javacv.cpp.opencv_contrib.createLBPHFaceRecognizer();
    faceRecognizer.train(images, labels);
    String dataTorecognize = Environment.getExternalStorageDirectory()
            + File.separator + "SpySignage_filter";
    File rootDir = new File(dataTorecognize);
    File[] imagelist = rootDir.listFiles(jpgFilter);
    // for (int j = 0; j < imagelist.length; j++) {
    //     opencv_core.IplImage testImage = cvLoadImage(imagelist[j].getAbsolutePath());
    opencv_core.IplImage testImage = cvLoadImage(Environment.getExternalStorageDirectory().getAbsolutePath()
            + "/SpySignage_filter/2-hrithik_two.jpg");
    opencv_core.IplImage greyTestImage = opencv_core.IplImage.create(testImage.width(), testImage.height(), IPL_DEPTH_8U, 1);
    cvCvtColor(testImage, greyTestImage, CV_BGR2GRAY);
    // And get a prediction:
    // int predictedLabel = faceRecognizer.predict(greyTestImage);
    // System.out.println("Predicted label: " + predictedLabel);
    int n[] = new int[1];
    double p[] = new double[1];
    // opencv_core.IplImage ipl = MatToIplImage(m, WIDTH, HEIGHT);
    // IplImage ipl = MatToIplImage(m, -1, -1);
    faceRecognizer.predict(greyTestImage, n, p);
    if (n[0] != -1) {
        mProb = (int) p[0];
    } else {
        mProb = -1;
    }
    // if ((n[0] != -1) && (p[0] < 95))
    if (n[0] != -1) {
        Toast.makeText(getApplicationContext(), "Image Recognized Confidence =" + mProb, Toast.LENGTH_LONG).show();
        // return label.get(n[0]);
    } else {
        Toast.makeText(getApplicationContext(), "Image Not Recognized", Toast.LENGTH_LONG).show();
    }
}
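Note that with the JavaCV/OpenCV FaceRecognizer API, the value written into p[0] by predict() is a distance, not a probability: lower means a closer match, and 0 is exactly what you would expect when the test image is one of the training images. A minimal sketch of treating it that way, reusing n, p and mProb from the code above (the cutoff value is an assumption you would tune for your own data):

    // Sketch: interpret the LBPH "confidence" as a distance (0 = identical image, larger = worse match).
    double distance = p[0];
    double cutoff = 80.0; // assumed threshold; tune it for your images
    if (n[0] != -1 && distance < cutoff) {
        // accept prediction n[0]; report the distance rather than treating it as a probability
        mProb = (int) distance;
    } else {
        mProb = -1; // treat as unknown face
    }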

Related

tflite.run() returning same output for different input values

I am trying to make an Android app for monument recognition. The input changes on every run, but the output returned is always the same.
Below are the code snippets.
To load the tflite model stored in the assets directory:
private ByteBuffer loadModelFile(String filename) throws IOException {
    AssetFileDescriptor fileDescriptor = this.getAssets().openFd(filename);
    FileInputStream inputStream = new FileInputStream(fileDescriptor.getFileDescriptor());
    FileChannel fileChannel = inputStream.getChannel();
    long startOffset = fileDescriptor.getStartOffset();
    long declaredLength = fileDescriptor.getDeclaredLength();
    return fileChannel.map(FileChannel.MapMode.READ_ONLY, startOffset, declaredLength);
}
To initialize the tflite interpreter:
predict.setOnClickListener(new View.OnClickListener() {
    @RequiresApi(api = Build.VERSION_CODES.O)
    @Override
    public void onClick(View v) {
        try {
            tflite = new Interpreter(loadModelFile("converted_model.tflite"));
            Log.println(7, "tflite", "tflite init");
            doInference(picFile);
        } catch (Exception e) {
            System.out.println(e);
        }
    }
});
To run the model:
@RequiresApi(api = Build.VERSION_CODES.O)
public void doInference(File photo) throws IOException {
    img = findViewById(R.id.imgToDisp);
    Bitmap bitmapImg = BitmapFactory.decodeFile(pathToFile);
    img.setImageBitmap(bitmapImg);
    ByteArrayOutputStream stream = new ByteArrayOutputStream();
    bitmapImg.compress(Bitmap.CompressFormat.JPEG, 50, stream);
    byte[] arr = stream.toByteArray();
    changedim = new float[1][150][150][3];
    outputval = new float[1][28];
    int m = 0;
    for (int i = 0; i < 1; i++) {
        for (int j = 0; j < 150; j++) {
            for (int k = 0; k < 150; k++) {
                for (int l = 0; l < 3; l++) {
                    byte a = arr[m++];
                    changedim[i][j][k][l] = Byte.toUnsignedLong(a);
                }
            }
        }
    }
    tflite.run(changedim, outputval);
    for (int i = 0; i < 28; i++) {
        Log.println(7, "outputval", i + " " + outputval[0][i]);
    }
    path = findViewById(R.id.path);
    String out = "";
    float[] op = outputval[0];
    int ind = 0;
    float max = op[0];
    while (op[ind] != 1) {
        ind++;
        // Log.println(7, "op", " " + op[ind] + " " + ind);
    }
    for (float f : op) {
        out += Float.toString(f) + ",";
    }
    predict.setText("result: " + labels.get(ind));
    Log.println(7, "label", ind + " " + labels.get(ind));
    // path.setText("" + pathToFile);
}
The input to the model must be an image of size 150x150 converted to a 4D float32 array of shape 1x150x150x3.
The input to the model is the color values of the individual pixels, not the JPEG-compressed byte stream you are currently feeding it.
They can be extracted using:
    int p = bitmapImg.getPixel(j, k);
    int R = (p >> 16) & 0xff;
    int G = (p >> 8) & 0xff;
    int B = p & 0xff;
Change that and your model will work correctly!
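A minimal sketch of building the input tensor that way (a hypothetical helper following the answer's advice; whether to feed raw 0..255 values or divide by 255 depends on how your model was trained):

    // Sketch: convert a Bitmap into a [1][150][150][3] float input using per-pixel color values.
    private float[][][][] bitmapToInput(Bitmap bitmap) {
        Bitmap scaled = Bitmap.createScaledBitmap(bitmap, 150, 150, true);
        float[][][][] input = new float[1][150][150][3];
        for (int y = 0; y < 150; y++) {
            for (int x = 0; x < 150; x++) {
                int p = scaled.getPixel(x, y);
                input[0][y][x][0] = (p >> 16) & 0xff; // R
                input[0][y][x][1] = (p >> 8) & 0xff;  // G
                input[0][y][x][2] = p & 0xff;         // B
                // If the model was trained on normalized inputs, divide each channel by 255f.
            }
        }
        return input;
    }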

How to convert an image to ZPL code for printing with a Zebra printer?

I want to print an image along with some other text through a Zebra printer from an Android application. I am able to create ZPL code for the text data, but I am having a problem creating ZPL code for the image. ZPL does not support base64-encoded data. The image needs to be converted to hex ASCII and printed using the ^GF command.
The raw data with both text and image is available at this Pastebin link, and can be viewed in the Labelary viewer.
Is there any image conversion process?
I solved the problem and I am posting the answer so that other people can benefit from the solution. The bitmap image can be converted to ZPL code using the following converter class.
public class ZPLConverter {
    private int blackLimit = 380;
    private int total;
    private int widthBytes;
    private boolean compressHex = false;
    private static Map<Integer, String> mapCode = new HashMap<Integer, String>();

    static {
        mapCode.put(1, "G");
        mapCode.put(2, "H");
        mapCode.put(3, "I");
        mapCode.put(4, "J");
        mapCode.put(5, "K");
        mapCode.put(6, "L");
        mapCode.put(7, "M");
        mapCode.put(8, "N");
        mapCode.put(9, "O");
        mapCode.put(10, "P");
        mapCode.put(11, "Q");
        mapCode.put(12, "R");
        mapCode.put(13, "S");
        mapCode.put(14, "T");
        mapCode.put(15, "U");
        mapCode.put(16, "V");
        mapCode.put(17, "W");
        mapCode.put(18, "X");
        mapCode.put(19, "Y");
        mapCode.put(20, "g");
        mapCode.put(40, "h");
        mapCode.put(60, "i");
        mapCode.put(80, "j");
        mapCode.put(100, "k");
        mapCode.put(120, "l");
        mapCode.put(140, "m");
        mapCode.put(160, "n");
        mapCode.put(180, "o");
        mapCode.put(200, "p");
        mapCode.put(220, "q");
        mapCode.put(240, "r");
        mapCode.put(260, "s");
        mapCode.put(280, "t");
        mapCode.put(300, "u");
        mapCode.put(320, "v");
        mapCode.put(340, "w");
        mapCode.put(360, "x");
        mapCode.put(380, "y");
        mapCode.put(400, "z");
    }
    public String convertFromImage(Bitmap image, Boolean addHeaderFooter) {
        String hexAscii = createBody(image);
        if (compressHex) {
            hexAscii = encodeHexAscii(hexAscii);
        }
        String zplCode = "^GFA," + total + "," + total + "," + widthBytes + ", " + hexAscii;
        if (addHeaderFooter) {
            // Wrap the ^GFA field (already built above) in label start/end and a field origin.
            String header = "^XA " + "^FO0,0";
            String footer = "^FS" + "^XZ";
            zplCode = header + zplCode + footer;
        }
        return zplCode;
    }
    private String createBody(Bitmap bitmapImage) {
        StringBuilder sb = new StringBuilder();
        int height = bitmapImage.getHeight();
        int width = bitmapImage.getWidth();
        int rgb, red, green, blue, index = 0;
        char auxBinaryChar[] = {'0', '0', '0', '0', '0', '0', '0', '0'};
        widthBytes = width / 8;
        if (width % 8 > 0) {
            widthBytes = (((int) (width / 8)) + 1);
        } else {
            widthBytes = width / 8;
        }
        this.total = widthBytes * height;
        for (int h = 0; h < height; h++) {
            for (int w = 0; w < width; w++) {
                rgb = bitmapImage.getPixel(w, h);
                red = (rgb >> 16) & 0x000000FF;
                green = (rgb >> 8) & 0x000000FF;
                blue = (rgb) & 0x000000FF;
                char auxChar = '1';
                int totalColor = red + green + blue;
                if (totalColor > blackLimit) {
                    auxChar = '0';
                }
                auxBinaryChar[index] = auxChar;
                index++;
                if (index == 8 || w == (width - 1)) {
                    sb.append(fourByteBinary(new String(auxBinaryChar)));
                    auxBinaryChar = new char[]{'0', '0', '0', '0', '0', '0', '0', '0'};
                    index = 0;
                }
            }
            sb.append("\n");
        }
        return sb.toString();
    }

    private String fourByteBinary(String binaryStr) {
        int decimal = Integer.parseInt(binaryStr, 2);
        if (decimal > 15) {
            return Integer.toString(decimal, 16).toUpperCase();
        } else {
            return "0" + Integer.toString(decimal, 16).toUpperCase();
        }
    }

    private String encodeHexAscii(String code) {
        int maxlinea = widthBytes * 2;
        StringBuilder sbCode = new StringBuilder();
        StringBuilder sbLinea = new StringBuilder();
        String previousLine = null;
        int counter = 1;
        char aux = code.charAt(0);
        boolean firstChar = false;
        for (int i = 1; i < code.length(); i++) {
            if (firstChar) {
                aux = code.charAt(i);
                firstChar = false;
                continue;
            }
            if (code.charAt(i) == '\n') {
                if (counter >= maxlinea && aux == '0') {
                    sbLinea.append(",");
                } else if (counter >= maxlinea && aux == 'F') {
                    sbLinea.append("!");
                } else if (counter > 20) {
                    int multi20 = (counter / 20) * 20;
                    int resto20 = (counter % 20);
                    sbLinea.append(mapCode.get(multi20));
                    if (resto20 != 0) {
                        sbLinea.append(mapCode.get(resto20)).append(aux);
                    } else {
                        sbLinea.append(aux);
                    }
                } else {
                    sbLinea.append(mapCode.get(counter)).append(aux);
                }
                counter = 1;
                firstChar = true;
                if (sbLinea.toString().equals(previousLine)) {
                    sbCode.append(":");
                } else {
                    sbCode.append(sbLinea.toString());
                }
                previousLine = sbLinea.toString();
                sbLinea.setLength(0);
                continue;
            }
            if (aux == code.charAt(i)) {
                counter++;
            } else {
                if (counter > 20) {
                    int multi20 = (counter / 20) * 20;
                    int resto20 = (counter % 20);
                    sbLinea.append(mapCode.get(multi20));
                    if (resto20 != 0) {
                        sbLinea.append(mapCode.get(resto20)).append(aux);
                    } else {
                        sbLinea.append(aux);
                    }
                } else {
                    sbLinea.append(mapCode.get(counter)).append(aux);
                }
                counter = 1;
                aux = code.charAt(i);
            }
        }
        return sbCode.toString();
    }

    public void setCompressHex(boolean compressHex) {
        this.compressHex = compressHex;
    }

    public void setBlacknessLimitPercentage(int percentage) {
        blackLimit = (percentage * 768 / 100);
    }
}
Example usage:
You need to convert your image to a bitmap, convert it to a monochrome/grayscale image, and do the hex ASCII conversion. The generated ZPL code can be checked with the Labelary viewer.
public class Utils {

    public static String getZplCode(Bitmap bitmap, Boolean addHeaderFooter) {
        ZPLConverter zp = new ZPLConverter();
        zp.setCompressHex(true);
        zp.setBlacknessLimitPercentage(50);
        Bitmap grayBitmap = toGrayScale(bitmap);
        return zp.convertFromImage(grayBitmap, addHeaderFooter);
    }

    public static Bitmap toGrayScale(Bitmap bmpOriginal) {
        int width, height;
        height = bmpOriginal.getHeight();
        width = bmpOriginal.getWidth();
        Bitmap grayScale = Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_8888);
        Canvas c = new Canvas(grayScale);
        Paint paint = new Paint();
        ColorMatrix cm = new ColorMatrix();
        cm.setSaturation(0);
        ColorMatrixColorFilter f = new ColorMatrixColorFilter(cm);
        paint.setColorFilter(f);
        c.drawBitmap(bmpOriginal, 0, 0, paint);
        return grayScale;
    }
}
The converter code has been referenced from here, with support added for Android use.
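A hedged usage sketch of the call site (not from the original answer): the generated string is plain text that can be sent straight to a networked Zebra printer, commonly on raw port 9100; the printer IP and port here are assumptions for your setup, and the socket work must run off the UI thread. Needs java.net.Socket, java.io.OutputStream and java.io.IOException imports.

    // Sketch: generate the ZPL and push it to the printer over a raw TCP socket (off the UI thread).
    String zpl = Utils.getZplCode(bitmap, true);
    try (Socket socket = new Socket("192.168.1.100", 9100)) { // assumed printer IP / common raw port
        OutputStream out = socket.getOutputStream();
        out.write(zpl.getBytes("UTF-8"));
        out.flush();
    } catch (IOException e) {
        e.printStackTrace();
    }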
I know this is kind of a done deal, but I still struggled a lot with the current answer. I wanted to share my experience for those who might need it.
First of all, ^GFA carries a hexadecimal (hex ASCII) representation of the pixels, so the pixel data has to be converted into readable text. Here is an example:
Pixels: white = 0, black = 1
1011 0100 translates into 0xB4
In the data section of ^GFA, you need to have B4 as data.
If we go with
Pixel line 1: 1011 0100 1001 1100 = 0xB4 0x9C
Pixel line 2: 0011 0110 0001 1111 = 0x36 0x1F
the resulting ZPL code will be:
^XA (needed to start a ZPL file)
^FO10,0 (offsets 10 pixels horizontal, 0 pixels vertical)
^GFA,4,4,2,B49C361F (4 is the total number of bytes, 2 is the number of bytes per line)
^FS^XZ (end of field and end of file)
Now, the interesting point: how to get code that does that.
You need a grayscale bitmap with pixel-wise access, in other words an array of integers whose values vary from 0 to 255.
With that array you take each run of 8 pixels, convert it to a hexadecimal value and then to the text representation of that hex value. Here is the C++ code, made with Borland:
Graphics::TBitmap *imageFax = new Graphics::TBitmap();
unsigned char r;
unsigned char b;
ofstream outFile;
char listeHex[16];
int lineByteWidth;
int j;
int bytesCount = 0;
int widthHeight;
AnsiString testOut;

listeHex[0] = '0';
listeHex[1] = '1';
listeHex[2] = '2';
listeHex[3] = '3';
listeHex[4] = '4';
listeHex[5] = '5';
listeHex[6] = '6';
listeHex[7] = '7';
listeHex[8] = '8';
listeHex[9] = '9';
listeHex[10] = 'A';
listeHex[11] = 'B';
listeHex[12] = 'C';
listeHex[13] = 'D';
listeHex[14] = 'E';
listeHex[15] = 'F';

imageFax->Monochrome = true;
imageFax->PixelFormat = pf8bit;
imageFax->LoadFromFile("c:/testEtiquette/test.bmp"); // 1200x300 pixel bitmap test image
testOut = "c:/testEtiquette/outputfile.txt";
outFile.open(testOut.c_str());
imageFax->PixelFormat = pf8bit;

lineByteWidth = imageFax->Width / 8;            // Number of bytes per line
widthHeight = lineByteWidth * imageFax->Height; // Total number of bytes written into the output file

testOut = "^XA^FO10,0^GFA,";
outFile << testOut.c_str() << widthHeight << ',' << widthHeight << ',' << lineByteWidth << ',';

for (int i = 0; i < imageFax->Height; i++)
{
    unsigned char *pixel = (unsigned char *)imageFax->ScanLine[i];
    bytesCount = 0;
    b = 0x00;
    for (j = 0; j < imageFax->Width; j++)
    {
        // Here is the "switch": anything that is not white (255) becomes bit = 1 (black), white becomes bit = 0.
        // You can set your switch at whatever value you think is best: 0, 255 or anything in between.
        // I think 255 (white) is good for my application.
        if (pixel[j] != 255)
        {
            b = b << 1;
            // It is not white (hence black), so we force value 1 into the current position
            b = b | 0x01;
        }
        else
        {
            // Since it is white, we move 1 bit to the left, pushing 0 into the current position
            b = b << 1;
            b = b & 0xFE; // Forcing a 0 in the current position
        }
        // If we've got a full byte (8 bits), we write it into the file.
        // This will lead to cutting off part of images whose width is not a multiple of 8.
        if (j % 8 == 7)
        {
            bytesCount++;
            r = b;
            r = r & 0xF0;                // Keeping only the high nibble
            r = r >> 4;                  // Shifting it down: 0xF0 => 0x0F
            outFile << listeHex[r % 16]; // ASCII representation of the hex value, via the conversion array listeHex
            r = listeHex[r % 16];        // For debug only
            r = b;
            r = r & 0x0F;                // Keeping only the low nibble
            outFile << listeHex[r % 16]; // ASCII representation of the hex value
            r = listeHex[r % 16];        // For debug only
            b = 0x00;                    // Resetting for the next byte
        }
    }
}
testOut = "^FS^XZ";
outFile << testOut.c_str();
outFile.close();
delete imageFax;
Some links:
ZPL PDF doc (see page 191 for graphic conversion): https://www.zebra.com/content/dam/zebra/manuals/printers/common/programming/zpl-zbi2-pm-en.pdf
(If the link does not work, try searching for "zpl-zbi2-pm-en.pdf" on Google.)
https://www.rapidtables.com/convert/number/binary-to-hex.html
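For Android readers, here is a rough Java equivalent of the packing described above (a sketch only, assuming an ARGB Bitmap and the same "not white = black" rule; rows whose width is not a multiple of 8 are padded rather than cut off):

    // Sketch: pack 8 pixels per byte (1 = black, 0 = white) and emit the ^GFA hex ASCII data.
    static String bitmapToGfaHex(Bitmap bmp) {
        int widthBytes = (bmp.getWidth() + 7) / 8; // bytes per row; total = widthBytes * height
        StringBuilder sb = new StringBuilder();
        for (int y = 0; y < bmp.getHeight(); y++) {
            int b = 0, bits = 0;
            for (int x = 0; x < bmp.getWidth(); x++) {
                int p = bmp.getPixel(x, y);
                int gray = (((p >> 16) & 0xff) + ((p >> 8) & 0xff) + (p & 0xff)) / 3;
                b = (b << 1) | (gray != 255 ? 1 : 0); // not white -> black dot
                if (++bits == 8) {
                    sb.append(String.format("%02X", b));
                    b = 0;
                    bits = 0;
                }
            }
            if (bits > 0) { // pad the last partial byte of the row
                sb.append(String.format("%02X", b << (8 - bits)));
            }
            sb.append('\n');
        }
        // Use as: "^XA^FO10,0^GFA," + total + "," + total + "," + widthBytes + "," + hex + "^FS^XZ"
        return sb.toString();
    }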
Here is complete working code for a networked (IP) printer (model GK420t, ZPL; you can reach any IP printer this way). Just replace three things: 1) add your IP address, 2) add your port number, 3) add your PNG file path.
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Management;
using System.Net.Http;
using System.ServiceModel.Channels;
using System.Web;
using System.Web.Http;
using System.Net.Sockets;
using System.Net;
using System.IO;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Printing;
using System.Net.NetworkInformation;
using System.Drawing.Imaging;
using System.Text.RegularExpressions;
using System.Drawing.Drawing2D;
using System.Runtime.InteropServices;
using System.Windows.Forms;
using System.Drawing.Printing;

namespace ConsoleApplication2
{
    class Program
    {
        static void Main(string[] args)
        {
            string ipAddress = "Your IP address";
            int port = 9100; // your port number (9100 is the usual raw printing port)
            string zplImageData = string.Empty;
            string filePath = @"your png file path";
            byte[] binaryData = System.IO.File.ReadAllBytes(filePath);
            foreach (Byte b in binaryData)
            {
                string hexRep = String.Format("{0:X}", b);
                if (hexRep.Length == 1)
                    hexRep = "0" + hexRep;
                zplImageData += hexRep;
            }
            string zplToSend = "^XA" + "^FO50" + "50^GFA,120000,120000,100" + binaryData.Length + ",," + zplImageData + "^XZ";
            string printImage = "^XA^FO115,50^IME:LOGO.PNG^FS^XZ";
            try
            {
                // Open connection
                System.Net.Sockets.TcpClient client = new System.Net.Sockets.TcpClient();
                client.Connect(ipAddress, port);
                // Write ZPL String to connection
                System.IO.StreamWriter writer = new System.IO.StreamWriter(client.GetStream(), Encoding.UTF8);
                writer.Write(zplToSend);
                writer.Flush();
                writer.Write(printImage);
                writer.Flush();
                // Close Connection
                writer.Close();
                client.Close();
            }
            catch (Exception ex)
            {
                // Catch Exception
            }
        }
    }
}

TfLite image classification score is not consistent; it keeps increasing for the same image until it reaches saturation (the actual score)

With the same instance of the interpreter, the score keeps increasing for the same image until it reaches saturation.
Interpreter tflite = new Interpreter(loadModelFile(context));
Create an instance of ImageClassifier and use the same instance to classify a frame and run inference for the same image.
ImageClassifier(Activity activity) throws IOException {
    tflite = new Interpreter(loadModelFile(activity));
    labelList = loadLabelList(activity);
    imgData =
        ByteBuffer.allocateDirect(
            DIM_BATCH_SIZE
                * getImageSizeX()
                * getImageSizeY()
                * DIM_PIXEL_SIZE
                * getNumBytesPerChannel());
    imgData.order(ByteOrder.nativeOrder());
    filterLabelProbArray = new float[FILTER_STAGES][getNumLabels()];
    Log.d(TAG, "Created a Tensorflow Lite Image Classifier.");
}
Classify a frame for the same image. The same image can be picked up from the SD card.
private void classifyImage() {
    if (classifier == null || getActivity() == null || cameraDevice == null) {
        showToast("Uninitialized Classifier or invalid context.");
        return;
    }
    String imgPath = "/storage/emulated/0/DCIM/test.jpg";
    Log.d("Image Path is %s", imgPath);
    Bitmap bitmap = BitmapFactory.decodeFile(imgPath);
    Bitmap newbitmap = Bitmap.createScaledBitmap(bitmap, 299, 299, false);
    String textToShow = classifier.classifyFrame(newbitmap);
    bitmap.recycle();
    showToast(textToShow);
}
classifyFrame() Method of ImageClassifier.java
String classifyFrame(Bitmap bitmap) {
    if (tflite == null) {
        Log.e(TAG, "Image classifier has not been initialized; Skipped.");
        return "Uninitialized Classifier.";
    }
    convertBitmapToByteBuffer(bitmap);
    // Here's where the magic happens!!!
    long startTime = SystemClock.uptimeMillis();
    runInference();
    long endTime = SystemClock.uptimeMillis();
    Log.d(TAG, "Timecost to run model inference: " + Long.toString(endTime - startTime));
    // Smooth the results across frames.
    applyFilter();
    // Print the results.
    String textToShow = printTopKLabels();
    textToShow = Long.toString(endTime - startTime) + "ms" + textToShow;
    return textToShow;
}
applyFilter() method of ImageClassifier.java
void applyFilter() {
    int numLabels = getNumLabels();
    // Low pass filter `labelProbArray` into the first stage of the filter.
    for (int j = 0; j < numLabels; ++j) {
        filterLabelProbArray[0][j] +=
            FILTER_FACTOR * (getProbability(j) - filterLabelProbArray[0][j]);
    }
    // Low pass filter each stage into the next.
    for (int i = 1; i < FILTER_STAGES; ++i) {
        for (int j = 0; j < numLabels; ++j) {
            filterLabelProbArray[i][j] +=
                FILTER_FACTOR * (filterLabelProbArray[i - 1][j] - filterLabelProbArray[i][j]);
        }
    }
    // Copy the last stage filter output back to `labelProbArray`.
    for (int j = 0; j < numLabels; ++j) {
        setProbability(j, filterLabelProbArray[FILTER_STAGES - 1][j]);
    }
}
Prints top-K labels, to be shown in UI as the results.
private String printTopKLabels() {
    for (int i = 0; i < getNumLabels(); ++i) {
        sortedLabels.add(
            new AbstractMap.SimpleEntry<>(labelList.get(i), getNormalizedProbability(i)));
        if (sortedLabels.size() > RESULTS_TO_SHOW) {
            sortedLabels.poll();
        }
    }
    String textToShow = "";
    final int size = sortedLabels.size();
    for (int i = 0; i < size; ++i) {
        Map.Entry<String, Float> label = sortedLabels.poll();
        textToShow = String.format("\n%s: %4.2f", label.getKey(), label.getValue()) + textToShow;
    }
    return textToShow;
}
The first time the application is launched, the classification score for the image is 0.06. If classifyImage() is called again on some click event, the score increases to 0.13, and with the same process it keeps increasing until it reaches 0.86 (saturation).
I am not sure why this is happening, but it happened for both types of TfLite models, InceptionV3 and MobileNet.
The results are filtered by the applyFilter method. It is a simple low-pass filter, so the scores gradually approach their medium-term average. Comment out the call to applyFilter and it should respond instantly, but it may be too jittery for some applications.
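As a rough illustration (not from the original answer): each call mixes only a fraction of the new probability into the stored value, so repeated calls on the same image converge toward the model's actual score. A tiny standalone sketch of a one-stage version, with an assumed FILTER_FACTOR of 0.4:

    // Sketch: exponential low-pass filter converging toward a constant raw score.
    public class FilterDemo {
        public static void main(String[] args) {
            final float FILTER_FACTOR = 0.4f; // assumed smoothing factor
            float filtered = 0f;              // stored (displayed) score starts at 0
            float rawScore = 0.86f;           // the model's actual, constant score
            for (int call = 1; call <= 10; call++) {
                filtered += FILTER_FACTOR * (rawScore - filtered);
                System.out.printf("call %d: %.2f%n", call, filtered);
            }
            // Output climbs 0.34, 0.55, 0.67, ... toward 0.86, similar to the gradually
            // increasing scores described in the question.
        }
    }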

Template image detection not working correctly

I have a picture that contains 2 icons at the bottom. I am already cropping the picture to get only the bottom part and comparing it with the icon picture using OpenCV template matching. It works perfectly when the icon is there, but the problem is that when I delete the icon, the rectangle still appears, and in the wrong place. All I want is: if there is a match, show the rectangle; if there is not, don't show it.
Here is my code:
public class Test123 extends AppCompatActivity {
    ImageView a, b, c;
    String resultImgPath, baseDir, lol;
    Button x;
    private static final String TAG = "Test123";
    String aa, bb;

    static {
        if (!OpenCVLoader.initDebug()) {
            Log.e(TAG, "OpenCV not loaded");
        } else {
            Log.e(TAG, "OpenCV loaded");
        }
    }

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_test123);
        System.loadLibrary("opencv_java3");
        a = (ImageView) findViewById(R.id.imageView);
        b = (ImageView) findViewById(R.id.imageView2);
        c = (ImageView) findViewById(R.id.imageView3);
        x = (Button) findViewById(R.id.button);
        baseDir = Environment.getExternalStorageDirectory().getAbsolutePath();
        resultImgPath = baseDir + "/Test/result.jpg";
        aa = baseDir + "/Test/d.jpg";
        bb = baseDir + "/Test/dd1.jpg";
        lol = baseDir + "/Test/c.jpg";
        x.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                Bitmap bmImg = BitmapFactory.decodeFile(aa);
                int fromHere = (int) (bmImg.getHeight() * 0.06);
                final Bitmap croppedBitmap = Bitmap.createBitmap(bmImg, (int) (bmImg.getWidth() * 0.3), (int) (bmImg.getHeight() * 0.94), (int) (bmImg.getWidth() * 0.6), fromHere);
                String root = Environment.getExternalStorageDirectory().toString();
                File myDir = new File(root + "/Test");
                myDir.mkdirs();
                String fname = "c.jpg";
                File file = new File(myDir, fname);
                if (file.exists())
                    file.delete();
                try {
                    FileOutputStream out = new FileOutputStream(file);
                    croppedBitmap.compress(Bitmap.CompressFormat.JPEG, 90, out);
                    out.flush();
                    out.close();
                } catch (Exception e) {
                    e.printStackTrace();
                }
                a.setImageBitmap(croppedBitmap);
                Bitmap bmImg2 = BitmapFactory.decodeFile(bb);
                b.setImageBitmap(bmImg2);
                matchingDemo(lol, bb, resultImgPath, Imgproc.TM_SQDIFF);
            }
        });
    }

    public void matchingDemo(String imgPath, String templatePath, String resPath, int matchType) {
        // read the entered image from its path and make a Mat object
        Mat img = Imgcodecs.imread(imgPath);
        Mat templ = Imgcodecs.imread(templatePath);
        // Create the result matrix
        int result_cols = img.cols() - templ.cols() + 1;
        int result_rows = img.rows() - templ.rows() + 1;
        Mat result = new Mat(result_rows, result_cols, CvType.CV_8UC3);
        // perform matching and do normalization
        Imgproc.matchTemplate(img, templ, result, matchType);
        int type = Imgproc.THRESH_TOZERO;
        Imgproc.threshold(result, result, 0.8, 1., type);
        Core.normalize(result, result, 0, 1, Core.NORM_MINMAX, -1, new Mat());
        // find the best match from minMaxLoc
        Core.MinMaxLocResult mmr = Core.minMaxLoc(result);
        double bb = mmr.maxVal;
        Log.e("hey", bb + "");
        Point matchLoc;
        if (matchType == Imgproc.TM_SQDIFF || matchType == Imgproc.TM_SQDIFF_NORMED) {
            matchLoc = mmr.minLoc;
        } else {
            matchLoc = mmr.maxLoc;
        }
        // draw a rectangle on the searched object
        Imgproc.rectangle(img, matchLoc, new Point(matchLoc.x + templ.cols(),
                matchLoc.y + templ.rows()), new Scalar(0, 255, 0));
        // store the result image here
        Imgcodecs.imwrite(resPath, img);
        Mat image = new Mat();
        image = Imgcodecs.imread(resPath);
        Bitmap bm = Bitmap.createBitmap(image.cols(), image.rows(), Bitmap.Config.ARGB_8888);
        Utils.matToBitmap(image, bm);
        c.setImageBitmap(bm);
        image.release();
    }
}
EDIT
I know that I need something here:
    if (......................) {
        Imgproc.rectangle(img, matchLoc, new Point(matchLoc.x + templ.cols(),
                matchLoc.y + templ.rows()), new Scalar(0, 255, 0));
    }
I tried to put minVal inside the if block, but every picture gives me different numbers. I tried with normalize and without it; I can pick the right number for one picture, but other pictures give different numbers, so it is not detecting at all whether the icon is visible or not, and it keeps drawing in the wrong place. I just need one number or condition so that the rectangle is drawn when there is a match and not drawn when there is no match; I don't want any value when there is no match.
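A common approach (a sketch only, not from the original post): use a normalized matching method such as TM_CCOEFF_NORMED so the score is comparable across images, skip the extra normalize call (which rescales every result to the 0..1 range and destroys that comparability), and draw only when the best score passes a threshold. The 0.8 below is an assumed value to tune; the Mats are reused from matchingDemo above.

    // Sketch: threshold a normalized template-match score before drawing.
    Imgproc.matchTemplate(img, templ, result, Imgproc.TM_CCOEFF_NORMED);
    Core.MinMaxLocResult mmr = Core.minMaxLoc(result);
    double matchScore = mmr.maxVal;   // for TM_CCOEFF_NORMED, higher = better, max 1.0
    double threshold = 0.8;           // assumed value; tune it on your own images
    if (matchScore >= threshold) {
        Point matchLoc = mmr.maxLoc;
        Imgproc.rectangle(img, matchLoc, new Point(matchLoc.x + templ.cols(),
                matchLoc.y + templ.rows()), new Scalar(0, 255, 0));
    } else {
        Log.d(TAG, "No match (score " + matchScore + " below threshold); not drawing.");
    }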

How to view selected images in a video format in Android

I am selecting a list of images from my SD card, and I want to show the selected images to the user in a video format. I don't know what to do; is there any idea or link to refer to? Please help me. Thanks in advance.
@Harini - please follow up this link, or you can try the code below:
try {
    File file = this.getFullPath("", "video.mp4");
    SequenceEncoder encoder = new SequenceEncoder(file);
    // only 5 frames in total
    for (int i = 1; i <= 5; i++) {
        // getting bitmap from drawable path
        int bitmapResId = this.getResources().getIdentifier("image" + i, "drawable", this.getPackageName());
        Bitmap bitmap = this.getBitmapFromResources(this.getResources(), bitmapResId);
        encoder.encodeNativeFrame(this.pictureFromBitmap(bitmap));
    }
    encoder.finish();
} catch (IOException e) {
    e.printStackTrace();
}

// get full SD path
File getFullPath(String filePatho, String fileName) {
    File extBaseDir = Environment.getExternalStorageDirectory();
    if (filePatho == null || filePatho.length() == 0 || filePatho.charAt(0) != '/') {
        filePatho = "/" + filePatho;
    }
    makeDirectory(filePatho);
    File file = new File(extBaseDir.getAbsoluteFile() + filePatho);
    return new File(file.getAbsolutePath() + "/" + fileName); // file;
}

// convert from Bitmap to Picture (jcodec native structure)
public Picture pictureFromBitmap(Bitmap src) {
    Picture dst = Picture.create(src.getWidth(), src.getHeight(), ColorSpace.RGB);
    pictureFromBitmap(src, dst);
    return dst;
}

public void pictureFromBitmap(Bitmap src, Picture dst) {
    int[] dstData = dst.getPlaneData(0);
    int[] packed = new int[src.getWidth() * src.getHeight()];
    src.getPixels(packed, 0, src.getWidth(), 0, 0, src.getWidth(), src.getHeight());
    for (int i = 0, srcOff = 0, dstOff = 0; i < src.getHeight(); i++) {
        for (int j = 0; j < src.getWidth(); j++, srcOff++, dstOff += 3) {
            int rgb = packed[srcOff];
            dstData[dstOff] = (rgb >> 16) & 0xff;
            dstData[dstOff + 1] = (rgb >> 8) & 0xff;
            dstData[dstOff + 2] = rgb & 0xff;
        }
    }
}
http://wptrafficanalyzer.in/blog/image-slideshow-using-viewflipper-in-android/
Hope this helps. Or you can search for an image slider in Android to create an image slideshow, as sketched below.
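If the goal is just a slideshow rather than an actual video file, a minimal ViewFlipper sketch (an illustrative alternative, not from the original answer; the view ID and the list of selected paths are hypothetical):

    // Sketch: auto-flipping slideshow of the selected images using ViewFlipper.
    ViewFlipper flipper = (ViewFlipper) findViewById(R.id.flipper); // hypothetical view ID
    for (String path : selectedImagePaths) {                        // hypothetical list of SD-card paths
        ImageView view = new ImageView(this);
        view.setImageBitmap(BitmapFactory.decodeFile(path));
        flipper.addView(view);
    }
    flipper.setFlipInterval(2000); // milliseconds per image
    flipper.startFlipping();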
