I am trying to port a TensorFlow model to TensorFlow Lite to use it in an Android application. The conversion is successful and everything runs, except for this error: Internal error: Failed to run on the given Interpreter: input must be 5-dimensional. The input in the original model was input_shape=(20, 320, 240, 1), i.e. 20 grayscale images of 320 x 240 pixels (hence the trailing 1). Here is the important code:
List<Mat> preprocessedFrames = preprocFrames(buf);
// has length of 20 -> no problem there (shouldn't affect dimensionality either...)
int[] output = new int[2];
float[][][] inputMatrices = new float[preprocessedFrames.size()][320][240];
for (int i = 0; i < preprocessedFrames.size(); i++) {
    Mat inpRaw = preprocessedFrames.get(i);
    Bitmap data = Bitmap.createBitmap(inpRaw.cols(), inpRaw.rows(), Bitmap.Config.ARGB_8888);
    Utils.matToBitmap(inpRaw, data);
    int[][] pixels = pixelsFromBitmap(data);
    float[][] inputMatrix = inputMatrixFromIntPixels(pixels);
    // returns float[][] with floats from 0 to 1
    inputMatrices[i] = inputMatrix;
}
try {
    detector.run(inputMatrices, output);
    Debug("results: " + Arrays.toString(output)); // output.toString() would only print the array reference
}
The model gives me an output of 2 neurons, translating into 2 labels.
The model code is the following:
model = tf.keras.Sequential(name='detector')
model.add(tf.keras.layers.Conv3D(filters=(56), input_shape=(20, 320, 240, 1), strides=(2,2,2), kernel_size=(3,11,11), padding='same', activation="relu"))
model.add(tf.keras.layers.AveragePooling3D(pool_size=(1,4,4)))
model.add(tf.keras.layers.Conv3D(filters=(72), kernel_size=(4,7,7), strides=(1,2,2), padding='same'))
model.add(tf.keras.layers.Conv3D(filters=(81), kernel_size=(2,4,4), strides=(2,2,2), padding='same'))
model.add(tf.keras.layers.Conv3D(filters=(100), kernel_size=(1,2,2), strides=(3,2,2), padding='same'))
model.add(tf.keras.layers.Conv3D(filters=(128), kernel_size=(1,2,2), padding='same'))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(768, activation='tanh', kernel_regularizer=tf.keras.regularizers.l2(0.011)))
model.add(tf.keras.layers.Dropout(rate=0.1))
model.add(tf.keras.layers.Dense(256, activation='sigmoid', kernel_regularizer=tf.keras.regularizers.l2(0.012)))
model.add(tf.keras.layers.Dense(2, activation='softmax'))
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.00001),
              loss=tf.keras.losses.CategoricalCrossentropy(),
              metrics=['accuracy'])
EDIT: I printed out the shape of the first input tensor as follows:
int[] shape = detector.getInputTensor(0).shape();
for (int r = 0; r < shape.length; r++) {
    Log.d("********" + r, "*******: " + r + " : " + shape[r]);
}
With that, I first get the output [1, 20, 320, 240, 1] and after that I only get [20, 320, 240]. I am really quite desperate now...
So, I figured it out by myself: it seems I really only had to make the input 5-dimensional, by putting the content into an extra first dimension and every single pixel into a fifth dimension. In hindsight this matches the shape [1, 20, 320, 240, 1] reported above: TensorFlow Lite keeps an explicit batch dimension in front and an explicit channel dimension at the end, even though Keras hides the batch dimension in input_shape. I didn't see that at first, but I will accept it xD.
float[][] output = new float[1][2];
float[][][][][] inputMatrices = new float[1][preprocessedFrames.size()][320][240][1];
for (int i = 0; i < preprocessedFrames.size(); i++) {
    Mat inpRaw = preprocessedFrames.get(i);
    Bitmap data = Bitmap.createBitmap(inpRaw.cols(), inpRaw.rows(), Bitmap.Config.ARGB_8888);
    Utils.matToBitmap(inpRaw, data);
    int[][] pixels = pixelsFromBitmap(data);
    float[][] inputMatrix = inputMatrixFromIntPixels(pixels);
    for (int j = 0; j < inputMatrix.length; j++) {        // all rows
        for (int k = 0; k < inputMatrix[0].length; k++) { // all columns
            inputMatrices[0][i][k][j][0] = inputMatrix[j][k];
        }
    }
}
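One more note on feeding the interpreter: for inputs of this size, a direct ByteBuffer is usually faster than nested float arrays, and Interpreter.run() accepts it directly. A minimal sketch (untested; assumes the same helper methods as above and the [1, 20, 320, 240, 1] input shape):
// Pack all 20 frames into one direct buffer in the tensor's row-major order.
ByteBuffer input = ByteBuffer.allocateDirect(20 * 320 * 240 * 4).order(ByteOrder.nativeOrder());
for (int i = 0; i < preprocessedFrames.size(); i++) {
    Mat inpRaw = preprocessedFrames.get(i);
    Bitmap data = Bitmap.createBitmap(inpRaw.cols(), inpRaw.rows(), Bitmap.Config.ARGB_8888);
    Utils.matToBitmap(inpRaw, data);
    float[][] inputMatrix = inputMatrixFromIntPixels(pixelsFromBitmap(data));
    for (int j = 0; j < inputMatrix.length; j++) {        // swap these two loops if your
        for (int k = 0; k < inputMatrix[0].length; k++) { // model expects the transposed layout
            input.putFloat(inputMatrix[j][k]);
        }
    }
}
input.rewind();
float[][] output = new float[1][2];
detector.run(input, output);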
I'm trying to convert a YUV image to grayscale, so basically I just need the Y values.
To do so I wrote this little piece of code (with frame being the YUV image):
imageConversionTime = System.currentTimeMillis();
size = frame.getSize();
byte[] nv21ByteArray = frame.getImage();
int lol;
for (int i = 0; i < size.width; i++) {
    for (int j = 0; j < size.height; j++) {
        lol = size.width * j + i;
        yMatrix.put(j, i, nv21ByteArray[lol]);
    }
}
bitmap = Bitmap.createBitmap(size.width, size.height, Bitmap.Config.ARGB_8888);
Utils.matToBitmap(yMatrix, bitmap);
imageConversionTime = System.currentTimeMillis() - imageConversionTime;
However, this takes about 13500 ms. I need it to be A LOT faster (on my computer it takes 8.5 ms in Python). I work on a Motorola Moto E 4G 2nd generation; not super powerful, but it should be enough for converting images, right?
Any suggestions?
Thanks in advance.
First of all, I would assign size.width and size.height to local variables (I don't think the compiler will optimize away the repeated field accesses by default, but I am not sure about this). Furthermore, create an int[] holding the result instead of going through a Matrix.
Then you could do something like this:
int[] grayScalePixels = new int[size.width * size.height];
int cntPixels = 0;
In your inner loop set
int y = nv21ByteArray[lol] & 0xFF;               // mask to get the unsigned luma value
grayScalePixels[cntPixels] = Color.rgb(y, y, y); // pack it as an opaque ARGB gray
cntPixels++;
To get your final image do the following:
Bitmap grayScaleBitmap = Bitmap.createBitmap(grayScalePixels, size.width, size.height, Bitmap.Config.ARGB_8888);
Hope it works properly (I have not tested it), but at least the shown principle should be applicable: relying on a plain array instead of a Matrix.
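Alternatively, the dominant cost in the original snippet is likely the per-element yMatrix.put() call, which crosses the Java/native boundary once per pixel. If you want to stay with OpenCV, copying the whole Y plane in one call should help a lot. A sketch (untested; assumes yMatrix is a CV_8UC1 Mat with size.height rows and size.width columns):
// NV21 stores the full-resolution Y plane first, already in row-major order,
// so it can be copied into the Mat with a single call.
byte[] yPlane = Arrays.copyOfRange(nv21ByteArray, 0, size.width * size.height);
yMatrix.put(0, 0, yPlane);
bitmap = Bitmap.createBitmap(size.width, size.height, Bitmap.Config.ARGB_8888);
Utils.matToBitmap(yMatrix, bitmap);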
Probably 2 years too late, but anyway ;)
To convert to grayscale, all you need to do is set the U/V values to a neutral value (128; the code below uses 127, see the comment there) and leave the Y values as they are. Note that this code is for the YUY2 format. You can refer to this document for other formats.
private void convertToBW(byte[] ptrIn, String filePath) {
    // change all U and V values to 127 (since (byte) 128 would overflow)
    byte[] ptrOut = Arrays.copyOf(ptrIn, ptrIn.length);
    for (int i = 0, ptrInLength = ptrOut.length; i < ptrInLength; i++) {
        if (i % 2 != 0) {        // in YUY2 every odd byte is chroma (Y0 U Y1 V ...)
            ptrOut[i] = (byte) 127;
        }
    }
    convertToJpeg(ptrOut, filePath);
}
For NV21/NV12, the chroma plane starts only after the full-resolution Y plane, i.e. at two thirds of the buffer, so I think the loop would change to:
for (int i = ptrOut.length * 2 / 3, ptrInLength = ptrOut.length; i < ptrInLength; i++) {
    ptrOut[i] = (byte) 127;
}
Note: (didn't try this myself)
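The convertToJpeg() helper isn't shown; a plausible implementation on Android (my assumption, not the author's code) goes through YuvImage, which accepts YUY2 and NV21 buffers directly (width and height are assumed to be the frame dimensions):
// Hypothetical sketch: compress the (now grayscale) YUV buffer to a JPEG file.
private void convertToJpeg(byte[] yuv, String filePath) throws IOException {
    YuvImage img = new YuvImage(yuv, ImageFormat.YUY2, width, height, null);
    try (FileOutputStream out = new FileOutputStream(filePath)) {
        img.compressToJpeg(new Rect(0, 0, width, height), 90, out);
    }
}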
Also, I would suggest profiling your utils method and the createBitmap call separately.
I use the following procedure for transferring an array of float numbers to a RenderScript Kernel and it works fine.
float[] w = new float[10];
Allocation w_rs = Allocation.createSized(rs, Element.F32(rs), 10);
w_rs.copy1DRangeFrom(0, 10, w);
I want to use a similar procedure for transferring Float4 values, as follows:
Float4[] w = new Float4[10];
for (int i = 0; i < 10; i++) {
    w[i] = new Float4(i, 2 * i, 3 * i, 4 * i);
}
Allocation w_rs = Allocation.createSized(rs, Element.F32_4(rs), 10);
w_rs.copy1DRangeFromUnchecked(0, 10, w);
This results in the following error:
Object passed is not an Array of primitives
Apparently, w should be an array of primitives, but I want w to be an array of Float4.
You can simply use:
float[] w = new float[4 * 10];
for (int i = 0; i < 10; i++) {
    w[i * 4 + 0] = i;
    w[i * 4 + 1] = i * 2;
    w[i * 4 + 2] = i * 3;
    w[i * 4 + 3] = i * 4;
}
Allocation w_rs = Allocation.createSized(rs, Element.F32_4(rs), 10);
w_rs.copyFrom(w);
// or
w_rs.copy1DRangeFrom(0, 40, w);
Painless :)
Reference: RenderScript: parallel computing on Android, the easy way
Deeper explanation
Inside the RenderScript Java source code, you'll find this middleware function:
public void copy1DRangeFromUnchecked(int off, int count, Object array) {
    copy1DRangeFromUnchecked(off, count, array,
                             validateObjectIsPrimitiveArray(array, false),
                             java.lang.reflect.Array.getLength(array));
}
The validateObjectIsPrimitiveArray check is performed on every copy-method invocation; you can pass only raw arrays of primitives.
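If you'd rather keep Float4[] on the Java side, a small helper can flatten it into the packed primitive layout right before the copy (a sketch along the same lines as the answer above):
// Flatten a Float4[] into the interleaved float[] layout (x, y, z, w per
// element) that an F32_4 allocation expects.
static float[] flatten(Float4[] v) {
    float[] out = new float[v.length * 4];
    for (int i = 0; i < v.length; i++) {
        out[i * 4 + 0] = v[i].x;
        out[i * 4 + 1] = v[i].y;
        out[i * 4 + 2] = v[i].z;
        out[i * 4 + 3] = v[i].w;
    }
    return out;
}
// usage:
w_rs.copyFrom(flatten(w));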
I'm trying to deal with ECG signal processing in Android. I want to implement simple digital filters (lowpass, highpass).
I've got a transfer function:
H(z) = (1 - 2z^{-6} + z^{-12}) / (1 - 2z^{-1} + z^{-2})
Here is what I've found so far:
Wikipedia - low-pass filter - it looks quite easy here:
for i from 1 to n
    y[i] := y[i-1] + α * (x[i] - y[i-1])
but it says nothing about the transfer function I want to use.
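(For reference, that recurrence translates directly into a few lines of Java; a sketch, with alpha as the smoothing factor in [0, 1]:)
// First-order (single-pole) low-pass filter: y[i] = y[i-1] + alpha * (x[i] - y[i-1])
static float[] lowPass(float[] x, float alpha) {
    float[] y = new float[x.length];
    y[0] = x[0];                      // seed the recursion with the first sample
    for (int i = 1; i < x.length; i++) {
        y[i] = y[i - 1] + alpha * (x[i] - y[i - 1]);
    }
    return y;
}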
I also found the following MATLAB code:
%% Low Pass Filter H(z) = (1 - 2z^(-6) + z^(-12)) / (1 - 2z^(-1) + z^(-2))
b = [1 0 0 0 0 0 -2 0 0 0 0 0 1];
a = [1 -2 1];
h_l = filter(b,a,[1 zeros(1,12)]);
ecg_l = conv (ecg ,h_l);
but there are no functions like filter and conv in Java (or maybe I missed something). I also searched Stack Overflow for an answer, but didn't find anything about transfer functions.
So, can someone help me? I just want to move on with my project.
Given a time-domain recurrence equation (such as the one you quoted from Wikipedia), the corresponding transfer function in the z-domain can relatively easily be obtained by using the time-shift property of the z-transform:
x[n-k] <-> z^{-k} X(z)
where X(z) and Y(z) are the z-transforms of the time-domain input sequence x and output sequence y respectively.
Going the other way around, given a transfer function which can be expressed as a ratio of polynomials in z, such as:
H(z) = Y(z)/X(z) = \frac{\sum_{i=0}^{N} b_i z^{-i}}{1 + \sum_{i=1}^{M} a_i z^{-i}}
the recurrence equation of the transfer function can be written as:
y[n] = -\sum_{i=1}^{M} a_i y[n-i] + \sum_{i=0}^{N} b_i x[n-i]
There are of course many different ways to implement such a recurrence equation, but a simple filter implementation following Direct Form II would be along the lines of:
// Implementation of an Infinite Impulse Response (IIR) filter
// with recurrence equation:
// y[n] = -\sum_{i=1}^M a_i y[n-i] + \sum_{i=0}^N b_i x[n-i]
public class IIRFilter {
    public IIRFilter(float[] a_, float[] b_) {
        // initialize memory elements
        int N = Math.max(a_.length, b_.length);
        memory = new float[N - 1];
        for (int i = 0; i < memory.length; i++) {
            memory[i] = 0.0f;
        }
        // copy filter coefficients, zero-padding the shorter array
        a = new float[N];
        int i = 0;
        for (; i < a_.length; i++) {
            a[i] = a_[i];
        }
        for (; i < N; i++) {
            a[i] = 0.0f;
        }
        b = new float[N];
        i = 0;
        for (; i < b_.length; i++) {
            b[i] = b_[i];
        }
        for (; i < N; i++) {
            b[i] = 0.0f;
        }
    }

    // Filter samples from the input buffer, and store the result in the output buffer.
    // Implementation based on Direct Form II (assumes the coefficients are
    // normalized so that a[0] == 1).
    // Works similar to matlab's "output = filter(b,a,input)" command.
    public void process(float[] input, float[] output) {
        for (int i = 0; i < input.length; i++) {
            float in = input[i];
            float out = 0.0f;
            for (int j = memory.length - 1; j >= 0; j--) {
                in -= a[j + 1] * memory[j];
                out += b[j + 1] * memory[j];
            }
            out += b[0] * in;
            output[i] = out;
            // shift memory
            for (int j = memory.length - 1; j > 0; j--) {
                memory[j] = memory[j - 1];
            }
            memory[0] = in;
        }
    }

    private float[] a;
    private float[] b;
    private float[] memory;
}
which you could use to implement your specific transfer function like so:
float g = 1.0f/32.0f; // overall filter gain
float[] a = {1, -2, 1};
float[] b = {g, 0, 0, 0, 0, 0, -2*g, 0, 0, 0, 0, 0, g};
IIRFilter filter = new IIRFilter(a, b);
filter.process(input, output);
Note that you can alternatively also factorize the numerator and denominator into 2nd order polynomials and obtain a cascade of 2nd order filters (known as biquad filters).
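As a concrete illustration of that idea, the specific H(z) above factors nicely: 1 - 2z^{-6} + z^{-12} = (1 - z^{-6})^2 and 1 - 2z^{-1} + z^{-2} = (1 - z^{-1})^2, so the same response can be produced by running two identical stages in series (a sketch using the IIRFilter class above; untested):
// Each stage implements (1 - z^-6) / (1 - z^-1); two in series give H(z).
float g = 1.0f / 32.0f;            // overall gain, applied once at the end
float[] aStage = {1, -1};
float[] bStage = {1, 0, 0, 0, 0, 0, -1};
IIRFilter stage1 = new IIRFilter(aStage, bStage);
IIRFilter stage2 = new IIRFilter(aStage, bStage);
float[] temp = new float[input.length];
stage1.process(input, temp);
stage2.process(temp, output);
for (int n = 0; n < output.length; n++) {
    output[n] *= g;                // scale once instead of folding g into each stage
}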
In order to align the intensity values of two grayscale images (as a first step for further processing) I wrote a Java method that:
1) converts the bitmaps of the two images into two int[] arrays containing the bitmaps' intensities (I just take the red component here, since it's grayscale, i.e. r=g=b):
public static int[] bmpToData(Bitmap bmp) {
    int width = bmp.getWidth();
    int height = bmp.getHeight();
    int anzpixel = width * height;
    int[] pixels = new int[anzpixel];
    int[] data = new int[anzpixel];
    bmp.getPixels(pixels, 0, width, 0, 0, width, height);
    for (int i = 0; i < anzpixel; i++) {
        int p = pixels[i];
        int r = (p & 0xff0000) >> 16;
        //int g = (p & 0xff00) >> 8;
        //int b = p & 0xff;
        data[i] = r;
    }
    return data;
}
2) aligns the cumulated intensity distribution of Bitmap 2 to that of Bitmap 1:
// aligns the intensity distribution of a moving grayscale picture (given by int[] data2)
// to the intensity distribution of a fixed reference picture (given by int[] data1)
public static int[] histMatch(int[] data1, int[] data2) {
    int anzpixel = data1.length;
    int[] histogram_fixed = new int[256];
    int[] histogram_moving = new int[256];
    int[] cumhist_fixed = new int[256];
    int[] cumhist_moving = new int[256];
    int i = 0;
    int j = 0;
    // read intensities of fixed and moving into histograms
    for (int n = 0; n < anzpixel; n++) {
        histogram_fixed[data1[n]]++;
        histogram_moving[data2[n]]++;
    }
    // calc cumulated distributions
    cumhist_fixed[0] = histogram_fixed[0];
    cumhist_moving[0] = histogram_moving[0];
    for (i = 1; i < 256; ++i) {
        cumhist_fixed[i] = cumhist_fixed[i - 1] + histogram_fixed[i];
        cumhist_moving[i] = cumhist_moving[i - 1] + histogram_moving[i];
    }
    // look-up-table lut[]. For each quantile i of the moving picture search the
    // value j of the fixed picture where the quantile is the same as that of moving
    int[] lut = new int[anzpixel];
    j = 0;
    for (i = 0; i < 256; ++i) {
        while (cumhist_fixed[j] < cumhist_moving[i]) {
            j++;
        }
        // check whether the distance to the next-lower intensity is even lower, and if so, take this value
        if ((j != 0) && ((cumhist_fixed[j - 1] - cumhist_fixed[i]) < (cumhist_fixed[j] - cumhist_fixed[i]))) {
            lut[i] = (j - 1);
        } else {
            lut[i] = (j);
        }
    }
    // apply the lut[] to the moving picture
    i = 0;
    for (int n = 0; n < anzpixel; n++) {
        data2[n] = (int) lut[data2[n]];
    }
    return data2;
}
3) converts the int[] arrays back to a Bitmap:
public static Bitmap dataToBitmap(int[] data, int width, int height) {
    int index = 0;
    Bitmap bmp = Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_8888);
    for (int x = 0; x < width; x++) {
        for (int y = 0; y < height; y++) {
            index = y * width + x;
            int c = data[index];
            bmp.setPixel(x, y, Color.rgb(c, c, c));
        }
    }
    return bmp;
}
While the core procedure 2) is straightforward and fast, the conversion steps 1) and 3) are rather inefficient. It would be more than cool to do the whole thing in RenderScript. But, honestly, I am completely lost here because of the missing documentation: while there are many impressive examples of what RenderScript COULD do, I don't see a way to benefit from these possibilities (no books, no docs). Any advice is highly appreciated!
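As an aside, step 3 alone can already be sped up a lot without RenderScript by writing all the pixels in a single call instead of per-pixel setPixel(); a sketch (untested):
// Pack the gray values into ARGB ints once, then hand the whole array to the Bitmap.
public static Bitmap dataToBitmapFast(int[] data, int width, int height) {
    int[] colors = new int[data.length];
    for (int n = 0; n < data.length; n++) {
        int c = data[n];
        colors[n] = Color.rgb(c, c, c);   // opaque gray pixel
    }
    Bitmap bmp = Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_8888);
    bmp.setPixels(colors, 0, width, 0, 0, width, height);
    return bmp;
}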
As a starting point, use Android Studio to "Import Sample..." and select Basic Render Script. This will give you a working project that we will now modify.
First, let's add more Allocation references to MainActivity. We will use them to communicate image data, histograms and the LUT between Java and Renderscript.
private Allocation mInAllocation;
private Allocation mInAllocation2;
private Allocation[] mOutAllocations;
private Allocation mHistogramAllocation;
private Allocation mHistogramAllocation2;
private Allocation mLUTAllocation;
Then in onCreate() load another image, which you will also need to add to /res/drawables/.
mBitmapIn2 = loadBitmap(R.drawable.cat_480x400);
In createScript() create additional allocations:
mInAllocation2 = Allocation.createFromBitmap(mRS, mBitmapIn2);
mHistogramAllocation = Allocation.createSized(mRS, Element.U32(mRS), 256);
mHistogramAllocation2 = Allocation.createSized(mRS, Element.U32(mRS), 256);
mLUTAllocation = Allocation.createSized(mRS, Element.U32(mRS), 256);
And now the main part (in RenderScriptTask):
/*
 * Invoke histogram kernel for both images
 */
mScript.bind_histogram(mHistogramAllocation);
mScript.forEach_compute_histogram(mInAllocation);
mScript.bind_histogram(mHistogramAllocation2);
mScript.forEach_compute_histogram(mInAllocation2);

/*
 * Variables copied verbatim from your code.
 */
int[] histogram_fixed = new int[256];
int[] histogram_moving = new int[256];
int[] cumhist_fixed = new int[256];
int[] cumhist_moving = new int[256];
int i = 0;
int j = 0;

// copy computed histograms to Java side
mHistogramAllocation.copyTo(histogram_fixed);
mHistogramAllocation2.copyTo(histogram_moving);

// your code again...
// calc cumulated distributions
cumhist_fixed[0] = histogram_fixed[0];
cumhist_moving[0] = histogram_moving[0];
for (i = 1; i < 256; ++i) {
    cumhist_fixed[i] = cumhist_fixed[i - 1] + histogram_fixed[i];
    cumhist_moving[i] = cumhist_moving[i - 1] + histogram_moving[i];
}

// look-up-table lut[]. For each quantile i of the moving picture search the
// value j of the fixed picture where the quantile is the same as that of moving
int[] lut = new int[256];
j = 0;
for (i = 0; i < 256; ++i) {
    while (cumhist_fixed[j] < cumhist_moving[i]) {
        j++;
    }
    // check whether the distance to the next-lower intensity is even lower, and if so, take this value
    if ((j != 0) && ((cumhist_fixed[j - 1] - cumhist_fixed[i]) < (cumhist_fixed[j] - cumhist_fixed[i]))) {
        lut[i] = (j - 1);
    } else {
        lut[i] = (j);
    }
}

// copy the LUT to the Renderscript side
mLUTAllocation.copyFrom(lut);
mScript.bind_LUT(mLUTAllocation);

// Apply LUT to the destination image
mScript.forEach_apply_histogram(mInAllocation2, mInAllocation2);

/*
 * Copy to bitmap and invalidate image view
 */
//mOutAllocations[index].copyTo(mBitmapsOut[index]);
// copy back to Bitmap in preparation for viewing the results
mInAllocation2.copyTo(mBitmapsOut[index]);
A couple of notes:
In your part of the code I also fixed the LUT allocation size - only 256 entries are needed.
As you can see, I left the computation of the cumulative histograms and the LUT on the Java side. These are rather difficult to parallelize efficiently due to data dependencies and the small scale of the calculations, but given the latter, I don't think it's a problem.
Finally, the Renderscript code. The only non-obvious part is the use of rsAtomicInc() to increment values in the histogram bins; this is necessary because potentially many threads may attempt to increment the same bin concurrently.
#pragma version(1)
#pragma rs java_package_name(com.example.android.basicrenderscript)
#pragma rs_fp_relaxed

int32_t *histogram;
int32_t *LUT;

void __attribute__((kernel)) compute_histogram(uchar4 in)
{
    volatile int32_t *addr = &histogram[in.r];
    rsAtomicInc(addr);
}

uchar4 __attribute__((kernel)) apply_histogram(uchar4 in)
{
    uchar val = LUT[in.r];
    uchar4 result;
    result.r = result.g = result.b = val;
    result.a = in.a;
    return(result);
}
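As a side note, the platform also ships a built-in histogram intrinsic that could replace the hand-written compute_histogram kernel. A sketch (untested; assumes an RGBA input allocation like mInAllocation):
// ScriptIntrinsicHistogram computes one 256-bin histogram per channel.
ScriptIntrinsicHistogram hist = ScriptIntrinsicHistogram.create(mRS, Element.U8_4(mRS));
Allocation histOut = Allocation.createSized(mRS, Element.I32_4(mRS), 256);
hist.setOutput(histOut);
hist.forEach(mInAllocation);
int[] bins = new int[256 * 4];   // interleaved r, g, b, a counts
histOut.copyTo(bins);
// For the grayscale images here, bins[i * 4] holds the red-channel count for value i.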
Hello, I want to convert the colors in an image. I'm using a per-pixel method, but it seems very slow:
src.getPixels(pixels, 0, width, 0, 0, width, height);
// RGB values
int R;
for (int i = 0; i < pixels.length; i++) {
    // Get RGB values as ints
    // Set pixel color
    pixels[i] = color;
}
// Set pixels
src.setPixels(pixels, 0, width, 0, 0, width, height);
My question: is there any way I can do this using OpenCV, i.e. change pixels to the color I want?
I recommend this excellent article on how to access/modify an OpenCV image buffer, in particular "the efficient way":
int i, j;
uchar* p;
for (i = 0; i < nRows; ++i)
{
    p = I.ptr<uchar>(i);        // pointer to the start of row i
    for (j = 0; j < nCols; ++j)
    {
        p[j] = table[p[j]];     // apply the lookup table in place
    }
}
Or "the iterator-safe method":
MatIterator_<Vec3b> it, end;
for (it = I.begin<Vec3b>(), end = I.end<Vec3b>(); it != end; ++it)
{
    (*it)[0] = table[(*it)[0]];
    (*it)[1] = table[(*it)[1]];
    (*it)[2] = table[(*it)[2]];
}
For further optimization, using cv::LUT() (where possible) can give huge speedups, but it is more involved to design/code.
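Since the question is about Android, the same idea is available from OpenCV's Java bindings: build the 256-entry table once and let the library apply it in a single call. A sketch (untested; assumes the image is already an OpenCV Mat, e.g. via Utils.bitmapToMat, and mapValue() is a hypothetical placeholder for your own color mapping):
// Build a 1x256 lookup table, then apply it to every pixel in one call.
Mat lut = new Mat(1, 256, CvType.CV_8UC1);
byte[] table = new byte[256];
for (int v = 0; v < 256; v++) {
    table[v] = (byte) mapValue(v);   // mapValue(): hypothetical value -> value mapping
}
lut.put(0, 0, table);
Mat dst = new Mat();
Core.LUT(src, lut, dst);             // src: 8-bit Mat; the LUT is applied per channel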
You can access pixels by using:
img.at<Type>(y, x);
So, to change a pixel value you can use:
// read color (note that OpenCV stores channels in BGR order, so val[0] is blue)
Vec3b intensity = img.at<Vec3b>(y, x);
// compute a new color, using intensity.val[0] etc. to access the channel values
// write new color
img.at<Vec3b>(y, x) = intensity;
@Boyko mentioned an article from OpenCV concerning fast access to the image pixels if you want to iterate over all pixels. The method I would prefer from this article is the iterator method, as it is only slightly slower than direct pointer access but safer to use.
Example Code:
Mat& AssignNewColors(Mat& img)
{
    // accept only 8-bit (uchar) matrices
    CV_Assert(img.depth() == CV_8U);
    const int channels = img.channels();
    switch (channels)
    {
    // case 1: skipped here
    case 3:
    {
        // Read RGB pixels
        Mat_<Vec3b> _img = img;
        for (int i = 0; i < img.rows; ++i)
            for (int j = 0; j < img.cols; ++j)
            {
                _img(i, j)[0] = computeNewColor(_img(i, j)[0]);
                _img(i, j)[1] = computeNewColor(_img(i, j)[1]);
                _img(i, j)[2] = computeNewColor(_img(i, j)[2]);
            }
        img = _img;
        break;
    }
    }
    return img;
}