Unable to load TensorFlow Lite (tflite) model in Android Studio - android

I have trained a TensorFlow model and converted it to TensorFlow Lite using the code below:
# Convert the model
import tensorflow as tf
import numpy as np

# path to the SavedModel directory is TFLITE_PATH
converter = tf.lite.TFLiteConverter.from_saved_model(TFLITE_PATH)
tflite_model = converter.convert()

# Save the model.
with open('model_1.tflite', 'wb') as f:
    f.write(tflite_model)
Attaching my model_1.tflite model in case you want to investigate.
I have tested it inside my Python environment, where it produces output using the script below:
import numpy as np
import tensorflow as tf

MODEL_PATH = "model_1.tflite"

# Load TFLite model and allocate tensors.
interpreter = tf.lite.Interpreter(model_path=MODEL_PATH)
interpreter.allocate_tensors()

# Get input and output tensors.
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

input_shape = input_details[0]['shape']
input_data = np.array(np.random.random_sample(input_shape), dtype=np.float32)

# Print the required input shape for the model
print(input_shape)
# [  1 320 320   3]

# Provide the input to the interpreter and run inference
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()
output_data = interpreter.get_tensor(output_details[0]['index'])

# Print the output that we get from the model
print(output_data)
[[[0.01350823 0.02189949 0.9918406 0.9821147 ]
[0.33122188 0.11993879 0.9528857 0.90357083]
[0.04370229 0.13977486 0.5076436 0.9069242 ]
[0.36508453 0.00325416 0.63923967 0.1383895 ]
[0.12694997 0.01493323 0.4414968 0.14510964]
[0.21113579 0.00826943 0.5027399 0.13861066]
[0.28166008 0.9081802 0.57174915 1.0400366 ]
[0.38398495 0.9090722 0.6709249 1.0427872 ]
[0.561202 0.32376498 0.8054305 0.6049366 ]
[0.3257156 0.65075576 0.43758994 0.80955625]]]
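For reference, the same interpreter can also report the shape and type of every input and output tensor, which makes it easier to compare my custom model against the pre-trained one and against what the Android side expects. This is just an extra check on top of the script above, not something the app needs:
# Extra check: print every input/output tensor's shape and dtype.
for detail in interpreter.get_input_details():
    print("input :", detail['shape'], detail['dtype'])
for detail in interpreter.get_output_details():
    print("output:", detail['shape'], detail['dtype'])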
But when I load it inside Android Studio, it gives me an error.
Note: when I downloaded a pre-trained TensorFlow model from here (https://github.com/am15h/tflite_flutter_plugin) and called it, it worked fine, but I am unable to load my custom-trained model, and it gives me the error below:
[VERBOSE-2:dart_isolate.cc(1137)] Unhandled exception:
Bad state: failed precondition
#0 checkState (package:quiver/check.dart:73:5)
#1 Tensor.setTo (package:tflite_flutter/src/tensor.dart:150:5)
#2 Interpreter.runForMultipleInputs (package:tflite_flutter/src/interpreter.dart:194:33)
#3 Classifier.predict (package:bewizor/tflite/classifier.dart:139:18)
#4 IsolateUtils.entryPoint (package:bewizor/tflite/tfutils/isolate_utils.dart:45:51)
<asynchronous suspension>
Below is a comparison of the two models viewed in the Netron app.
On the left-hand side is the Netron view of the working pre-trained model; on the right-hand side is the Netron view of the failing custom-trained model.
Can you please help me understand what I am missing here and what I can try to resolve this?
Why does the pre-trained tflite model work, but not my custom model?
Is the error related to my model, or should the way I am calling it inside Android Studio be changed?
Things that I have tried to resolve this:
I tried to convert the model so that it takes uint8 as input. (The idea was to make it look like the model that works fine. I don't think it changes whether the model works, but it does help reduce my model's size.) I used the code below for this:
# Convert the model
converter = tf.lite.TFLiteConverter.from_saved_model(SAVED_MODEL_PATH)  # path to the SavedModel directory
converter.optimizations = [tf.lite.Optimize.DEFAULT]

num_calibration_steps = 100

def representative_dataset_gen():
    for _ in range(num_calibration_steps):
        input_shape = [1, 320, 320, 3]
        input_data = np.array(np.random.random_sample(input_shape), dtype=np.float32)
        # Get sample input data as a numpy array in a method of your choosing.
        yield [input_data]

converter.representative_dataset = representative_dataset_gen
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.uint8  # or tf.int8
converter.inference_output_type = tf.uint8  # or tf.int8
converter.experimental_new_converter = False
quantized_tflite_model = converter.convert()

tflite_model_name = 'model_2_uint_type.tflite'
if tf.__version__.startswith('1.'):
    open(tflite_model_name, "wb").write(quantized_tflite_model)
if tf.__version__.startswith('2.'):
    with open(tflite_model_name, 'wb') as f:
        f.write(quantized_tflite_model)
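To confirm the conversion did what was intended, the quantized file can be loaded back in Python and its input/output types checked before trying it on the device (a small extra check, using the file name from the code above):
# Check the quantized model's input/output types and shapes.
interpreter = tf.lite.Interpreter(model_path=tflite_model_name)
interpreter.allocate_tensors()
print("input :", interpreter.get_input_details()[0]['shape'],
      interpreter.get_input_details()[0]['dtype'])
print("output:", interpreter.get_output_details()[0]['shape'],
      interpreter.get_output_details()[0]['dtype'])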
I am also sharing the .dart file we use to call the model inside Android Studio:
import 'dart:math';
import 'dart:ui';

import 'package:bewizor/tflite/recognition.dart';
import 'package:flutter/material.dart';
import 'package:image/image.dart' as imageLib;
import 'package:tflite_flutter/tflite_flutter.dart';
import 'package:tflite_flutter_helper/tflite_flutter_helper.dart';

import 'stats.dart';

/// Classifier
class Classifier {
  /// Instance of Interpreter
  Interpreter _interpreter;

  /// Labels file loaded as list
  List<String> _labels;

  // static const String MODEL_FILE_NAME = "tfModels/detect.tflite";
  static const String MODEL_FILE_NAME = "tfModels/detect_new.tflite";
  static const String LABEL_FILE_NAME = "tfModels/label_map.pbtxt";

  /// Input size of image (height = width = 300)
  static const int INPUT_SIZE = 300;

  /// Result score threshold
  static const double THRESHOLD = 0.5;

  /// [ImageProcessor] used to pre-process the image
  ImageProcessor imageProcessor;

  /// Padding the image to transform into square
  int padSize;

  /// Shapes of output tensors
  List<List<int>> _outputShapes;

  /// Types of output tensors
  List<TfLiteType> _outputTypes;

  /// Number of results to show
  static const int NUM_RESULTS = 5;

  Classifier({
    Interpreter interpreter,
    List<String> labels,
  }) {
    loadModel(interpreter: interpreter);
    loadLabels(labels: labels);
  }
  /// Loads interpreter from asset
  void loadModel({Interpreter interpreter}) async {
    try {
      _interpreter = interpreter ??
          await Interpreter.fromAsset(
            MODEL_FILE_NAME,
            options: InterpreterOptions()..threads = 4,
          );
      var outputTensors = _interpreter.getOutputTensors();
      _outputShapes = [];
      _outputTypes = [];
      outputTensors.forEach((tensor) {
        _outputShapes.add(tensor.shape);
        _outputTypes.add(tensor.type);
      });
    } catch (e) {
      print("Error while creating interpreter: $e");
    }
  }

  /// Loads labels from assets
  void loadLabels({List<String> labels}) async {
    try {
      _labels =
          labels ?? await FileUtil.loadLabels("assets/" + LABEL_FILE_NAME);
    } catch (e) {
      print("Error while loading labels: $e");
    }
  }

  /// Pre-process the image
  TensorImage getProcessedImage(TensorImage inputImage) {
    padSize = max(inputImage.height, inputImage.width);
    if (imageProcessor == null) {
      imageProcessor = ImageProcessorBuilder()
          .add(ResizeWithCropOrPadOp(padSize, padSize))
          .add(ResizeOp(INPUT_SIZE, INPUT_SIZE, ResizeMethod.BILINEAR))
          .build();
    }
    inputImage = imageProcessor.process(inputImage);
    return inputImage;
  }
  /// Runs object detection on the input image
  Map<String, dynamic> predict(imageLib.Image image) {
    var predictStartTime = DateTime.now().millisecondsSinceEpoch;

    if (_interpreter == null) {
      print("Interpreter not initialized");
      return null;
    }

    var preProcessStart = DateTime.now().millisecondsSinceEpoch;

    // Create TensorImage from image
    TensorImage inputImage = TensorImage.fromImage(image);

    // Pre-process TensorImage
    inputImage = getProcessedImage(inputImage);

    var preProcessElapsedTime =
        DateTime.now().millisecondsSinceEpoch - preProcessStart;

    // TensorBuffers for output tensors
    TensorBuffer outputLocations = TensorBufferFloat(_outputShapes[0]);
    TensorBuffer outputClasses = TensorBufferFloat(_outputShapes[1]);
    TensorBuffer outputScores = TensorBufferFloat(_outputShapes[2]);
    TensorBuffer numLocations = TensorBufferFloat(_outputShapes[3]);

    // Inputs object for runForMultipleInputs
    // Use [TensorImage.buffer] or [TensorBuffer.buffer] to pass by reference
    List<Object> inputs = [inputImage.buffer];

    // Outputs map
    Map<int, Object> outputs = {
      0: outputLocations.buffer,
      1: outputClasses.buffer,
      2: outputScores.buffer,
      3: numLocations.buffer,
    };

    var inferenceTimeStart = DateTime.now().millisecondsSinceEpoch;

    // Run inference
    _interpreter.runForMultipleInputs(inputs, outputs);

    var inferenceTimeElapsed =
        DateTime.now().millisecondsSinceEpoch - inferenceTimeStart;

    // Maximum number of results to show
    int resultsCount = min(NUM_RESULTS, numLocations.getIntValue(0));

    // Using labelOffset = 1 as ??? at index 0
    int labelOffset = 1;

    // Using bounding box utils for easy conversion of tensorbuffer to List<Rect>
    List<Rect> locations = BoundingBoxUtils.convert(
      tensor: outputLocations,
      valueIndex: [1, 0, 3, 2],
      boundingBoxAxis: 2,
      boundingBoxType: BoundingBoxType.BOUNDARIES,
      coordinateType: CoordinateType.RATIO,
      height: INPUT_SIZE,
      width: INPUT_SIZE,
    );

    List<Recognition> recognitions = [];

    for (int i = 0; i < resultsCount; i++) {
      // Prediction score
      var score = outputScores.getDoubleValue(i);

      // Label string
      var labelIndex = outputClasses.getIntValue(i) + labelOffset;
      var label = _labels.elementAt(labelIndex);

      if (score > THRESHOLD) {
        // [locations] corresponds to the image size 300 x 300
        // inverseTransformRect transforms it back to our [inputImage]
        Rect transformedRect = imageProcessor.inverseTransformRect(
            locations[i], image.height, image.width);

        recognitions.add(
          Recognition(i, label, score, transformedRect),
        );
      }
    }

    var predictElapsedTime =
        DateTime.now().millisecondsSinceEpoch - predictStartTime;

    return {
      "recognitions": recognitions,
      "stats": Stats(
          totalPredictTime: predictElapsedTime,
          inferenceTime: inferenceTimeElapsed,
          preProcessingTime: preProcessElapsedTime)
    };
  }

  /// Gets the interpreter instance
  Interpreter get interpreter => _interpreter;

  /// Gets the loaded labels
  List<String> get labels => _labels;
}
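The stack trace points at a checkState inside Tensor.setTo, which as far as I understand guards the size of the buffer handed to the interpreter against the size of the model's input tensor. So it may be worth comparing the byte length the model expects with the byte length of inputImage.buffer that the classifier builds (note that the model above reports a 320x320 input while INPUT_SIZE in the Dart code is 300). A rough Python sketch of mine for the model side:
import numpy as np
import tensorflow as tf

# Report the exact byte length the model's input tensor expects,
# to compare with inputImage.buffer.lengthInBytes on the Dart side.
interpreter = tf.lite.Interpreter(model_path="model_1.tflite")
interpreter.allocate_tensors()

inp = interpreter.get_input_details()[0]
expected_bytes = int(np.prod(inp['shape'])) * np.dtype(inp['dtype']).itemsize
print(inp['shape'], inp['dtype'], "->", expected_bytes, "bytes expected")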

Related

How to use Edit Images in the OpenAI Kotlin client

I am using the OpenAI client with Android Kotlin (implementation com.aallam.openai:openai-client:2.1.3).
Is the path wrong or is the library missing?
val imgURL = Uri.parse("android.resource://" + packageName + "/" + R.drawable.face3)

try {
    val images = openAI.image(
        edit = ImageEditURL( // or 'ImageEditJSON'
            image = FilePath(imgURL.toString()), // <-
            mask = FilePath(imgURL.toString()), // <-
            prompt = "a sunlit indoor lounge area with a pool containing a flamingo",
            n = 1,
            size = ImageSize.is1024x1024
        )
    )
} catch (e: Exception) {
    println("error is here: " + e)
}
As can be seen, it wants a path from me, but it does not succeed even though I give the path.
I would suggest updating to version 3 of openai-kotlin and using Okio's Source to provide files.
Assuming the images are in the res/raw folder, your example would be something like this:
val request = ImageEdit(
    image = FileSource(
        name = "image.png",
        source = resources.openRawResource(R.raw.image).source()
    ),
    mask = FileSource(
        name = "mask.png",
        source = resources.openRawResource(R.raw.mask).source()
    ),
    prompt = "a sunlit indoor lounge area with a pool containing a flamingo",
    n = 1,
    size = ImageSize.is1024x1024,
)

val response = client.imageURL(request)

Issue in invoking tflite model in Android

I am trying to use an already-trained model as a tflite model in Android, but I get the error below when executing the tflite model for output:
A/libc: Fatal signal 8 (SIGFPE), code 1 (FPE_INTDIV), fault addr 0xb7bd4543 in tid 12009 (ing.tensorflow3), pid 12009 (ing.tensorflow3)
Below is the code:
// calling
bitmap = getBitmapFromAsset("aval1.png");
imageViewInput.setImageBitmap(bitmap);
testFunctionInference(bitmap);

// method body
public void testFunctionInference(Bitmap strName) {
    try {
        ImageProcessor imageProcessor =
                new ImageProcessor.Builder()
                        .add(new ResizeOp(1, 1, ResizeOp.ResizeMethod.BILINEAR))
                        .build();
        Log.w("testFunc:", "after image processor");

        // Create a TensorImage object. This creates the tensor of the corresponding
        // tensor type (uint8 in this case) that the TensorFlow Lite interpreter needs.
        TensorImage tensorImage = new TensorImage(DataType.FLOAT32);

        // Analysis code for every frame
        // Preprocess the image
        tensorImage.load(strName);
        Log.w("testFunc:", "265 L no.");
        tensorImage = imageProcessor.process(tensorImage);
        Log.w("testFunc:", "before inputBuffer0");

        // Creates inputs for reference.
        TensorBuffer inputFeature0 = TensorBuffer.createFixedSize(new int[]{1, 640 * 480 * 3}, DataType.FLOAT32);
        MappedByteBuffer tfliteModel = FileUtil.loadMappedFile(this, "converted_model.tflite");
        Interpreter tflite = new Interpreter(tfliteModel);
        Object a = tensorImage.getBuffer();
        Log.w("testFunc:", "278");
        tflite.run(tensorImage.getBuffer(), inputFeature0.getBuffer());
    } catch (IOException e) {
        // TODO Handle the exception
    }
}
Can anyone please assist in getting this issue resolved?
To get a detailed log, you can use the debug version of the nightly snapshot:
https://www.tensorflow.org/lite/guide/android#use_the_tensorflow_lite_aar_from_mavencentral
dependencies {
    implementation 'org.tensorflow:tensorflow-lite:0.0.0-nightly-debug-SNAPSHOT'
}
But maybe it is better to check whether you provided the inputs correctly: since you used DataType.FLOAT32, your model should have float32 inputs.
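For example, the expected input type and shape can be checked with the Python interpreter before wiring up the Android side (a quick sketch, assuming the same converted_model.tflite file is available on the desktop):
import tensorflow as tf

# Inspect what the model actually expects before feeding it from Java.
interpreter = tf.lite.Interpreter(model_path="converted_model.tflite")
interpreter.allocate_tensors()
detail = interpreter.get_input_details()[0]
print(detail['shape'], detail['dtype'])  # compare against the TensorImage/TensorBuffer types used above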

ZXing BarcodeReader Doesn't Decode Some Barcodes

I am developing a barcode reader with Xamarin.Forms. And I'm trying to scan the image on Android device.
First I select the image from the gallery with Xamarin.Essentials MediaPicker and from the path of this image I get an RGBLuminance with the Dependency class.
Then I am trying to decode this RGBLuminance with the Decode() method of the ZXing BarcodeReaderGeneric class.
The application successfully decodes the barcodes in some images. However, sometimes it returns null when decoding. I might have made a mistake while converting the image to Bitmap or creating the RGBLuminanceSource.
I would like to find out what a class that can decode color, black-and-white, and grayscale images should look like.
public RGBLuminanceSource GetRGBLuminanceSource(string imagePath)
{
    if (File.Exists(imagePath))
    {
        Android.Graphics.Bitmap bitmap = BitmapFactory.DecodeFile(imagePath);

        List<byte> rgbBytesList = new List<byte>();
        for (int y = 0; y < bitmap.Height; y++)
        {
            for (int x = 0; x < bitmap.Width; x++)
            {
                var c = new Android.Graphics.Color(bitmap.GetPixel(x, y));
                rgbBytesList.AddRange(new[] { c.A, c.R, c.G, c.B });
            }
        }
        byte[] rgbBytes = rgbBytesList.ToArray();

        return new RGBLuminanceSource(rgbBytes, bitmap.Width, bitmap.Height, RGBLuminanceSource.BitmapFormat.RGB32);
    }
    return null;
}
Command in the ViewModel class:
public ICommand PickCommand => new Command(PickImage);

private async void PickImage()
{
    var pickResult = await MediaPicker.PickPhotoAsync(new MediaPickerOptions
    {
        Title = "Select a barcode."
    });

    var path = pickResult.FullPath;
    var RGBLuminance = DependencyService.Get<ILuminance>().GetRGBLuminanceSource(path);
    var reader = new BarcodeReaderGeneric();
    var result = reader.Decode(RGBLuminance);
}
I am using this code in Xamarin.Android and I never had issues with it:
var scanner = new MobileBarcodeScanner();
var result = await scanner.Scan(_context, MobileBarcodeScanningOptions.Default);
It opens the camera, the user takes a picture of the barcode, and result.Text contains the scanned barcode.

Xamarin multiplatform - Video from jpeg sequence

I want to know how I can display video from jpegs in Xamarin (all platforms).
My jpegs are streamed over an HTTP client stream sent by a popular video surveillance management software.
My jpegs are in the form of byte[] and I get about 10 jpegs/second. This format is imposed.
I tried rapidly changing the Source on an Image, but it results in severe flickering on Android. It seems to work on Windows Phone, but with poor performance.
How can I create a video player for this? Unless I am wrong, the existing components cannot do this.
Best,
Thank you Jason! Works great, very fluid rendering!!
Simply add the SkiaSharp.Views.Forms package with NuGet to the project and voila!
Here is what that would look like in code (shared project):
// Content page initialization
private void InitUI()
{
    Title = "Xamavideo";

    var button = new Button
    {
        Text = "Connect!"
    };

    Label label = new Label
    {
        Text = ""
    };

    var scroll = new ScrollView();
    scroll.BackgroundColor = Color.Black;
    Content = scroll;

    var stack = new StackLayout
    {
        Padding = 40,
        Spacing = 10
    };

    // Add a SKCanvasView item to the stack
    var videoCanvas = new SKCanvasView
    {
        HeightRequest = 400,
        WidthRequest = 600,
    };
    videoCanvas.PaintSurface += OnCanvasViewPaintSurface;
    stack.Children.Add(videoCanvas);
}

// Create the event handler
void OnCanvasViewPaintSurface(object sender, SKPaintSurfaceEventArgs args)
{
    SKImageInfo info = args.Info;
    SKSurface surface = args.Surface;

    // using (var stream = new SKManagedStream(fileStream))
    if (lastFrame == null) return;

    using (var canvas = surface.Canvas)
    // use SKBitmap.Decode to decode the byte[] in jpeg format
    using (var bitmap = SKBitmap.Decode(lastFrame))
    using (var paint = new SKPaint())
    {
        // clear the canvas / fill with black
        canvas.DrawColor(SKColors.Black);
        canvas.DrawBitmap(bitmap, SKRect.Create(640, 480), paint);
    }
}

void UpdateFrame(VideoClient client)
{
    // Use this to update the canvas:
    byte[] lastFrame = client.imageBytes;
    videoCanvas.InvalidateSurface();
}

Cocos2d-js: Error Message: "Invalid Native Object" using runAction()

I get some "Invalid Native Object" errors on Android (Nexus 5, Android 4.4.4). In the browser version there are no errors.
I put the code part of my app into a fresh helloworld app. Line 59, where the error appears, is marked below.
This is the logcat message from ADB:
D/cocos2d-x debug info(32165): jsb: ERROR: File /Applications/MAMP/htdocs/test_actions/frameworks/runtime-src/proj.android/../../js-bindings/bindings/auto/jsb_cocos2dx_auto.cpp: Line: 3955, Function: js_cocos2dx_Node_runAction
D/cocos2d-x debug info(32165): Invalid Native Object
D/cocos2d-x debug info(32165): JS: assets/src/app.js:59:Error: Invalid Native Object
D/cocos2d-x debug info(32165):
This is the code.
var HelloWorldLayer = cc.Layer.extend({
    sprite: null,
    ctor: function () {
        var self = this;

        //////////////////////////////
        // 1. super init first
        this._super();

        /////////////////////////////
        // 2. add a menu item with "X" image, which is clicked to quit the program
        //    you may modify it.

        // ask director the window size
        var size = cc.director.getWinSize();

        // Load sprite frames to frame cache, add texture node
        cc.spriteFrameCache.addSpriteFrames(res.murbiks_plist);
        var murbiksImages = cc.SpriteBatchNode.create(cc.textureCache.addImage(res.murbiks_png));
        self.addChild(murbiksImages);

        var anims = {};
        var loadFrames = function (name, cnt) {
            var frames = [];
            for (var i = 1; i <= cnt; i++) {
                str = name + (i < 10 ? ("0" + i) : i);
                frames.push(cc.spriteFrameCache.getSpriteFrame(str));
            }
            var anim = cc.Animation.create(frames, 0.06);
            anim.retain();
            anims[name] = cc.animate(anim);
        }

        loadFrames("mostafa_fly", 9);
        loadFrames("mostafa_land", 7);

        var mostafa = cc.Sprite.create(res.murbiks_single_png);
        mostafa.attr({
            x: 0,
            y: 0,
            scale: 1.9,
            rotation: 0
        });
        mostafa.retain();
        self.addChild(mostafa, 5);

        var animAction = mostafa.runAction(cc.repeatForever(anims.mostafa_fly)),
            bezierMostafa = [
                cc.p(0, 0),
                cc.p(200, 520),
                cc.p(500, 220)
            ];

        var mostafaAction = mostafa.runAction(
            cc.sequence(
                cc.bezierTo(2.5, bezierMostafa),
                cc.callFunc(function () {
                    self.stopAction(animAction);
                    animAction = mostafa.runAction(anims.mostafa_land); // LINE 59, INVALID NATIVE OBJECT
                })
            )
        );

        return true;
    }
});
What could that be?
murbiksImages should call retain() too.
Generally, "Invalid Native Object" means some object has been deleted in native code, which usually happens when the object didn't call retain().
Also, SpriteBatchNode is deprecated in cocos2d-x 3.0.
You'd better not use it; it will be deprecated in cocos2d-js too.
