I have an audio classification model trained with TensorFlow and I want to integrate it with an Android application.
How can I reshape a 1D float array into a 1 * 236 * 40 input?
I tried the following code but it didn't work:
`
var byteBuffer: ByteBuffer = ByteBuffer.allocate(4 * 236 * 40)
for (element in meanMFCCValues) {
    val valArray = element
    val inpShapeDim: IntArray = intArrayOf(1, meanMFCCValues[0].size, 1)
    val valInTnsrBuffer: TensorBuffer = TensorBuffer.createDynamic(imageDataType)
    valInTnsrBuffer.loadArray(valArray, inpShapeDim)
    val valInBuffer: ByteBuffer = valInTnsrBuffer.buffer
    byteBuffer.put(valInBuffer)
}
byteBuffer.rewind()
`
Is there any way to help me understand the conversion of the model input?
The error is: java.lang.IllegalArgumentException: The size of the array to be loaded does not match the specified shape.
Thanks for your help
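For comparison, a minimal sketch of one way to build such an input, assuming the model expects a FLOAT32 tensor of shape [1, 236, 40] and the MFCC values flatten to exactly 236 * 40 floats; toModelInput and flatValues are illustrative names, not anything from the question:
`
import org.tensorflow.lite.DataType
import org.tensorflow.lite.support.tensorbuffer.TensorBuffer
import java.nio.ByteBuffer
import java.nio.ByteOrder

// Sketch: pack a flat FloatArray into a single buffer with the full
// input shape [1, 236, 40], instead of loading row-shaped tensors.
fun toModelInput(flatValues: FloatArray): TensorBuffer {
    require(flatValues.size == 236 * 40) {
        "Expected ${236 * 40} values, got ${flatValues.size}"
    }
    // 4 bytes per float; TFLite expects native byte order.
    val byteBuffer = ByteBuffer.allocateDirect(4 * 1 * 236 * 40)
        .order(ByteOrder.nativeOrder())
    for (v in flatValues) byteBuffer.putFloat(v)
    byteBuffer.rewind()

    // Create the buffer with the full input shape, then load the bytes.
    val input = TensorBuffer.createFixedSize(intArrayOf(1, 236, 40), DataType.FLOAT32)
    input.loadBuffer(byteBuffer)
    return input
}
`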
Related
I am trying to get a text classification model to work in Android. The model was trained using the Python code below and then converted into a TFLite file. I am trying to figure out what the input shape should be. The input text is processed on a server into a bag-of-words format, which is passed into the model. Is there any guidance on what I can do to get it to work?
`
import json
import pickle
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation, Dropout
from tensorflow.keras.optimizers import SGD
import random
import nltk
from nltk.stem import WordNetLemmatizer
from keras.models import load_model
# create an object of WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
# importing the GL Bot corpus file for pre-processing
words=[]
classes = []
documents = []
ignore_words = ['?', '!']
data_file = open("higherCompText.json").read()
intents = json.loads(data_file,strict=False,)
# preprocessing the json data
# tokenization
nltk.download('punkt')
nltk.download('wordnet')
for intent in intents['intents']:
    for pattern in intent['patterns']:
        # tokenize each word
        w = nltk.word_tokenize(pattern)
        words.extend(w)
        # add documents in the corpus
        documents.append((w, intent['tag']))
        # add to our classes list
        if intent['tag'] not in classes:
            classes.append(intent['tag'])
# lemmatize, lower each word and remove duplicates
words = [lemmatizer.lemmatize(w.lower()) for w in words if w not in ignore_words]
words = sorted(list(set(words)))
# sort classes
classes = sorted(list(set(classes)))
# documents = combination between patterns and intents
print (len(documents), "documents")
# classes = intents
print (len(classes), "classes", classes)
# words = all words, vocabulary
print (len(words), "unique lemmatized words", words)
# creating a pickle file to store the Python objects which we will use while predicting
pickle.dump(words,open('words.pkl','wb'))
pickle.dump(classes,open('classes.pkl','wb'))
# create our training data
training = []
# create an empty array for our output
output_empty = [0] * len(classes)
# training set, bag of words for each sentence
for doc in documents:
    # initialize our bag of words
    bag = []
    # list of tokenized words for the pattern
    pattern_words = doc[0]
    # lemmatize each word - create base word, in attempt to represent related words
    pattern_words = [lemmatizer.lemmatize(word.lower()) for word in pattern_words]
    # create our bag of words array with 1, if word match found in current pattern
    for w in words:
        bag.append(1) if w in pattern_words else bag.append(0)
    # output is a '0' for each tag and '1' for current tag (for each pattern)
    output_row = list(output_empty)
    output_row[classes.index(doc[1])] = 1
    training.append([bag, output_row])
# shuffle features and converting it into numpy arrays
random.shuffle(training)
training = np.array(training, dtype=object)  # rows are ragged (bag vs. output), so keep an object array
# create train and test lists
train_x = list(training[:,0])
print(len(train_x))
train_y = list(training[:,1])
print("Training data created")
# Create NN model to predict the responses
model = Sequential()
model.add(Dense(128, input_shape=(len(train_x[0]),), activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(len(train_y[0]), activation='softmax'))
# Compile model. Stochastic gradient descent with Nesterov accelerated gradient gives good results for this model
sgd = SGD(learning_rate=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
#fitting and saving the model
hist = model.fit(np.array(train_x), np.array(train_y), epochs=200, batch_size=5, verbose=1)
model.save('chatbot.h5')  # we will reuse this model later; save() only needs the file path
print("\n")
print("*"*50)
print("\nModel Created Successfully!")
`
`
fun toTheArray(someString: String): IntArray {
    var someArray = someString.removeSurrounding("[", "]")
        .takeIf(String::isNotEmpty) // this handles the case of "[]"
        ?.split(", ")
        ?: emptyList() // in the case of "[]"
    val intArray: MutableList<Int> = mutableListOf()
    Log.d("TRYING", "${someArray[0].split(",")}")
    someArray = someArray[0].split(",")
    Log.d("I AM TRYING", "$someArray")
    for (each in someArray) {
        Log.d("I AM TRYING", "$each")
        intArray.add(each.toInt())
    }
    return intArray.toIntArray()
}
fun classifyTag(intArray: IntArray, context: Context) {
    Log.d("array?", "${intArray.size}")
    // 4 bytes per float; sizing the buffer from the array avoids the
    // hard-coded 39122, which doesn't match the tensor's byte size
    val byteBuffer = ByteBuffer.allocate(4 * intArray.size)
    for (each in intArray) {
        byteBuffer.putFloat(each.toFloat())
    }
    byteBuffer.rewind()
    val model = Chatbot.newInstance(context)
    // createFrom expects another TensorBuffer, not a ByteBuffer; create a
    // fixed-size buffer matching the model input and load the bytes into it
    val inputFeature0 = TensorBuffer.createFixedSize(intArrayOf(1, intArray.size), DataType.FLOAT32)
    inputFeature0.loadBuffer(byteBuffer)
    Log.d("shape", inputFeature0.shape.contentToString())
    // Runs model inference and gets result.
    val outputs = model.process(inputFeature0).outputFeature0AsTensorBuffer
    Log.d("Disaster", "$outputs")
    // Releases model resources if no longer used.
    model.close()
}
`
I've tried everything I can think of and can't seem to find any answers online, as the examples I've found all use image classification models.
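For what it's worth, a minimal sketch of the approach that usually works here, assuming the model input is a [1, len(words)] FLOAT32 vector (which is what Dense(128, input_shape=(len(train_x[0]),)) converts to). It uses the plain Interpreter API rather than the generated Chatbot wrapper, and classifyBagOfWords and its parameters are illustrative names:
`
import org.tensorflow.lite.DataType
import org.tensorflow.lite.Interpreter
import org.tensorflow.lite.support.tensorbuffer.TensorBuffer

// Sketch: ask the interpreter for its own input shape instead of guessing,
// then load the bag-of-words vector as FLOAT32 of exactly that size.
fun classifyBagOfWords(interpreter: Interpreter, bagOfWords: IntArray): FloatArray {
    val inputShape = interpreter.getInputTensor(0).shape() // e.g. [1, len(words)]
    val vocabSize = inputShape[1]
    require(bagOfWords.size == vocabSize) {
        "Model expects $vocabSize features, got ${bagOfWords.size}"
    }

    val input = TensorBuffer.createFixedSize(inputShape, DataType.FLOAT32)
    input.loadArray(FloatArray(vocabSize) { bagOfWords[it].toFloat() })

    val outputShape = interpreter.getOutputTensor(0).shape() // e.g. [1, numClasses]
    val output = TensorBuffer.createFixedSize(outputShape, DataType.FLOAT32)
    interpreter.run(input.buffer.apply { rewind() }, output.buffer.apply { rewind() })
    return output.floatArray // softmax probabilities, one per class
}
`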
I am writing an instrument tuner app (for now starting with guitar). For pitch detection I'm using TarsosDSP. It does detect the pitch correctly, but it is quite shaky: for example, I'll hit the (correctly tuned) D string on my guitar and it correctly recognizes it as a D, but after a short moment it cycles through a bunch of random notes very quickly. I'm not sure how best to solve this. Here is the code responsible for detecting the pitch:
val dispatcher: AudioDispatcher = AudioDispatcherFactory.fromDefaultMicrophone(44100, 4096, 3072)
val pdh = PitchDetectionHandler { res, _ ->
    val pitchInHz: Float = res.pitch
    runOnUiThread { processing.closestNote(pitchInHz) }
}
val pitchProcessor: AudioProcessor =
    PitchProcessor(PitchProcessor.PitchEstimationAlgorithm.FFT_YIN, 44100F, 4096, pdh)
dispatcher.addAudioProcessor(pitchProcessor)
val audioThread = Thread(dispatcher, "Audio Thread")
audioThread.start()
I have then written a function which is supposed to detect the closest note to the current pitch. In addition, I tried to make the results less shaky by writing a function which finds the closest pitch in Hz and passing that result to the closestNote function, thinking I might get fewer different results this way (even though it should be the same, and I don't notice any difference). Here are the two functions:
...
private val allNotes = arrayOf("A", "A#", "B", "C", "C#", "D", "D#", "E", "F", "F#", "G", "G#")
private val concertPitch = 440
...
/** detects the closest note to A = 440 Hz with the equal temperament formula:
 * pitch(i) = pitch(0) * 2^(i/12)
 * therefore the formula to derive the interval between two pitches is:
 * i = 12 * log2(pitch(i) / pitch(0))
 */
fun closestNote(pitchInHz: Float) {
    (myCallback as MainActivity).noteSize() // adjusts the font size of the note
    if (pitchInHz != -1F) {
        val roundHz = closestPitch(pitchInHz)
        val i = (round(log2(roundHz / concertPitch) * 12)).toInt()
        val closestNote = allNotes[(i % 12 + 12) % 12]
        myCallback?.updateNote(closestNote) // updates note text
    }
}

private fun closestPitch(pitchInHz: Float): Float {
    val i = (round(log2(pitchInHz / concertPitch) * 12)).toInt()
    val closestPitch = concertPitch * 2.toDouble().pow(i.toDouble() / 12)
    return closestPitch.toFloat()
}
Any ideas how I can get more consistent results? Thanks!
Solved it myself: TarsosDSP calculates a probability for every detected pitch. I set my closestNote function to only update the text if the probability is > 0.91. I found that value to offer stability (the text stops changing after hitting a string) while still correctly recognizing the note without having to hit the string multiple times or too hard; I also tested it with an unplugged, non-hollow-body electric guitar.
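For reference, a minimal sketch of what that gating can look like inside the handler from the question (0.91 is the threshold described above; processing.closestNote is the existing callback):
`
// Sketch: only forward pitch estimates the detector is confident about.
// PitchDetectionResult exposes the probability TarsosDSP computes per estimate.
val pdh = PitchDetectionHandler { res, _ ->
    val pitchInHz: Float = res.pitch
    if (pitchInHz != -1f && res.probability > 0.91f) {
        runOnUiThread { processing.closestNote(pitchInHz) }
    }
}
`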
I'm trying to use the indexed access operator as explained in the following link:
Indexed access operator
It is written there that it works exactly like set and get, but the code doesn't compile when I try, for example, the following:
var vv: Array<Int> = Array(6) { 5 * it }
vv[1, 4] = 5
that is exactly like the pattern in the link:
a[i, j] = b → a.set(i, j, b)
After reading the comments I understand that a[i, j] means setting a value in a two-dimensional array, but the following code still doesn't work:
val rows = 3
val cols = 4
var arr = Array(rows) { IntArray(cols) }
arr[2,3] = 5
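The missing piece is that a[i, j] only compiles for types that declare a set operator with that arity; Array<IntArray> doesn't, so a nested array is indexed as arr[2][3]. A minimal sketch of both options (Grid is a hypothetical wrapper, not a standard library class):
`
// Grid declares the two-argument operators itself, so grid[i, j] compiles.
class Grid(rows: Int, cols: Int) {
    private val data = Array(rows) { IntArray(cols) }
    operator fun get(i: Int, j: Int): Int = data[i][j]
    operator fun set(i: Int, j: Int, value: Int) { data[i][j] = value }
}

fun main() {
    // Array<IntArray> has no two-argument set operator, so index it twice:
    val arr = Array(3) { IntArray(4) }
    arr[2][3] = 5

    // The bracket form works once the type declares the operator:
    val grid = Grid(3, 4)
    grid[2, 3] = 5 // translates to grid.set(2, 3, 5)
}
`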
In Android-Kotlin I am getting a float number from the backend (for example num = 10000000.47).
When I try to String.format it and set that number on my balanceTextview, it is shown with an exponent (something like 1.0E10).
I want to show the number normally, without an exponent and with 2 decimals (without precision loss!).
I tried to use DecimalFormat("#.##") but it didn't help. Maybe I'm doing something wrong?
num = 10000000.47f
val dec = DecimalFormat("#.##")
var result = dec.format(num)
my result is: 10000000
It loses my decimal places.
The issue is your number type. According to the documentation:
For variables initialized with fractional numbers, the compiler infers the Double type. To explicitly specify the Float type for a value, add the suffix f or F. If such a value contains more than 6-7 decimal digits, it will be rounded.
With an example that shows how information may get lost:
val pi = 3.14 // Double
val e = 2.7182818284 // Double
val eFloat = 2.7182818284f // Float, actual value is 2.7182817
If the value is specified as Double instead of Float, i.e.
val num = 10000000.47
instead of
val num = 10000000.47f
then your approach works as expected, but could be shortened to:
"%.2f".format(num)
(note that the shorter version will also print "100" as "100.00" which is different from your approach but potentially still desired behaviour)
If you receive a Float from the backend then the information is already lost on your side. Otherwise you should be able to fix the issue by improved parsing.
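As a sketch of the parsing side, assuming the raw value can be read as a string (or directly as a Double) before it is ever narrowed to Float:
`
import java.math.BigDecimal

fun main() {
    // "10000000.47" stands in for whatever the backend actually sends.
    val raw = "10000000.47"

    // Double keeps the two decimals here; Float would already have lost them.
    val asDouble = raw.toDouble()
    println("%.2f".format(asDouble)) // 10000000.47

    // BigDecimal sidesteps binary floating-point rounding entirely.
    println(BigDecimal(raw).setScale(2).toPlainString()) // 10000000.47
}
`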
The extension function format is only available in the JVM. In Kotlin/native, you can use this instead:
import kotlin.math.abs
import kotlin.math.floor
import kotlin.math.pow
import kotlin.math.roundToInt

fun Float.toPrecision(precision: Int) =
    this.toDouble().toPrecision(precision)

fun Double.toPrecision(precision: Int) =
    if (precision < 1) {
        "${this.roundToInt()}"
    } else {
        val p = 10.0.pow(precision)
        val v = (abs(this) * p).roundToInt()
        val i = floor(v / p)
        var f = "${floor(v - (i * p)).toInt()}"
        while (f.length < precision) f = "0$f"
        val s = if (this < 0) "-" else ""
        "$s${i.toInt()}.$f"
    }
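Hypothetical usage of the extension above (values chosen for illustration):
`
println(10000000.47.toPrecision(2)) // prints 10000000.47
println(3.14159f.toPrecision(3))    // prints 3.142
`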
I am using the Object Detection API to train on my custom data for a 2-class problem.
I am using SSD MobileNet v2. I am converting the model to TFLite and trying to execute it with the Python interpreter.
The values of score and class are somewhat confusing to me and I am unable to make a valid justification for them. I am getting the following values for score:
[[ 0.9998122 0.2795332 0.7827836 1.8154384 -1.1171713 0.152002
-0.90076405 1.6943774 -1.1098632 0.6275915 ]]
I am getting the following values for class:
[[ 0. 1.742706 0.5762139 -0.23641224 -2.1639721 -0.6644413
-0.60925585 0.5485272 -0.9775026 1.4633082 ]]
How can I get a score greater than 1 or less than 0, e.g. -1.1098632 or 1.6943774?
Also, the classes should ideally be integers (1 or 2) as it is a 2-class object detection problem.
I am using the following code
import numpy as np
import tensorflow as tf
import cv2
# Load TFLite model and allocate tensors.
interpreter = tf.contrib.lite.Interpreter(model_path="C://Users//Admin//Downloads//tflitenew//detect.tflite")
interpreter.allocate_tensors()
# Get input and output tensors.
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
print(input_details)
print(output_details)
input_shape = input_details[0]['shape']
print(input_shape)
# change the following line to feed into your own data.
#input_data = np.array(np.random.random_sample(input_shape), dtype=np.float32)
input_data = cv2.imread("C:/Users/Admin/Pictures/fire2.jpg")
#input_data = cv2.imread("C:/Users/Admin/Pictures/images4.jpg")
#input_data = cv2.imread("C:\\Users\\Admin\\Downloads\\FlareModels\\lessimages\\video5_image_178.jpg")
input_data = cv2.resize(input_data, (300, 300))
input_data = np.expand_dims(input_data, axis=0)
input_data = (2.0 / 255.0) * input_data - 1.0
input_data=input_data.astype(np.float32)
interpreter.reset_all_variables()
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()
output_data_scores = []
output_data_scores = interpreter.get_tensor(output_details[2]['index'])
print(output_data_scores)
output_data_class = []
output_data_class = interpreter.get_tensor(output_details[1]['index'])
print(output_data_class)
Looks like the problem is caused by a wrong input image channel order. OpenCV's imread reads images in BGR format. You can try adding
input_data = cv2.cvtColor(input_data, cv2.COLOR_BGR2RGB)
to get an RGB image and then see if the result is reasonable.
Reference: ref
The output of the TFLite model requires post-processing. The model returns a fixed number of detections (here, 10) by default. Use the output tensor at index 3 to get the number of valid boxes, num_det (i.e. the top num_det detections are valid; ignore the rest).
num_det = int(interpreter.get_tensor(output_details[3]['index']))
boxes = interpreter.get_tensor(output_details[0]['index'])[0][:num_det]
classes = interpreter.get_tensor(output_details[1]['index'])[0][:num_det]
scores = interpreter.get_tensor(output_details[2]['index'])[0][:num_det]
Next, the box coordinates need to be scaled to the image size and adjusted so that the box is within the image (some visualization APIs require this).
import pandas as pd  # img_height / img_width below are the original image's dimensions

df = pd.DataFrame(boxes)
df['ymin'] = df[0].apply(lambda y: max(1,(y*img_height)))
df['xmin'] = df[1].apply(lambda x: max(1,(x*img_width)))
df['ymax'] = df[2].apply(lambda y: min(img_height,(y*img_height)))
df['xmax'] = df[3].apply(lambda x: min(img_width,(x * img_width)))
boxes_scaled = df[['ymin', 'xmin', 'ymax', 'xmax']].to_numpy()
Here's a link to an inference script with input preprocessing, output post-processing and mAP evaluation.