Getting stray symbols in a string with the Android NDK - android

In the NDK I want to convert some ASCII values to a string. I am getting the result, but there are some stray symbols after the string.
This is the output I am getting:
sint#j8na8̀
My code is:
jstring Java_com_magsonwink_utils_security_En_invokeNativeFunction(
JNIEnv* env, jobject javaThis) {
int i = 0;
int a[3]= {
115,
105,
110,
};
char b[3];
for (i = 0; i < 3; i++) {
b[i] = (char) a[i];
}
jstring result = (*env)->NewStringUTF(env, b);
return result;
}

Don't you have to null-terminate the string? NewStringUTF expects a NUL-terminated (modified UTF-8) C string, so without the terminator it keeps reading whatever bytes happen to follow your buffer.
Try:
jstring Java_com_magsonwink_utils_security_En_invokeNativeFunction(
JNIEnv* env, jobject javaThis) {
int i = 0;
int a[3]= {
115,
105,
110,
};
char b[4];
for (i = 0; i < 3; i++) {
b[i] = (char) a[i];
}
b[3] = 0;
jstring result = (*env)->NewStringUTF(env, b);
return result;
}
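An equivalent sketch that zero-initializes the buffer, so the terminator is already in place before the loop runs:
jstring Java_com_magsonwink_utils_security_En_invokeNativeFunction(
        JNIEnv* env, jobject javaThis) {
    int a[3] = { 115, 105, 110 };
    char b[4] = { 0 };  /* all four bytes start as '\0', so b stays terminated */
    for (int i = 0; i < 3; i++) {
        b[i] = (char) a[i];
    }
    return (*env)->NewStringUTF(env, b);  /* yields "sin" */
}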

Related

How to pass data from a native C++ binary to an Android model class using JNI?

I am trying to pass a value from a C++ header file named recognizer.h to Android through my JNI implementation in jni_face_rec.cpp.
recognizer.h
#pragma once
#include <dlib/dnn.h>
#include <dlib/string.h>
#include <jni_common/jni_fileutils.h>
#include <jni_common/jni_utils.h>
#include <dlib/image_processing.h>
#include <dlib/image_processing/frontal_face_detector.h>
#include <dlib/opencv/cv_image.h>
#include <dlib/image_loader/load_image.h>
#include <glog/logging.h>
#include <jni.h>
#include <memory>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/opencv.hpp>
#include <stdio.h>
#include <string>
#include <vector>
#include <unordered_map>
#include <time.h>
#include <dirent.h>
using namespace dlib;
using namespace std;
// ResNet network copied from dnn_face_recognition_ex.cpp in dlib/examples
template <template <int,template<typename>class,int,typename> class block, int N, template<typename>class BN, typename SUBNET>
using residual = add_prev1<block<N,BN,1,tag1<SUBNET>>>;
template <template <int,template<typename>class,int,typename> class block, int N, template<typename>class BN, typename SUBNET>
using residual_down = add_prev2<avg_pool<2,2,2,2,skip1<tag2<block<N,BN,2,tag1<SUBNET>>>>>>;
template <int N, template <typename> class BN, int stride, typename SUBNET>
using block = BN<con<N,3,3,1,1,relu<BN<con<N,3,3,stride,stride,SUBNET>>>>>;
template <int N, typename SUBNET> using ares = relu<residual<block,N,affine,SUBNET>>;
template <int N, typename SUBNET> using ares_down = relu<residual_down<block,N,affine,SUBNET>>;
template <typename SUBNET> using alevel0 = ares_down<256,SUBNET>;
template <typename SUBNET> using alevel1 = ares<256,ares<256,ares_down<256,SUBNET>>>;
template <typename SUBNET> using alevel2 = ares<128,ares<128,ares_down<128,SUBNET>>>;
template <typename SUBNET> using alevel3 = ares<64,ares<64,ares<64,ares_down<64,SUBNET>>>>;
template <typename SUBNET> using alevel4 = ares<32,ares<32,ares<32,SUBNET>>>;
using anet_type = loss_metric<fc_no_bias<128,avg_pool_everything<
alevel0<
alevel1<
alevel2<
alevel3<
alevel4<
max_pool<3,3,2,2,relu<affine<con<32,7,7,2,2,
input_rgb_image_sized<150>
>>>>>>>>>>>>;
class DLibFaceRecognizer {
private:
std::string landmark_model;
std::string model_dir_path;
std::string image_dir_path;
std::string dnn_model;
anet_type net;
dlib::shape_predictor sp;
std::unordered_map<int, dlib::full_object_detection> mFaceShapeMap;
dlib::frontal_face_detector face_detector;
std::vector<dlib::rectangle> rects;
std::vector<std::string> rec_names;
std::vector<matrix<float,0,1>> rec_face_descriptors;
std::vector<dlib::rectangle> rec_rects;
std::vector<std::string> rec_labels;
std::vector<matrix<float,0,1>> match_face_descriptors;
bool is_training;
inline void init() {
LOG(INFO) << "init DLibFaceRecognizer";
face_detector = dlib::get_frontal_face_detector();
landmark_model = model_dir_path + "/shape_predictor_5_face_landmarks.dat";
dnn_model = model_dir_path + "/dlib_face_recognition_resnet_model_v1.dat";
image_dir_path = model_dir_path + "/images";
is_training = false;
}
public:
inline void train() {
LOG(INFO) << "train DLibFaceRecognizer";
struct dirent *entry;
DIR *dp;
dp = opendir((image_dir_path).c_str());
if (dp == NULL) {
LOG(INFO) << ("Opendir: Path does not exist or could not be read.");
}
std::vector<matrix<rgb_pixel>> faces;
std::vector<std::string> names;
// load images from dlib image directory and extract faces
while ((entry = readdir(dp))) {
std::string filename = entry->d_name;
if (filename=="." || filename=="..") continue;
cv::Mat file_image = cv::imread(image_dir_path + "/" + filename, CV_LOAD_IMAGE_COLOR);
LOG(INFO) << "Load image " << (entry->d_name);
dlib::cv_image<dlib::bgr_pixel> img(file_image);
std::vector<dlib::rectangle> frects = face_detector(img);
if (frects.size()==1) {
auto face = frects[0];
auto shape = sp(img, face);
matrix<rgb_pixel> face_chip;
extract_image_chip(img, get_face_chip_details(shape,150,0.25), face_chip);
faces.push_back(move(face_chip));
names.push_back(filename);
LOG(INFO) << "Added image " << filename;
} else if (frects.size()==0) {
LOG(INFO) << "No face found in image " << filename;
} else {
LOG(INFO) << "More than one face found in image " << filename;
}
}
closedir(dp);
is_training = true;
// calculate face descriptors and set global vars
LOG(INFO) << "Calculating face descriptors " << jniutils::currentDateTime();
rec_face_descriptors = net(faces);
LOG(INFO) << "Calculated face descriptors " << jniutils::currentDateTime()<<" Size "<<rec_face_descriptors.size();
rec_names = names;
is_training = false;
}
DLibFaceRecognizer() { init(); }
DLibFaceRecognizer(const std::string& dlib_rec_example_dir)
: model_dir_path(dlib_rec_example_dir) {
init();
if (!landmark_model.empty() && jniutils::fileExists(landmark_model) && !dnn_model.empty() && jniutils::fileExists(dnn_model)) {
// load the model weights
dlib::deserialize(landmark_model) >> sp;
dlib::deserialize(dnn_model) >> net;
LOG(INFO) << "Models loaded";
}
}
inline int rec(const cv::Mat& image) {
if (is_training) return 0;
if (image.empty())
return 0;
if (image.channels() == 1) {
cv::cvtColor(image, image, CV_GRAY2BGR);
}
CHECK(image.channels() == 3);
dlib::cv_image<dlib::bgr_pixel> img(image);
std::vector<matrix<rgb_pixel>> faces;
std::vector<dlib::rectangle> frects = face_detector(img);
for (auto face : frects)
{
auto shape = sp(img, face);
matrix<rgb_pixel> face_chip;
extract_image_chip(img, get_face_chip_details(shape,150,0.25), face_chip);
faces.push_back(move(face_chip));
}
if (faces.size() == 0)
{
LOG(INFO) << "No faces found in image!";
}
LOG(INFO) << "calculating face descriptor in image..." << jniutils::currentDateTime();
std::vector<matrix<float,0,1>> face_descriptors = net(faces);
LOG(INFO) << "face descriptors in camera image calculated "<<jniutils::currentDateTime()<<" Size "<<face_descriptors.size();
rec_rects.clear();
rec_labels.clear();
for (size_t i = 0; i < face_descriptors.size(); ++i) {
for (size_t j = 0; j < rec_face_descriptors.size(); ++j) {
if (length(face_descriptors[i]-rec_face_descriptors[j]) < 0.6) {
LOG(INFO) << "HELLO "<< rec_names[j]<<" FOUND ";
//LOG(INFO) << "128 input"<< face_descriptors[i] ;
//LOG(INFO) << "128 enroll"<< rec_face_descriptors[j];
LOG(INFO) << "Length" << length(face_descriptors[i]-rec_face_descriptors[j]);
LOG(INFO) << "Row : " << face_descriptors[i].NR<< ",Column : "<<face_descriptors[i].NC ;
dlib::rectangle r = frects[i];
rec_rects.push_back(r);
rec_labels.push_back(rec_names[j]);
match_face_descriptors.push_back(face_descriptors[i]);
LOG(INFO) << "128 enroll last"<< match_face_descriptors[i];
}
}
}
return rec_rects.size();
}
virtual inline int det(const cv::Mat& image) {
if (is_training) return 0;
if (image.empty())
return 0;
if (image.channels() == 1) {
cv::cvtColor(image, image, CV_GRAY2BGR);
}
CHECK(image.channels() == 3);
// TODO : Convert to gray image to speed up detection
// It's unnecessary to use color image for face/landmark detection
dlib::cv_image<dlib::bgr_pixel> img(image);
std::vector<matrix<rgb_pixel>> faces;
rects = face_detector(img);
return rects.size();
}
inline std::vector<dlib::rectangle> getRecResultRects() { return rec_rects; }
inline std::vector<std::string> getRecResultLabels() { return rec_labels; }
inline std::vector<dlib::rectangle> getDetResultRects() { return rects; }
//inline std::vector<matrix<float,0,1>> getExtractedResult() { return match_face_descriptors; }
};
and the JNI file is jni_face_rec.cpp:
#include <android/bitmap.h>
#include <jni_common/jni_bitmap2mat.h>
#include <jni_common/jni_primitives.h>
#include <jni_common/jni_fileutils.h>
#include <jni_common/jni_utils.h>
#include <recognizer.h>
#include <jni.h>
using namespace cv;
extern JNI_VisionDetRet* g_pJNI_VisionDetRet;
namespace {
#define JAVA_NULL 0
using RecPtr = DLibFaceRecognizer*;
class JNI_FaceRec {
public:
JNI_FaceRec(JNIEnv* env) {
jclass clazz = env->FindClass(CLASSNAME_FACE_REC);
mNativeContext = env->GetFieldID(clazz, "mNativeFaceRecContext", "J");
env->DeleteLocalRef(clazz);
}
RecPtr getRecognizerPtrFromJava(JNIEnv* env, jobject thiz) {
RecPtr const p = (RecPtr)env->GetLongField(thiz, mNativeContext);
return p;
}
void setRecognizerPtrToJava(JNIEnv* env, jobject thiz, jlong ptr) {
env->SetLongField(thiz, mNativeContext, ptr);
}
jfieldID mNativeContext;
};
// Protect getting/setting and creating/deleting pointer between java/native
std::mutex gLock;
std::shared_ptr<JNI_FaceRec> getJNI_FaceRec(JNIEnv* env) {
static std::once_flag sOnceInitflag;
static std::shared_ptr<JNI_FaceRec> sJNI_FaceRec;
std::call_once(sOnceInitflag, [env]() {
sJNI_FaceRec = std::make_shared<JNI_FaceRec>(env);
});
return sJNI_FaceRec;
}
RecPtr const getRecPtr(JNIEnv* env, jobject thiz) {
std::lock_guard<std::mutex> lock(gLock);
return getJNI_FaceRec(env)->getRecognizerPtrFromJava(env, thiz);
}
// The function to set a pointer to java and delete it if newPtr is empty
void setRecPtr(JNIEnv* env, jobject thiz, RecPtr newPtr) {
std::lock_guard<std::mutex> lock(gLock);
RecPtr oldPtr = getJNI_FaceRec(env)->getRecognizerPtrFromJava(env, thiz);
if (oldPtr != JAVA_NULL) {
DLOG(INFO) << "setMapManager delete old ptr : " << oldPtr;
delete oldPtr;
}
if (newPtr != JAVA_NULL) {
DLOG(INFO) << "setMapManager set new ptr : " << newPtr;
}
getJNI_FaceRec(env)->setRecognizerPtrToJava(env, thiz, (jlong)newPtr);
}
} // end anonymous namespace
#ifdef __cplusplus
extern "C" {
#endif
#define DLIB_FACE_JNI_METHOD(METHOD_NAME) \
Java_com_tzutalin_dlib_FaceRec_##METHOD_NAME
void JNIEXPORT
DLIB_FACE_JNI_METHOD(jniNativeClassInit)(JNIEnv* env, jclass _this) {}
jobjectArray getRecResult(JNIEnv* env, RecPtr faceRecognizer,
const int& size) {
LOG(INFO) << "getRecResult";
jobjectArray jDetRetArray = JNI_VisionDetRet::createJObjectArray(env, size);
for (int i = 0; i < size; i++) {
jobject jDetRet = JNI_VisionDetRet::createJObject(env);
env->SetObjectArrayElement(jDetRetArray, i, jDetRet);
dlib::rectangle rect = faceRecognizer->getRecResultRects()[i];
std::string label = faceRecognizer->getRecResultLabels()[i];
//std::matrix<float,0,1> value = faceRecognizer->getExtractedResult()[i];
g_pJNI_VisionDetRet->setRect(env, jDetRet, rect.left(), rect.top(),
rect.right(), rect.bottom());
g_pJNI_VisionDetRet->setLabel(env, jDetRet, label);
//g_pJNI_VisionDetRet->setValue(env, jDetRet, value);
}
return jDetRetArray;
}
jobjectArray getDetResult(JNIEnv* env, RecPtr faceRecognizer,
const int& size) {
LOG(INFO) << "getDetResult";
jobjectArray jDetRetArray = JNI_VisionDetRet::createJObjectArray(env, size);
for (int i = 0; i < size; i++) {
jobject jDetRet = JNI_VisionDetRet::createJObject(env);
env->SetObjectArrayElement(jDetRetArray, i, jDetRet);
dlib::rectangle rect = faceRecognizer->getDetResultRects()[i];
std::string label = "face";
g_pJNI_VisionDetRet->setRect(env, jDetRet, rect.left(), rect.top(),
rect.right(), rect.bottom());
g_pJNI_VisionDetRet->setLabel(env, jDetRet, label);
}
return jDetRetArray;
}
JNIEXPORT jobjectArray JNICALL
DLIB_FACE_JNI_METHOD(jniBitmapDetect)(JNIEnv* env, jobject thiz,
jobject bitmap) {
LOG(INFO) << "jniBitmapFaceDet";
cv::Mat rgbaMat;
cv::Mat bgrMat;
jniutils::ConvertBitmapToRGBAMat(env, bitmap, rgbaMat, true);
cv::cvtColor(rgbaMat, bgrMat, cv::COLOR_RGBA2BGR);
RecPtr mRecPtr = getRecPtr(env, thiz);
jint size = mRecPtr->det(bgrMat);
LOG(INFO) << "det face size: " << size;
return getDetResult(env, mRecPtr, size);
}
JNIEXPORT jobjectArray JNICALL
DLIB_FACE_JNI_METHOD(jniBitmapRec)(JNIEnv* env, jobject thiz,
jobject bitmap) {
LOG(INFO) << "jniBitmapFaceDet";
cv::Mat rgbaMat;
cv::Mat bgrMat;
jniutils::ConvertBitmapToRGBAMat(env, bitmap, rgbaMat, true);
cv::cvtColor(rgbaMat, bgrMat, cv::COLOR_RGBA2BGR);
RecPtr mRecPtr = getRecPtr(env, thiz);
jint size = mRecPtr->rec(bgrMat);
LOG(INFO) << "rec face size: " << size;
return getRecResult(env, mRecPtr, size);
}
jint JNIEXPORT JNICALL DLIB_FACE_JNI_METHOD(jniInit)(JNIEnv* env, jobject thiz,
jstring jDirPath) {
LOG(INFO) << "jniInit";
std::string dirPath = jniutils::convertJStrToString(env, jDirPath);
RecPtr mRecPtr = new DLibFaceRecognizer(dirPath);
setRecPtr(env, thiz, mRecPtr);
return JNI_OK;
}
jint JNIEXPORT JNICALL DLIB_FACE_JNI_METHOD(jniTrain)(JNIEnv* env, jobject thiz) {
LOG(INFO) << "jniTrain";
RecPtr mRecPtr = getRecPtr(env, thiz);
mRecPtr->train();
return JNI_OK;
}
jint JNIEXPORT JNICALL
DLIB_FACE_JNI_METHOD(jniDeInit)(JNIEnv* env, jobject thiz) {
LOG(INFO) << "jniDeInit";
setRecPtr(env, thiz, JAVA_NULL);
return JNI_OK;
}
#ifdef __cplusplus
}
#endif
and the model class defined in Android is VisionDetRet.java:
import android.graphics.Point;
import java.util.ArrayList;
/**
* A VisionDetRet contains all the information identifying the location and confidence value of the detected object in a bitmap.
*/
public final class VisionDetRet {
private String mLabel;
private float mConfidence;
private int mLeft;
private int mTop;
private int mRight;
private int mBottom;
private ArrayList<Point> mLandmarkPoints = new ArrayList<>();
private float[] extractedValue;
VisionDetRet() {
}
/**
* @param label Label name
* @param confidence A confidence factor between 0 and 1. This indicates how certain what has been found is actually the label.
* @param l The X coordinate of the left side of the result
* @param t The Y coordinate of the top of the result
* @param r The X coordinate of the right side of the result
* @param b The Y coordinate of the bottom of the result
* @param extractedValue The extracted 128-float descriptor
*/
public VisionDetRet(String label, float confidence, int l, int t, int r, int b, float[] extractedValue) {
mLabel = label;
mLeft = l;
mTop = t;
mRight = r;
mBottom = b;
mConfidence = confidence;
this.extractedValue = extractedValue;
}
/**
* @return The X coordinate of the left side of the result
*/
public int getLeft() {
return mLeft;
}
/**
* @return The Y coordinate of the top of the result
*/
public int getTop() {
return mTop;
}
/**
* @return The X coordinate of the right side of the result
*/
public int getRight() {
return mRight;
}
/**
* @return The Y coordinate of the bottom of the result
*/
public int getBottom() {
return mBottom;
}
/**
* @return A confidence factor between 0 and 1. This indicates how certain what has been found is actually the label.
*/
public float getConfidence() {
return mConfidence;
}
/**
* @return The label of the result
*/
public String getLabel() {
return mLabel;
}
/**
* Add landmark to the list. Usually called by JNI.
* @param x Point x
* @param y Point y
* @return true if the landmark was added successfully
*/
public boolean addLandmark(int x, int y) {
return mLandmarkPoints.add(new Point(x, y));
}
/**
* Return the list of landmark points
* @return ArrayList of android.graphics.Point
*/
public ArrayList<Point> getFaceLandmarks() {
return mLandmarkPoints;
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("Left:");
sb.append(mLabel);
sb.append(", Top:");
sb.append(mTop);
sb.append(", Right:");
sb.append(mRight);
sb.append(", Bottom:");
sb.append(mBottom);
sb.append(", Label:");
sb.append(mLabel);
return sb.toString();
}
public float[] getExtractedValue() {
return extractedValue;
}
public void setExtractedValue(float[] extractedValue) {
this.extractedValue = extractedValue;
}
}
I would like to get the value of the match_face_descriptors vector (defined below) as a float[] in Java.
std::vector<matrix<float,0,1>> match_face_descriptors;
I can export it as a comma-separated String but would prefer a float[].
In the absence of further information I assume you want to preserve the vector-of-matrices structure as a float[][] where every float[] corresponds to one matrix.
// This will convert the passed vector of matrices into a Java float[][], preserving
// the outside structure.
jobjectArray convert_to_float(JNIEnv* env, const std::vector<dlib::matrix<float,0,1>>& match_face_descriptors) {
// We first create the outer float[][] array.
jclass cls_floatA = env->FindClass("[F");
jobjectArray ret = env->NewObjectArray(match_face_descriptors.size(), cls_floatA, NULL);
// Now create float[] instances and assign them to ret[i]
for (size_t i = 0; i < match_face_descriptors.size(); i++) {
auto& mat = match_face_descriptors[i];
// mat.begin() returns a pointer to the first element of the one-row
// matrix, so we can pass it to SetFloatArrayRegion, which expects a float*.
jfloatArray elem = env->NewFloatArray(mat.size());
env->SetFloatArrayRegion(elem, 0, mat.size(), mat.begin());
// ret[i] = elem
env->SetObjectArrayElement(ret, i, elem);
// FIXME: This is not necessary if you can guarantee match_face_descriptors
// is small enough. It is included for completeness.
env->DeleteLocalRef(elem);
}
return ret;
}
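On the Java side the result then arrives as a float[][]. A hypothetical declaration and usage sketch, assuming convert_to_float() is returned from a native method named getMatchDescriptors() (that method name is illustrative, not part of the original code):
// Hypothetical native method; its JNI implementation would return
// convert_to_float(env, match_face_descriptors).
public native float[][] getMatchDescriptors();

void logDescriptors() {
    float[][] descriptors = getMatchDescriptors();
    for (float[] d : descriptors) {
        // each d holds the floats of one face descriptor
        android.util.Log.i("FaceRec", "descriptor length = " + d.length);
    }
}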

java.lang.UnsatisfiedLinkError: No implementation found for int [duplicate]

This question already has answers here:
Android NDK C++ JNI (no implementation found for native...)
(11 answers)
Closed 5 years ago.
I executed the YouTube Watch Me Android application project. I just added some classes to my project and built with the NDK. I got an error like:
java.lang.UnsatisfiedLinkError: No implementation found for int com.ephronsystem.mobilizerapp.Ffmpeg.encodeVideoFrame(byte[]) (tried Java_com_ephronsystem_mobilizerapp_Ffmpeg_encodeVideoFrame and Java_com_ephronsystem_mobilizerapp_Ffmpeg_encodeVideoFrame___3B).
My code:
package com.ephronsystem.mobilizerapp;
public class Ffmpeg {
static {
System.loadLibrary("ffmpeg");
}
public static native boolean init(int width, int height, int audio_sample_rate, String rtmpUrl);
public static native void shutdown();
// Returns the size of the encoded frame.
public static native int encodeVideoFrame(byte[] yuv_image);
public static native int encodeAudioFrame(short[] audio_data, int length);
}
This is ffmpeg-jni.c
#include <android/log.h>
#include <string.h>
#include <jni.h>
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libavutil/opt.h"
#ifdef __cplusplus
extern "C" {
#endif
JNIEXPORT jboolean JNICALL Java_com_ephronsystem_mobilizerapp_Ffmpeg_init(JNIEnv *env, jobject thiz,
jint width, jint height,
jint audio_sample_rate,
jstring rtmp_url);
JNIEXPORT void JNICALL Java_com_ephronsystem_mobilizerapp_Ffmpeg_shutdown(JNIEnv *env,
jobject thiz);
JNIEXPORT jint JNICALL Java_com_ephronsystem_mobilizerapp_Ffmpeg_encodeVideoFrame(JNIEnv *env,
jobject thiz,
jbyteArray yuv_image);
JNIEXPORT jint JNICALL Java_com_ephronsystem_mobilizerapp_Ffmpeg_encodeAudioFrame(JNIEnv *env,
jobject thiz,
jshortArray audio_data,
jint length);
#ifdef __cplusplus
}
#endif
#define LOGI(...) __android_log_print(ANDROID_LOG_INFO, "ffmpeg-jni", __VA_ARGS__)
#define URL_WRONLY 2
static AVFormatContext *fmt_context;
static AVStream *video_stream;
static AVStream *audio_stream;
static int pts = 0;
static int last_audio_pts = 0;
// Buffers for UV format conversion
static unsigned char *u_buf;
static unsigned char *v_buf;
static int enable_audio = 1;
static int64_t audio_samples_written = 0;
static int audio_sample_rate = 0;
// Stupid buffer for audio samples. Not even a proper ring buffer
#define AUDIO_MAX_BUF_SIZE 16384 // 2x what we get from Java
static short audio_buf[AUDIO_MAX_BUF_SIZE];
static int audio_buf_size = 0;
void AudioBuffer_Push(const short *audio, int num_samples) {
if (audio_buf_size >= AUDIO_MAX_BUF_SIZE - num_samples) {
LOGI("AUDIO BUFFER OVERFLOW: %i + %i > %i", audio_buf_size, num_samples,
AUDIO_MAX_BUF_SIZE);
return;
}
for (int i = 0; i < num_samples; i++) {
audio_buf[audio_buf_size++] = audio[i];
}
}
int AudioBuffer_Size() { return audio_buf_size; }
short *AudioBuffer_Get() { return audio_buf; }
void AudioBuffer_Pop(int num_samples) {
if (num_samples > audio_buf_size) {
LOGI("Audio buffer Pop WTF: %i vs %i", num_samples, audio_buf_size);
return;
}
memmove(audio_buf, audio_buf + num_samples, num_samples * sizeof(short));
audio_buf_size -= num_samples;
}
void AudioBuffer_Clear() {
memset(audio_buf, 0, sizeof(audio_buf));
audio_buf_size = 0;
}
static void log_callback(void *ptr, int level, const char *fmt, va_list vl) {
char x[2048];
vsnprintf(x, 2048, fmt, vl);
LOGI(x);
}
JNIEXPORT jboolean JNICALL Java_com_ephronsystem_mobilizerapp_Ffmpeg_init(JNIEnv *env, jobject thiz,
jint width, jint height,
jint audio_sample_rate_param,
jstring rtmp_url) {
avcodec_register_all();
av_register_all();
av_log_set_callback(log_callback);
fmt_context = avformat_alloc_context();
AVOutputFormat *ofmt = av_guess_format("flv", NULL, NULL);
if (ofmt) {
LOGI("av_guess_format returned %s", ofmt->long_name);
} else {
LOGI("av_guess_format fail");
return JNI_FALSE;
}
fmt_context->oformat = ofmt;
LOGI("creating video stream");
video_stream = av_new_stream(fmt_context, 0);
if (enable_audio) {
LOGI("creating audio stream");
audio_stream = av_new_stream(fmt_context, 1);
}
// Open Video Codec.
// ======================
AVCodec *video_codec = avcodec_find_encoder(AV_CODEC_ID_H264);
if (!video_codec) {
LOGI("Did not find the video codec");
return JNI_FALSE; // leak!
} else {
LOGI("Video codec found!");
}
AVCodecContext *video_codec_ctx = video_stream->codec;
video_codec_ctx->codec_id = video_codec->id;
video_codec_ctx->codec_type = AVMEDIA_TYPE_VIDEO;
video_codec_ctx->level = 31;
video_codec_ctx->width = width;
video_codec_ctx->height = height;
video_codec_ctx->pix_fmt = PIX_FMT_YUV420P;
video_codec_ctx->rc_max_rate = 0;
video_codec_ctx->rc_buffer_size = 0;
video_codec_ctx->gop_size = 12;
video_codec_ctx->max_b_frames = 0;
video_codec_ctx->slices = 8;
video_codec_ctx->b_frame_strategy = 1;
video_codec_ctx->coder_type = 0;
video_codec_ctx->me_cmp = 1;
video_codec_ctx->me_range = 16;
video_codec_ctx->qmin = 10;
video_codec_ctx->qmax = 51;
video_codec_ctx->keyint_min = 25;
video_codec_ctx->refs = 3;
video_codec_ctx->trellis = 0;
video_codec_ctx->scenechange_threshold = 40;
video_codec_ctx->flags |= CODEC_FLAG_LOOP_FILTER;
video_codec_ctx->me_method = ME_HEX;
video_codec_ctx->me_subpel_quality = 6;
video_codec_ctx->i_quant_factor = 0.71;
video_codec_ctx->qcompress = 0.6;
video_codec_ctx->max_qdiff = 4;
video_codec_ctx->time_base.den = 10;
video_codec_ctx->time_base.num = 1;
video_codec_ctx->bit_rate = 3200 * 1000;
video_codec_ctx->bit_rate_tolerance = 0;
video_codec_ctx->flags2 |= 0x00000100;
fmt_context->bit_rate = 4000 * 1000;
av_opt_set(video_codec_ctx, "partitions", "i8x8,i4x4,p8x8,b8x8", 0);
av_opt_set_int(video_codec_ctx, "direct-pred", 1, 0);
av_opt_set_int(video_codec_ctx, "rc-lookahead", 0, 0);
av_opt_set_int(video_codec_ctx, "fast-pskip", 1, 0);
av_opt_set_int(video_codec_ctx, "mixed-refs", 1, 0);
av_opt_set_int(video_codec_ctx, "8x8dct", 0, 0);
av_opt_set_int(video_codec_ctx, "weightb", 0, 0);
if (fmt_context->oformat->flags & AVFMT_GLOBALHEADER)
video_codec_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
LOGI("Opening video codec");
AVDictionary *vopts = NULL;
av_dict_set(&vopts, "profile", "main", 0);
//av_dict_set(&vopts, "vprofile", "main", 0);
av_dict_set(&vopts, "rc-lookahead", 0, 0);
av_dict_set(&vopts, "tune", "film", 0);
av_dict_set(&vopts, "preset", "ultrafast", 0);
av_opt_set(video_codec_ctx->priv_data, "tune", "film", 0);
av_opt_set(video_codec_ctx->priv_data, "preset", "ultrafast", 0);
av_opt_set(video_codec_ctx->priv_data, "tune", "film", 0);
int open_res = avcodec_open2(video_codec_ctx, video_codec, &vopts);
if (open_res < 0) {
LOGI("Error opening video codec: %i", open_res);
return JNI_FALSE; // leak!
}
// Open Audio Codec.
// ======================
if (enable_audio) {
AudioBuffer_Clear();
audio_sample_rate = audio_sample_rate_param;
AVCodec *audio_codec = avcodec_find_encoder(AV_CODEC_ID_AAC);
if (!audio_codec) {
LOGI("Did not find the audio codec");
return JNI_FALSE; // leak!
} else {
LOGI("Audio codec found!");
}
AVCodecContext *audio_codec_ctx = audio_stream->codec;
audio_codec_ctx->codec_id = audio_codec->id;
audio_codec_ctx->codec_type = AVMEDIA_TYPE_AUDIO;
audio_codec_ctx->bit_rate = 128000;
audio_codec_ctx->bit_rate_tolerance = 16000;
audio_codec_ctx->channels = 1;
audio_codec_ctx->profile = FF_PROFILE_AAC_LOW;
audio_codec_ctx->sample_fmt = AV_SAMPLE_FMT_FLT;
audio_codec_ctx->sample_rate = 44100;
LOGI("Opening audio codec");
AVDictionary *opts = NULL;
av_dict_set(&opts, "strict", "experimental", 0);
open_res = avcodec_open2(audio_codec_ctx, audio_codec, &opts);
LOGI("audio frame size: %i", audio_codec_ctx->frame_size);
if (open_res < 0) {
LOGI("Error opening audio codec: %i", open_res);
return JNI_FALSE; // leak!
}
}
const jbyte *url = (*env)->GetStringUTFChars(env, rtmp_url, NULL);
// Point to an output file
if (!(ofmt->flags & AVFMT_NOFILE)) {
if (avio_open(&fmt_context->pb, url, URL_WRONLY) < 0) {
LOGI("ERROR: Could not open file %s", url);
return JNI_FALSE; // leak!
}
}
(*env)->ReleaseStringUTFChars(env, rtmp_url, url);
LOGI("Writing output header.");
// Write file header
if (avformat_write_header(fmt_context, NULL) != 0) {
LOGI("ERROR: av_write_header failed");
return JNI_FALSE;
}
pts = 0;
last_audio_pts = 0;
audio_samples_written = 0;
// Initialize buffers for UV format conversion
int frame_size = video_codec_ctx->width * video_codec_ctx->height;
u_buf = (unsigned char *) av_malloc(frame_size / 4);
v_buf = (unsigned char *) av_malloc(frame_size / 4);
LOGI("ffmpeg encoding init done");
return JNI_TRUE;
}
JNIEXPORT void JNICALL Java_com_ephronsystem_mobilizerapp_Ffmpeg_shutdown(JNIEnv *env,
jobject thiz) {
av_write_trailer(fmt_context);
avio_close(fmt_context->pb);
avcodec_close(video_stream->codec);
if (enable_audio) {
avcodec_close(audio_stream->codec);
}
av_free(fmt_context);
av_free(u_buf);
av_free(v_buf);
fmt_context = NULL;
u_buf = NULL;
v_buf = NULL;
}
JNIEXPORT jint JNICALL Java_com_ephronsystem_mobilizerapp_Ffmpeg_encodeVideoFrame(JNIEnv *env,
jobject thiz,
jbyteArray yuv_image) {
int yuv_length = (*env)->GetArrayLength(env, yuv_image);
unsigned char *yuv_data = (*env)->GetByteArrayElements(env, yuv_image, 0);
AVCodecContext *video_codec_ctx = video_stream->codec;
//LOGI("Yuv size: %i w: %i h: %i", yuv_length, video_codec_ctx->width, video_codec_ctx->height);
int frame_size = video_codec_ctx->width * video_codec_ctx->height;
const unsigned char *uv = yuv_data + frame_size;
// Convert YUV from NV12 to I420. Y channel is the same so we don't touch it,
// we just have to deinterleave UV.
for (int i = 0; i < frame_size / 4; i++) {
v_buf[i] = uv[i * 2];
u_buf[i] = uv[i * 2 + 1];
}
AVFrame source;
memset(&source, 0, sizeof(AVFrame));
source.data[0] = yuv_data;
source.data[1] = u_buf;
source.data[2] = v_buf;
source.linesize[0] = video_codec_ctx->width;
source.linesize[1] = video_codec_ctx->width / 2;
source.linesize[2] = video_codec_ctx->width / 2;
// only for bitrate regulation. irrelevant for sync.
source.pts = pts;
pts++;
int out_length = frame_size + (frame_size / 2);
unsigned char *out = (unsigned char *) av_malloc(out_length);
int compressed_length = avcodec_encode_video(video_codec_ctx, out, out_length, &source);
(*env)->ReleaseByteArrayElements(env, yuv_image, yuv_data, 0);
// Write to file too
if (compressed_length > 0) {
AVPacket pkt;
av_init_packet(&pkt);
pkt.pts = last_audio_pts;
if (video_codec_ctx->coded_frame && video_codec_ctx->coded_frame->key_frame) {
pkt.flags |= 0x0001;
}
pkt.stream_index = video_stream->index;
pkt.data = out;
pkt.size = compressed_length;
if (av_interleaved_write_frame(fmt_context, &pkt) != 0) {
LOGI("Error writing video frame");
}
} else {
LOGI("??? compressed_length <= 0");
}
last_audio_pts++;
av_free(out);
return compressed_length;
}
JNIEXPORT jint JNICALL Java_com_ephronsystem_mobilizerapp_Ffmpeg_encodeAudioFrame(JNIEnv *env,
jobject thiz,
jshortArray audio_data,
jint length) {
if (!enable_audio) {
return 0;
}
short *audio = (*env)->GetShortArrayElements(env, audio_data, 0);
//LOGI("java audio buffer size: %i", length);
AVCodecContext *audio_codec_ctx = audio_stream->codec;
unsigned char *out = av_malloc(128000);
AudioBuffer_Push(audio, length);
int total_compressed = 0;
while (AudioBuffer_Size() >= audio_codec_ctx->frame_size) {
AVPacket pkt;
av_init_packet(&pkt);
int compressed_length = avcodec_encode_audio(audio_codec_ctx, out, 128000, AudioBuffer_Get());
total_compressed += compressed_length;
audio_samples_written += audio_codec_ctx->frame_size;
int new_pts = (audio_samples_written * 1000) / audio_sample_rate;
if (compressed_length > 0) {
pkt.size = compressed_length;
pkt.pts = new_pts;
last_audio_pts = new_pts;
//LOGI("audio_samples_written: %i comp_length: %i pts: %i", (int)audio_samples_written, (int)compressed_length, (int)new_pts);
pkt.flags |= 0x0001;
pkt.stream_index = audio_stream->index;
pkt.data = out;
if (av_interleaved_write_frame(fmt_context, &pkt) != 0) {
LOGI("Error writing audio frame");
}
}
AudioBuffer_Pop(audio_codec_ctx->frame_size);
}
(*env)->ReleaseShortArrayElements(env, audio_data, audio, 0);
av_free(out);
return total_compressed;
}
This error generally occurs when your native library can't be found by the JVM during execution. Your native code must be compiled into a .so file and made available to the JVM at run time.
You may find more details on java.library.path and linking here.
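With ndk-build, making the library available usually comes down to the module name in Android.mk matching the System.loadLibrary() call. A minimal sketch, assuming the source file shown above sits in jni/ (a real build of this project would also need to link the FFmpeg libraries):
# jni/Android.mk (illustrative): module "ffmpeg" produces libffmpeg.so,
# which System.loadLibrary("ffmpeg") then loads at run time.
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)
LOCAL_MODULE := ffmpeg
LOCAL_SRC_FILES := ffmpeg-jni.c
LOCAL_LDLIBS := -llog
include $(BUILD_SHARED_LIBRARY)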
In all of these methods you take jobject thiz as the second parameter, which means you are implementing non-static methods that receive a reference to the jobject the method is called on. Your Java methods are declared static, so try changing those parameters to jclass clazz to implement them as static methods.
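A sketch of that change for one of the declarations, assuming everything else stays the same:
/* The Java method is declared static, so the second JNI parameter is the
   Ffmpeg class itself (jclass), not an instance (jobject). */
JNIEXPORT jint JNICALL Java_com_ephronsystem_mobilizerapp_Ffmpeg_encodeVideoFrame(
        JNIEnv *env, jclass clazz, jbyteArray yuv_image);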

A problem occurred starting process 'command 'C:\*********\Sdk\ndk-bundle\ndk-build.cmd'' in Android Studio

I am using yt-watchme-master for live streaming videos from our mobile to YouTube.
I used one third-party library: https://github.com/youtube/yt-watchme
It uses C++ code, which is why I installed the NDK in my Android Studio, but errors still come up.
Can someone help me, please?
Below is the entire code given.
#include <android/log.h>
#include <string.h>
#include <jni.h>
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libavutil/opt.h"
#ifdef __cplusplus
extern "C" {
#endif
JNIEXPORT jboolean JNICALL Java_com_google_android_apps_watchme_Ffmpeg_init(JNIEnv *env, jobject thiz,
jint width, jint height,
jint audio_sample_rate,
jstring rtmp_url);
JNIEXPORT void JNICALL Java_com_google_android_apps_watchme_Ffmpeg_shutdown(JNIEnv *env,
jobject thiz);
JNIEXPORT jint JNICALL Java_com_google_android_apps_watchme_Ffmpeg_encodeVideoFrame(JNIEnv *env,
jobject thiz,
jbyteArray yuv_image);
JNIEXPORT jint JNICALL Java_com_google_android_apps_watchme_Ffmpeg_encodeAudioFrame(JNIEnv *env,
jobject thiz,
jshortArray audio_data,
jint length);
#ifdef __cplusplus
}
#endif
#define LOGI(...) __android_log_print(ANDROID_LOG_INFO, "ffmpeg-jni", __VA_ARGS__)
#define URL_WRONLY 2
static AVFormatContext *fmt_context;
static AVStream *video_stream;
static AVStream *audio_stream;
static int pts = 0;
static int last_audio_pts = 0;
// Buffers for UV format conversion
static unsigned char *u_buf;
static unsigned char *v_buf;
static int enable_audio = 1;
static int64_t audio_samples_written = 0;
static int audio_sample_rate = 0;
// Stupid buffer for audio samples. Not even a proper ring buffer
#define AUDIO_MAX_BUF_SIZE 16384 // 2x what we get from Java
static short audio_buf[AUDIO_MAX_BUF_SIZE];
static int audio_buf_size = 0;
void AudioBuffer_Push(const short *audio, int num_samples) {
if (audio_buf_size >= AUDIO_MAX_BUF_SIZE - num_samples) {
LOGI("AUDIO BUFFER OVERFLOW: %i + %i > %i", audio_buf_size, num_samples,
AUDIO_MAX_BUF_SIZE);
return;
}
for (int i = 0; i < num_samples; i++) {
audio_buf[audio_buf_size++] = audio[i];
}
}
int AudioBuffer_Size() { return audio_buf_size; }
short *AudioBuffer_Get() { return audio_buf; }
void AudioBuffer_Pop(int num_samples) {
if (num_samples > audio_buf_size) {
LOGI("Audio buffer Pop WTF: %i vs %i", num_samples, audio_buf_size);
return;
}
memmove(audio_buf, audio_buf + num_samples, num_samples * sizeof(short));
audio_buf_size -= num_samples;
}
void AudioBuffer_Clear() {
memset(audio_buf, 0, sizeof(audio_buf));
audio_buf_size = 0;
}
static void log_callback(void *ptr, int level, const char *fmt, va_list vl) {
char x[2048];
vsnprintf(x, 2048, fmt, vl);
LOGI(x);
}
JNIEXPORT jboolean JNICALL Java_com_google_android_apps_watchme_Ffmpeg_init(JNIEnv *env, jobject thiz,
jint width, jint height,
jint audio_sample_rate_param,
jstring rtmp_url) {
avcodec_register_all();
av_register_all();
av_log_set_callback(log_callback);
fmt_context = avformat_alloc_context();
AVOutputFormat *ofmt = av_guess_format("flv", NULL, NULL);
if (ofmt) {
LOGI("av_guess_format returned %s", ofmt->long_name);
} else {
LOGI("av_guess_format fail");
return JNI_FALSE;
}
fmt_context->oformat = ofmt;
LOGI("creating video stream");
video_stream = av_new_stream(fmt_context, 0);
if (enable_audio) {
LOGI("creating audio stream");
audio_stream = av_new_stream(fmt_context, 1);
}
// Open Video Codec.
// ======================
AVCodec *video_codec = avcodec_find_encoder(AV_CODEC_ID_H264);
if (!video_codec) {
LOGI("Did not find the video codec");
return JNI_FALSE; // leak!
} else {
LOGI("Video codec found!");
}
AVCodecContext *video_codec_ctx = video_stream->codec;
video_codec_ctx->codec_id = video_codec->id;
video_codec_ctx->codec_type = AVMEDIA_TYPE_VIDEO;
video_codec_ctx->level = 31;
video_codec_ctx->width = width;
video_codec_ctx->height = height;
video_codec_ctx->pix_fmt = PIX_FMT_YUV420P;
video_codec_ctx->rc_max_rate = 0;
video_codec_ctx->rc_buffer_size = 0;
video_codec_ctx->gop_size = 12;
video_codec_ctx->max_b_frames = 0;
video_codec_ctx->slices = 8;
video_codec_ctx->b_frame_strategy = 1;
video_codec_ctx->coder_type = 0;
video_codec_ctx->me_cmp = 1;
video_codec_ctx->me_range = 16;
video_codec_ctx->qmin = 10;
video_codec_ctx->qmax = 51;
video_codec_ctx->keyint_min = 25;
video_codec_ctx->refs = 3;
video_codec_ctx->trellis = 0;
video_codec_ctx->scenechange_threshold = 40;
video_codec_ctx->flags |= CODEC_FLAG_LOOP_FILTER;
video_codec_ctx->me_method = ME_HEX;
video_codec_ctx->me_subpel_quality = 6;
video_codec_ctx->i_quant_factor = 0.71;
video_codec_ctx->qcompress = 0.6;
video_codec_ctx->max_qdiff = 4;
video_codec_ctx->time_base.den = 10;
video_codec_ctx->time_base.num = 1;
video_codec_ctx->bit_rate = 3200 * 1000;
video_codec_ctx->bit_rate_tolerance = 0;
video_codec_ctx->flags2 |= 0x00000100;
fmt_context->bit_rate = 4000 * 1000;
av_opt_set(video_codec_ctx, "partitions", "i8x8,i4x4,p8x8,b8x8", 0);
av_opt_set_int(video_codec_ctx, "direct-pred", 1, 0);
av_opt_set_int(video_codec_ctx, "rc-lookahead", 0, 0);
av_opt_set_int(video_codec_ctx, "fast-pskip", 1, 0);
av_opt_set_int(video_codec_ctx, "mixed-refs", 1, 0);
av_opt_set_int(video_codec_ctx, "8x8dct", 0, 0);
av_opt_set_int(video_codec_ctx, "weightb", 0, 0);
if (fmt_context->oformat->flags & AVFMT_GLOBALHEADER)
video_codec_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
LOGI("Opening video codec");
AVDictionary *vopts = NULL;
av_dict_set(&vopts, "profile", "main", 0);
//av_dict_set(&vopts, "vprofile", "main", 0);
av_dict_set(&vopts, "rc-lookahead", 0, 0);
av_dict_set(&vopts, "tune", "film", 0);
av_dict_set(&vopts, "preset", "ultrafast", 0);
av_opt_set(video_codec_ctx->priv_data, "tune", "film", 0);
av_opt_set(video_codec_ctx->priv_data, "preset", "ultrafast", 0);
av_opt_set(video_codec_ctx->priv_data, "tune", "film", 0);
int open_res = avcodec_open2(video_codec_ctx, video_codec, &vopts);
if (open_res < 0) {
LOGI("Error opening video codec: %i", open_res);
return JNI_FALSE; // leak!
}
// Open Audio Codec.
// ======================
if (enable_audio) {
AudioBuffer_Clear();
audio_sample_rate = audio_sample_rate_param;
AVCodec *audio_codec = avcodec_find_encoder(AV_CODEC_ID_AAC);
if (!audio_codec) {
LOGI("Did not find the audio codec");
return JNI_FALSE; // leak!
} else {
LOGI("Audio codec found!");
}
AVCodecContext *audio_codec_ctx = audio_stream->codec;
audio_codec_ctx->codec_id = audio_codec->id;
audio_codec_ctx->codec_type = AVMEDIA_TYPE_AUDIO;
audio_codec_ctx->bit_rate = 128000;
audio_codec_ctx->bit_rate_tolerance = 16000;
audio_codec_ctx->channels = 1;
audio_codec_ctx->profile = FF_PROFILE_AAC_LOW;
audio_codec_ctx->sample_fmt = AV_SAMPLE_FMT_FLT;
audio_codec_ctx->sample_rate = 44100;
LOGI("Opening audio codec");
AVDictionary *opts = NULL;
av_dict_set(&opts, "strict", "experimental", 0);
open_res = avcodec_open2(audio_codec_ctx, audio_codec, &opts);
LOGI("audio frame size: %i", audio_codec_ctx->frame_size);
if (open_res < 0) {
LOGI("Error opening audio codec: %i", open_res);
return JNI_FALSE; // leak!
}
}
const jbyte *url = (*env)->GetStringUTFChars(env, rtmp_url, NULL);
// Point to an output file
if (!(ofmt->flags & AVFMT_NOFILE)) {
if (avio_open(&fmt_context->pb, url, URL_WRONLY) < 0) {
LOGI("ERROR: Could not open file %s", url);
return JNI_FALSE; // leak!
}
}
(*env)->ReleaseStringUTFChars(env, rtmp_url, url);
LOGI("Writing output header.");
// Write file header
if (avformat_write_header(fmt_context, NULL) != 0) {
LOGI("ERROR: av_write_header failed");
return JNI_FALSE;
}
pts = 0;
last_audio_pts = 0;
audio_samples_written = 0;
// Initialize buffers for UV format conversion
int frame_size = video_codec_ctx->width * video_codec_ctx->height;
u_buf = (unsigned char *) av_malloc(frame_size / 4);
v_buf = (unsigned char *) av_malloc(frame_size / 4);
LOGI("ffmpeg encoding init done");
return JNI_TRUE;
}
JNIEXPORT void JNICALL Java_com_google_android_apps_watchme_Ffmpeg_shutdown(JNIEnv *env,
jobject thiz) {
av_write_trailer(fmt_context);
avio_close(fmt_context->pb);
avcodec_close(video_stream->codec);
if (enable_audio) {
avcodec_close(audio_stream->codec);
}
av_free(fmt_context);
av_free(u_buf);
av_free(v_buf);
fmt_context = NULL;
u_buf = NULL;
v_buf = NULL;
}
JNIEXPORT jint JNICALL Java_com_google_android_apps_watchme_Ffmpeg_encodeVideoFrame(JNIEnv *env,
jobject thiz,
jbyteArray yuv_image) {
int yuv_length = (*env)->GetArrayLength(env, yuv_image);
unsigned char *yuv_data = (*env)->GetByteArrayElements(env, yuv_image, 0);
AVCodecContext *video_codec_ctx = video_stream->codec;
//LOGI("Yuv size: %i w: %i h: %i", yuv_length, video_codec_ctx->width, video_codec_ctx->height);
int frame_size = video_codec_ctx->width * video_codec_ctx->height;
const unsigned char *uv = yuv_data + frame_size;
// Convert YUV from NV12 to I420. Y channel is the same so we don't touch it,
// we just have to deinterleave UV.
for (int i = 0; i < frame_size / 4; i++) {
v_buf[i] = uv[i * 2];
u_buf[i] = uv[i * 2 + 1];
}
AVFrame source;
memset(&source, 0, sizeof(AVFrame));
source.data[0] = yuv_data;
source.data[1] = u_buf;
source.data[2] = v_buf;
source.linesize[0] = video_codec_ctx->width;
source.linesize[1] = video_codec_ctx->width / 2;
source.linesize[2] = video_codec_ctx->width / 2;
// only for bitrate regulation. irrelevant for sync.
source.pts = pts;
pts++;
int out_length = frame_size + (frame_size / 2);
unsigned char *out = (unsigned char *) av_malloc(out_length);
int compressed_length = avcodec_encode_video(video_codec_ctx, out, out_length, &source);
(*env)->ReleaseByteArrayElements(env, yuv_image, yuv_data, 0);
// Write to file too
if (compressed_length > 0) {
AVPacket pkt;
av_init_packet(&pkt);
pkt.pts = last_audio_pts;
if (video_codec_ctx->coded_frame && video_codec_ctx->coded_frame->key_frame) {
pkt.flags |= 0x0001;
}
pkt.stream_index = video_stream->index;
pkt.data = out;
pkt.size = compressed_length;
if (av_interleaved_write_frame(fmt_context, &pkt) != 0) {
LOGI("Error writing video frame");
}
} else {
LOGI("??? compressed_length <= 0");
}
last_audio_pts++;
av_free(out);
return compressed_length;
}
JNIEXPORT jint JNICALL Java_com_google_android_apps_watchme_Ffmpeg_encodeAudioFrame(JNIEnv *env,
jobject thiz,
jshortArray audio_data,
jint length) {
if (!enable_audio) {
return 0;
}
short *audio = (*env)->GetShortArrayElements(env, audio_data, 0);
//LOGI("java audio buffer size: %i", length);
AVCodecContext *audio_codec_ctx = audio_stream->codec;
unsigned char *out = av_malloc(128000);
AudioBuffer_Push(audio, length);
int total_compressed = 0;
while (AudioBuffer_Size() >= audio_codec_ctx->frame_size) {
AVPacket pkt;
av_init_packet(&pkt);
int compressed_length = avcodec_encode_audio(audio_codec_ctx, out, 128000, AudioBuffer_Get());
total_compressed += compressed_length;
audio_samples_written += audio_codec_ctx->frame_size;
int new_pts = (audio_samples_written * 1000) / audio_sample_rate;
if (compressed_length > 0) {
pkt.size = compressed_length;
pkt.pts = new_pts;
last_audio_pts = new_pts;
//LOGI("audio_samples_written: %i comp_length: %i pts: %i", (int)audio_samples_written, (int)compressed_length, (int)new_pts);
pkt.flags |= 0x0001;
pkt.stream_index = audio_stream->index;
pkt.data = out;
if (av_interleaved_write_frame(fmt_context, &pkt) != 0) {
LOGI("Error writing audio frame");
}
}
AudioBuffer_Pop(audio_codec_ctx->frame_size);
}
(*env)->ReleaseShortArrayElements(env, audio_data, audio, 0);
av_free(out);
return total_compressed;
}

Android NDK: How to create a 2D boolean array in C/C++ and return it to Java?

So I've worked with the NDK and C/C++ a little bit, and I know about the jbooleanArray type, but my question is how I would go about creating a 2D boolean array and returning it to Java to be used. Here's an example:
#include "retrieveboolarray.h"
#define LOG_TAG "!"
//macros to call the Log functions to allow for debugging. the tag is declared on the line above
#define LOGD(...) __android_log_print(ANDROID_LOG_DEBUG,LOG_TAG,__VA_ARGS__)
#define LOGE(...) __android_log_print(ANDROID_LOG_ERROR,LOG_TAG,__VA_ARGS__)
JNIEXPORT jint JNICALL Java_com_example_ndktesting_generateBoolArrays_retrieveBoolArray(
JNIEnv * env, jobject obj, jint refNum) {
//refNum will eventually be used to access certain arrays
return refNum;
}
So right now it's returning an integer; how would I generate a 2D bool array and return it to Java?
Update: here is some code I've tried previously, still with no luck unfortunately:
JNIEXPORT jobjectArray JNICALL Java_com_example_ndktesting_generateBoolArrays_retrieveBoolArray(
JNIEnv * env, jobject obj, jint refNum) {
jboolean flag = JNI_TRUE;
jclass stringClass = (*env)->FindClass(env, "java/lang/Boolean");
jobjectArray row;
jobjectArray rows;
jboolean hello = 1;
int MAXRESPONSE = 5;
int MAXTEST = 5;
jsize i, j;
for (i = 0; i < 5; i++) {
row = (*env)->NewObjectArray(env, MAXRESPONSE, stringClass, 0);
for (j = 0; j < 3; j++) {
(*env)->SetObjectArrayElement(env, row, j, hello);
}
if (flag == JNI_TRUE) {
flag = JNI_FALSE;
rows = (*env)->NewObjectArray(env, MAXTEST,
(*env)->GetObjectClass(env, row), 0);
}
(*env)->SetObjectArrayElement(env, rows, i, row);
}
return rows;
}
Hopefully someone has an idea of how to fix this so I can create and return a bool array.
Essentially, you need to use a jobjectArray that contains jbooleanArray instances. See here for a similar example.
Update:
I wrote the code using C++ and converted to C here, but other than that it is tested and works.
JNIEXPORT jobjectArray JNICALL Java_com_example_ndktesting_generateBoolArrays_retrieveBoolArray(
JNIEnv* env, jobject obj, jint refNum) {
jclass arrayClass = (*env)->FindClass(env, "[Z");
int numColumns = 5, numRows = 5;
jobjectArray twoDArray = (*env)->NewObjectArray(env, numColumns, arrayClass, 0);
jbooleanArray column;
// Initialization of the source array
jboolean source[numColumns][numRows];
for (int i = 0; i < numColumns; ++i) {
for (int j = 0; j < numRows; ++j)
source[i][j] = JNI_TRUE; /* 'true' needs <stdbool.h> in C; JNI_TRUE is always available */
}
for (int i = 0; i < numColumns; ++i) {
column = (*env)->NewBooleanArray(env, numRows);
(*env)->SetBooleanArrayRegion(env, column, 0, numRows, source[i]);
(*env)->SetObjectArrayElement(env, twoDArray, i, column);
}
return twoDArray;
}
In your Java code, you will get an Object[]. You can cast each of its elements to a boolean[].
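A short hypothetical Java-side sketch of that cast; the native declaration mirrors the C signature above (declaring the return type as boolean[][] directly would also work, since each element is a boolean[]):
// Hypothetical Java declaration for the native method above:
public native Object[] retrieveBoolArray(int refNum);

void useBoolArray() {
    Object[] rows = retrieveBoolArray(0);
    for (Object row : rows) {
        boolean[] cells = (boolean[]) row;  // one column of the 2D array
        // ... use cells ...
    }
}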

Encrypt and decrypt a UTF-8 string in Android NDK JNI

I wrote a simple string encrypt/decrypt: it splits the string into two halves, increments or decrements the ASCII codes, and then swaps the right and left halves.
When I encrypt the text "this is test" it works well, but it doesn't work when I encrypt UTF characters, like this string: "تست تست تست تست"
The encrypt code is:
JNIEXPORT jstring JNICALL Java_com_test_ndk_MainActivity_encrypt(JNIEnv* env, jobject thiz, jstring dec) {
const char *nativeString = (*env)->GetStringUTFChars(env, dec, 0);
char *newstr;
char *left;
char *right;
int decLenght = strlen(nativeString);
int middl = decLenght / 2;
int i;
newstr = substr(nativeString, 0, middl);
int lenght = strlen(newstr);
left = malloc(lenght + 1); /* +1 for the '\0' written below */
for (i = 0; i < lenght; i++) {
left[i] = newstr[i] + 1;
}
left[lenght] = '\0';
newstr = substr(nativeString, middl, decLenght - middl);
lenght = strlen(newstr);
right = malloc(decLenght + 1); /* room for both halves plus '\0' after strcat */
for (i = 0; i < lenght; i++) {
right[i] = newstr[i] - 1;
}
right[lenght] = '\0';
strcat(right, left);
(*env)->ReleaseStringUTFChars(env, dec, nativeString);
return (*env)->NewStringUTF(env, right);
}
and the decrypt code is:
JNIEXPORT jstring JNICALL Java_com_test_ndk_MainActivity_decrypt(JNIEnv* env, jobject thiz, jstring enc) {
const char *nativeString = (*env)->GetStringUTFChars(env, enc, 0);
char *newstr;
char *left;
char *right;
int encLenght = strlen(nativeString);
int middl = encLenght / 2;
int i;
if (encLenght % 2 != 0) {
middl++;
}
newstr = substr(nativeString, 0, middl);
int lenght = strlen(newstr);
left = malloc(lenght + 1); /* +1 for the '\0' written below */
for (i = 0; i < lenght; i++) {
left[i] = (char) ((int) newstr[i] + 1);
}
left[lenght] = '\0';
newstr = substr(nativeString, middl, encLenght - middl);
lenght = strlen(newstr);
right = malloc(encLenght + 1); /* room for both halves plus '\0' after strcat */
for (i = 0; i < lenght; i++) {
right[i] = (char) ((int) newstr[i] - 1);
}
right[lenght] = '\0';
strcat(right, left);
(*env)->ReleaseStringUTFChars(env, enc, nativeString);
return (*env)->NewStringUTF(env, right);
}
The substr function:
char* substr(const char *source, unsigned int start, unsigned int length) {
return strndup(source + start, length);
}
Does anyone have a solution?
UTF-8 is not trivial to manipulate. For your encode/decode, you can use GetStringChars() (or the more efficient, but also more restrictive, GetStringCritical()) and operate on the resulting array of 16-bit jchars.
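A minimal sketch of that approach for the encrypt side, assuming the same shift-and-swap scheme (shifting UTF-16 code units blindly can still corrupt surrogate pairs, so treat this as a starting point, not a real cipher):
#include <stdlib.h> /* malloc, free */

JNIEXPORT jstring JNICALL Java_com_test_ndk_MainActivity_encrypt(JNIEnv* env,
        jobject thiz, jstring dec) {
    jsize len = (*env)->GetStringLength(env, dec);  /* length in UTF-16 units */
    const jchar *src = (*env)->GetStringChars(env, dec, NULL);
    jchar *buf = malloc(len * sizeof(jchar));
    jsize middl = len / 2, i;
    /* right half, shifted down, comes first... */
    for (i = middl; i < len; i++)
        buf[i - middl] = (jchar) (src[i] - 1);
    /* ...followed by the left half, shifted up */
    for (i = 0; i < middl; i++)
        buf[(len - middl) + i] = (jchar) (src[i] + 1);
    (*env)->ReleaseStringChars(env, dec, src);
    jstring result = (*env)->NewString(env, buf, len);
    free(buf);
    return result;
}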
