Files are not playing locally after splitting in android - android

Hi, I am splitting video files into 1 MB parts each using the Java code below, and they split fine. But when I go to their physical local path and try to play them, they do not play. I am also using FFmpeg for this scenario, but that is not working for me either — it shows an exception like "No such file or directory".
Please, can someone help me resolve this problem?
code:
/**
 * Splits {@code f} into sequential 1 MB chunks written next to the input
 * file and named {@code <name>.001}, {@code <name>.002}, ...
 *
 * <p>Note: chunks are raw byte ranges, so individual parts of a media file
 * are not independently playable; only the re-joined bytes are.
 *
 * @param f the file to split
 * @return the chunk files in order (empty for an empty input file),
 *         or {@code null} if an I/O error occurred
 */
public static List<File> splitFile(File f) {
    final int chunkSize = 1024 * 1024; // 1 MB per part
    byte[] buffer = new byte[chunkSize];
    List<File> result = new ArrayList<>();
    String name = f.getName();
    int partCounter = 1;
    // try-with-resources closes the input stream on every path; the original
    // leaked both the input stream and every per-chunk output stream.
    try (BufferedInputStream bis = new BufferedInputStream(new FileInputStream(f))) {
        int read;
        while ((read = bis.read(buffer)) > 0) {
            File part = new File(f.getParent(), name + "." + String.format("%03d", partCounter++));
            try (FileOutputStream out = new FileOutputStream(part)) {
                // 'read' is the actual byte count; the final chunk may be < 1 MB.
                out.write(buffer, 0, read);
            }
            result.add(part);
        }
        return result;
    } catch (IOException e) { // narrowed from Throwable: don't swallow programming errors
        e.printStackTrace();
    }
    return null;
}
FFMpegCode:-
public void getSplitCommand(String inputFileUrl, String outputFileUrl) {
inputFileUrl= /storage/emulated/0/1492848702.mp4;
outputFileUrl= /storage/emulated/0/1492848702.mp4;
;
String cmd[] = new String[]{"-i ",inputFileUrl+" ","-c ","copy ","-map ","0 ",
"-segment_time ","8 ","-f ","segment/sdcard/Download/output%03d.mp4"};
executeBinaryCommand(fFmpeg, cmd);
}
/**
* executeBinaryCommand
*
* #param ffmpeg
* #param command
*/
public void executeBinaryCommand(FFmpeg ffmpeg, String[] command) {
try {
if (ffmpeg != null) {
ffmpeg.execute(command,
new ExecuteBinaryResponseHandler() {
#Override
public void onFailure(String response) {
System.out.println("failure====>" + response.toString());
}
#Override
public void onSuccess(String response) {
System.out.println("resposense====>" + response.toString());
}
#Override
public void onProgress(String response) {
System.out.println("on progress");
}
#Override
public void onStart() {
System.out.println("start");
}
#Override
public void onFinish() {
System.out.println("Finish");
}
});
}
} catch (FFmpegCommandAlreadyRunningException exception) {
exception.printStackTrace();
}
}

Use the [FFMPEG][2] library to split the video. Your splitting method may damage the video, because it cuts at raw byte offsets; this will not happen with FFMPEG.
ffmpeg -i INPUT.mp4 -acodec copy -f segment -vcodec copy -reset_timestamps 1 -map 0 OUTPUT%d.mp4
[2]: https://github.com/WritingMinds/ffmpeg-android-java/tree/master/FFmpegAndroid — I have split the video into 8-second parts. Don't split by size; always split by duration.

Related

ML Kit Android Studio java, how to Multible Images Text Recognition?

I have a folder with about 10 images which I like to OCR extract text.
That works excellently for one picture, but my Java skills are not good enough to implement it for multiple images.
I would really appreciate it if someone could show me a clean solution for that.
Thanks a lot
br Lukas
TextView output1;
ArrayList<Bitmap> bitmapArray = new ArrayList<Bitmap>();
TextRecognizer recognizer = TextRecognition.getClient(TextRecognizerOptions.DEFAULT_OPTIONS);
private void OCR_list()
{
String path = Environment.getExternalStorageDirectory().toString()+"/folder_with_images";
File directory = new File(path);
File[] files = directory.listFiles();
for (int i = 0; i < files.length; i++) {
output1.setText(output1.getText() + ", " + files[i].getName());
File imgFile = files[i];
if (imgFile.exists()) {
bitmapArray.add(BitmapFactory.decodeFile(imgFile.getAbsolutePath()));
} else {
output1.setText(output1.getText()+"\n Bitmap not found!");
return;
}
}
InputImage image = InputImage.fromBitmap(bitmapArray.get(0), 0);
recognizer.process(image)
.addOnSuccessListener(
new OnSuccessListener<Text>() {
#Override
public void onSuccess(Text texts) {
processTextRecognitionResult(texts);
}
})
.addOnFailureListener(
new OnFailureListener() {
#Override
public void onFailure(#NonNull Exception e) {
e.printStackTrace();
}
});
Edit:
I solved it now this way, but looks awful:
private void new_Recognition(InputImage image) {
recognizer.process(image)
.addOnSuccessListener(
new OnSuccessListener<Text>() {
#Override
public void onSuccess(Text texts) {
processTextRecognitionResult(texts);
bitmapArray.remove(0);
if (!bitmapArray.isEmpty()) {
InputImage image = InputImage.fromBitmap(bitmapArray.get(0), 0);
new_Recognition(image);
}
}
})
.addOnFailureListener(
new OnFailureListener() {
#Override
public void onFailure(#NonNull Exception e) {
e.printStackTrace();
}
});
}
You can iterate on inputs directly, and recognition tasks will be queued up and then processed in order internally.
for (Bitmap input : inputs) {
recognizer.process(input)
.addOnSuccessListener(text -> ...)
}

ffmpeg Add watermark/text in video on react native

Right now i'm stuck on android part,here is my code
I don't want to define font styling or a box; I just found this code somewhere and copy-pasted it, as I don't have any knowledge of FFmpeg. I just want simple right-aligned text in two lines at the top right of the video, like this.
I am getting the error in this part of the code: the video is generated, but it does not play, and it is always 262 B in size.
String[] cmd = new String[] {
"-i", path, "-vf", String.format("drawtext=\"fontfile=/systems/fonts/DroidSans.ttf: text='%s': " + "box=1: boxcolor=black#0.5: boxborder=5: x=(w-text_w)/t: y=(h-text_h)/2\"", text), "-codec:a", "aac", out.getAbsolutePath()
};
This is the full code
#ReactMethod
public void embedTextOnVideo(String text, String path, int fontSize, String fontColor, final Callback successCallback, final Callback errorCallback)
{
FFmpeg ffmpeg = FFmpeg.getInstance(_reactContext);
try
{
ffmpeg.loadBinary(new LoadBinaryResponseHandler() {
#Override
public void onStart() {}
#Override
public void onFailure() {}
#Override
public void onSuccess() {}
#Override
public void onFinish() {}
});
} catch (FFmpegNotSupportedException e) {
// Handle if FFmpeg is not supported by device
}
File out = getOutputFile(TYPE_VIDEO);
String[] cmd = new String[] {
"-i", path, "-vf", String.format("drawtext=\"fontfile=/systems/fonts/DroidSans.ttf: text='%s': " + "box=1: boxcolor=black#0.5: boxborder=5: x=(w-text_w)/t: y=(h-text_h)/2\"", text), "-codec:a", "aac", out.getAbsolutePath()
};
try {
ffmpeg.execute(cmd, new ExecuteBinaryResponseHandler() {
#Override
public void onStart() {}
#Override
public void onProgress(String message) {}
#Override
public void onFailure(String message) {
errorCallback.invoke("Error ffmpeg executing with message:\n\t" + message);
}
#Override
public void onSuccess(String message) {
successCallback.invoke("Successfully output file with message:\n\t");
}
#Override
public void onFinish() {}
});
} catch (FFmpegCommandAlreadyRunningException e) {
// Handle if FFmpeg is already running
}
}
#Nullable
private Throwable writeDataToFile(byte[] data, File file) {
try {
FileOutputStream fos = new FileOutputStream(file);
fos.write(data);
fos.close();
} catch (FileNotFoundException e) {
return e;
} catch (IOException e) {
return e;
}
return null;
}
#Nullable
private File getOutputFile(int type) {
File storageDir = Environment.getExternalStoragePublicDirectory(Environment.DIRECTORY_DCIM;
// Create storage dir if it does not exist
if (!storageDir.exists()) {
if (!storageDir.mkdirs()) {
Log.e(TAG, "Failed to create directory:" + storageDir.getAbsolutePath());
return null;
}
}
// media file name
String fileName = String.format("%s", new SimpleDateFormat("yyyyMMdd_HHmmss").format(new Date()));
enter code hereif (type == TYPE_VIDEO) {
fileName = String.format("VID_%s.mp4", fileName);
} else {
Log.e(TAG, "Unsupported media type:" + type);
return null;
}
return new File(String.format("%s%s%s", storageDir.getPath(), File.separator, fileName));
}

Android Amazon Rekognition Text Detection doesn't work with bytes but does with S3 Objects

I'm trying to detect text in a photo taken with the Camera, but with no luck.
The code I'm using is:
AWSCredentials credentials = new AWSCredentials() {
#Override
public String getAWSAccessKeyId() {
return "some access key id";
}
#Override
public String getAWSSecretKey() {
return "some secret key";
}
};
File file = new File(photoFilePath);
int size = (int) file.length();
byte[] bytes = new byte[size];
try {
BufferedInputStream buf = new BufferedInputStream(new FileInputStream(file));
buf.read(bytes, 0, bytes.length);
buf.close();
} catch (FileNotFoundException e) {
Timber.e(e);
} catch (IOException e) {
Timber.e(e);
}
AmazonRekognition rekognitionClient = new AmazonRekognitionClient(credentials);
byte [] base64 = android.util.Base64.encode(bytes, Base64.DEFAULT);
Image image = new Image().withBytes(ByteBuffer.wrap(base64));
DetectTextRequest detectTextRequest = new DetectTextRequest().withImage(image);
Observable.create((Observable.OnSubscribe<String>) observer -> {
try {
DetectTextResult result = rekognitionClient.detectText(detectTextRequest);
List<TextDetection> labels = result.getTextDetections();
String alllabels = "";
for (TextDetection detection : labels) {
alllabels += detection.getDetectedText();
}
observer.onNext(alllabels);
observer.onCompleted();
} catch (AmazonServiceException e) {
Timber.e(e);
observer.onError(e);
} catch (AmazonClientException e) {
Timber.e(e);
observer.onError(e);
}
})
.observeOn(AndroidSchedulers.mainThread())
.subscribeOn(Schedulers.io())
.subscribe(new Subscriber<String>() {
#Override
public void onNext(String item) {
System.out.println("Next: " + item);
}
#Override
public void onError(Throwable error) {
System.err.println("Error: " + error.getMessage());
}
#Override
public void onCompleted() {
System.out.println("Sequence complete.");
}
});
}
This produces an exception with the message
Failed to upload image; the format is not supported
When not encoding the bytes in base64 - it yields weird outputs, where each text detected is separated by a comma, like
S, !!:, 8, anons SAr, !!:, S, 8, anons, SAr,
or
8B, 8B
What might be wrong with my example?
When using references to S3 Objects even with the same photo - everything works fine.
It appears that the server is rejecting your request due to unsupported formats. Please note that AWS Rekognition only supports the PNG and JPEG Image formats, and has specific requirements on pixel resolution etc [See https://docs.aws.amazon.com/rekognition/latest/dg/limits.html for more information on the image requirements]
I have had success in the past with jpg images using the following code
InputStream fin1 = getResources().openRawResource(getResources().getIdentifier("face1", "raw", getPackageName()));
ByteBuffer byteBuffer1 = ByteBuffer.wrap(IOUtils.toByteArray(fin1));
Image image1 = new Image();
image1.withBytes(byteBuffer1);
Please try it out and let me know if that fixes your issue.
The image was rotated, and that is why it returned weird data...

FFMPEG in Android doesn't create output file.

I am currently using FFMPEG in my Android project for converting a video file to an audio file.
When I execute converting by FFMPEG library, no error occurs. However, the output file is not created in the folder which I already specified.
Here is my code for generating audio file.
OnConvertButtonClickListener convertButtonClickListener = new OnConvertButtonClickListener() {
#Override
public void onClick(int position) {
Converter.loadFFMpegBinary();
String cmd = CMD_HEAD + videoItems.get(position).getTitle() + CMD_STRICT;
String[] fileDir = videoItems.get(position).getTitle().split(File.separator);
String fileName = fileDir[fileDir.length-1];
String out_audio_file = FileManager.getHomeDir()+ File.separator+ fileName.substring(0, fileName.length()-3)+"aac";
Log.d("tag1", out_audio_file);
cmd = cmd+out_audio_file;
Log.e("tag1", cmd);
String[] command = cmd.split(" ");
Converter.execFFmpegBinary(command);
}
};
This is exeFFmpegBinary method code, and after executing this method, success is displayed in my Log window.
public static void execFFmpegBinary(final String[] command) {
try {
ffmpeg.execute(command, new ExecuteBinaryResponseHandler() {
#Override
public void onFailure(String s) {
Log.d("execFFmpegBinary", "fail");
}
#Override
public void onSuccess(String s) {
Log.d("execFFmpegBinary", "success");
}
#Override
public void onProgress(String s) {
Log.d("execFFmpegBinary", "progress");
}
#Override
public void onStart() {
Log.d("execFFmpegBinary", "start");
}
#Override
public void onFinish() {
Log.d("execFFmpegBinary", "finish");
}
});
} catch (FFmpegCommandAlreadyRunningException e) {
// do nothing for now
Log.d("execFFmpegBinary", "Exception");
}
}
Below is an example of my cmd.
-version -y -i /storage/emulated/0/DCIM/Camera/20180104_031417.mp4 -f aac -ab 192000 -vn /storage/emulated/0/Memento/20180104_031417.aac
Does anyone know why my output file is not created?
I have added audio using ffmpeg may be this will help you.
class AddAudio {
private Context context;
private FFmpeg ffmpeg;
private ProgressDialog progressDialog;
private String videoPath;
private String videoWithoutAudioPath;
private String output;
AddAudio(Context context, String soundPath, String videoPath, String videoWithoutAudioPath, ProgressDialog progressDialog, FFmpeg ffmpeg) {
this.context = context;
this.videoPath = videoPath;
this.ffmpeg = ffmpeg;
this.progressDialog = progressDialog;
this.videoWithoutAudioPath = videoWithoutAudioPath;
String currentMilliSecond = String.valueOf(System.currentTimeMillis());
output = Environment.getExternalStorageDirectory().getAbsolutePath() + "/" + context.getResources().getString(R.string.app_name) + "/test/" + currentMilliSecond + ".mp4";
//String cmd = "-y -i " + videoWithoutAudioPath + " -i " + soundPath + " -c:v copy -map 0:v:0 -map 1:a:0 -c:a aac -shortest " + output;
String cmd = "-y -i " + videoWithoutAudioPath + " -i " + soundPath + " -c:v copy -map 0:v:0 -map 1:a:0 -c:a aac -shortest " + output;
encodeAudio(cmd);
}
private void encodeAudio(String cmd) {
String[] command = cmd.split(" ");
if (command.length != 0) {
execFFmpegBinary(command);
} else {
Toast.makeText(context, context.getString(R.string.empty_command_toast), Toast.LENGTH_LONG).show();
}
}
private void execFFmpegBinary(final String[] command) {
try {
ffmpeg.execute(command, new ExecuteBinaryResponseHandler() {
#Override
public void onFailure(String s) {
Log.e("Failure", s);
}
#Override
public void onSuccess(String s) {
Log.e("Success", s);
}
#Override
public void onProgress(String s) {
progressDialog.setMessage("Adding audio....");
}
#Override
public void onStart() {
progressDialog.setCancelable(false);
progressDialog.setCanceledOnTouchOutside(false);
progressDialog.show();
}
#Override
public void onFinish() {
/**
* delete original video since new video is made with sound
*/
if (deleteOriginalVideo() && deleteWorkingFolder(videoWithoutAudioPath)) {
progressDialog.dismiss();
Intent notificationIntent = new Intent(Utils.LOCAL_BROADCAST_CODE);
notificationIntent.putExtra(Utils.FILE_PATH, output);
LocalBroadcastManager.getInstance(context).sendBroadcast(notificationIntent);
}
}
});
} catch (FFmpegCommandAlreadyRunningException e) {
e.printStackTrace();
}
}
private boolean deleteOriginalVideo() {
boolean success = false;
File file = new File(videoPath);
if (file.exists()) {
success = file.delete();
}
return success;
}
private boolean deleteWorkingFolder(String deletePath) {
File file = new File(deletePath);
File folder = new File(file.getParent());
if (folder.isDirectory()) {
for (File child : folder.listFiles()) {
//noinspection ResultOfMethodCallIgnored
child.delete();
}
}
return folder.delete();
}
}
Always check the log from ffmpeg (assuming your script is working and it is actually being executed). Yours should show an error:
Requested output format 'aac' is not a suitable output format
Replace -f aac with -f adts, or omit the -f option if the output is a normal file name with the .aac extension.
Alternatively, since your MP4 input most likely already contains AAC audio consider stream copying it instead of re-encoding. To do so remove -ab 192000 and add -c:a copy.
(well, I don't have enough karma to post a comment and I'm pretty new to FFmpeg lib)
what I can think of now is always consider giving a SPACE(" ") while concatenating strings in commands for this FFmpeg lib...try following
change:
cmd = cmd+out_audio_file;
To: (mark the space between cmd and out_audio_file)
cmd = cmd+" "+out_audio_file;

Mix 2 audio files with android ffmpeg android

I am developing an android application for mixing 2 audio files.And i use android ffmpeg for that.I use following lib. from GitHub
https://github.com/guardianproject/android-ffmpeg-java
I use following code to mix 2 audio files from activity .
try {
File fileAppRoot = new File(getApplicationInfo().dataDir);
SoxController sxCon = new SoxController(fileAppRoot, new ShellUtils.ShellCallback() {
#Override
public void shellOut(String shellLine) {
System.out.println(shellLine);
}
#Override
public void processComplete(int exitValue) {
System.out.println("hello");
}
});
List<String> files=new ArrayList<String>();
files.add(Environment.getExternalStorageDirectory().getAbsolutePath()+"/Testing/me.mp3");
files.add(Environment.getExternalStorageDirectory().getAbsolutePath()+"/Testing/il.mp3");
sxCon.combineMix(files,Environment.getExternalStorageDirectory().getAbsolutePath()+"/Testing/ial.mp3");
but this return exit value 2 on processComplete and no new file generated for mix audio.
This shows the following problem in the logs: no handler for file extension `mp3'
Thanks for any help on this..
You cannot mix mp3 files with this library.
It can mix ".wav" files only.
Convert your mp3 files to wav, then use this library to mix the wav files.
I hope this response is good for you.
Thanks,
https://github.com/bravobit/FFmpeg-Android
implementation 'nl.bravobit:android-ffmpeg:1.1.7'
public boolean mergeAudio(final Context context, File[] voiceFile, String file_name) {
final ProgressDialog asyncDialog = new ProgressDialog(context);
asyncDialog.setMessage("Audio Merging Start..");
asyncDialog.setCancelable(false);
final boolean[] isSuccess = {false};
if (file_name != null) {
file_name = Environment.getExternalStorageDirectory() + "/podmod/" + file_name + "_.mp3";
} else {
file_name = getMusicFilename();
}
File ffmpegFile = new File(file_name);
if (ffmpegFile.exists()) {
ffmpegFile.delete();
}
for (File f : voiceFile) {
if (!f.exists()) {
Log.d("AudioMergingFailure", "File ot Exist");
return isSuccess[0];
}
}
String s = "";
String s_index = "";
String fileSize = "n=" + voiceFile.length;
for (int i = 0; i < voiceFile.length; i++) {
s = s + "-i#" + voiceFile[i].getPath() + "#";
s_index = s_index + "[" + i + ":0]";
}
String str_cmd = s + "-filter_complex#" + s_index + "concat=" + fileSize + ":v=0:a=1[out]#-map#[out]#" + file_name;
Log.d("str_cmd", str_cmd);
String[] cmd = str_cmd.split("#");
final String finalFile_name = file_name;
try {
if (FFmpeg.getInstance(context).isSupported()) {
FFmpeg ffmpeg = FFmpeg.getInstance(context);
// to execute "ffmpeg -version" command you just need to pass "-version"
ffmpeg.execute(cmd, new ExecuteBinaryResponseHandler() {
#Override
public void onStart() {
asyncDialog.show();
}
#Override
public void onProgress(String message) {
}
#Override
public void onFailure(String message) {
Log.d("AudioMergingFailure", message);
asyncDialog.dismiss();
Toast.makeText(context, "Audio Merging Failed",
Toast.LENGTH_LONG).show();
}
#Override
public void onSuccess(String message) {
asyncDialog.dismiss();
Log.v("onSuccess", message);
File ffmpegFile_ = new File(finalFile_name);
Toast.makeText(context, "Audio onSuccess",
Toast.LENGTH_LONG).show();
isSuccess[0] = true;
}
#Override
public void onFinish() {
asyncDialog.dismiss();
}
});
} else {
asyncDialog.dismiss();
}
} catch (Exception e) {
asyncDialog.dismiss();
Log.d("NotException_", e.getMessage());
}
return isSuccess[0];
}
// Builds a default output path under /podmod with a random numeric suffix
// (0–100000, via getRandomNumber) to avoid name collisions between merged files.
public static String getMusicFilename() {
return Environment.getExternalStorageDirectory() + "/podmod/Merged_Audio_" + getRandomNumber(0, 100000) + ".mp3";
}

Categories

Resources