I want to make an animated video from a list of images by applying transition animations between consecutive images. I found many similar questions on SO, like:
Android Screen capturing or make video from images
Android- How to make video using set of images from sd card?
All the similar SO questions suggest using animations for this, but how can we store those animated images in a video file? Is there any Android library that supports making a video out of images?
Android does not support AWT's BufferedImage or AWTUtil; those are for Java SE. The SequenceEncoder solution has since been integrated into jcodec's Android version; you can use it from the package org.jcodec.api.SequenceEncoder.
Here is a solution for generating an MP4 file from a series of Bitmaps using jcodec:
try {
    File file = this.GetSDPathToFile("", "output.mp4");
    SequenceEncoder encoder = new SequenceEncoder(file);
    // only 5 frames in total
    for (int i = 1; i <= 5; i++) {
        // getting bitmap from drawable path
        int bitmapResId = this.getResources().getIdentifier("image" + i, "drawable", this.getPackageName());
        Bitmap bitmap = this.getBitmapFromResources(this.getResources(), bitmapResId);
        encoder.encodeNativeFrame(this.fromBitmap(bitmap));
    }
    encoder.finish();
} catch (IOException e) {
    e.printStackTrace();
}
// get full SD path
File GetSDPathToFile(String filePatho, String fileName) {
    File extBaseDir = Environment.getExternalStorageDirectory();
    if (filePatho == null || filePatho.length() == 0 || filePatho.charAt(0) != '/')
        filePatho = "/" + filePatho;
    makeDirectory(filePatho);
    File file = new File(extBaseDir.getAbsoluteFile() + filePatho);
    return new File(file.getAbsolutePath() + "/" + fileName);
}
// convert from Bitmap to Picture (jcodec native structure)
public Picture fromBitmap(Bitmap src) {
    Picture dst = Picture.create(src.getWidth(), src.getHeight(), ColorSpace.RGB);
    fromBitmap(src, dst);
    return dst;
}

public void fromBitmap(Bitmap src, Picture dst) {
    int[] dstData = dst.getPlaneData(0);
    int[] packed = new int[src.getWidth() * src.getHeight()];
    src.getPixels(packed, 0, src.getWidth(), 0, 0, src.getWidth(), src.getHeight());
    for (int i = 0, srcOff = 0, dstOff = 0; i < src.getHeight(); i++) {
        for (int j = 0; j < src.getWidth(); j++, srcOff++, dstOff += 3) {
            int rgb = packed[srcOff];
            dstData[dstOff] = (rgb >> 16) & 0xff;
            dstData[dstOff + 1] = (rgb >> 8) & 0xff;
            dstData[dstOff + 2] = rgb & 0xff;
        }
    }
}
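The question asks for transition animations between images, and the encoder simply writes whatever frames you hand it, so a transition reduces to rendering the intermediate frames yourself. Here is a minimal sketch (a hypothetical helper of my own, reusing the fromBitmap() converter above) that cross-fades between two bitmaps; it assumes both bitmaps share the same size and frames >= 2:
// Hypothetical helper: renders a cross-fade and feeds every intermediate
// frame to the jcodec encoder.
void encodeCrossFade(SequenceEncoder encoder, Bitmap from, Bitmap to, int frames) throws IOException {
    Bitmap frame = Bitmap.createBitmap(from.getWidth(), from.getHeight(), Bitmap.Config.ARGB_8888);
    Canvas canvas = new Canvas(frame);
    Paint paint = new Paint();
    for (int f = 0; f < frames; f++) {
        canvas.drawBitmap(from, 0, 0, null);    // opaque base image
        paint.setAlpha(255 * f / (frames - 1)); // second image fades in
        canvas.drawBitmap(to, 0, 0, paint);
        encoder.encodeNativeFrame(fromBitmap(frame));
    }
}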
In case you need to change the fps, you may customize the SequenceEncoder.
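For example, with the newer jcodec Android artifact the frame rate is passed as a Rational when constructing the encoder. A sketch, assuming jcodec 0.2.x's AndroidSequenceEncoder (shown in full in a later answer):
// Assumption: jcodec 0.2.x on Android. Rational.R(10, 1) means 10 fps.
SeekableByteChannel ch = NIOUtils.writableFileChannel(file.getAbsolutePath());
AndroidSequenceEncoder encoder = new AndroidSequenceEncoder(ch, Rational.R(10, 1));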
You can use a pure Java solution called JCodec (http://jcodec.org). Here's a corrected simple class that does it using the JCodec low-level API:
public class SequenceEncoder {
    private SeekableByteChannel ch;
    private Picture toEncode;
    private RgbToYuv420 transform;
    private H264Encoder encoder;
    private ArrayList<ByteBuffer> spsList;
    private ArrayList<ByteBuffer> ppsList;
    private CompressedTrack outTrack;
    private ByteBuffer _out;
    private int frameNo;
    private MP4Muxer muxer;

    public SequenceEncoder(File out) throws IOException {
        this.ch = NIOUtils.writableFileChannel(out);
        // Transform to convert between RGB and YUV
        transform = new RgbToYuv420(0, 0);
        // Muxer that will store the encoded frames
        muxer = new MP4Muxer(ch, Brand.MP4);
        // Add video track to muxer
        outTrack = muxer.addTrackForCompressed(TrackType.VIDEO, 25);
        // Allocate a buffer big enough to hold output frames
        _out = ByteBuffer.allocate(1920 * 1080 * 6);
        // Create an instance of encoder
        encoder = new H264Encoder();
        // Encoder extra data (SPS, PPS) to be stored in a special place of MP4
        spsList = new ArrayList<ByteBuffer>();
        ppsList = new ArrayList<ByteBuffer>();
    }

    public void encodeImage(BufferedImage bi) throws IOException {
        if (toEncode == null) {
            toEncode = Picture.create(bi.getWidth(), bi.getHeight(), ColorSpace.YUV420);
        }
        // Perform conversion
        for (int i = 0; i < 3; i++)
            Arrays.fill(toEncode.getData()[i], 0);
        transform.transform(AWTUtil.fromBufferedImage(bi), toEncode);
        // Encode image into H.264 frame, the result is stored in '_out' buffer
        _out.clear();
        ByteBuffer result = encoder.encodeFrame(_out, toEncode);
        // Based on the frame above form correct MP4 packet
        spsList.clear();
        ppsList.clear();
        H264Utils.encodeMOVPacket(result, spsList, ppsList);
        // Add packet to video track
        outTrack.addFrame(new MP4Packet(result, frameNo, 25, 1, frameNo, true, null, frameNo, 0));
        frameNo++;
    }

    public void finish() throws IOException {
        // Push saved SPS/PPS to a special storage in MP4
        outTrack.addSampleEntry(H264Utils.createMOVSampleEntry(spsList, ppsList));
        // Write MP4 header and finalize recording
        muxer.writeHeader();
        NIOUtils.closeQuietly(ch);
    }

    public static void main(String[] args) throws IOException {
        SequenceEncoder encoder = new SequenceEncoder(new File("video.mp4"));
        for (int i = 1; i < 100; i++) {
            BufferedImage bi = ImageIO.read(new File(String.format("folder/img%08d.png", i)));
            encoder.encodeImage(bi);
        }
        encoder.finish();
    }
}
Related
I am trying to print a logo (an image) from my app on my Bluetooth printer. I have searched online, and especially through this site, for a solution, but I still have not solved my ultimate problem, which is to print an image on a mini printer. There are many proffered answers, many of which I have had to adapt to my own problem, but I have still not been able to get it done. I used a sample method, print_image(String file), shown below, which receives a file path (which should reference a drawable image resource) as a string. Unfortunately, I get a "File doesn't exist" error from my code because, apparently, the application cannot properly resolve the path (my biggest headache).
So, I need help on how to properly locate the path as a string and pass it on to the print_image(String file) method. I have run into a brick wall on this one. Thank you.
private void print_image(String file) {
    File fl = new File(file);
    if (fl.exists()) {
        Bitmap bmp = BitmapFactory.decodeFile(file);
        convertBitmap(bmp);
        mService.write(PrinterCommands.SET_LINE_SPACING_24);
        int offset = 0;
        while (offset < bmp.getHeight()) {
            mService.write(PrinterCommands.SELECT_BIT_IMAGE_MODE);
            for (int x = 0; x < bmp.getWidth(); ++x) {
                for (int k = 0; k < 3; ++k) {
                    byte slice = 0;
                    for (int b = 0; b < 8; ++b) {
                        int y = (((offset / 8) + k) * 8) + b;
                        int i = (y * bmp.getWidth()) + x;
                        boolean v = false;
                        if (i < dots.length()) {
                            v = dots.get(i);
                        }
                        slice |= (byte) ((v ? 1 : 0) << (7 - b));
                    }
                    mService.write(slice);
                }
            }
            offset += 24;
            mService.write(PrinterCommands.FEED_LINE);
            mService.write(PrinterCommands.FEED_LINE);
            mService.write(PrinterCommands.FEED_LINE);
            mService.write(PrinterCommands.FEED_LINE);
            mService.write(PrinterCommands.FEED_LINE);
            mService.write(PrinterCommands.FEED_LINE);
        }
        mService.write(PrinterCommands.SET_LINE_SPACING_30);
    } else {
        Toast.makeText(this, "file doesn't exists", Toast.LENGTH_SHORT)
                .show();
    }
}

public String getURLForResource(int resourceId) {
    return Uri.parse("android.resource://" + R.class.getPackage().getName() + "/" + resourceId).toString();
}
This is what I have done to call print_image(String file):
1. fileString = getURLForResource(R.drawable.imageName)
2. Pass the returned Uri (something like android.resource://com.myAppBase.com/4435664647) as a string
3. Call print_image(fileString)
So, at the point of testing whether the file exists in print_image(file),
I get my message "file doesn't exists".
What am I not doing right in getting the right path?
Try to do this using a Uri: convert the Uri to an InputStream, then convert the InputStream to a Bitmap.
InputStream inputStream = getContentResolver().openInputStream(uri);
BufferedInputStream bufferedInputStream = new BufferedInputStream(inputStream);
Bitmap bmp = BitmapFactory.decodeStream(bufferedInputStream);
Then print the bitmap.
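Putting those steps together for a drawable resource (a sketch; R.drawable.imageName stands in for your actual drawable):
// Sketch: build an android.resource:// Uri for the drawable, open it via
// the ContentResolver, and decode the stream into a Bitmap for printing.
try {
    Uri uri = Uri.parse("android.resource://" + getPackageName() + "/" + R.drawable.imageName);
    InputStream inputStream = getContentResolver().openInputStream(uri);
    Bitmap bmp = BitmapFactory.decodeStream(new BufferedInputStream(inputStream));
    // hand bmp to the printing code here
} catch (FileNotFoundException e) {
    e.printStackTrace();
}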
Bitmap bmp = BitmapFactory.decodeFile(file);
You cannot use the File class for a resource. Better:
Bitmap bmp = BitmapFactory.decodeResource(getResources(), R.drawable.imageName);
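If you want to keep the existing method's structure, another option is a hypothetical overload (not in the original code) that takes the resource id directly, so the File.exists() check never enters the picture:
// Hypothetical variant of print_image taking a resource id instead of a path.
private void print_image(int resourceId) {
    Bitmap bmp = BitmapFactory.decodeResource(getResources(), resourceId);
    if (bmp == null) {
        Toast.makeText(this, "could not decode resource", Toast.LENGTH_SHORT).show();
        return;
    }
    // ... continue with convertBitmap(bmp) and the printer commands as above
}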
I want to call a function that builds a video out of a list of images, and then saves it locally on the device:
public void CreateAndSaveVideoFile(List<Bitmap> MyBitmapArray) {
    // ..
}
Trials:
Following java/xuggle - encode array of images into a movie: the link in the answer is dead.
Following How to encode images into a video file in Java through programming?: the suggested library in the accepted answer does not support Android.
The next answer in the above has an approach for Android users, however the input and output of that function are not clear to me (where does he supply the images, and where does he get the video?) - I left a question comment.
The next answer in the above provides a whole class, however the required library to be included has a corrupted file (when I try to download it from the provided link) - I left a question comment.
Following Java: How do I create a movie from an array of images?: the suggested library in the top answer uses commands that I am not familiar with and don't even know how to use. For example:
Creating an MPEG-4 file from all the JPEG files in the current
directory:
mencoder mf://*.jpg -mf w=800:h=600:fps=25:type=jpg -ovc lavc \
-lavcopts vcodec=mpeg4:mbd=2:trell -oac copy -o output.avi
I don't know how I can use the above in a Java / Android project.
Can anyone help in guiding me or/and providing me with an approach to my task? Thanks in advance.
You can use jcodec's SequenceEncoder to convert a sequence of images to an MP4 file.
Sample code:
import org.jcodec.api.awt.SequenceEncoder;
...
SequenceEncoder enc = new SequenceEncoder(new File("filename"));
// GOP size will be supported in 0.2
// enc.getEncoder().setKeyInterval(25);
for(...) {
BufferedImage image = ... // Obtain an image to encode
enc.encodeImage(image);
}
enc.finish();
It's a Java library, so it's easy to import into an Android project; you don't have to use the NDK, unlike with ffmpeg.
Refer http://jcodec.org/ for sample code & downloads.
Using JCodec as demonstrated by Stanislav Vitvitskyy here.
public static void main(String[] args) throws IOException {
    SequenceEncoder encoder = new SequenceEncoder(new File("video.mp4"));
    for (int i = 1; i < 100; i++) {
        BufferedImage bi = ImageIO.read(new File(String.format("img%08d.png", i)));
        encoder.encodeImage(bi);
    }
    encoder.finish();
}
Now to convert your Bitmap to BufferedImage you can use this class:
import java.awt.image.BufferedImage;
import java.awt.image.DataBufferByte;
import java.awt.image.DataBufferInt;
import java.io.IOException;
import java.io.InputStream;

/**
 * Utility class for loading windows bitmap files
 * <p>
 * Based on code from author Abdul Bezrati and Pepijn Van Eeckhoudt
 */
public class BitmapLoader {
    /**
     * Static method to load a bitmap file based on the filename passed in.
     * Based on the bit count, this method will either call the 8 or 24 bit
     * bitmap reader methods
     *
     * @param file The name of the bitmap file to read
     * @throws IOException
     * @return A BufferedImage of the bitmap
     */
    public static BufferedImage loadBitmap(String file) throws IOException {
        BufferedImage image;
        InputStream input = null;
        try {
            input = ResourceRetriever.getResourceAsStream(file);
            int bitmapFileHeaderLength = 14;
            int bitmapInfoHeaderLength = 40;
            byte[] bitmapFileHeader = new byte[bitmapFileHeaderLength];
            byte[] bitmapInfoHeader = new byte[bitmapInfoHeaderLength];
            input.read(bitmapFileHeader, 0, bitmapFileHeaderLength);
            input.read(bitmapInfoHeader, 0, bitmapInfoHeaderLength);
            int nSize = bytesToInt(bitmapFileHeader, 2);
            int nWidth = bytesToInt(bitmapInfoHeader, 4);
            int nHeight = bytesToInt(bitmapInfoHeader, 8);
            int nBiSize = bytesToInt(bitmapInfoHeader, 0);
            int nPlanes = bytesToShort(bitmapInfoHeader, 12);
            int nBitCount = bytesToShort(bitmapInfoHeader, 14);
            int nSizeImage = bytesToInt(bitmapInfoHeader, 20);
            int nCompression = bytesToInt(bitmapInfoHeader, 16);
            int nColoursUsed = bytesToInt(bitmapInfoHeader, 32);
            int nXPixelsMeter = bytesToInt(bitmapInfoHeader, 24);
            int nYPixelsMeter = bytesToInt(bitmapInfoHeader, 28);
            int nImportantColours = bytesToInt(bitmapInfoHeader, 36);
            if (nBitCount == 24) {
                image = read24BitBitmap(nSizeImage, nHeight, nWidth, input);
            } else if (nBitCount == 8) {
                image = read8BitBitmap(nColoursUsed, nBitCount, nSizeImage, nWidth, nHeight, input);
            } else {
                System.out.println("Not a 24-bit or 8-bit Windows Bitmap, aborting...");
                image = null;
            }
        } finally {
            try {
                if (input != null)
                    input.close();
            } catch (IOException e) {
            }
        }
        return image;
    }
    /**
     * Static method to read an 8 bit bitmap
     *
     * @param nColoursUsed Number of colors used
     * @param nBitCount    The bit count
     * @param nSizeImage   The size of the image in bytes
     * @param nWidth       The width of the image
     * @param nHeight      The height of the image
     * @param input        The input stream corresponding to the image
     * @throws IOException
     * @return A BufferedImage of the bitmap
     */
    private static BufferedImage read8BitBitmap(int nColoursUsed, int nBitCount, int nSizeImage, int nWidth, int nHeight, InputStream input) throws IOException {
        int nNumColors = (nColoursUsed > 0) ? nColoursUsed : 1 << nBitCount;
        if (nSizeImage == 0) {
            nSizeImage = ((((nWidth * nBitCount) + 31) & ~31) >> 3);
            nSizeImage *= nHeight;
        }
        int[] npalette = new int[nNumColors];
        byte[] bpalette = new byte[nNumColors * 4];
        readBuffer(input, bpalette);
        int nindex8 = 0;
        for (int n = 0; n < nNumColors; n++) {
            npalette[n] = (255 & 0xff) << 24 |
                    (bpalette[nindex8 + 2] & 0xff) << 16 |
                    (bpalette[nindex8 + 1] & 0xff) << 8 |
                    (bpalette[nindex8 + 0] & 0xff);
            nindex8 += 4;
        }
        int npad8 = (nSizeImage / nHeight) - nWidth;
        BufferedImage bufferedImage = new BufferedImage(nWidth, nHeight, BufferedImage.TYPE_INT_ARGB);
        DataBufferInt dataBufferInt = ((DataBufferInt) bufferedImage.getRaster().getDataBuffer());
        int[][] bankData = dataBufferInt.getBankData();
        byte[] bdata = new byte[(nWidth + npad8) * nHeight];
        readBuffer(input, bdata);
        nindex8 = 0;
        for (int j8 = nHeight - 1; j8 >= 0; j8--) {
            for (int i8 = 0; i8 < nWidth; i8++) {
                bankData[0][j8 * nWidth + i8] = npalette[((int) bdata[nindex8] & 0xff)];
                nindex8++;
            }
            nindex8 += npad8;
        }
        return bufferedImage;
    }
    /**
     * Static method to read a 24 bit bitmap
     *
     * @param nSizeImage size of the image in bytes
     * @param nHeight    The height of the image
     * @param nWidth     The width of the image
     * @param input      The input stream corresponding to the image
     * @throws IOException
     * @return A BufferedImage of the bitmap
     */
    private static BufferedImage read24BitBitmap(int nSizeImage, int nHeight, int nWidth, InputStream input) throws IOException {
        int npad = (nSizeImage / nHeight) - nWidth * 3;
        if (npad == 4 || npad < 0)
            npad = 0;
        int nindex = 0;
        BufferedImage bufferedImage = new BufferedImage(nWidth, nHeight, BufferedImage.TYPE_4BYTE_ABGR);
        DataBufferByte dataBufferByte = ((DataBufferByte) bufferedImage.getRaster().getDataBuffer());
        byte[][] bankData = dataBufferByte.getBankData();
        byte[] brgb = new byte[(nWidth + npad) * 3 * nHeight];
        readBuffer(input, brgb);
        for (int j = nHeight - 1; j >= 0; j--) {
            for (int i = 0; i < nWidth; i++) {
                int base = (j * nWidth + i) * 4;
                bankData[0][base] = (byte) 255;
                bankData[0][base + 1] = brgb[nindex];
                bankData[0][base + 2] = brgb[nindex + 1];
                bankData[0][base + 3] = brgb[nindex + 2];
                nindex += 3;
            }
            nindex += npad;
        }
        return bufferedImage;
    }
    /**
     * Converts bytes to an int
     *
     * @param bytes An array of bytes
     * @param index The offset into the array
     * @return An int representation of the bytes
     */
    private static int bytesToInt(byte[] bytes, int index) {
        return (bytes[index + 3] & 0xff) << 24 |
                (bytes[index + 2] & 0xff) << 16 |
                (bytes[index + 1] & 0xff) << 8 |
                bytes[index + 0] & 0xff;
    }
    /**
     * Converts bytes to a short
     *
     * @param bytes An array of bytes
     * @param index The offset into the array
     * @return A short representation of the bytes
     */
    private static short bytesToShort(byte[] bytes, int index) {
        return (short) (((bytes[index + 1] & 0xff) << 8) |
                (bytes[index + 0] & 0xff));
    }
    /**
     * Fills the buffer completely from the stream
     *
     * @param in     An InputStream
     * @param buffer An array of bytes
     * @throws IOException
     */
    private static void readBuffer(InputStream in, byte[] buffer) throws IOException {
        int bytesRead = 0;
        int bytesToRead = buffer.length;
        while (bytesToRead > 0) {
            int read = in.read(buffer, bytesRead, bytesToRead);
            if (read < 0)
                throw new IOException("Unexpected end of stream");
            bytesRead += read;
            bytesToRead -= read;
        }
    }
}
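Note that the class above calls a ResourceRetriever helper that the original answer never defines. A minimal stand-in (my own assumption, so BitmapLoader compiles) could simply read straight from disk:
// Hypothetical ResourceRetriever: resolves the name directly to a file.
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;

public class ResourceRetriever {
    public static InputStream getResourceAsStream(String file) throws IOException {
        return new FileInputStream(file);
    }
}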
If the minimum Android SDK version of your application is greater than or equal to 16 (Android 4.1), the best way to encode video is the Android MediaCodec API.
From the Android 4.3 API notes:
When encoding video, Android 4.1 (SDK 16) required that you provide
the media with a ByteBuffer array, but Android 4.3 (SDK 18) now allows
you to use a Surface as the input to an encoder. For instance, this
allows you to encode input from an existing video file or using frames
generated from OpenGL ES.
MediaMuxer was added in Android 4.3 (SDK 18), so for a convenient way of writing an MP4 file with MediaMuxer you should have SDK >= 18.
Using the MediaCodec API you get hardware-accelerated encoding, and you can easily encode at up to 60 FPS.
You can start from 1) How to encode Bitmaps into a video using MediaCodec?
or use 2) Google Grafika or 3) Bigflake.
Start from Grafika's RecordFBOActivity.java. Replace the Choreographer event with your own containing the bitmap to encode, remove the on-screen drawing, load your bitmap as an OpenGL ES texture, and draw it on the MediaCodec input surface.
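For orientation, the Surface-input path boils down to configuring an AVC encoder and drawing frames into its input surface. A minimal sketch (API 18+; the size, bitrate, and frame-rate values are placeholders, and error handling is elided):
// Sketch: create an H.264 encoder fed through an input Surface.
MediaFormat format = MediaFormat.createVideoFormat("video/avc", 640, 480);
format.setInteger(MediaFormat.KEY_COLOR_FORMAT,
        MediaCodecInfo.CodecCapabilities.COLOR_FormatSurface);
format.setInteger(MediaFormat.KEY_BIT_RATE, 2000000);
format.setInteger(MediaFormat.KEY_FRAME_RATE, 30);
format.setInteger(MediaFormat.KEY_I_FRAME_INTERVAL, 1);

MediaCodec codec = MediaCodec.createEncoderByType("video/avc");
codec.configure(format, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE);
Surface inputSurface = codec.createInputSurface(); // draw each bitmap here via OpenGL ES
codec.start();
// ...then drain the encoded output buffers into MediaMuxer, as the
// Grafika and Bigflake samples demonstrate.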
jCodec has added Android support.
You need to add these to your gradle...
implementation 'org.jcodec:jcodec:0.2.3'
implementation 'org.jcodec:jcodec-android:0.2.3'
...and
android {
    ...
    configurations.all {
        resolutionStrategy.force 'com.google.code.findbugs:jsr305:3.0.2'
    }
}
I can confirm this works as expected, but with caveats. First, I tried some full-size images and the file wrote, but gave an error on playback. When I scaled down, I would get an error if the width or height of the image was not even, because the YUV420J colorspace requires a multiple of 2.
Also worthy of note: this makes your package heavy. My small project went over the dex limit by adding this and required enabling multidex.
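A small guard against that odd-dimension failure (my own sketch, not part of the library):
// Crop odd widths/heights down by one pixel so the YUV420 conversion succeeds.
Bitmap makeDimensionsEven(Bitmap src) {
    int w = src.getWidth() & ~1;  // clear the lowest bit -> even
    int h = src.getHeight() & ~1;
    return (w == src.getWidth() && h == src.getHeight())
            ? src
            : Bitmap.createBitmap(src, 0, 0, w, h);
}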
File dir = ...; // whatever directory you use
File file = new File(dir, "test.mp4");
FileChannelWrapper out = null;
try {
    out = NIOUtils.writableFileChannel(file.getAbsolutePath());
    AndroidSequenceEncoder encoder = new AndroidSequenceEncoder(out, Rational.R(15, 1));
    for (Bitmap bitmap : bitmaps) {
        encoder.encodeImage(bitmap);
    }
    encoder.finish();
} finally {
    NIOUtils.closeQuietly(out);
}
You can use Bitmp4 to convert a sequence of images to an MP4 file.
Sample code:
...
val encoder = MP4Encoder()
encoder.setFrameDelay(50)
encoder.setOutputFilePath(exportedFile.path)
encoder.setOutputSize(width, width)

startExport()
stopExport()
addFrame(bitmap) // called at intervals
It's a Java library, so it's easy to import into an Android project; you don't have to use the NDK, unlike with ffmpeg.
Refer to https://github.com/dbof10/Bitmp4 for sample code and downloads.
I created a project that should be able to handle this. The code is light and fairly straightforward.
https://github.com/dburckh/bitmap2video
Abhishek V was right; for more information about jcodec's SequenceEncoder, see Android make animated video from list of images.
Recently I built a real-time video system using a Raspberry Pi and Android devices and met the same problem as yours. Instead of saving a list of image files, I used real-time streaming protocols like RTP/RTCP to transfer the data stream to the user. If your requirement is something like this, maybe you could change your strategy.
Another suggestion is to explore some C/C++ libraries, using the NDK/JNI to break the limitations of Java.
Hope the suggestions make sense to you :)
I'm editing an MP4 on Android using MediaExtractor to fetch audio and video tracks then creating a new file using MediaMuxer. It works fine. I can play the new MP4 on the phone (and other players) but am unable to stream the file on the web. When I stop the MediaMuxer it generates a log message
"The mp4 file will not be streamable."
I looked at the underlying native code (MPEG4Writer.cpp) and it would appear that the writer is having trouble calculating the needed moov box size. It tries to guess using a heuristic if a bit rate is not supplied as a parameter to the writer. The problem is that MediaMuxer doesn't provide the ability to set MPEG4Writer's parameters. Am I missing something, or am I stuck looking at some other means of generating the file (or header)? Thanks.
In MPEG4Writer.cpp:
// The default MIN_MOOV_BOX_SIZE is set to 0.6% x 1MB / 2,
// where 1MB is the common file size limit for MMS application.
// The default MAX _MOOV_BOX_SIZE value is based on about 3
// minute video recording with a bit rate about 3 Mbps, because
// statistics also show that most of the video captured are going
// to be less than 3 minutes.
This is a bad assumption about how MediaMuxer might be used. We are recording a max of 15 seconds of higher-res video, and MIN_MOOV_BOX_SIZE is way too small. So to make the file streamable I have to rewrite the file to move the moov header before mdat and patch up some offsets. Here is my code. It's not great: error paths aren't handled correctly and it makes assumptions about the order of the boxes.
public void fastPlay(String srcFile, String dstFile) {
    RandomAccessFile inFile = null;
    FileOutputStream outFile = null;
    try {
        inFile = new RandomAccessFile(new File(srcFile), "r");
        outFile = new FileOutputStream(new File(dstFile));
        int moovPos = 0;
        int mdatPos = 0;
        int moovSize = 0;
        int mdatSize = 0;
        byte[] boxSizeBuf = new byte[4];
        byte[] pathBuf = new byte[4];
        int boxSize;
        int dataSize;
        int bytesRead;
        int totalBytesRead = 0;
        int bytesWritten = 0;
        // First find the location and size of the moov and mdat boxes
        while (true) {
            try {
                boxSize = inFile.readInt();
                bytesRead = inFile.read(pathBuf);
                if (bytesRead != 4) {
                    Log.e(TAG, "Unexpected bytes read (path) " + bytesRead);
                    break;
                }
                String pathRead = new String(pathBuf, "UTF-8");
                dataSize = boxSize - 8;
                totalBytesRead += 8;
                if (pathRead.equals("moov")) {
                    moovPos = totalBytesRead - 8;
                    moovSize = boxSize;
                } else if (pathRead.equals("mdat")) {
                    mdatPos = totalBytesRead - 8;
                    mdatSize = boxSize;
                }
                totalBytesRead += inFile.skipBytes(dataSize);
            } catch (IOException e) {
                break;
            }
        }
        // Read the moov box into a buffer. This has to be patched up. Ug.
        inFile.seek(moovPos);
        byte[] moovBoxBuf = new byte[moovSize]; // This shouldn't be too big.
        bytesRead = inFile.read(moovBoxBuf);
        if (bytesRead != moovSize) {
            Log.e(TAG, "Couldn't read full moov box");
        }
        // Now locate the stco boxes (chunk offset box) inside the moov box and patch
        // them up. This ain't purdy.
        int pos = 0;
        while (pos < moovBoxBuf.length - 4) {
            if (moovBoxBuf[pos] == 0x73 && moovBoxBuf[pos + 1] == 0x74 &&
                    moovBoxBuf[pos + 2] == 0x63 && moovBoxBuf[pos + 3] == 0x6f) {
                int stcoPos = pos - 4;
                int stcoSize = byteArrayToInt(moovBoxBuf, stcoPos);
                patchStco(moovBoxBuf, stcoSize, stcoPos, moovSize);
            }
            pos++;
        }
        inFile.seek(0);
        byte[] buf = new byte[mdatPos];
        // Write out everything before mdat
        inFile.read(buf);
        outFile.write(buf);
        // Write moov
        outFile.write(moovBoxBuf, 0, moovSize);
        // Write out mdat
        inFile.seek(mdatPos);
        bytesWritten = 0;
        while (bytesWritten < mdatSize) {
            int bytesRemaining = mdatSize - bytesWritten;
            int bytesToRead = buf.length;
            if (bytesRemaining < bytesToRead) bytesToRead = bytesRemaining;
            bytesRead = inFile.read(buf, 0, bytesToRead);
            if (bytesRead > 0) {
                outFile.write(buf, 0, bytesRead);
                bytesWritten += bytesRead;
            } else {
                break;
            }
        }
    } catch (IOException e) {
        Log.e(TAG, e.getMessage());
    } finally {
        try {
            if (outFile != null) outFile.close();
            if (inFile != null) inFile.close();
        } catch (IOException e) {}
    }
}
private void patchStco(byte[] buf, int size, int pos, int moovSize) {
    Log.e(TAG, "stco " + pos + " size " + size);
    // We are inserting the moov box before the mdat box, so all of the
    // offsets in the stco box need to be increased by the size of the moov box.
    // The stco box is variable in length: 4 byte size, 4 byte path, 4 byte
    // version, 4 byte flags, followed by a variable number of chunk offsets.
    // So subtract 16 from the size, then divide the result by 4 to get the
    // number of chunk offsets to patch up.
    int chunkOffsetCount = (size - 16) / 4;
    int chunkPos = pos + 16;
    for (int i = 0; i < chunkOffsetCount; i++) {
        int chunkOffset = byteArrayToInt(buf, chunkPos);
        int newChunkOffset = chunkOffset + moovSize;
        intToByteArray(newChunkOffset, buf, chunkPos);
        chunkPos += 4;
    }
}
public static int byteArrayToInt(byte[] b, int offset) {
    return b[offset + 3] & 0xFF |
            (b[offset + 2] & 0xFF) << 8 |
            (b[offset + 1] & 0xFF) << 16 |
            (b[offset] & 0xFF) << 24;
}

public void intToByteArray(int a, byte[] buf, int offset) {
    buf[offset] = (byte) ((a >> 24) & 0xFF);
    buf[offset + 1] = (byte) ((a >> 16) & 0xFF);
    buf[offset + 2] = (byte) ((a >> 8) & 0xFF);
    buf[offset + 3] = (byte) (a & 0xFF);
}
Currently MediaMuxer does not create streamable MP4 files
You can try Intel INDE at https://software.intel.com/en-us/intel-inde, with the Media Pack for Android, which is part of INDE; tutorials are at https://software.intel.com/en-us/articles/intel-inde-media-pack-for-android-tutorials. It has a sample that shows how to use the Media Pack to create and stream files over the network.
For example, for camera streaming it has the sample CameraStreamerActivity.java:
public void onCreate(Bundle icicle) {
    capture = new CameraCapture(new AndroidMediaObjectFactory(getApplicationContext()), progressListener);
    parameters = new StreamingParameters();
    parameters.Host = getString(R.string.streaming_server_default_ip);
    parameters.Port = Integer.parseInt(getString(R.string.streaming_server_default_port));
    parameters.ApplicationName = getString(R.string.streaming_server_default_app);
    parameters.StreamName = getString(R.string.streaming_server_default_stream);
    parameters.isToPublishAudio = false;
    parameters.isToPublishVideo = true;
}

public void startStreaming() {
    configureMediaStreamFormat();
    capture.setTargetVideoFormat(videoFormat);
    capture.setTargetAudioFormat(audioFormat);
    capture.setTargetConnection(prepareStreamingParams());
    capture.start();
}
In addition, there are similar samples for file streaming and for capturing and streaming gameplay.
I have a C++ WebSocket server, and I want to send an OpenCV image (cv::Mat) to my Android client.
I understood that I should use a base64 string, but I can't figure out how to do it from my OpenCV frames.
I don't know how to convert a cv::Mat to a byte array.
Thank you
Hi, you can use the code below, which works for me.
C++ Client
Here we send the raw BGR bytes into the socket by accessing the Mat data pointer.
Before sending, make sure the Mat is continuous; otherwise, make it continuous.
int sendImage(Mat frame) {
    int imgSize = frame.total() * frame.elemSize();
    int bytes = 0;
    int clientSock;
    const char* server_ip = ANDROID_IP;
    int server_port = 2000;
    struct sockaddr_in serverAddr;
    socklen_t serverAddrLen = sizeof(serverAddr);
    if ((clientSock = socket(PF_INET, SOCK_STREAM, 0)) < 0) {
        printf("\n--> socket() failed.");
        return -1;
    }
    serverAddr.sin_family = PF_INET;
    serverAddr.sin_addr.s_addr = inet_addr(server_ip);
    serverAddr.sin_port = htons(server_port);
    if (connect(clientSock, (sockaddr*)&serverAddr, serverAddrLen) < 0) {
        printf("\n--> connect() failed.");
        return -1;
    }
    frame = (frame.reshape(0, 1)); // to make it continuous
    /* start sending images */
    if ((bytes = send(clientSock, frame.data, imgSize, 0)) < 0) {
        printf("\n--> send() failed");
        return -1;
    }
    /* if something went wrong, restart the connection */
    if (bytes != imgSize) {
        cout << "\n--> Connection closed " << endl;
        close(clientSock);
        return -1;
    }
    return 0;
}
Java Server
You should know the size of the image you are going to receive.
Receive the stream from the socket and convert it to a byte array.
Then convert the BGR byte array and create the Bitmap.
Code for receiving the byte array from the C++ client:
public static byte imageByte[];
int imageSize = 921600; // expected image size 640x480x3
InputStream in = server.getInputStream();
ByteArrayOutputStream baos = new ByteArrayOutputStream();
byte[] buffer = new byte[1024];
int remainingBytes = imageSize;
while (remainingBytes > 0) {
    int bytesRead = in.read(buffer);
    if (bytesRead < 0) {
        throw new IOException("Unexpected end of data");
    }
    baos.write(buffer, 0, bytesRead);
    remainingBytes -= bytesRead;
}
in.close();
imageByte = baos.toByteArray();
baos.close();
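For completeness, the server variable above would typically be the Socket returned by accepting the C++ client's connection (an assumption on my part; the port must match the C++ side):
// Hypothetical setup for the 'server' socket used above.
ServerSocket listener = new ServerSocket(2000); // same port as the C++ client
Socket server = listener.accept();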
Code to Convert byte array to RGB bitmap image
int nrOfPixels = imageByte.length / 3; // Three bytes per pixel.
int[] pixels = new int[nrOfPixels];
for (int i = 0; i < nrOfPixels; i++) {
    int r = imageByte[3 * i];
    int g = imageByte[3 * i + 1];
    int b = imageByte[3 * i + 2];
    if (r < 0)
        r = r + 256; // Convert to positive
    if (g < 0)
        g = g + 256; // Convert to positive
    if (b < 0)
        b = b + 256; // Convert to positive
    pixels[i] = Color.rgb(b, g, r);
}
Bitmap bitmap = Bitmap.createBitmap(pixels, 640, 480, Bitmap.Config.ARGB_8888);
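The sign-correcting if statements above can be replaced by masking each byte with 0xFF, the idiomatic Java way to treat bytes as unsigned (equivalent result, same BGR byte-order assumption):
// Equivalent conversion loop using 0xFF masks instead of the +256 fix-ups.
for (int i = 0; i < nrOfPixels; i++) {
    int b = imageByte[3 * i] & 0xFF;
    int g = imageByte[3 * i + 1] & 0xFF;
    int r = imageByte[3 * i + 2] & 0xFF;
    pixels[i] = Color.rgb(r, g, b);
}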
Check this answer to the question Serializing OpenCV Mat_. If using Boost is not a problem for you, it can solve your problem. You will probably need some additional JNI magic on the client side.
You should take into account which of the data matter to you: cols (number of columns), rows (number of rows), data (which contains the pixel information), and type (data type and channel count).
You have to vectorize your matrix, because it is not necessarily continuous, and take into account the variation in the size of a pixel in memory.
Suppose:
cv::Mat m;
Then to allocate:
int depth; // element size, measured in bytes
switch (m.depth())
{
    // ... you might check for all of the possibilities
    case CV_16U:
        depth = 2;
        break;
}
char *array = new char[4 + 4 + 4 + m.cols * m.rows * m.channels() * depth]; // rows + cols + type + data
And then write the header information:
int *rows = (int *)&array[0];
int *cols = (int *)&array[4];
int *type = (int *)&array[8];
*rows = m.rows;
*cols = m.cols;
*type = m.type();
And finally the data:
char *mPtr;
int rowBytes = m.cols * m.channels() * depth; // bytes per matrix row
for (int i = 0; i < m.rows; i++)
{
    mPtr = m.ptr<char>(i); // the data type doesn't matter, we copy raw bytes
    for (int j = 0; j < rowBytes; j++)
    {
        array[12 + i * rowBytes + j] = mPtr[j]; // 12-byte header comes first
    }
}
Hopefully no bugs in the code.
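On the Android side, the 12-byte header and the payload could then be unpacked like this (a sketch of my own; it assumes the ints were written little-endian, as on typical ARM/x86 hosts, and that 'received' holds the full message):
// Hypothetical client-side parse of the serialized Mat from above.
ByteBuffer bb = ByteBuffer.wrap(received).order(ByteOrder.LITTLE_ENDIAN);
int rows = bb.getInt();
int cols = bb.getInt();
int type = bb.getInt(); // OpenCV type constant
byte[] pixels = new byte[received.length - 12]; // raw pixel payload
bb.get(pixels);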
I have been working on a project about video summarization on the Android platform, and I am stuck on encoding. I think I must first convert each frame into an RGB frame, then convert that RGB frame into a YUV frame, and then encode the frame. After these operations, the output video was very weird. I think I missed something. Here is my latest optimized code; maybe someone has experience with this subject.
Its syntax is adapted to the Android NDK:
jint Java_com_test_Test_encodeVideo(JNIEnv* env, jobject javaThis)
{
    char flname[128], err[128], info[128]; // buffers, not dangling pointers
    AVCodec *codec;
    AVCodecContext *c = NULL;
    int i, out_size, size, x, y, z, outbuf_size;
    int frameCount = 99;
    FILE *f;
    AVFrame *picture, *yuvFrame;
    uint8_t *outbuf, *picture_buf;
    PPMImage *img;
    const char *destfilename = "/sdcard/new.mp4";
    int numBytes;
    uint8_t *buffer;
    av_register_all();
    // must be called before using avcodec lib
    avcodec_init();
    // register all the codecs
    avcodec_register_all();
    log_message("Video encoding\n");
    // find the H263 video encoder
    codec = avcodec_find_encoder(CODEC_ID_H263);
    if (!codec) {
        sprintf(err, "codec not found\n");
        log_message(err);
    }
    c = avcodec_alloc_context();
    picture = avcodec_alloc_frame();
    yuvFrame = avcodec_alloc_frame();
    // get first ppm context. it is because I need width and height values.
    img = getPPM("/sdcard/frame1.ppm");
    c->bit_rate = 400000;
    // resolution must be a multiple of two
    c->width = img->x;
    c->height = img->y;
    free(img);
    // frames per second
    c->time_base = (AVRational){1, 25};
    c->gop_size = 10; // emit one intra frame every ten frames
    //c->max_b_frames = 1;
    c->pix_fmt = PIX_FMT_YUV420P;
    // open it
    if (avcodec_open(c, codec) < 0) {
        log_message("codec couldn't open");
        return -1;
    }
    //destfilename = (*env)->GetStringUTFChars(env, dst, 0);
    f = fopen(destfilename, "wb");
    log_message(destfilename);
    if (!f) {
        sprintf(err, "could not open %s", destfilename);
        log_message(err);
    }
    log_message("after destination file opening");
    // alloc image and output buffer
    outbuf_size = 100000;
    outbuf = malloc(outbuf_size);
    size = c->width * c->height;
    picture_buf = malloc(size * 3); // size for RGB
    picture->data[0] = picture_buf;
    picture->data[1] = picture->data[0] + size;
    picture->data[2] = picture->data[1] + size / 4;
    picture->linesize[0] = c->width;
    picture->linesize[1] = c->width / 2;
    picture->linesize[2] = c->width / 2;
    numBytes = avpicture_get_size(PIX_FMT_YUV420P, c->width, c->height);
    buffer = malloc(numBytes);
    // Assign appropriate parts of buffer to image planes in FrameYUV
    avpicture_fill((AVPicture *)yuvFrame, buffer, PIX_FMT_YUV420P, c->width, c->height);
    // encode the video
    log_message("before for loop");
    for (z = 1; z < frameCount; z++) {
        sprintf(flname, "/sdcard/frame%d.ppm", z);
        // read the ppm file
        img = getPPM(flname);
        picture->data[0] = img->data;
        // convert the rgb frame into yuv frame
        rgb2yuv(picture, yuvFrame, c);
        log_message("translation completed.");
        // encode the image
        out_size = avcodec_encode_video(c, outbuf, outbuf_size, yuvFrame);
        sprintf(info, "encoding frame %3d (size=%5d)\n", z, out_size);
        log_message(info);
        fwrite(outbuf, 1, out_size, f);
        free(img);
    }
    // get the delayed frames
    for (i = frameCount; out_size; i++) {
        //fflush(stdout);
        out_size = avcodec_encode_video(c, outbuf, outbuf_size, NULL);
        sprintf(info, "write frame %3d (size=%5d)\n", i, out_size);
        log_message(info);
        fwrite(outbuf, 1, out_size, f);
    }
    // add sequence end code to have a real mpeg file
    outbuf[0] = 0x00;
    outbuf[1] = 0x00;
    outbuf[2] = 0x01;
    outbuf[3] = 0xb7;
    fwrite(outbuf, 1, 4, f);
    fclose(f);
    free(picture_buf);
    free(outbuf);
    avcodec_close(c);
    av_free(c);
    av_free(picture);
    av_free(yuvFrame);
    return 0;
}
int rgb2yuv(AVFrame *frameRGB, AVFrame *frameYUV, AVCodecContext *c)
{
    char err[128];
    static struct SwsContext *img_convert_ctx;
    log_message("conversion starts");
    // Convert the image into YUV format from RGB format
    if (img_convert_ctx == NULL) {
        int w = c->width;
        int h = c->height;
        img_convert_ctx = sws_getContext(w, h, PIX_FMT_RGB24, w, h, c->pix_fmt, SWS_BICUBIC, NULL, NULL, NULL);
        if (img_convert_ctx == NULL) {
            sprintf(err, "Cannot initialize the conversion context!\n");
            log_message(err);
            return -1;
        }
    }
    int ret = sws_scale(img_convert_ctx, frameRGB->data, frameRGB->linesize, 0, c->height, frameYUV->data, frameYUV->linesize);
    return ret;
}