Spinning globe in Opengl-es - android

I found this code to generate a sphere in Opengl es. I am unable to understand the logic, could someone please give me some insights on this.
/**
 * Generates the per-slice geometry of a UV sphere of the given radius.
 * For each of the {@code slices} longitudinal wedges it emits a triangle
 * strip: one vertex pair (next-slice meridian, current meridian) per stack
 * ring, plus matching unit normals and texture coordinates.
 *
 * Fix: the scratch arrays were mis-sized (7, 4 and 10 floats per stack row).
 * Each stack row actually stores two XYZ vertices (6 floats), two XYZ
 * normals (6 floats) and two UV pairs (4 floats); the old
 * {@code 4 * (stacks + 1)} normal array overflowed as soon as stacks >= 1.
 */
private void generateData() {
    slicesBuffers = new FloatBuffer[slices];
    normalsBuffers = new FloatBuffer[slices];
    texCoordsBuffers = new FloatBuffer[slices];
    for (int i = 0; i < slices; i++) {
        // Two XYZ vertices / normals and two UV pairs per stack ring.
        float[] vertexCoords = new float[6 * (stacks + 1)];
        float[] normalCoords = new float[6 * (stacks + 1)];
        float[] textureCoords = new float[4 * (stacks + 1)];
        double alpha0 = i * (2 * Math.PI) / slices;       // longitude of this slice
        double alpha1 = (i + 1) * (2 * Math.PI) / slices; // longitude of the next slice
        float cosAlpha0 = (float) Math.cos(alpha0);
        float sinAlpha0 = (float) Math.sin(alpha0);
        float cosAlpha1 = (float) Math.cos(alpha1);
        float sinAlpha1 = (float) Math.sin(alpha1);
        for (int j = 0; j <= stacks; j++) {
            // Latitude sweeps from -PI/2 (south pole) to +PI/2 (north pole).
            double beta = j * Math.PI / stacks - Math.PI / 2;
            float cosBeta = (float) Math.cos(beta);
            float sinBeta = (float) Math.sin(beta);
            // Vertex pair: next-slice meridian first, current meridian second.
            Utils.setXYZ(vertexCoords, 6 * j,
                    radius * cosBeta * cosAlpha1,
                    radius * sinBeta,
                    radius * cosBeta * sinAlpha1);
            Utils.setXYZ(vertexCoords, 6 * j + 3,
                    radius * cosBeta * cosAlpha0,
                    radius * sinBeta,
                    radius * cosBeta * sinAlpha0);
            // On a sphere the unit normal is the vertex position / radius.
            Utils.setXYZ(normalCoords, 6 * j,
                    cosBeta * cosAlpha1,
                    sinBeta,
                    cosBeta * sinAlpha1);
            Utils.setXYZ(normalCoords, 6 * j + 3,
                    cosBeta * cosAlpha0,
                    sinBeta,
                    cosBeta * sinAlpha0);
            // UV mapping: u follows longitude, v follows latitude.
            Utils.setXY(textureCoords, 4 * j,
                    ((float) (i + 1)) / slices,
                    ((float) j) / stacks);
            Utils.setXY(textureCoords, 4 * j + 2,
                    ((float) i) / slices,
                    ((float) j) / stacks);
        }
        slicesBuffers[i] = FloatBuffer.wrap(vertexCoords);
        normalsBuffers[i] = FloatBuffer.wrap(normalCoords);
        texCoordsBuffers[i] = FloatBuffer.wrap(textureCoords);
    }
}
Thank you.

For the theory of sphere generation see:
en.wikipedia.org/wiki/Sphere (Vertex)
en.wikipedia.org/wiki/UV_mapping (Texture coordinate)
http://groups.google.com/group/android-developers/browse_thread/thread/0030261b82ed71e5/338fc1dcbfe6945f?lnk=raot(Normal surface)
Your code is mostly right, but it has a few issues; I have made some corrections:
/**
 * Builds the sphere mesh. Per-slice vertex/normal/texture strips are
 * generated first (two vertices per stack ring: one on this slice's
 * meridian, one on the next), then all per-slice arrays are concatenated
 * into the single buffers used for rendering.
 *
 * Fixes applied: removed the unused local {@code tmp}, the per-vertex
 * debug log inside the inner loop, and the redundant trailing
 * {@code return;}.
 *
 * @param slices number of longitudinal subdivisions (around the Y axis).
 * @param stacks number of latitudinal subdivisions (pole to pole).
 * @param radius sphere radius.
 * @param H unused in this constructor — presumably a centre offset; TODO confirm.
 * @param K unused in this constructor — presumably a centre offset; TODO confirm.
 * @param Z unused in this constructor — presumably a centre offset; TODO confirm.
 * @param image full texture bitmap.
 * @param first bitmap for the first hemisphere.
 * @param second bitmap for the second hemisphere.
 */
public Sphere(int slices, int stacks, float radius, float H, float K, float Z, Bitmap image, Bitmap first, Bitmap second) {
    FloatBuffer[] slicesBuffers = new FloatBuffer[slices];
    FloatBuffer[] normalsBuffers = new FloatBuffer[slices];
    FloatBuffer[] texCoordsBuffers = new FloatBuffer[slices];
    float[] total_vertexBuff;
    float[] total_normalsBuff;
    float[] total_textCoordsBuff;
    int vertex_counter = 0;
    int normals_counter = 0;
    int texCoords_counter = 0;
    int position_dst;
    for (int i = 0; i < slices; i++) {
        // Two XYZ vertices / normals and two UV pairs per stack ring.
        float[] vertexCoords = new float[2 * 3 * (stacks + 1)];
        float[] normalCoords = new float[2 * 3 * (stacks + 1)];
        float[] textureCoords = new float[4 * (stacks + 1)];
        double alpha0 = i * (2 * Math.PI) / slices;       // longitude of this slice
        double alpha1 = (i + 1) * (2 * Math.PI) / slices; // longitude of the next slice
        float cosAlpha0 = (float) Math.cos(alpha0);
        float sinAlpha0 = (float) Math.sin(alpha0);
        float cosAlpha1 = (float) Math.cos(alpha1);
        float sinAlpha1 = (float) Math.sin(alpha1);
        for (int j = 0; j <= stacks; j++) {
            // Latitude from -PI/2 (south pole) to +PI/2 (north pole).
            double beta = j * Math.PI / stacks - Math.PI / 2;
            float cosBeta = (float) Math.cos(beta);
            float sinBeta = (float) Math.sin(beta);
            setXYZ(vertexCoords, 6 * j, radius * cosBeta * cosAlpha1, radius * sinBeta, radius * cosBeta * sinAlpha1);
            setXYZ(vertexCoords, 6 * j + 3, radius * cosBeta * cosAlpha0, radius * sinBeta, radius * cosBeta * sinAlpha0);
            vertex_counter += 2;
            // Unit sphere: the normal equals the vertex position / radius.
            setXYZ(normalCoords, 6 * j, cosBeta * cosAlpha1, sinBeta, cosBeta * sinAlpha1);
            setXYZ(normalCoords, 6 * j + 3, cosBeta * cosAlpha0, sinBeta, cosBeta * sinAlpha0);
            normals_counter += 2;
            setXY(textureCoords, 4 * j, ((float) (i + 1)) / slices, ((float) j) / stacks);
            setXY(textureCoords, 4 * j + 2, ((float) i) / slices, ((float) j) / stacks);
            texCoords_counter += 2;
        }
        slicesBuffers[i] = FloatBuffer.wrap(vertexCoords);
        normalsBuffers[i] = FloatBuffer.wrap(normalCoords);
        texCoordsBuffers[i] = FloatBuffer.wrap(textureCoords);
    }
    total_vertexBuff = new float[vertex_counter * 3];
    total_normalsBuff = new float[normals_counter * 3];
    total_textCoordsBuff = new float[texCoords_counter * 2];
    position_dst = 0;
    // Copy vertices into one contiguous array.
    for (int i = 0; i < slicesBuffers.length; i++) {
        for (int j = 0; j < slicesBuffers[i].capacity(); j++, position_dst++)
            total_vertexBuff[position_dst] = slicesBuffers[i].get(j);
    }
    position_dst = 0;
    // Copy normals.
    for (int i = 0; i < normalsBuffers.length; i++) {
        for (int j = 0; j < normalsBuffers[i].capacity(); j++, position_dst++)
            total_normalsBuff[position_dst] = normalsBuffers[i].get(j);
    }
    position_dst = 0;
    // Copy texture coordinates.
    for (int i = 0; i < texCoordsBuffers.length; i++) {
        for (int j = 0; j < texCoordsBuffers[i].capacity(); j++, position_dst++)
            total_textCoordsBuff[position_dst] = texCoordsBuffers[i].get(j);
    }
    this.image = image;
    this.half_first = first;
    this.half_second = second;
    this.vertexBuffer = FloatBuffer.wrap(total_vertexBuff);
    this.normalsBuffer = FloatBuffer.wrap(total_normalsBuff);
    this.texCoordsBuffer = FloatBuffer.wrap(total_textCoordsBuff);
    Log.d(TAG, "vertex_counter:" + vertex_counter);
    Log.d(TAG, "texCoords_counter:" + texCoords_counter);
    Log.d(TAG, "vertexBuffer:" + this.vertexBuffer.capacity());
    Log.d(TAG, "texCoordsBuffer:" + this.texCoordsBuffer.capacity());
    this.textures_ids = IntBuffer.allocate(2);
    this.totalVertexCount = vertex_counter;
    this.setPlaneBuffer();
}
I really hope this helps you.
Bye
pedr0

Related

interpolate a given array to a new length

in order to interpolate 2 values, I can use
lerp(int a, int b) {
return (a + b) / 2;
}
Now imagine I've an array(1, 30, 100, 300) and I want to interpolate it to array in size N (N=10 for example).
If N == 7, then:
1,15,30,65,100,200,300
I've no idea how to interpolate 4 values to be 10. I need a method that looks like:
interpolate(final int[] input, final int newSize) {
int[] res = new int[newSize];
...
return res;
}
that works even on my example above with newSize of 7, 10 or whatever.
Any idea how to implement it?
SOLVED.
/**
 * Resamples x to newLength points by linear interpolation.
 * The first two and the last sample are copied verbatim; interior outputs
 * are placed uniformly over the source range and linearly blended between
 * their two neighbouring source samples.
 *
 * Fix: removed the unused local {@code Beta} (only {@code newBeta} was
 * ever read).
 *
 * @param x source samples; must contain at least one element.
 * @param newLength requested output length.
 * @return the resampled array, or null if newLength <= 0 (original contract).
 */
public static double[] interpolate(double[] x, int newLength) {
    double[] y = null;
    if (newLength > 0) {
        int N = x.length;
        if (N == 1) {
            // Single-sample input: nothing to interpolate.
            y = new double[1];
            y[0] = x[0];
            return y;
        } else if (newLength == 1) {
            // Single-sample output: pick the middle source sample.
            y = new double[1];
            int ind = (int) Math.floor(N * 0.5 + 0.5);
            ind = Math.max(1, ind);
            ind = Math.min(ind, N);
            y[0] = x[ind - 1];
            return y;
        } else {
            y = new double[newLength];
            // Source-position step between consecutive interior outputs.
            double newBeta = 1.0;
            if (newLength > 2)
                newBeta = (N - 2.0) / (newLength - 2.0);
            y[0] = x[0];
            y[1] = x[1];
            y[newLength - 1] = x[N - 1];
            double tmp, alpha;
            int i, j;
            for (i = 2; i <= newLength - 2; i++) {
                tmp = 1.0 + (i - 1) * newBeta;
                j = (int) Math.floor(tmp);
                alpha = tmp - j;
                // Blend the two neighbouring samples, clamped to valid indices.
                y[i] = (1.0 - alpha) * x[Math.max(0, j)] + alpha * x[Math.min(N - 1, j + 1)];
            }
        }
    }
    return y;
}
/**
 * Returns the largest element of the array, skipping NaN entries.
 *
 * @param data input values; may be empty.
 * @return the maximum non-NaN value, or NaN if the array is empty or
 *         contains only NaN entries.
 */
public static double max(double[] data) {
    double best = Double.NaN;
    for (double value : data) {
        if (Double.isNaN(value)) {
            continue; // NaN never participates in the comparison
        }
        if (Double.isNaN(best) || value > best) {
            best = value;
        }
    }
    return best;
}
/**
 * Returns the largest element of the array.
 *
 * @param data input values; must contain at least one element (an empty
 *             array throws ArrayIndexOutOfBoundsException, as before).
 * @return the maximum value.
 */
public static int max(int[] data) {
    // Seed with the first element, then fold the rest in.
    int result = data[0];
    for (int value : data) {
        result = Math.max(result, value);
    }
    return result;
}

Provided data element number (0) should be multiple of the Mat channels count (1) Android OpenCV

I'm trying to find 4 corners from a rect object(paper sheet).
// Source image as an OpenCV matrix; filled once by the constructor.
Mat source = new Mat();
// Reference point; reset to (0, 0) at the start of FindCorners().
Org.Opencv.Core.Point center;
// Converts the bitmap to analyse into the 'source' Mat.
// NOTE(review): Utils.BitmapToMat typically yields an RGBA (4-channel) Mat —
// confirm the colour-conversion code used later matches that.
public GetCorners(Bitmap _sourceImg)
{
    Utils.BitmapToMat(_sourceImg, source);
}
//find corners
public void FindCorners()
{
center = new Org.Opencv.Core.Point(0, 0);
//Mat source = new Mat();
if (source == null)
{
Console.WriteLine("No IMG");
return;
}
Mat BlackWhite = new Mat();
Imgproc.CvtColor(source, BlackWhite, Imgproc. ColorBgr2gray); //ColorBgra2gray, 4);
Imgproc.Blur(BlackWhite, BlackWhite, new Size(3, 3));
Imgproc.Canny(BlackWhite, BlackWhite, 100, 100, 3, true);
Mat Lines = new Mat();
int treshold = 70;
int minLinsize = 30;
int lineGap = 10;
Imgproc.HoughLinesP(BlackWhite, Lines, 1, Math.PI / 180, treshold, minLinsize, lineGap);
for (int i = 0; i < Lines.Cols(); i++)
{
double[] Vector = Lines.Get(0, i);
double[] Value = new double[4];
Value[0] = 0;
Value[1] = ((float) Vector[1] - Vector[3]) / (Vector[0] - Vector[2]) * -Vector[0] + Vector[1];
Value[2] = source.Cols();
Value[3] = ((float)Vector[1] - Vector[3]) / (Vector[0] - Vector[2]) * (source.Cols() - Vector[2]) + Vector[3];
Lines.Put(0, i, Value);
}
Console.WriteLine("##Quantity {0} Founded##",Lines.Cols());
List<Org.Opencv.Core.Point> Corners = new List<Org.Opencv.Core.Point>();
for (int i = 0; i < Lines.Cols(); i++)
{
for (int j = 0 ; i < Lines.Cols(); j++)
{
Mat m1 = new Mat(),
m2 = new Mat();
double[] d1 = Lines.Get(0, i);
double[] d2 = Lines.Get(0, j);
m1.Put(0, j, d1);
m2.Put(0, j, d2);
try
{
//i'm getting exception here
Org.Opencv.Core.Point pt = ComputeInteresect(Lines.Get(0, i), Lines.Get(0, j)); //(m1, m2);
if (pt.X >= 0 && pt.Y >= 0)
{
Corners.Add(pt);
Console.WriteLine ("dobavleno {0} koordinat",Corners.Count);
}
}
catch (Exception ex)
{
Console.WriteLine(ex.Message);
}
}
}
And I also have a method that calculates the intersections:
// Intersection of two infinite lines, each given as a 4-vector
// (x1, y1, x2, y2) of two points on the line. Returns (-1, -1) when the
// lines are parallel (zero denominator) and therefore have no unique
// intersection.
static Org.Opencv.Core.Point ComputeInteresect(double[] a, double[] b) //(Mat es,Mat es2)//
{
    double x1 = a[0], y1 = a[1], x2 = a[2], y2 = a[3];
    double x3 = b[0], y3 = b[1], x4 = b[2], y4 = b[3];
    // Cross product of direction vectors: zero means parallel lines.
    double denom = ((x1 - x2) * (y3 - y4)) - ((y1 - y2) * (x3 - x4));
    if (denom == 0)
    {
        return new Org.Opencv.Core.Point(-1, -1);
    }
    // Standard two-line intersection via 2x2 determinants.
    double det1 = x1 * y2 - y1 * x2;
    double det2 = x3 * y4 - y3 * x4;
    Org.Opencv.Core.Point pt = new Org.Opencv.Core.Point();
    pt.X = (det1 * (x3 - x4) - (x1 - x2) * det2) / denom;
    pt.Y = (det1 * (y3 - y4) - (y1 - y2) * det2) / denom;
    return pt;
}
And I don't understand why, but I'm getting this exception:
java.lang.UnsupportedOperationException:
Provided data element number (0)
should be multiple of the Mat channels
count (1)
I found that this problem occurs when an image is in RGB format (and has 4 channels), but I am converting to gray (1 channel) first, via this method:
Imgproc.CvtColor(source, BlackWhite, Imgproc. ColorBgr2gray); //ColorBgra2gray, 4);
Any help will be appreciated, thanks!
Finally, I changed some parts of the code and it works:
// Author's revised pairing loop: the m1/m2 temporaries (and their Put calls
// on empty Mats) were removed, which resolved the reported exception.
Console.WriteLine("##Quantity {0} Founded##",Lines.Cols());
List<Org.Opencv.Core.Point> Corners = new List<Org.Opencv.Core.Point>();
for (int i = 0; i < Lines.Cols(); i++)
{
    // NOTE(review): the inner loop's condition still tests 'i', not 'j';
    // it almost certainly should read 'j < Lines.Cols()', otherwise the
    // loop never advances past the same outer index once entered. Verify.
    for (int j = i+1 ; i < Lines.Cols(); j++)
    {
        try
        {
            // Intersect line i with line j; negative coordinates mean
            // "parallel" per ComputeInteresect's sentinel.
            Org.Opencv.Core.Point pt = ComputeInteresect(Lines.Get(0, i), Lines.Get(0, j));
            if (pt.X >= 0 && pt.Y >= 0)
            {
                Corners.Add(pt);
                Console.WriteLine ("dobavleno {0} koordinat",Corners.Count);
            }
        }
        catch (Exception ex)
        {
            Console.WriteLine(ex.Message);
        }
    }
}

mosaic bitmap in android

Hi I need to make mosaic effect in android.
Convert this:
To this:
How can i do this?
Thank you
/**
 * Pixelates a bitmap by painting one solid square per mosaic cell, sampling
 * the colour at the centre of each cell.
 * The effect works, but it is a time-consuming operation
 * (300*300, precent = 1: time = 57 ms).
 *
 * @param bmp source bitmap.
 * @param precent mosaic degree (0-1); the cell edge is 1 / precent pixels,
 *                so smaller values give coarser mosaics.
 * @return a new ARGB_8888 bitmap of the same size with the effect applied.
 */
public static Bitmap getMosaicsBitmap(Bitmap bmp, double precent) {
    long start = System.currentTimeMillis();
    int bmpW = bmp.getWidth();
    int bmpH = bmp.getHeight();
    Bitmap resultBmp = Bitmap.createBitmap(bmpW, bmpH, Bitmap.Config.ARGB_8888);
    Canvas canvas = new Canvas(resultBmp);
    Paint paint = new Paint();
    // Cell edge length in source pixels; precent == 0 degenerates to one cell.
    double unit;
    if (precent == 0) {
        unit = bmpW;
    } else {
        unit = 1 / precent;
    }
    double resultBmpW = bmpW / unit;
    double resultBmpH = bmpH / unit;
    for (int i = 0; i < resultBmpH; i++) {
        for (int j = 0; j < resultBmpW; j++) {
            // Sample the colour at the centre of the cell.
            int pickPointX = (int) (unit * (j + 0.5));
            int pickPointY = (int) (unit * (i + 0.5));
            int color;
            if (pickPointX >= bmpW || pickPointY >= bmpH) {
                // Centre fell outside the bitmap: fall back to the image's centre pixel.
                color = bmp.getPixel(bmpW / 2, bmpH / 2);
            } else {
                color = bmp.getPixel(pickPointX, pickPointY);
            }
            paint.setColor(color);
            canvas.drawRect((int) (unit * j), (int) (unit * i), (int) (unit * (j + 1)), (int) (unit * (i + 1)), paint);
        }
    }
    canvas.setBitmap(null);
    long end = System.currentTimeMillis();
    Log.v(TAG, "DrawTime:" + (end - start));
    return resultBmp;
}
/**
 * Same effect as {@link #getMosaicsBitmap}, but far more efficient: it
 * works on a raw pixel array (unit * unit source pixels collapse into one,
 * keeping the top-left pixel's value) instead of drawing rectangles.
 *
 * Fix: the original bounds check only tested {@code point < pixels.length},
 * so cells on the right edge wrapped around and bled onto the start of the
 * next row whenever bmpW was not a multiple of the cell size. Both axes
 * are now clipped independently.
 *
 * @param bmp source bitmap.
 * @param precent mosaic degree (0-1).
 * @return a new ARGB_8888 bitmap with the mosaic applied.
 */
public static Bitmap getMosaicsBitmaps(Bitmap bmp, double precent) {
    long start = System.currentTimeMillis();
    int bmpW = bmp.getWidth();
    int bmpH = bmp.getHeight();
    int[] pixels = new int[bmpH * bmpW];
    bmp.getPixels(pixels, 0, bmpW, 0, 0, bmpW, bmpH);
    int raw = (int) (bmpW * precent);
    int unit;
    if (raw == 0) {
        unit = bmpW;
    } else {
        // Each unit*unit block is filled with its original top-left value.
        unit = bmpW / raw;
    }
    if (unit >= bmpW || unit >= bmpH) {
        // Degenerate cell size: fall back to the slow, safe implementation.
        return getMosaicsBitmap(bmp, precent);
    }
    for (int i = 0; i < bmpH; i += unit) {
        for (int j = 0; j < bmpW; j += unit) {
            int leftTopPoint = i * bmpW + j;
            for (int k = 0; k < unit; k++) {
                for (int m = 0; m < unit; m++) {
                    int x = j + m;
                    int y = i + k;
                    // Clip each axis so edge cells never wrap to the next row.
                    if (x < bmpW && y < bmpH) {
                        pixels[y * bmpW + x] = pixels[leftTopPoint];
                    }
                }
            }
        }
    }
    long end = System.currentTimeMillis();
    Log.v(TAG, "DrawTime:" + (end - start));
    return Bitmap.createBitmap(pixels, bmpW, bmpH, Bitmap.Config.ARGB_8888);
}
when you want change a bitmap to mosaic bitmap,just invoke function
getMosaicsBitmaps(bmp,0.1)

Construct spline with android.graphics.Path

I have array of 2d points and I need to create a Path that passes through all of the points. I think I should use Path.cubicTo() method which creates bezier curve between two points using specified control points. The problem is that I don't know control points of my curve. How do I calculate them?
Maybe there's a better way of doing this? Maybe there's some sort of library that could help me?
After I read this articles it became quite simple.
This is how you do it on android. After you run this code your path p will go through all points from knotsArr array.
// Build a Path through all knots using the computed bezier control points.
// Fix: the snippet declared the array 'knotsArr' but then called
// 'knots.get(...)' — a List method on a nonexistent variable — so it did
// not compile. All accesses now index knotsArr directly.
Point[] knotsArr = {new Point(0, 0),
        new Point(5, 5),
        new Point(10, 0),
        new Point(15, 5)};
Point[][] controlPoints = BezierSplineUtil.getCurveControlPoints(knotsArr);
Point[] firstCP = controlPoints[0];   // first control point of each segment
Point[] secondCP = controlPoints[1];  // second control point of each segment
Path p = new Path();
p.moveTo(knotsArr[0].x, knotsArr[0].y);
for (int i = 0; i < firstCP.length; i++) {
    // Segment i runs from knot i to knot i + 1.
    p.cubicTo(firstCP[i].x, firstCP[i].y,
            secondCP[i].x, secondCP[i].y,
            knotsArr[i + 1].x, knotsArr[i + 1].y);
}
BezierSplineUtil.java
public class BezierSplineUtil {

    /** Immutable 2D point. */
    public static class Point {
        public final float x;
        public final float y;

        public Point(float x, float y) {
            this.x = x;
            this.y = y;
        }
    }

    /**
     * Computes the control points of an open-ended cubic bezier spline that
     * passes through the given knots.
     *
     * @param knots spline points the curve must pass through.
     * @return a 2 x (knots.length - 1) matrix: row 0 holds the first control
     *         point of each segment, row 1 the second.
     * @throws IllegalArgumentException if fewer than two knots are passed.
     */
    public static Point[][] getCurveControlPoints(Point[] knots) {
        if (knots == null || knots.length < 2) {
            throw new IllegalArgumentException("At least two knot points are required");
        }
        final int segments = knots.length - 1;
        final Point[] firstCtrl = new Point[segments];
        final Point[] secondCtrl = new Point[segments];

        // Single segment: the bezier degenerates to a straight line.
        if (segments == 1) {
            // 3 * P1 = 2 * P0 + P3
            float px = (2 * knots[0].x + knots[1].x) / 3;
            float py = (2 * knots[0].y + knots[1].y) / 3;
            firstCtrl[0] = new Point(px, py);
            // P2 = 2 * P1 - P0
            px = 2 * firstCtrl[0].x - knots[0].x;
            py = 2 * firstCtrl[0].y - knots[0].y;
            secondCtrl[0] = new Point(px, py);
            return new Point[][] { firstCtrl, secondCtrl };
        }

        // Build the right-hand side for the X coordinates...
        float[] rhs = new float[segments];
        for (int i = 1; i < segments - 1; i++) {
            rhs[i] = 4 * knots[i].x + 2 * knots[i + 1].x;
        }
        rhs[0] = knots[0].x + 2 * knots[1].x;
        rhs[segments - 1] = (8 * knots[segments - 1].x + knots[segments].x) / 2f;
        float[] cx = getFirstControlPoints(rhs);

        // ...then reuse the buffer for the Y coordinates.
        for (int i = 1; i < segments - 1; i++) {
            rhs[i] = 4 * knots[i].y + 2 * knots[i + 1].y;
        }
        rhs[0] = knots[0].y + 2 * knots[1].y;
        rhs[segments - 1] = (8 * knots[segments - 1].y + knots[segments].y) / 2f;
        float[] cy = getFirstControlPoints(rhs);

        for (int i = 0; i < segments; i++) {
            firstCtrl[i] = new Point(cx[i], cy[i]);
            if (i < segments - 1) {
                // Interior segment: mirror the next segment's first control point.
                secondCtrl[i] = new Point(2 * knots[i + 1].x - cx[i + 1],
                        2 * knots[i + 1].y - cy[i + 1]);
            } else {
                // Final segment: average the last knot with the last first control point.
                secondCtrl[i] = new Point((knots[segments].x + cx[segments - 1]) / 2,
                        (knots[segments].y + cy[segments - 1]) / 2);
            }
        }
        return new Point[][] { firstCtrl, secondCtrl };
    }

    /**
     * Solves the tridiagonal system yielding one coordinate (x or y) of the
     * first control points (Thomas algorithm: forward elimination followed
     * by back substitution).
     *
     * @param rhs right-hand side vector.
     * @return solution vector.
     */
    private static float[] getFirstControlPoints(float[] rhs) {
        final int n = rhs.length;
        final float[] sol = new float[n];
        final float[] work = new float[n];
        float pivot = 2.0f;
        sol[0] = rhs[0] / pivot;
        // Decomposition and forward substitution.
        for (int i = 1; i < n; i++) {
            work[i] = 1 / pivot;
            pivot = (i < n - 1 ? 4.0f : 3.5f) - work[i];
            sol[i] = (rhs[i] - sol[i - 1]) / pivot;
        }
        // Back substitution.
        for (int i = 1; i < n; i++) {
            sol[n - i - 1] -= work[n - i] * sol[n - i];
        }
        return sol;
    }
}

Android: How to shift pitch of output sound (realtime)

I'm new in Android development. I'm looking for any method that applies pitch shifting to output sound (in real-time). But I couldn't find any point to start.
I've found this topic but I still don't know how I can apply it.
Any suggestions?
In general, the algorithm is called a phase vocoder -- searching for that on the Internets should get you started.
There are a few open source phase vocoders out there, you should be able to use those for reference too.
You can do phase vocoder in real-time -- the main component used is the FFT, so you'll need a fast FFT. The Android libraries can do this for you, see this documentation: http://developer.android.com/reference/android/media/audiofx/Visualizer.html
As it happens, I'm about to release an open source FFT for ARM that is faster than Apple's vDSP library (which was hitherto the fastest). I'll post back in a few days when I've uploaded it to github.com.
Good luck.
There is no built-in pitch shifting algorithm in the Android SDK. You have to code your own. Pitch shifting is a real hardcore DSP algorithm; good sounding algorithms are results of many months or rather years of development...
I personally do not know any Java implementation so I suggest you to adopt some of the free C++ PS algorithms, the best one - which I use in my audio applications, is SoundTouch:
http://www.surina.net/soundtouch/
I played with its code a little and it seems it would not be too complicated to rewrite it in Java.
HOME URL: http://www.dspdimension.com
/**
 * Phase-vocoder pitch shifter: a Java port of Stephan M. Bernsee's
 * smbPitchShift (dspdimension.com). PitchShift() analyses the input with a
 * windowed STFT, scales each bin's true frequency by the shift factor, and
 * resynthesises via overlap-add.
 *
 * NOTE(review): all working state is held in static arrays (gInFIFO,
 * gRover, ...), so this class is NOT thread-safe and can only process one
 * audio stream at a time; interleaved calls for different streams will
 * corrupt state.
 */
public class AudioPitch{
//region Private Static Members
private static int MAX_FRAME_LENGTH = 8192;
private static double M_PI = 3.14159265358979323846;
private static float[] gInFIFO = new float[MAX_FRAME_LENGTH];        // input sample FIFO
private static float[] gOutFIFO = new float[MAX_FRAME_LENGTH];       // output sample FIFO
private static float[] gFFTworksp = new float[2 * MAX_FRAME_LENGTH]; // interleaved re/im FFT workspace
private static float[] gLastPhase = new float[MAX_FRAME_LENGTH / 2 + 1]; // previous analysis phase per bin
private static float[] gSumPhase = new float[MAX_FRAME_LENGTH / 2 + 1];  // accumulated synthesis phase per bin
private static float[] gOutputAccum = new float[2 * MAX_FRAME_LENGTH];   // overlap-add accumulator
private static float[] gAnaFreq = new float[MAX_FRAME_LENGTH];  // analysis true frequencies
private static float[] gAnaMagn = new float[MAX_FRAME_LENGTH];  // analysis magnitudes
private static float[] gSynFreq = new float[MAX_FRAME_LENGTH];  // synthesis frequencies
private static float[] gSynMagn = new float[MAX_FRAME_LENGTH];  // synthesis magnitudes
private static long gRover;                                     // current write position in gInFIFO
//endregion
/**
 * Pitch-shifts indata in place (outdata aliases indata).
 *
 * @param pitchShift shift factor; 0.5 = one octave down, 2.0 = one octave up.
 * @param numSampsToProcess number of samples of indata to process.
 * @param fftFrameSize STFT frame size, e.g. 2048 — assumed to be a power of
 *        two and &lt;= MAX_FRAME_LENGTH (TODO confirm; not validated here).
 * @param osamp STFT overlap factor, e.g. 10.
 * @param sampleRate sample rate in Hz.
 * @param indata mono float samples; overwritten with the shifted result.
 */
public static void PitchShift(float pitchShift, long numSampsToProcess, long fftFrameSize/*(long)2048*/, long osamp/*(long)10*/, float sampleRate, float[] indata)
{
    double magn, phase, tmp, window, real, imag;
    double freqPerBin, expct;
    long i, k, qpd, index, inFifoLatency, stepSize, fftFrameSize2;
    // outdata aliases indata: the shift happens in place.
    float[] outdata = indata;
    /* set up some handy variables */
    fftFrameSize2 = fftFrameSize / 2;
    stepSize = fftFrameSize / osamp;
    freqPerBin = sampleRate / (double)fftFrameSize;
    expct = 2.0 * M_PI * (double)stepSize / (double)fftFrameSize;
    inFifoLatency = fftFrameSize - stepSize;
    if (gRover == 0) gRover = inFifoLatency;
    /* main processing loop */
    for (i = 0; i < numSampsToProcess; i++)
    {
        /* As long as we have not yet collected enough data just read in */
        gInFIFO[(int) gRover] = indata[(int) i];
        outdata[(int) i] = gOutFIFO[(int) (gRover - inFifoLatency)];
        gRover++;
        /* now we have enough data for processing */
        if (gRover >= fftFrameSize)
        {
            gRover = inFifoLatency;
            /* do windowing and re,im interleave */
            for (k = 0; k < fftFrameSize; k++)
            {
                // Hann window: -0.5 * cos(2*pi*k/N) + 0.5
                window = -.5 * Math.cos(2.0 * M_PI * (double)k / (double)fftFrameSize) + .5;
                gFFTworksp[(int) (2 * k)] = (float)(gInFIFO[(int) k] * window);
                gFFTworksp[(int) (2 * k + 1)] = 0.0F;
            }
            /* ***************** ANALYSIS ******************* */
            /* do transform */
            ShortTimeFourierTransform(gFFTworksp, fftFrameSize, -1);
            /* this is the analysis step */
            for (k = 0; k <= fftFrameSize2; k++)
            {
                /* de-interlace FFT buffer */
                real = gFFTworksp[(int) (2 * k)];
                imag = gFFTworksp[(int) (2 * k + 1)];
                /* compute magnitude and phase */
                magn = 2.0 * Math.sqrt(real * real + imag * imag);
                phase = smbAtan2(imag, real);
                /* compute phase difference */
                tmp = phase - gLastPhase[(int) k];
                gLastPhase[(int) k] = (float)phase;
                /* subtract expected phase difference */
                tmp -= (double)k * expct;
                /* map delta phase into +/- Pi interval */
                qpd = (long)(tmp / M_PI);
                if (qpd >= 0) qpd += qpd & 1;
                else qpd -= qpd & 1;
                tmp -= M_PI * (double)qpd;
                /* get deviation from bin frequency from the +/- Pi interval */
                tmp = osamp * tmp / (2.0 * M_PI);
                /* compute the k-th partials' true frequency */
                tmp = (double)k * freqPerBin + tmp * freqPerBin;
                /* store magnitude and true frequency in analysis arrays */
                gAnaMagn[(int) k] = (float)magn;
                gAnaFreq[(int) k] = (float)tmp;
            }
            /* ***************** PROCESSING ******************* */
            /* this does the actual pitch shifting */
            for (int zero = 0; zero < fftFrameSize; zero++)
            {
                gSynMagn[zero] = 0;
                gSynFreq[zero] = 0;
            }
            for (k = 0; k <= fftFrameSize2; k++)
            {
                // Move each bin's energy to bin k * pitchShift.
                index = (long)(k * pitchShift);
                if (index <= fftFrameSize2)
                {
                    gSynMagn[(int) index] += gAnaMagn[(int) k];
                    gSynFreq[(int) index] = gAnaFreq[(int) k] * pitchShift;
                }
            }
            /* ***************** SYNTHESIS ******************* */
            /* this is the synthesis step */
            for (k = 0; k <= fftFrameSize2; k++)
            {
                /* get magnitude and true frequency from synthesis arrays */
                magn = gSynMagn[(int) k];
                tmp = gSynFreq[(int) k];
                /* subtract bin mid frequency */
                tmp -= (double)k * freqPerBin;
                /* get bin deviation from freq deviation */
                tmp /= freqPerBin;
                /* take osamp into account */
                tmp = 2.0 * M_PI * tmp / osamp;
                /* add the overlap phase advance back in */
                tmp += (double)k * expct;
                /* accumulate delta phase to get bin phase */
                gSumPhase[(int) k] += (float)tmp;
                phase = gSumPhase[(int) k];
                /* get real and imag part and re-interleave */
                gFFTworksp[(int) (2 * k)] = (float)(magn * Math.cos(phase));
                gFFTworksp[(int) (2 * k + 1)] = (float)(magn * Math.sin(phase));
            }
            /* zero negative frequencies */
            for (k = fftFrameSize + 2; k < 2 * fftFrameSize; k++) gFFTworksp[(int) k] = 0.0F;
            /* do inverse transform */
            ShortTimeFourierTransform(gFFTworksp, fftFrameSize, 1);
            /* do windowing and add to output accumulator */
            for (k = 0; k < fftFrameSize; k++)
            {
                window = -.5 * Math.cos(2.0 * M_PI * (double)k / (double)fftFrameSize) + .5;
                gOutputAccum[(int) k] += (float)(2.0 * window * gFFTworksp[(int) (2 * k)] / (fftFrameSize2 * osamp));
            }
            for (k = 0; k < stepSize; k++) gOutFIFO[(int) k] = gOutputAccum[(int) k];
            /* shift accumulator */
            //memmove(gOutputAccum, gOutputAccum + stepSize, fftFrameSize * sizeof(float));
            for (k = 0; k < fftFrameSize; k++)
            {
                gOutputAccum[(int) k] = gOutputAccum[(int) (k + stepSize)];
            }
            /* move input FIFO */
            for (k = 0; k < inFifoLatency; k++) gInFIFO[(int) k] = gInFIFO[(int) (k + stepSize)];
        }
    }
}
//endregion
//region Private Static Methods
/**
 * In-place radix-2 FFT on interleaved (re, im) data: bit-reversal
 * permutation followed by butterfly passes. sign = -1 gives the forward
 * transform, +1 the inverse (unscaled). fftFrameSize is assumed to be a
 * power of two (TODO confirm; not checked here).
 */
public static void ShortTimeFourierTransform(float[] fftBuffer, long fftFrameSize, long sign)
{
    float wr, wi, arg, temp;
    float tr, ti, ur, ui;
    long i, bitm, j, le, le2, k;
    // Bit-reversal reordering of the complex samples.
    for (i = 2; i < 2 * fftFrameSize - 2; i += 2)
    {
        for (bitm = 2, j = 0; bitm < 2 * fftFrameSize; bitm <<= 1)
        {
            if ((i & bitm) != 0) j++;
            j <<= 1;
        }
        if (i < j)
        {
            temp = fftBuffer[(int) i];
            fftBuffer[(int) i] = fftBuffer[(int) j];
            fftBuffer[(int) j] = temp;
            temp = fftBuffer[(int) (i + 1)];
            fftBuffer[(int) (i + 1)] = fftBuffer[(int) (j + 1)];
            fftBuffer[(int) (j + 1)] = temp;
        }
    }
    // log2(fftFrameSize) butterfly passes.
    long max = (long)(Math.log(fftFrameSize) / Math.log(2.0) + .5);
    for (k = 0, le = 2; k < max; k++)
    {
        le <<= 1;
        le2 = le >> 1;
        ur = 1.0F;
        ui = 0.0F;
        arg = (float)M_PI / (le2 >> 1);
        wr = (float)Math.cos(arg);
        wi = (float)(sign * Math.sin(arg));
        for (j = 0; j < le2; j += 2)
        {
            for (i = j; i < 2 * fftFrameSize; i += le)
            {
                tr = fftBuffer[(int) (i + le2)] * ur - fftBuffer[(int) (i + le2 + 1)] * ui;
                ti = fftBuffer[(int) (i + le2)] * ui + fftBuffer[(int) (i + le2 + 1)] * ur;
                fftBuffer[(int) (i + le2)] = fftBuffer[(int) i] - tr;
                fftBuffer[(int) (i + le2 + 1)] = fftBuffer[(int) (i + 1)] - ti;
                fftBuffer[(int) i] += tr;
                fftBuffer[(int) (i + 1)] += ti;
            }
            // Advance the twiddle factor.
            tr = ur * wr - ui * wi;
            ui = ur * wi + ui * wr;
            ur = tr;
        }
    }
}
//endregion
/**
 * smbPitchShift's guarded arctangent: returns Math.atan2(x, y) with special
 * cases for zero arguments. NOTE(review): for x == 0 it returns 0 even when
 * y &lt; 0, where Math.atan2 would return PI — kept as in the original C
 * source; verify this deviation is intended before reusing elsewhere.
 */
private static double smbAtan2(double x, double y)
{
    double signx;
    if (x > 0.) signx = 1.;
    else signx = -1.;
    if (x == 0.) return 0.;
    if (y == 0.) return signx * M_PI / 2.;
    return Math.atan2(x, y);
}
}
This code works too, but its CPU usage is very high.
The pitchShift value ranges between 0.5 and 2.0.
call this class as below:
// Example driver: read PCM shorts from the recorder, normalise to floats,
// pitch-shift in place, then write the result back to the player.
// NOTE(review): 'audiorackIsRun', 'recorder' and 'player' are defined
// elsewhere — presumably an AudioRecord/AudioTrack pair; confirm.
int maxValueOFShort = 32768;
short [] buffer = new short[800];
float[] inData = new float[buffer.length];
while (audiorackIsRun)
{
    int m = recorder.read(buffer, 0, buffer.length);
    // Normalise 16-bit samples to floats for the shifter.
    for(int n=0; n<buffer.length;n++)
        inData[n] = buffer[n]/(float)maxValueOFShort;
    // pitchShift = 1 leaves the pitch unchanged; useful range is 0.5 - 2.0.
    AudioPitch.PitchShift(1, buffer.length, 4096, 4, 44100, inData);
    // Convert the shifted floats back to 16-bit samples for playback.
    for(int n=0; n<buffer.length;n++)
        buffer[n] = (short)(inData[n]*maxValueOFShort);
    player.write(buffer, 0, buffer.length);
}

Categories

Resources