How to get textures applied on mobile devices (android 6.0.1) - android

I've got this WebGL application (a 360 panorama viewer) that wraps an equirectangular image over a sphere and positions the camera at the center of that sphere to simulate a real-world illusion.
It works on desktop browsers (Chrome, Firefox), but on Android 6.0.1 Google Chrome it does not (the result is a black screen), while it does work in the device's built-in browser.
However, without applying a texture, things work correctly everywhere and I can see simple primitives (spheres, cubes, pyramids, etc.).
The part of the code that creates such a view:
function load360View(r, center, lats, longs, textureImage) {
    let vertexBuffer = gl.createBuffer(),
        uvBuffer = gl.createBuffer(),
        indexBuffer = gl.createBuffer(),
        texture = gl.createTexture(),
        vertices = [],
        texCoords = [],
        indices = [];
    let theta, phi, x, y, z;
    for (let i = 0; i <= lats; i++) {
        theta = i * Math.PI / lats;
        for (let j = 0; j <= longs; j++) {
            phi = j * 2 * Math.PI / longs;
            x = Math.sin(theta) * Math.cos(phi);
            y = Math.cos(theta);
            z = Math.sin(theta) * Math.sin(phi);
            vertices.push(r * x + center.x);
            vertices.push(r * y + center.y);
            vertices.push(r * z + center.z);
            texCoords.push(j / longs);
            texCoords.push(i / lats);
        }
    }
    for (let i = 0; i < lats; i++) {
        for (let j = 0; j < longs; j++) {
            let first = (i * (longs + 1)) + j;
            let second = first + longs + 1;
            indices.push(first);
            indices.push(second);
            indices.push(first + 1);
            indices.push(second);
            indices.push(second + 1);
            indices.push(first + 1);
        }
    }
    gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, indexBuffer);
    gl.bufferData(gl.ELEMENT_ARRAY_BUFFER, new Uint16Array(indices), gl.STATIC_DRAW);
    gl.bindBuffer(gl.ARRAY_BUFFER, vertexBuffer);
    gl.enableVertexAttribArray(vPositionLoc);
    gl.vertexAttribPointer(vPositionLoc, 3, gl.FLOAT, false, 0, 0);
    gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(vertices), gl.STATIC_DRAW);
    gl.bindBuffer(gl.ARRAY_BUFFER, uvBuffer);
    gl.enableVertexAttribArray(vtexCoordLoc);
    gl.vertexAttribPointer(vtexCoordLoc, 2, gl.FLOAT, false, 0, 0);
    gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(texCoords), gl.STATIC_DRAW);
    gl.bindTexture(gl.TEXTURE_2D, texture);
    gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, gl.RGBA, gl.UNSIGNED_BYTE, textureImage);
    if (isPowerOf2(textureImage.width) && isPowerOf2(textureImage.height)) {
        gl.generateMipmap(gl.TEXTURE_2D);
    } else {
        gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.LINEAR);
        gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);
        gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
        gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
    }
    return {
        vertexBuffer: vertexBuffer,
        indexBuffer: indexBuffer,
        uvBuffer: uvBuffer,
        indices: indices,
        texture: texture
    };
}
Is this a bug in Android's Chrome browser, or is it related to the application itself, which would mean that something should be changed?
P.S.
I also get this error on desktop Firefox (not the Nightly build):
Error: WebGL: texImage2D: Chosen format/type incured an expensive reformat: 0x1908/0x1401

Related

How to use OpenGL to draw text over an Android MediaPlayer?

I have C++ code implementing media player behavior on Android.
I'm using the media player to play an mp4 file; however, I need to draw text above it.
For testing purposes, I've already tried to do it like the drawText() function from BootAnimation.cpp, but without success.
I'm guessing there are some OpenGL calls I'm missing. Is there some call to be added inside drawText() for it to draw above the mp4?
void BootAnimation::drawText(const char* str, const Font& font, bool bold, int* x, int* y) {
    glEnable(GL_BLEND);  // Allow us to draw on top of the animation
    glBindTexture(GL_TEXTURE_2D, font.texture.name);
    const int len = strlen(str);
    const int strWidth = font.char_width * len;
    if (*x == TEXT_CENTER_VALUE) {
        *x = (mWidth - strWidth) / 2;
    } else if (*x < 0) {
        *x = mWidth + *x - strWidth;
    }
    if (*y == TEXT_CENTER_VALUE) {
        *y = (mHeight - font.char_height) / 2;
    } else if (*y < 0) {
        *y = mHeight + *y - font.char_height;
    }
    int cropRect[4] = { 0, 0, font.char_width, -font.char_height };
    for (int i = 0; i < len; i++) {
        char c = str[i];
        if (c < FONT_BEGIN_CHAR || c > FONT_END_CHAR) {
            c = '?';
        }
        // Crop the texture to only the pixels in the current glyph
        const int charPos = (c - FONT_BEGIN_CHAR); // Position in the list of valid characters
        const int row = charPos / FONT_NUM_COLS;
        const int col = charPos % FONT_NUM_COLS;
        cropRect[0] = col * font.char_width;      // Left of column
        cropRect[1] = row * font.char_height * 2; // Top of row
        // Move down to bottom of regular (one char_height) or bold (two char_height) line
        cropRect[1] += bold ? 2 * font.char_height : font.char_height;
        glTexParameteriv(GL_TEXTURE_2D, GL_TEXTURE_CROP_RECT_OES, cropRect);
        glDrawTexiOES(*x, *y, 0, font.char_width, font.char_height);
        *x += font.char_width;
    }
    glDisable(GL_BLEND); // Return to the animation's default behaviour
    glBindTexture(GL_TEXTURE_2D, 0);
}
P.S.: this is not an Android app, so it won't be done in the app layer.
BootAnimation.cpp's use of OpenGL ES has changed a bit and it now uses a more modern way to deal with graphics.
That being said, I found that my case needs some abstraction, as done here. Basic OpenGL usage, such as common vertex and fragment shaders (position and color, really nothing different from the fundamentals) and VBO/VAO for data buffering with glDrawArrays, is enough for my use case.
I still need to understand and apply some texturing, and figure out the best way (in my scenario) to manipulate text, but I think that is all.

Vector2 is off - libgdx/java

game.batch.begin();
for (Array obstacle_array123 : obstacle_array) {
    body = obstacle_array123;
    for (Body bodies : body) {
        if (bodies.getUserData() instanceof Array && bodies.isActive()) {
            sprites_array = (Array) bodies.getUserData();
            for (int fix_pos = 0; fix_pos < sprites_array.size; fix_pos++) {
                sprite = sprites_array.get(fix_pos);
                if (verts.size != 0) verts.removeRange(0, verts.size - 1);
                f = bodies.getFixtureList().get(fix_pos);
                s = (PolygonShape) f.getShape();
                transform = bodies.getTransform();
                for (int i = 0; i < s.getVertexCount(); i++) {
                    s.getVertex(i, tmp);
                    transform.mul(tmp);
                    verts.add(new Vector2(tmp));
                }
                rotation_point.set((verts.get(0).x + verts.get(1).x + verts.get(2).x + verts.get(3).x) / 4,
                                   (verts.get(0).y + verts.get(1).y + verts.get(2).y + verts.get(3).y) / 4);
                sprite.setPosition(rotation_point.x - sprite.getWidth() / 2, rotation_point.y - sprite.getHeight() / 2);
                sprite.setRotation(bodies.getAngle() * MathUtils.radiansToDegrees);
                sprite.draw(game.batch);
            }
        }
    }
}
game.batch.end();
I have a game where my bodies are made from multiple square fixtures, so this is the code to render each square sprite on each square fixture.
Two problems: 1st --> it only renders the first sprite in the array.
2nd --> if you look at the following loop (SOLVED):
for (int i = 0; i < s.getVertexCount(); i++) {
    s.getVertex(i, tmp);
    transform.mul(tmp);
    verts.add(new Vector2(tmp));
}
well, it is apparently different compared to:
for (int i = 0; i < s.getVertexCount(); i++) {
    s.getVertex(i, tmp);
    transform.mul(tmp);
    verts.add(tmp);
}
The spawned coordinates in the 2nd example are off by half the width and half the height of the square.
When I print the coordinates from both examples the numbers are the same, but when setting the sprite position, the 2nd example goes off.
You should probably ask both questions separately, but to answer your second question: they ARE different.
In the first one you add a new Vector2 to verts each time through the loop, so verts ends up holding a load of different Vector2's.
In the second one you add the same Vector2 to verts over and over again, so it just holds the same object repeatedly, with whatever value tmp was last set to (remember that Java collections store references to objects, not copies of them).
Caveat - My answer assumes that verts is some sort of standard collection or a libgdx Array.
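To illustrate the aliasing, here is a minimal sketch (assuming verts is a libgdx Array<Vector2> and tmp is a reused Vector2, as in the question):
Array<Vector2> verts = new Array<Vector2>();
Vector2 tmp = new Vector2();
tmp.set(1, 1);
verts.add(tmp);              // verts: [(1,1)]
tmp.set(2, 3);
verts.add(tmp);              // verts: [(2,3), (2,3)] -- both slots hold the SAME object
tmp.set(5, 5);
verts.add(new Vector2(tmp)); // the copy keeps (5,5) even if tmp is changed later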

creating a digital filter in android

I'm trying to deal with ECG signal processing in Android. I want to implement simple digital filters (lowpass, highpass).
I've got a transfer function: H(z) = (1 - 2z^(-6) + z^(-12)) / (1 - 2z^(-1) + z^(-2)) (the one in the MATLAB snippet below).
Here is what I've found:
wikipedia - lowpass filter - it looks quite easy here.
for i from 1 to n
    y[i] := y[i-1] + α * (x[i] - y[i-1])
but there is nothing there about the transfer function which I want to use.
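The recurrence itself seems easy enough to write in Java; a minimal sketch (assuming the signal is just a float[], and initializing y[0] = x[0]) would be something like:
// Simple one-pole lowpass from the Wikipedia recurrence (not the transfer function above)
float[] lowpass(float[] x, float alpha) {
    float[] y = new float[x.length];
    y[0] = x[0];
    for (int i = 1; i < x.length; i++) {
        y[i] = y[i - 1] + alpha * (x[i] - y[i - 1]);
    }
    return y;
}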
I also found the following MATLAB code:
%% Low Pass Filter H(z) = (1 - 2z^(-6) + z^(-12)) / (1 - 2z^(-1) + z^(-2))
b = [1 0 0 0 0 0 -2 0 0 0 0 0 1];
a = [1 -2 1];
h_l = filter(b,a,[1 zeros(1,12)]);
ecg_l = conv (ecg ,h_l);
but there are no functions like filter and conv in Java (or I missed something).
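I suppose conv could be written by hand; a naive sketch of a direct discrete convolution would be something like the following, though that still doesn't tell me how to apply the transfer function:
// Naive discrete convolution: result length is x.length + h.length - 1
float[] conv(float[] x, float[] h) {
    float[] y = new float[x.length + h.length - 1];
    for (int i = 0; i < x.length; i++) {
        for (int j = 0; j < h.length; j++) {
            y[i + j] += x[i] * h[j];
        }
    }
    return y;
}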
I was also looking on Stack Overflow for an answer, but I didn't find anything about the transfer function.
So can someone help me? I just want to move on with my project.
Given a time-domain recurrence equation (such as the one you quoted from Wikipedia), the corresponding transfer function in the z-domain can relatively easily be obtained by using the time-shift property of the z-transform:
x[n-k] <--> z^(-k) X(z) and y[n-k] <--> z^(-k) Y(z),
where X(z) and Y(z) are the z-transforms of the time-domain input sequence x and output sequence y respectively.
Going the other way around, given a transfer function which can be expressed as a ratio of polynomials in z, such as:
H(z) = Y(z)/X(z) = (b_0 + b_1*z^(-1) + ... + b_N*z^(-N)) / (1 + a_1*z^(-1) + ... + a_M*z^(-M)),
the recurrence equation of the transfer function can be written as:
y[n] = -a_1*y[n-1] - ... - a_M*y[n-M] + b_0*x[n] + b_1*x[n-1] + ... + b_N*x[n-N]
There are of course many different ways to implement such a recurrence equation, but a simple filter implementation following the Direct Form II would be along the lines of:
// Implementation of an Infinite Impulse Response (IIR) filter
// with recurrence equation:
// y[n] = -\sum_{i=1}^M a_i y[n-i] + \sum_{i=0}^N b_i x[n-i]
public class IIRFilter {
    public IIRFilter(float a_[], float b_[]) {
        // initialize memory elements
        int N = Math.max(a_.length, b_.length);
        memory = new float[N - 1];
        for (int i = 0; i < memory.length; i++) {
            memory[i] = 0.0f;
        }
        // copy filter coefficients
        a = new float[N];
        int i = 0;
        for (; i < a_.length; i++) {
            a[i] = a_[i];
        }
        for (; i < N; i++) {
            a[i] = 0.0f;
        }
        b = new float[N];
        i = 0;
        for (; i < b_.length; i++) {
            b[i] = b_[i];
        }
        for (; i < N; i++) {
            b[i] = 0.0f;
        }
    }

    // Filter samples from input buffer, and store result in output buffer.
    // Implementation based on Direct Form II.
    // Works similar to matlab's "output = filter(b,a,input)" command
    public void process(float input[], float output[]) {
        for (int i = 0; i < input.length; i++) {
            float in = input[i];
            float out = 0.0f;
            for (int j = memory.length - 1; j >= 0; j--) {
                in -= a[j + 1] * memory[j];
                out += b[j + 1] * memory[j];
            }
            out += b[0] * in;
            output[i] = out;
            // shift memory
            for (int j = memory.length - 1; j > 0; j--) {
                memory[j] = memory[j - 1];
            }
            memory[0] = in;
        }
    }

    private float[] a;
    private float[] b;
    private float[] memory;
}
which you could use to implement your specific transfer function like so:
float g = 1.0f/32.0f; // overall filter gain
float[] a = {1, -2, 1};
float[] b = {g, 0, 0, 0, 0, 0, -2*g, 0, 0, 0, 0, 0, g};
IIRFilter filter = new IIRFilter(a, b);
filter.process(input, output);
Note that you can alternatively factorize the numerator and denominator into 2nd-order polynomials and obtain a cascade of 2nd-order filters (known as biquad filters).
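A minimal sketch of such a cascade, reusing the IIRFilter class above (input and output as in the previous snippet; the biquad coefficients below are made-up placeholders, not a factorization of the actual filter):
float[] stage = new float[input.length];
// Two 2nd-order sections; each takes (a, b) coefficient arrays with a[0] = 1
IIRFilter section1 = new IIRFilter(new float[]{1.0f, -1.8f, 0.81f}, new float[]{0.05f, 0.10f, 0.05f}); // placeholder biquad
IIRFilter section2 = new IIRFilter(new float[]{1.0f, -1.6f, 0.64f}, new float[]{0.04f, 0.08f, 0.04f}); // placeholder biquad
section1.process(input, stage);  // first section
section2.process(stage, output); // second section consumes the first section's output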

How to extract lines from each contour in OpenCV for Android?

I'd like to examine each Canny-detected edge and look for the main lines in it (to check whether they seem to form a rectangle, for example if 2 pairs of lines are parallel, etc.).
Imgproc.HoughLinesP does what I want, but it gives the lines from the whole image, and I want to know which lines come from the same edges.
I also tried findContours, looking for the main lines in each contour with approxPolyDP, but this doesn't look well suited because there are often gaps in Canny-detected edges, and it gives contours of the edges rather than the edges themselves.
Here is a test image example:
How can I get a set of lines for each shape?
Based on Miki's answer, here is what I've done:
Canny
HoughLinesP (or LineSegmentDetector, as you prefer): to detect lines
connectedComponents: to find the Canny "contours" in the Canny image
Dilate with a 3x3 kernel (see below)
For each Hough line: take a few pixels from the line and look for the most frequent value (ignoring 0's).
For example, I chose {p1, 0.75*p1 + 0.25*p2, 0.5*p1 + 0.5*p2, 0.25*p1 + 0.75*p2, p2}, so if my values are {1, 2, 0, 2, 2} then the line belongs to connected component number 2.
Dilating is to make sure you don't miss a contour by only 1 pixel (but don't use it if your objects are too close).
This allows you to "tag" the Hough lines with the color of the contour they belong to.
All of these functions can be found in the Imgproc module; this works in OpenCV 3.0 only and gives the desired result.
Here is the code:
// open image
File root = Environment.getExternalStorageDirectory();
File file = new File(root, "image_test.png");
Mat mRGBA = Imgcodecs.imread(file.getAbsolutePath());
Imgproc.cvtColor(mRGBA, mRGBA, Imgproc.COLOR_BGR2RGB);
Mat mGray = new Mat();
Imgproc.cvtColor(mRGBA, mGray, Imgproc.COLOR_RGBA2GRAY);
Imgproc.medianBlur(mGray, mGray, 7);
/* Main part */
Imgproc.Canny(mGray, mGray, 50, 60, 3, true);
Mat aretes = new Mat();
Imgproc.HoughLinesP(mGray, aretes, 1, 0.01745329251, 30, 10, 4);
/**
* Tag Canny edges in the gray picture with indexes from 1 to 65535 (0 = background)
* (Make sure there are less than 255 components or convert mGray to 16U before)
*/
int nb = Imgproc.connectedComponents(mGray,mGray,8,CvType.CV_16U);
Imgproc.dilate(mGray, mGray, Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(3,3)));
// for each Hough line
for (int x = 0; x < aretes.rows(); x++) {
    double[] vec = aretes.get(x, 0);
    double x1 = vec[0],
           y1 = vec[1],
           x2 = vec[2],
           y2 = vec[3];
    /**
     * Take 5 points from the line
     *
     * x----x----x----x----x
     * P1                  P2
     */
    double[] pixel_values = new double[5];
    pixel_values[0] = mGray.get((int) y1, (int) x1)[0];
    pixel_values[1] = mGray.get((int) (y1 * 0.75 + y2 * 0.25), (int) (x1 * 0.75 + x2 * 0.25))[0];
    pixel_values[2] = mGray.get((int) ((y1 + y2) * 0.5), (int) ((x1 + x2) * 0.5))[0];
    pixel_values[3] = mGray.get((int) (y1 * 0.25 + y2 * 0.75), (int) (x1 * 0.25 + x2 * 0.75))[0];
    pixel_values[4] = mGray.get((int) y2, (int) x2)[0];
    /**
     * Look for the most frequent value
     * (To keep it readable, the following code accepts the line only if there are at
     * least 3 good pixels)
     */
    double value;
    Arrays.sort(pixel_values);
    if (pixel_values[1] == pixel_values[3] || pixel_values[0] == pixel_values[2] || pixel_values[2] == pixel_values[4]) {
        value = pixel_values[2];
    } else {
        value = 0;
    }
    /**
     * Now value is the index of the connected component (or 0 if it's a bad line)
     * You can store it in another array; here I'll just draw the line with the value
     */
    if (value != 0) {
        Imgproc.line(mRGBA, new Point(x1, y1), new Point(x2, y2),
                new Scalar((value * 41 + 50) % 255, (value * 69 + 100) % 255, (value * 91 + 60) % 255), 3);
    }
}
Imgproc.cvtColor(mRGBA, mRGBA, Imgproc.COLOR_RGB2BGR);
File file2 = new File(root, "image_test_OUT.png");
Imgcodecs.imwrite(file2.getAbsolutePath(), mRGBA);
If you're using OpenCV 3.0.0 you can use LineSegmentDetector, and "AND" your detected lines with the contours.
I provide a sample code below. It's C++ (sorry about that), but you can easily translate it into Java. At least you can see how to use LineSegmentDetector and how to extract the common lines for each contour. You'll see the lines on the same contour with the same color.
#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;

int main()
{
    RNG rng(12345);
    Mat3b img = imread("path_to_image");
    Mat1b gray;
    cvtColor(img, gray, COLOR_BGR2GRAY);
    Mat3b result;
    cvtColor(gray, result, COLOR_GRAY2BGR);

    // Detect lines
    Ptr<LineSegmentDetector> detector = createLineSegmentDetector();
    vector<Vec4i> lines;
    detector->detect(gray, lines);

    // Draw lines
    Mat1b lineMask(gray.size(), uchar(0));
    for (int i = 0; i < lines.size(); ++i)
    {
        line(lineMask, Point(lines[i][0], lines[i][1]), Point(lines[i][2], lines[i][3]), Scalar(255), 2);
    }

    // Compute edges
    Mat1b edges;
    Canny(gray, edges, 200, 400);

    // Find contours
    vector<vector<Point>> contours;
    findContours(edges.clone(), contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);

    for (int i = 0; i < contours.size(); ++i)
    {
        // Draw each contour
        Mat1b contourMask(gray.size(), uchar(0));
        drawContours(contourMask, contours, i, Scalar(255), 2); // Better use 1 here. 2 is just for visualization purposes

        // AND the contour and the lines
        Mat1b bor;
        bitwise_and(contourMask, lineMask, bor);

        // Draw the common pixels with a random color
        vector<Point> common;
        findNonZero(bor, common);
        Vec3b color = Vec3b(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255));
        for (int j = 0; j < common.size(); ++j)
        {
            result(common[j]) = color;
        }
    }

    imshow("result", result);
    waitKey();
    return 0;
}
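For reference, here is a rough Java sketch of the same approach (an untested translation; it assumes the OpenCV 3.x Java bindings with the usual org.opencv.* imports, including Imgproc.createLineSegmentDetector):
Mat gray = Imgcodecs.imread("path_to_image", Imgcodecs.IMREAD_GRAYSCALE);

// Detect line segments and draw them into a mask
LineSegmentDetector detector = Imgproc.createLineSegmentDetector();
Mat lsdLines = new Mat();
detector.detect(gray, lsdLines);
Mat lineMask = Mat.zeros(gray.size(), CvType.CV_8UC1);
for (int i = 0; i < lsdLines.rows(); i++) {
    double[] l = lsdLines.get(i, 0);
    Imgproc.line(lineMask, new Point(l[0], l[1]), new Point(l[2], l[3]), new Scalar(255), 2);
}

// Compute edges and contours
Mat edges = new Mat();
Imgproc.Canny(gray, edges, 200, 400);
List<MatOfPoint> contours = new ArrayList<>();
Imgproc.findContours(edges.clone(), contours, new Mat(), Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_NONE);

// For each contour, keep only the line pixels lying on that contour
for (int i = 0; i < contours.size(); i++) {
    Mat contourMask = Mat.zeros(gray.size(), CvType.CV_8UC1);
    Imgproc.drawContours(contourMask, contours, i, new Scalar(255), 2);
    Mat common = new Mat();
    Core.bitwise_and(contourMask, lineMask, common);
    // "common" now contains the detected line pixels belonging to contour i
}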

draw lots of lines with android drawLines

When I draw a lot of lines, the app takes a long time to finish drawing. My goal is to connect the points in a1[] to form a line. Is there a faster way of doing this? Please help!
for (int i = 0; i < x.length - 1; i++) {
    _canvas.drawLine(a1[i].x, a1[i].y, a1[i + 1].x, a1[i + 1].y, _paint);
}
Use drawLines. Pack the points into a float[] with 2 floats (x and y) for each point in the line, then do this:
if (count >= 4) {
    if ((count & 2) != 0) {
        canvas.drawLines(pointlist, 0, count - 2, linePaint);
        canvas.drawLines(pointlist, 2, count - 2, linePaint);
    } else {
        canvas.drawLines(pointlist, 0, count, linePaint);
        canvas.drawLines(pointlist, 2, count - 4, linePaint);
    }
}
Where count is the usable length of the float[] of points. drawLines consumes 4 floats per segment, but if you stagger the calls like that you get the connected result you want without wasting 2x the memory, and you can still move the points effectively if need be.
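For the packing itself, a minimal sketch (assuming a1 is an array of android.graphics.PointF; pointlist and count then feed the staggered drawLines calls above):
// Pack (x, y) pairs; count ends up as the number of floats actually used
float[] pointlist = new float[a1.length * 2];
int count = 0;
for (PointF p : a1) {
    pointlist[count++] = p.x;
    pointlist[count++] = p.y;
}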
Use Canvas.drawLines(float[] pts, Paint paint);
Using Canvas.drawLines instead of Canvas.drawLine, I halved the drawing time.
I have to draw 12 lines with 5000 points per line, and the drawing time with this code is 2393 milliseconds instead of 6000 milliseconds using the Canvas.drawLine() method.
int lineIndex = 0;
float[] lines = new float[(a1.length - 1) * 4]; // 4 floats per segment, a1.length - 1 segments
for (int i = 0; i < a1.length - 1; i++) // -1 to deal with last point
{
    lines[lineIndex++] = a1[i].x;
    lines[lineIndex++] = a1[i].y;
    lines[lineIndex++] = a1[i + 1].x;
    lines[lineIndex++] = a1[i + 1].y;
}
_canvas.drawLines(lines, _paint);
Try creating a Path first, then calling _canvas.drawPath():
Path p = new Path();
p.moveTo(a1[0].x, a1[0].y);
for (int i = 1; i < a1.length; i++) {
    p.lineTo(a1[i].x, a1[i].y);
}
_canvas.drawPath(p, _paint);
