2D Position of marker - android

Hi there, I'm using ARToolKit v6 to render a marker based on an NFT JPG image. It works very well, but...
I need to get the position of the marker on the screen (view) and then add a custom TextView there. Is that possible? How can I get the 2D position of the marker from the projectionMatrix and modelViewMatrix?
Or can I draw a text or an image instead of the Cube?
code
#Override
public void draw() {
super.draw();
GLES20.glEnable(GLES20.GL_CULL_FACE);
GLES20.glEnable(GLES20.GL_DEPTH_TEST);
GLES20.glFrontFace(GLES20.GL_CCW);
boolean show = false;
for (int trackableUID : trackableUIDs) {
if (ARToolKit.getInstance().queryMarkerVisible(trackableUIDs.get(trackableUID))) {
float[] projectionMatrix = ARToolKit.getInstance().getProjectionMatrix();
float[] modelViewMatrix = ARToolKit.getInstance().queryMarkerTransformation(trackableUIDs.get(trackableUID));
Log.i("INFOOOOO", projectionMatrix.toString());
Log.i("INFOOOOO", modelViewMatrix.toString());
cube.draw(projectionMatrix, modelViewMatrix);
show = true;
}
}
}

@sturner thanks so much — I found a solution. Below is the code, in case someone else is looking for one:
// Answer snippet: project the marker's origin (0,0,0) from marker space into
// window coordinates so a regular Android view can be placed over the marker.
final float[] projectionMatrix = ARToolKit.getInstance().getProjectionMatrix();
final float[] modelViewMatrix = ARToolKit.getInstance().queryMarkerTransformation(trackableUIDs.get(trackableUID));
// Lazily build the GL viewport rectangle {x, y, width, height}.
if (view == null) {
view = new int[4];
view[0] = 0;
view[1] = 0;
view[2] = ARTrackingActivity.width;
view[3] = ARTrackingActivity.height;
}
// gluProject maps object coords -> window coords; the result is written into
// `floats` (x, y, z). NOTE(review): GLU's window y origin is the bottom-left
// corner while Android views use top-left, so flipping y (height - y) is
// usually needed — confirm against the caller.
int i = GLU.gluProject(0, 0, 0, modelViewMatrix, 0, projectionMatrix, 0, view, 0, floats, 0);
if (i == GLES20.GL_TRUE) {
// draw the object in screen
}

Related

How do you make shading work between several 3D objects using rajawali?

I made a 3D environment with a few 3D objects using Rajawali. I set up a directional light, everything displays, and I added a diffuse material to every object.
But I cannot get the shadow of one object to fall on another.
The objects shade themselves depending on how they are oriented with respect to the directional light, but they don't seem to receive shadows from other objects. I also tried with a spotlight, but it didn't work.
What can I do ? Is there a feature to enable or something ?
Here is my code :
public class BasicRenderer extends Renderer {
private Sphere mEarthSphere;
private DirectionalLight mDirectionalLight;
private SpotLight mSpotLight;
private Object3D[][][] mCubes;
private Object3D mRootCube;
public BasicRenderer(Context context) {
super(context);
setFrameRate(60);
}
#Override
protected void initScene() {
getCurrentScene().setBackgroundColor(0, 0.5f, 1.0f, 1.0f);
getCurrentScene().setFog(new FogMaterialPlugin.FogParams(FogMaterialPlugin.FogType.LINEAR, 0x999999, 50, 100));
mSpotLight = new SpotLight();
mSpotLight.setPower(20);
mSpotLight.enableLookAt();
mSpotLight.setPosition(new Vector3(2, 1, 0));
mSpotLight.setLookAt(0,0,0);
//getCurrentScene().addLight(mSpotLight);
mDirectionalLight = new DirectionalLight(-1f, -2f, -1.0f);
mDirectionalLight.setColor(1.0f, 1.0f, 1.0f);
mDirectionalLight.setPower(1.5f);
getCurrentScene().addLight(mDirectionalLight);
SpecularMethod.Phong phongMethod = new SpecularMethod.Phong(0xeeeeee, 200);
Material material = new Material();
material.enableLighting(true);
material.setDiffuseMethod(new DiffuseMethod.Lambert());
//material.setSpecularMethod(phongMethod);
Texture earthTexture = new Texture("Earth", R.drawable.earthtruecolor_nasa_big);
NormalMapTexture earthNormal = new NormalMapTexture("earthNormal", R.drawable.earthtruecolor_nasa_big_n);
earthTexture.setInfluence(.5f);
try{
material.addTexture(earthTexture);
material.addTexture(earthNormal);
} catch (ATexture.TextureException error){
Log.d("BasicRenderer" + ".initScene", error.toString());
}
material.setColorInfluence(0);
mEarthSphere = new Sphere(1, 24, 24);
mEarthSphere.setMaterial(material);
getCurrentScene().addChild(mEarthSphere);
getCurrentCamera().setZ(4.2f);
mCubes = new Object3D[30][30][2];
Material cubeMaterial = new Material();
cubeMaterial.enableLighting(true);
cubeMaterial.setDiffuseMethod(new DiffuseMethod.Lambert(1));
//cubeMaterial.setSpecularMethod(phongMethod);
cubeMaterial.enableTime(true);
cubeMaterial.setColorInfluence(0);
Texture cubeTexture = new Texture("Stone", R.drawable.stone);
try{
cubeMaterial.addTexture(cubeTexture);
} catch (ATexture.TextureException error){
Log.d("BasicRenderer" + ".initScene", error.toString());
}
cubeMaterial.addPlugin(new DepthMaterialPlugin());
mRootCube = new Cube(1);
mRootCube.setMaterial(cubeMaterial);
mRootCube.setY(-1f);
// -- similar objects with the same material, optimize
mRootCube.setRenderChildrenAsBatch(true);
getCurrentScene().addChild(mRootCube);
mCubes[0][0][0] = mRootCube;
for(int z = 0; z < 2; z++) {
for(int y = 0; y < 5; y++) {
for (int x = 0; x < 30; x++) {
Object3D cube = mRootCube.clone(true);
cube.setY(-5 + y);
cube.setX(-15 + x);
cube.setZ(z);
mRootCube.addChild(cube);
mCubes[x][y][z] = cube;
}
}
}
Object3D cube = mRootCube.clone(true);
cube.setY(0);
cube.setX(-15 + 10);
cube.setZ(1);
mRootCube.addChild(cube);
mCubes[5][10][1] = cube;
getCurrentScene().addChild(mCubes[5][10][1]);
// -- create a chase camera
// the first parameter is the camera offset
// the second parameter is the interpolation factor
ChaseCamera chaseCamera = new ChaseCamera(new Vector3(0, 3, 16), mEarthSphere);
// -- tell the camera which object to chase
// -- set the far plane to 1000 so that we actually see the sky sphere
chaseCamera.setFarPlane(1000);
getCurrentScene().replaceAndSwitchCamera(chaseCamera, 0); //<--Also the only change!!!
}
#Override
public void onRender(final long elapsedTime, final double deltaTime) {
super.onRender(elapsedTime, deltaTime);
mEarthSphere.rotate(Vector3.Axis.Y, 1.0);
}
}
As you can see on the next picture, there is no shadow on the ground next to the cube on the left, but I think there should be.
https://i.imgur.com/qtl6mZf.jpg

Using player's face from camera as a sprite in unity

Is there a way to get a picture of the player and then cropping the face for a sprite or something?
Like the apps that add your face to a body, but the face would be stored in some way.
Following this question answer, you can retrieve the picture from the Android Camera and save it as a texture. Then you use a Texture2D containing your mask (let's say a circle on the middle with alpha around it) to check which byte you want to save and which byte will be transparent. Don't forget to set this Texture2D import properties to Advanced and check Read/Write enabled. Finally you can load the newly created picture as a sprite using either Unity WWW class or System.IO.ReadAllBytes.
Here is a quick implementation of this:
// Mask whose alpha channel selects which camera pixels survive the crop
// (e.g. an opaque circle over a transparent background).
[SerializeField]
private Texture2D maskTexture;
// Live feed from the device camera.
private WebCamTexture webCamTexture;
protected void Start()
{
// Size the on-screen preview to the mask so they line up 1:1.
GetComponent<RawImage>().rectTransform.sizeDelta = new Vector2(maskTexture.width, maskTexture.height);
webCamTexture = new WebCamTexture();
// NOTE(review): assumes the same GameObject carries both a RawImage and a
// Renderer — confirm; usually only one of the two displays the feed.
GetComponent<Renderer>().material.mainTexture = webCamTexture;
webCamTexture.Play();
}
// Public entry point (e.g. wired to a UI button): kicks off the
// end-of-frame capture coroutine below.
public void SavePicture()
{
StartCoroutine(SavePictureCoroutine());
}
// Captures a mask-sized rectangle from the centre of the frame at
// end-of-frame, blanks every pixel where the mask's alpha is <= 0.5,
// and writes the result to disk as a PNG.
private IEnumerator SavePictureCoroutine()
{
// ReadPixels may only be called once rendering for the frame is done.
yield return new WaitForEndOfFrame();
RawImage rawImageToRead = GetComponent<RawImage>();
//This is to save the current RenderTexture: RenderTexture.GetTemporary() and RenderTexture.ReleaseTemporary() can also be used
RenderTexture previousRenderTexture = RenderTexture.active;
// NOTE(review): if rawImageToRead.texture is a WebCamTexture (as set up in
// Start) the `as RenderTexture` cast yields null and ReadPixels reads the
// screen back buffer instead — confirm which source is intended.
RenderTexture.active = rawImageToRead.texture as RenderTexture;
Texture2D renderedTexture = new Texture2D(maskTexture.width, maskTexture.height, TextureFormat.ARGB32, false);
// Grab a mask-sized rectangle centred on the screen.
renderedTexture.ReadPixels(new Rect(Screen.width * 0.5f - maskTexture.width * 0.5f, Screen.height * 0.5f - maskTexture.height * 0.5f, renderedTexture.width, renderedTexture.height), 0, 0);
renderedTexture.Apply();
RenderTexture.active = previousRenderTexture;
//----
// Apply the alpha mask pixel by pixel.
for(int i = 0; i < renderedTexture.width; i++)
{
for(int j = 0; j < renderedTexture.height; j++)
{
if(maskTexture.GetPixel(i, j).a > 0.5f)
{
// Mask opaque here: keep the captured pixel (self-assignment, a no-op).
renderedTexture.SetPixel(i, j, renderedTexture.GetPixel(i, j));
}
else
{
// Mask transparent here: blank the pixel out.
renderedTexture.SetPixel(i, j, new Color(0.0f, 0.0f, 0.0f, 0.0f));
}
}
}
renderedTexture.Apply();
yield return renderedTexture;
File.WriteAllBytes(Path.Combine(Application.dataPath, "YOUR_FILE_NAME"), renderedTexture.EncodeToPNG());
}
(Warning: untested code)
Also, keep in mind to describe what you have already tried before asking questions: it helps other users understand what you want to achieve and how.
Hope this helps,

how to get the coordinates of a contour

I am using OpenCV4Android version 2.4.11 and I am detecting any rectangular shapes in frames retrieved by camera. As shown in image_1 below, i draw a contour of a black color around the detected object, and what I am trying to do
is, to get all the coordinates of the drawn contour the one that is ONLY drawn in black. What I attempted is, as shown in code_1 below, i get the largest contour and the index of the largest contour and save them in the
"largestContour" and "largest_contour_index" respectively. Then, I draw the contour using
Imgproc.drawContours(mMatInputFrame, contours, largest_contour_index, new Scalar(0, 0, 0), 2, 8, hierachy, 0, new Point());
and then I pass points of the largest contour to the class FindCorners because i want to find the specific coordinates of the contour drawn in black as follows:
this.mFindCorners = new FindCorners(largestContour.toArray());
double[] cords = this.mFindCorners.getCords();
the following line of code:
double[] cords = this.mFindCorners.getCords();
should give me the smallest x-coordinate, smallest y-coordinate, largest x-coordinate and largest y-coordinate. But when I draw a circle around the coordinates I got from "this.mFindCorners.getCords();" I get something like
image_2 below, which is just the corners of the BoundingRect.
Actually, I do not want any coordinates from the boundingRect; I want access to the coordinates of the contour that is drawn around the detected object in black.
Please let me know how to get the coordinates of the contour itself.
code_1:
// code_1: pick the largest 4+-vertex contour within an area window, draw it
// in black, then mark the extreme coordinates returned by FindCorners.
if (contours.size() > 0) {
for (int i = 0; i < contours.size(); i++) {
contour2f = new MatOfPoint2f(contours.get(i).toArray());
// Polygon-simplification tolerance: 1% of the contour's perimeter.
approxDistance = Imgproc.arcLength(contour2f, true) * .01;//.02
approxCurve = new MatOfPoint2f();
Imgproc.approxPolyDP(contour2f, approxCurve, approxDistance, true);
points = new MatOfPoint(approxCurve.toArray());
double area = Math.abs(Imgproc.contourArea(points, true));
// Keep only quadrilateral-ish shapes within a plausible size range.
if (points.total() >= 4 && area >= 40000 && area <= 200000) {
if (area > largest_area) {
largest_area = area;
largest_contour_index = i;
pointsOfLargestContour = points;
largestContour = contours.get(i);
}
}
}
if (largest_area > 0) {
// Draw the winning contour in black, 2 px thick.
Imgproc.drawContours(mMatInputFrame, contours, largest_contour_index, new Scalar(0, 0, 0), 2, 8, hierachy, 0, new Point());
// NOTE(review): FindCorners returns bounding-box extremes, not points that
// lie on the contour — this is the source of the question's problem.
this.mFindCorners = new FindCorners(largestContour.toArray());
double[] cords = this.mFindCorners.getCords();
Core.circle(mMatInputFrame, new Point(cords[0], cords[1]), 10, new Scalar(255, 0, 0));
Core.circle(mMatInputFrame, new Point(cords[2], cords[3]), 10, new Scalar(255, 255, 0));
}
// NOTE(review): the closing brace of the outer `if` lies outside this excerpt.
FindCorners:
/**
 * Computes the axis-aligned extremes of a set of contour points.
 *
 * The result of {@link #getCords()} is {minX, minY, maxX, maxY} — i.e. the
 * corners of the bounding rectangle, NOT points that necessarily lie on the
 * contour itself (the minimum x and minimum y generally come from different
 * points).
 */
public class FindCorners {
private final static String TAG = FragOpenCVCam.class.getSimpleName();
// {smallest x, smallest y, largest x, largest y}
private double[] mCords = null;
/**
 * Scans the points once to find the coordinate extremes.
 *
 * Replaces the original implementation, which copied the coordinates into
 * two lists and sorted both (O(n log n)); a single O(n) sweep is enough.
 *
 * @param points contour points; must contain at least one element
 * @throws IllegalArgumentException if {@code points} is null or empty
 */
public FindCorners(Point[] points) {
if (points == null || points.length == 0) {
throw new IllegalArgumentException("points must contain at least one element");
}
Log.d(TAG, "points.length: " + points.length);
double smallestX = points[0].x;
double smallestY = points[0].y;
double largestX = points[0].x;
double largestY = points[0].y;
for (Point p : points) {
smallestX = Math.min(smallestX, p.x);
smallestY = Math.min(smallestY, p.y);
largestX = Math.max(largestX, p.x);
largestY = Math.max(largestY, p.y);
}
this.mCords = new double[] { smallestX, smallestY, largestX, largestY };
}
/** @return {minX, minY, maxX, maxY} of the points passed to the constructor. */
public double[] getCords() {
return this.mCords;
}
}
image_1:
image_2:
update
I do not want the coordinates of the bounding rect; what I want are the exact coordinates of the black contour. As shown in image_3, the coordinates I am getting from my code are where the red and yellow circles are, but I am looking for access to the coordinates of the black "contour" line so I can draw some circles on it, as shown in image_3. The green spots are just to show you where I want to have coordinates.
image_3:
Your problem is that you have sorted x's and y's separately and it is obvious that your algorithm will find the red and yellow points.
I can suggest the following algorithm:
// Single pass: track the index of the point with the smallest/largest x and y.
// Unlike sorting x's and y's separately, each resulting corner is an actual
// contour point.
// NOTE(review): `INF` is pseudocode for "larger than any coordinate"; min_y is
// inconsistently seeded with 1000 — both sentinels must exceed the image
// dimensions for this to be correct.
int min_x=INF, min_x_index, min_y=1000, min_y_index;
int max_x=-1, max_x_index, max_y=-1, max_y_index;
for (int i = 0; i < points.length; i++)
{
if(points[i].x < min_x) { min_x = points[i].x; min_x_index = i; }
if(points[i].x > max_x) { max_x = points[i].x; max_x_index = i; }
if(points[i].y < min_y) { min_y = points[i].y; min_y_index = i; }
if(points[i].y > max_y) { max_y = points[i].y; max_y_index = i; }
}
// The four extreme points, each guaranteed to lie on the contour.
Point corner1(points[min_x_index]);
Point corner2(points[min_y_index]);
Point corner3(points[max_x_index]);
Point corner4(points[max_y_index]);

libgdx Vector3 conversion wrong

Evening everyone,
I am attempting to get familiar with libgdx and Android by going through the tutorial here. All seems good except for grabbing the screen coordinates, as they get skewed in a Vector3 conversion.
So an x input of 101 gets converted to -796, and a y input of 968 is converted to -429 (touching the upper-left corner of the screen; same results from the emulator as from my phone). When clicking the bottom-right corner, the animation fires in the middle of the screen.
It all seems pretty basic, so I'm not really sure what I am setting incorrectly to get a skewed conversion. Any help is appreciated!
camera creation:
// Camera sized to the physical screen, positioned so the world origin sits at
// the bottom-left corner of the viewport.
camera = new OrthographicCamera(Gdx.graphics.getWidth(), Gdx.graphics.getHeight());
camera.position.set(camera.viewportWidth * .5f, camera.viewportHeight * .5f, 0f);
Grabbing touch coord:
/** Remembers where the screen was touched and (re)starts the explosion animation. */
public boolean touchDown(int screenX, int screenY, int pointer, int button) {
    // Restart the animation clock and flag the explosion as active.
    stateTime = 0;
    explosionHappening = true;
    // Raw screen coordinates; render() unprojects them into world space.
    touchCoordinateX = screenX;
    touchCoordinateY = screenY;
    return true; // event handled
}
Render loop:
/**
 * Clears the screen and, while an explosion is active, draws the current
 * animation frame at the last touch point (converted to world coordinates).
 */
public void render () {
    Gdx.gl.glClearColor(1, 0, 0, 1);
    Gdx.gl.glClear(GL20.GL_COLOR_BUFFER_BIT);
    stateTime += Gdx.graphics.getDeltaTime();
    // FIX: without this the batch keeps its own default projection, so the
    // coordinates produced by camera.unproject() are interpreted in the wrong
    // coordinate system and the sprite lands far from the touch point.
    batch.setProjectionMatrix(camera.combined);
    batch.begin();
    if (!explosionAnimation.isAnimationFinished(stateTime) && explosionHappening) {
        Vector3 touchPoint = new Vector3();
        touchPoint.set(touchCoordinateX,touchCoordinateY,0);
        TextureRegion currentFrame = explosionAnimation.getKeyFrame(stateTime, false); // #16
        // Converts screen coordinates (y-down) to world coordinates (y-up).
        camera.unproject(touchPoint);
        batch.draw(currentFrame, touchPoint.x, touchPoint.y);
    }
    // batch.draw(img, 0, 0);
    batch.end();
    if (explosionAnimation.isAnimationFinished(stateTime)){explosionHappening = false;}
}
I think you forgot to set camera projection matrix to your SpriteBatch. Just add
batch.setProjectionMatrix(camera.combined);
before
batch.begin();

how to use mapPoints?I need draw points(coordinates) depending matrix movement

I have a custom ImageView where I have a matrix, and I can drag, zoom, and now also paint and delete points (coordinates) over it.
The problem is: how can I move those points after zooming or dragging the matrix?
I have saved all the points in the lists below (Mark and Marking are custom classes with just two variables (x, y), float or int, to store coordinates):
static ArrayList <Marking> listaPtos = new ArrayList<Marking>();
static ArrayList <Mark> listaMarcas = new ArrayList<Mark>();
How do I save the coordinates? Using mapPoints? I tried it, but I don't know the correct formula.
// Stores a tapped point as a screen-space Mark and mirrors it into the
// absolute-coordinate list via guardaCoordenadas.
void saveCoordinates(float x, float y){
Log.i("guardaCoordenadas","de float");
Mark m = new Mark(x,y);
listaMarcas.add(m);
Log.i("marca",""+m.x+"-"+m.y);
Main.txtCont.setText(x+"-"+y);
// NOTE(review): forwards lastTouchX/lastTouchY rather than the x/y
// parameters — confirm the two are always in sync at this point.
guardaCoordenadas(lastTouchX,lastTouchY);
}
// Records the raw (absolute, untransformed) touch coordinates.
void guardaCoordenadas(int x, int y){
    Log.i("guardaCoordenadas","de int");
    final Marking punto = new Marking(x, y);
    listaPtos.add(punto);
    Log.i("listaPtos0",""+punto.x+"-"+punto.y);
}
this is my onDraw
// Draws the transformed bitmap, the current path and every stored mark.
// NOTE(review): swapping touch listeners from inside onDraw couples input
// wiring to the draw cycle — consider moving this to wherever `pinta` changes.
public void onDraw(Canvas c){
Log.d("onDraw","pinta="+pinta);
// The bitmap is drawn through `matrix`, so it follows drag/zoom.
c.drawBitmap(bitmap, matrix, paintFondo);
c.drawPath(path,new Paint());
if(pinta){
Log.d("activando","clickPinta");
//this.setOnTouchListener(null);
this.setOnTouchListener(clickPinta);
}
else{
Log.d("activando","clickImagen");
//this.setOnTouchListener(null);
this.setOnTouchListener(clickImagen);
}
if(listaPtos!=null){
Log.i("pintando",listaPtos.size()+" puntos");
/*for(Marking mark:listaPtos){
c.drawBitmap(cruz, mark.x, mark.y, paintPuntos);
//c.drawCircle(mark.x, mark.y, 20, new Paint());
}*/
// Marks are drawn at plain screen coordinates, NOT through `matrix` — they
// only track the image if something recomputes them (see moveCoordinates).
for(Mark mark:listaMarcas){
//c.drawBitmap(cruz, mark.x, mark.y, paintPuntos);
c.drawBitmap(cruz, mark.x, mark.y, new Paint());
}
}
}
and this is the method which i try use for move points:
// First (buggy) attempt at keeping marks attached to the image while dragging.
// NOTE(review): this pushes the *already mapped* screen coordinates through
// the cumulative matrix again on every call, so the error compounds with each
// drag event. The accepted fix (moveCoordinates) maps the stored absolute
// coordinates instead.
void moveCoordenadas(float x, float y){
if (mode==DRAG){
for (int i=0; i<listaMarcas.size();i++){
Mark mark = listaMarcas.get(i);
float [] coor = new float[2];
coor[0]=mark.x;
coor[1]=mark.y;
matrix.mapPoints(coor);
mark.x=coor[0];
mark.y=coor[1];
listaMarcas.set(i, mark);
}
}
// Zoom case was never implemented.
if (mode==ZOOM){
}
}
I'm trying just with drag at first because it is easier.
Thanks for your help and your time; if you need more code, just say so.
If somebody has the same problem, I solved it with matrix.mapPoints()
(mapPoints converts absolute coordinates to relative ones)
Here the code:
// Re-derives every screen-space mark from its stored absolute coordinates by
// pushing them through the current image matrix. Because the source is always
// the untransformed point, repeated calls never compound the transform.
void moveCoordinates(){
    for (int idx = 0; idx < listaPtos.size(); idx++){
        Marking absoluto = listaPtos.get(idx);   // absolute coordinates
        Mark enPantalla = listaMarcas.get(idx);  // relative (screen) coordinates
        float[] punto = { absoluto.x, absoluto.y };
        matrix.mapPoints(punto);
        enPantalla.setX(punto[0]);
        enPantalla.setY(punto[1]);
        listaMarcas.set(idx, enPantalla);
    }
}

Categories

Resources