Related
So I have looked absolutely everywhere and can't find the answer. When I draw a 3D cube in OpenGL ES for Android, it looks fine as long as I'm in portrait mode, but when I switch to landscape it looks more like a diamond. I assume the issue is with the aspect ratio, but I really can't say, so here's the code I'm using for the ratios.
// Called whenever the surface size changes (e.g. portrait <-> landscape rotation).
public void onSurfaceChanged(GL10 gl, int width, int height){
// Map the GL viewport to the full surface.
gl.glViewport(0, 0, width, height);
// Aspect ratio of the surface, used to keep the frustum proportional.
float ratio = (float) width/height;
gl.glMatrixMode(GL10.GL_PROJECTION);
gl.glLoadIdentity();
// Left/right scaled by the aspect ratio; near plane = 1, far plane = 11.
gl.glFrustumf(-ratio, ratio, -1, 1, 1, 11);
}
The other option that may be the issue, I believe, is the cube itself, but again I'm not sure, so here are the vertices and indices.
// The 8 corners of the cube (x, y, z per vertex), where `one` is the half edge length.
private int vertices[] = {
-one, -one, -one,
one, -one, -one,
one, one, -one,
-one, one, -one,
-one, -one, one,
one, -one, one,
one, one, one,
-one, one, one
};
// Triangle list: 12 triangles (2 per face) indexing into `vertices`.
private byte indices[] = {
0, 4, 5, 0, 5, 1,
1, 5, 6, 1, 6, 2,
2, 6, 7, 2, 7, 3,
3, 7, 4, 3, 4, 0,
4, 7, 6, 4, 6, 5,
3, 0, 1, 3, 1, 2
};
Alright, so I guess I'm going to answer my own question here, for anyone who needs it. I used this ratio code instead, under the onSurfaceChanged method.
double w = width/4.5;
gl.glViewport(0, (int)-w, width, width);
float ratio = (float) width/width;
gl.glMatrixMode(GL10.GL_PROJECTION);
gl.glLoadIdentity();
gl.glFrustumf(-ratio, ratio, -1, 1, 1, 14);
I am trying to create an iOS/Android demo app that shows Xamarin OpenTK / OpenGl rendering some cubes on the screen. Everything works in iOS but the same code in Android just crashes. This is completely shared code based off some of their demo code.
I get no information about the crash.
using OpenTK.Graphics.ES20;
using Xamarin.Forms;
using OpenTK.Graphics;
using OpenTK;
using System.Reflection;
using System.IO;
using System.Diagnostics;
using System;
namespace GLDemo
{
// Xamarin.Forms application root: supplies the single page shown by the app.
public class App
{
// Returns the root page; the demo consists of one OpenGL page only.
public static Page GetMainPage ()
{
return new OpenGLPage ();
}
}
// Xamarin.Forms ContentPage hosting an OpenGLView that renders three rotating,
// bouncing colored cubes with OpenGL ES 2.0 (via OpenTK). GL resources are
// created lazily on the first OnDisplay callback, once a GL context exists.
public class OpenGLPage : ContentPage
{
// Shader attribute/uniform locations, resolved in CompileShaders().
uint positionSlot;
uint colorSlot;
uint projectionSlot;
uint modelViewSlot;
// Handles of the color and depth renderbuffers.
uint colorRenderBuffer;
uint depthBuffer;
// cube vertices: the 8 corners of a cube spanning [-1, 1] on each axis
Vector3[] Verticies = new Vector3[] {
new Vector3 (1.0f, -1.0f, 1.0f),
new Vector3 (1.0f, 1.0f, 1.0f),
new Vector3 (-1.0f, 1.0f, 1.0f),
new Vector3(-1.0f, -1.0f, 1.0f),
new Vector3(1.0f, -1.0f, -1.0f),
new Vector3(1.0f, 1.0f, -1.0f),
new Vector3(-1.0f, 1.0f, -1.0f),
new Vector3(-1.0f, -1.0f, -1.0f)};
// One RGBA color per vertex; interpolated across the faces by the shaders.
Vector4[] Colors = new Vector4[]{
new Vector4(0.0f, 0.0f, 0.0f, 1.0f),
new Vector4(0.0f, 0.0f, 1.0f, 1.0f),
new Vector4(0.0f, 1.0f, 0.0f, 1.0f),
new Vector4(0.0f, 1.0f, 1.0f, 1.0f),
new Vector4(1.0f, 0.0f, 0.0f, 1.0f),
new Vector4(1.0f, 0.0f, 1.0f, 1.0f),
new Vector4(1.0f, 1.0f, 0.0f, 1.0f),
new Vector4(1.0f, 1.0f, 1.0f, 1.0f)};
// Triangle list: 12 triangles (2 per cube face) indexing into Verticies.
byte[] Indices = new byte []
{ 0, 1, 2,
2, 3, 0,
4, 6, 5,
4, 7, 6,
2, 7, 3,
7, 6, 2,
0, 4, 1,
4, 1, 5,
6, 2, 1,
1, 6, 5,
0, 3, 7,
0, 7, 4};
// Builds the page UI (GL view, render-loop toggle, manual redraw button) and
// installs the per-frame render callback.
public OpenGLPage ()
{
Title = "OpenGL";
var view = new OpenGLView { HasRenderLoop = true };
var toggle = new Xamarin.Forms.Switch { IsToggled = true };
var button = new Button { Text = "Display" };
view.HeightRequest = 300;
view.WidthRequest = 300;
// Animation state captured by the OnDisplay closure.
bool initialize = false;
float rotation = 0.0f;
float translation = 0.0f;
bool goingRight = true;
view.OnDisplay = r => {
// One-time GL setup on the first frame.
if(!initialize){
SetupDepthBuffer ();
SetupRenderBuffers ();
SetupFrameBuffer ();
CompileShaders ();
SetupVBOs ();
initialize = true;
}
GL.ClearColor(0.0f,0.0f,0.0f,1.0f);
GL.Clear(ClearBufferMask.ColorBufferBit | ClearBufferMask.DepthBufferBit);
GL.Enable(EnableCap.DepthTest);
// Advance the animation: constant spin, and a translation that
// ping-pongs between -1 and +1.
rotation += 0.02f;
if(goingRight){
translation += 0.01f;
if(translation > 1.0f){
goingRight = false;
}
} else{
translation -= 0.01f;
if(translation < -1.0f){
goingRight = true;
}
}
// Draw three cubes, each with its own model-view transform.
for(int i = 0; i < 3; i++){
// Frustum height derived from the view's aspect ratio.
float h = 4.0f * (float)view.Height / (float)view.Width;
Matrix4[] projection = new Matrix4[]{ Matrix4.CreatePerspectiveOffCenter(-2, 2, -h/2, h/2, 4, 10) };
GL.UniformMatrix4 ((int)projectionSlot, false, ref projection[0]);
// NOTE(review): `-1^i` is bitwise XOR in C# (-1, -2, -3 for i = 0, 1, 2),
// not exponentiation -- an alternating sign `(i % 2 == 0 ? 1 : -1)` was
// probably intended. Confirm before relying on this motion.
Matrix4[] modelView = new Matrix4[]{ Matrix4.CreateRotationX(rotation) * Matrix4.CreateRotationY(rotation) * Matrix4.CreateRotationZ(rotation) * Matrix4.CreateTranslation (translation - i * 3.5f + 4.0f, (float)(-1^i) * translation, -7 + translation) };
GL.UniformMatrix4 ((int)modelViewSlot, false, ref modelView [0]);
// NOTE(review): this is the call that crashes on Android, as discussed
// later in the thread -- passing IntPtr.Zero instead of the literal 0
// selects the IBO-offset overload and avoids the crash.
GL.DrawElements(BeginMode.Triangles, Indices.Length, DrawElementsType.UnsignedByte, 0);
}
};
// The switch starts/stops the continuous render loop.
toggle.Toggled += (s, a) => {
view.HasRenderLoop = toggle.IsToggled;
};
// The button forces a single redraw when the render loop is off.
button.Clicked += (s, a) => view.Display ();
var stack = new StackLayout {
Padding = new Size (20, 20),
Children = {view, toggle, button}
};
Content = stack;
}
// Allocates the color renderbuffer backing the framebuffer (300x300, RGBA4).
void SetupRenderBuffers(){
GL.GenRenderbuffers (1, out colorRenderBuffer);
// NOTE(review): this binds the handle as an ArrayBuffer; it should almost
// certainly be GL.BindRenderbuffer(RenderbufferTarget.Renderbuffer, ...)
// so that the RenderbufferStorage call below targets this renderbuffer.
GL.BindBuffer (BufferTarget.ArrayBuffer, colorRenderBuffer);
GL.RenderbufferStorage (RenderbufferTarget.Renderbuffer, RenderbufferInternalFormat.Rgba4, 300, 300);
}
// Creates a framebuffer and attaches the color and depth renderbuffers to it.
void SetupFrameBuffer(){
uint frameBuffer;
GL.GenFramebuffers (1, out frameBuffer);
GL.BindFramebuffer (FramebufferTarget.Framebuffer, frameBuffer);
GL.FramebufferRenderbuffer (FramebufferTarget.Framebuffer, FramebufferSlot.ColorAttachment0,
RenderbufferTarget.Renderbuffer, colorRenderBuffer);
GL.FramebufferRenderbuffer (FramebufferTarget.Framebuffer, FramebufferSlot.DepthAttachment,
RenderbufferTarget.Renderbuffer, depthBuffer);
}
// Allocates the 16-bit depth renderbuffer (300x300).
void SetupDepthBuffer(){
GL.GenRenderbuffers (1, out depthBuffer);
GL.BindRenderbuffer (RenderbufferTarget.Renderbuffer, depthBuffer);
GL.RenderbufferStorage (RenderbufferTarget.Renderbuffer, RenderbufferInternalFormat.DepthComponent16, 300, 300);
}
// Loads a GLSL shader source from an embedded resource and compiles it.
// shaderName: resource name without the platform prefix or ".glsl" suffix.
// Returns the GL shader handle. NOTE(review): the compile status is never
// checked -- consider querying ShaderParameter.CompileStatus and the info log.
uint CompileShader(string shaderName, ShaderType shaderType){
string prefix;
#if __IOS__
prefix = "GLDemo.iOS.";
#endif
#if __ANDROID__
prefix = "GLDemo.Android.";
#endif
var assembly = typeof(App).GetTypeInfo ().Assembly;
// Diagnostic: list every embedded resource so a wrong prefix is easy to spot.
foreach (var res in assembly.GetManifestResourceNames())
System.Diagnostics.Debug.WriteLine("found resource: " + res);
Stream stream = assembly.GetManifestResourceStream (prefix + shaderName + ".glsl");
string shaderString;
using (var reader = new StreamReader (stream)) {
shaderString = reader.ReadToEnd ();
}
Debug.WriteLine (shaderString);
uint shaderHandle = (uint)GL.CreateShader (shaderType);
GL.ShaderSource ((int)shaderHandle, shaderString);
GL.CompileShader (shaderHandle);
return shaderHandle;
}
// Compiles and links the vertex/fragment shaders, activates the program, and
// caches the attribute/uniform locations used at draw time.
void CompileShaders(){
uint vertexShader = CompileShader ("SimpleVertex", ShaderType.VertexShader);
uint fragmentShader = CompileShader ("SimpleFragment", ShaderType.FragmentShader);
uint programHandle = (uint)GL.CreateProgram ();
GL.AttachShader (programHandle, vertexShader);
GL.AttachShader (programHandle, fragmentShader);
GL.LinkProgram (programHandle);
GL.UseProgram (programHandle);
positionSlot = (uint)GL.GetAttribLocation (programHandle, "Position");
colorSlot = (uint)GL.GetAttribLocation (programHandle, "SourceColor");
projectionSlot = (uint)GL.GetUniformLocation (programHandle, "Projection");
modelViewSlot = (uint)GL.GetUniformLocation (programHandle, "Modelview");
GL.EnableVertexAttribArray (positionSlot);
GL.EnableVertexAttribArray (colorSlot);
// NOTE(review): Projection/Modelview are uniforms, not vertex attributes;
// enabling them as attribute arrays is at best a no-op and may raise a GL error.
GL.EnableVertexAttribArray (projectionSlot);
GL.EnableVertexAttribArray (modelViewSlot);
}
// Uploads vertex positions, colors, and triangle indices to GPU buffers and
// wires up the position/color attribute pointers.
void SetupVBOs(){
uint vertexBuffer;
GL.GenBuffers (1, out vertexBuffer);
GL.BindBuffer (BufferTarget.ArrayBuffer, vertexBuffer);
GL.BufferData (BufferTarget.ArrayBuffer,(IntPtr)(Vector3.SizeInBytes * Verticies.Length), Verticies, BufferUsage.StaticDraw);
GL.VertexAttribPointer (positionSlot, 3, VertexAttribPointerType.Float, false, Vector3.SizeInBytes, 0);
uint colorBuffer;
GL.GenBuffers (1, out colorBuffer);
GL.BindBuffer (BufferTarget.ArrayBuffer, colorBuffer);
GL.BufferData (BufferTarget.ArrayBuffer, (IntPtr)(Vector4.SizeInBytes * Colors.Length), Colors, BufferUsage.StaticDraw);
GL.VertexAttribPointer (colorSlot, 4, VertexAttribPointerType.Float, false, Vector4.SizeInBytes, 0);
uint indexBuffer;
GL.GenBuffers (1, out indexBuffer);
GL.BindBuffer (BufferTarget.ElementArrayBuffer, indexBuffer);
GL.BufferData (BufferTarget.ElementArrayBuffer,(IntPtr)(sizeof(byte) * Indices.Length), Indices, BufferUsage.StaticDraw);
}
}
}
UPDATE
I believe this is something to do with Android and VBOs. Is this a known issue? Does anyone know how to solve it?
Update 2
Updated to
// Update 2: VBO setup with the index buffer (IBO) removed entirely -- vertex
// positions and colors are still uploaded to GPU buffers, but index data is
// no longer bound to ElementArrayBuffer.
void SetupVBOs(){
uint vertexBuffer;
GL.GenBuffers (1, out vertexBuffer);
GL.BindBuffer (BufferTarget.ArrayBuffer, vertexBuffer);
// Upload the 8 cube corner positions.
GL.BufferData (BufferTarget.ArrayBuffer,(IntPtr)(Vector3.SizeInBytes * Verticies.Length), Verticies, BufferUsage.StaticDraw);
GL.VertexAttribPointer (positionSlot, 3, VertexAttribPointerType.Float, false, Vector3.SizeInBytes, 0);
uint colorBuffer;
GL.GenBuffers (1, out colorBuffer);
GL.BindBuffer (BufferTarget.ArrayBuffer, colorBuffer);
// Upload the per-vertex RGBA colors.
GL.BufferData (BufferTarget.ArrayBuffer, (IntPtr)(Vector4.SizeInBytes * Colors.Length), Colors, BufferUsage.StaticDraw);
GL.VertexAttribPointer (colorSlot, 4, VertexAttribPointerType.Float, false, Vector4.SizeInBytes, 0);
}
I try
GL.DrawElements(BeginMode.Triangles, Indices.Length, DrawElementsType.UnsignedByte, Indices);
it crashes on device
I try
unsafe
{
fixed (byte* ptr = Indices)
{
GL.DrawElements(BeginMode.Triangles, Indices.Length, DrawElementsType.UnsignedByte, new IntPtr(ptr));
}
}
And nothing renders with error
[Adreno-EGLSUB] : Invalid native buffer. Failed to queueBuffer
[Adreno-EGLSUB] : native buffer is NULL
For some reason, Android crashes when we use an IBO (Index Buffer Object) with Xamarin.Forms + OpenGL. Instead pass the index information into DrawElements.
// Answer's workaround: skip the IBO (which crashed on Android) and pass the
// index data directly to GL.DrawElements instead; only the position and
// color VBOs are created here.
void SetupVBOs(){
uint vertexBuffer;
GL.GenBuffers (1, out vertexBuffer);
GL.BindBuffer (BufferTarget.ArrayBuffer, vertexBuffer);
// Upload the 8 cube corner positions.
GL.BufferData (BufferTarget.ArrayBuffer,(IntPtr)(Vector3.SizeInBytes * Verticies.Length), Verticies, BufferUsage.StaticDraw);
GL.VertexAttribPointer (positionSlot, 3, VertexAttribPointerType.Float, false, Vector3.SizeInBytes, 0);
uint colorBuffer;
GL.GenBuffers (1, out colorBuffer);
GL.BindBuffer (BufferTarget.ArrayBuffer, colorBuffer);
// Upload the per-vertex RGBA colors.
GL.BufferData (BufferTarget.ArrayBuffer, (IntPtr)(Vector4.SizeInBytes * Colors.Length), Colors, BufferUsage.StaticDraw);
GL.VertexAttribPointer (colorSlot, 4, VertexAttribPointerType.Float, false, Vector4.SizeInBytes, 0);
}
EDIT
I have revisited some OpenTK the past week or so, and I figured it out!
GL.DrawElements() is meant to be used to pass in the index data, if you prefer to use an IBO, bind it up as you have shown, and instead use
GL.DrawArrays(BeginMode.Triangles, 0, Indices.Length);
I know this thread is a bit old, but I've been fighting this issue for a couple days and finally solved it by changing the last parameter of the DrawElements call to IntPtr.Zero :
GL.DrawElements(BeginMode.Triangles, Indices.Length, DrawElementsType.UnsignedByte, IntPtr.Zero);
I've tried to search both on google and here the solution to my problem and I don't think it has been asked before (or may I be using the wrong words in my search? ^^')
Anyway, this is what I want to have: a OpenGL surface view (showing a cube for instance) that can rotate according to the orientation of the tablet. So far, nothing hard I guess and I have the code below that works perfectly well
public class RotationVectorDemo extends Activity {
private GLSurfaceView mGLSurfaceView;
private SensorManager mSensorManager;
private MyRenderer mRenderer;
#Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
// Get an instance of the SensorManager
mSensorManager = (SensorManager)getSystemService(SENSOR_SERVICE);
// Create our Preview view and set it as the content of our
// Activity
mRenderer = new MyRenderer();
mGLSurfaceView = new GLSurfaceView(this);
mGLSurfaceView.setRenderer(mRenderer);
setContentView(mGLSurfaceView);
}
#Override
protected void onResume() {
// Ideally a game should implement onResume() and onPause()
// to take appropriate action when the activity looses focus
super.onResume();
mRenderer.start();
mGLSurfaceView.onResume();
}
#Override
protected void onPause() {
// Ideally a game should implement onResume() and onPause()
// to take appropriate action when the activity looses focus
super.onPause();
mRenderer.stop();
mGLSurfaceView.onPause();
}
class MyRenderer implements GLSurfaceView.Renderer, SensorEventListener {
private Cube mCube;
private Sensor mRotationVectorSensor;
private final float[] mRotationMatrix = new float[16];
public MyRenderer() {
// find the rotation-vector sensor
mRotationVectorSensor = mSensorManager.getDefaultSensor(
Sensor.TYPE_ROTATION_VECTOR);
mCube = new Cube();
// initialize the rotation matrix to identity
mRotationMatrix[ 0] = 1;
mRotationMatrix[ 4] = 1;
mRotationMatrix[ 8] = 1;
mRotationMatrix[12] = 1;
}
public void start() {
// enable our sensor when the activity is resumed, ask for
// 10 ms updates.
mSensorManager.registerListener(this, mRotationVectorSensor, 10000);
}
public void stop() {
// make sure to turn our sensor off when the activity is paused
mSensorManager.unregisterListener(this);
}
public void onSensorChanged(SensorEvent event) {
// we received a sensor event. it is a good practice to check
// that we received the proper event
if (event.sensor.getType() == Sensor.TYPE_ROTATION_VECTOR) {
// convert the rotation-vector to a 4x4 matrix. the matrix
// is interpreted by Open GL as the inverse of the
// rotation-vector, which is what we want.
SensorManager.getRotationMatrixFromVector(
mRotationMatrix , event.values);
}
}
public void onDrawFrame(GL10 gl) {
// clear screen
gl.glClear(GL10.GL_COLOR_BUFFER_BIT);
// set-up modelview matrix
gl.glMatrixMode(GL10.GL_MODELVIEW);
gl.glLoadIdentity();
gl.glTranslatef(0, 0, -3.0f);
gl.glMultMatrixf(mRotationMatrix, 0);
// draw our object
gl.glEnableClientState(GL10.GL_VERTEX_ARRAY);
gl.glEnableClientState(GL10.GL_COLOR_ARRAY);
mCube.draw(gl);
}
public void onSurfaceChanged(GL10 gl, int width, int height) {
// set view-port
gl.glViewport(0, 0, width, height);
// set projection matrix
float ratio = (float) width / height;
gl.glMatrixMode(GL10.GL_PROJECTION);
gl.glLoadIdentity();
gl.glFrustumf(-ratio, ratio, -1, 1, 1, 10);
}
public void onSurfaceCreated(GL10 gl, EGLConfig config) {
// dither is enabled by default, we don't need it
gl.glDisable(GL10.GL_DITHER);
// clear screen in white
gl.glClearColor(1,1,1,1);
}
class Cube {
// initialize our cube
private FloatBuffer mVertexBuffer;
private FloatBuffer mColorBuffer;
private ByteBuffer mIndexBuffer;
public Cube() {
final float vertices[] = {
-1, -1, -1, 1, -1, -1,
1, 1, -1, -1, 1, -1,
-1, -1, 1, 1, -1, 1,
1, 1, 1, -1, 1, 1,
};
final float colors[] = {
0, 0, 0, 1, 1, 0, 0, 1,
1, 1, 0, 1, 0, 1, 0, 1,
0, 0, 1, 1, 1, 0, 1, 1,
1, 1, 1, 1, 0, 1, 1, 1,
};
final byte indices[] = {
0, 4, 5, 0, 5, 1,
1, 5, 6, 1, 6, 2,
2, 6, 7, 2, 7, 3,
3, 7, 4, 3, 4, 0,
4, 7, 6, 4, 6, 5,
3, 0, 1, 3, 1, 2
};
ByteBuffer vbb = ByteBuffer.allocateDirect(vertices.length*4);
vbb.order(ByteOrder.nativeOrder());
mVertexBuffer = vbb.asFloatBuffer();
mVertexBuffer.put(vertices);
mVertexBuffer.position(0);
ByteBuffer cbb = ByteBuffer.allocateDirect(colors.length*4);
cbb.order(ByteOrder.nativeOrder());
mColorBuffer = cbb.asFloatBuffer();
mColorBuffer.put(colors);
mColorBuffer.position(0);
mIndexBuffer = ByteBuffer.allocateDirect(indices.length);
mIndexBuffer.put(indices);
mIndexBuffer.position(0);
}
public void draw(GL10 gl) {
gl.glEnable(GL10.GL_CULL_FACE);
gl.glFrontFace(GL10.GL_CW);
gl.glShadeModel(GL10.GL_SMOOTH);
gl.glVertexPointer(3, GL10.GL_FLOAT, 0, mVertexBuffer);
gl.glColorPointer(4, GL10.GL_FLOAT, 0, mColorBuffer);
gl.glDrawElements(GL10.GL_TRIANGLES, 36, GL10.GL_UNSIGNED_BYTE, mIndexBuffer);
}
}
public void onAccuracyChanged(Sensor sensor, int accuracy) {
}
}
}
However, when I am locking the screen, moving around and unlocking it afterwards the cube has moved too. Which is logical and I understand perfectly well why. Yet, I would like to know if it's possible to avoid that, like kinda resetting the sensors or something like that, and how I can do it.
I'm not sure I'm using the good kind of sensor at all of if I should change it, or if it's something that can be solved in the code or so. Bear with me as I'm just beginning to work with android sensors.
Basically, this problem is linked to an other that I have on a bigger application but I figured out it would be simpler to use this example to try and solve that. However, if you want to know what my problem is in my bigger application it's essentially the same except that to move the cube, people can either use their fingers (finger_mode) or the sensors (phone_mode). What I want is somehow to be able to rotate the cube with the fingers without paying attention to the sensors and when I go into sensor_mode that they do not change the orientation of the cube just because they are activated. I'm not sure it's clear, if it's not, lemme know.
I'm guessing, since i use touch to modify the rotation matrix that is used by OpenGL there might be some operations that can be done on rotation matrices to solve my problem. Or maybe it's just a sensor problem. Maybe both actually I have no clue so far but these are the different solutions I have been trying to use.
Thanks in advance for helping me figuring this out.
Best,
So the rotation vector is definitely the good kind of sensor to use. Both the accelerometer and the gyroscope won't be of any help for what I want to do.
However I still have to figure out what to do with the rotation matrices that I have now.
I have this code:
// AndEngine scene callback: spawns a single snow sprite at a random x position.
public Scene onLoadScene() {
Random randomGenerator = new Random();
// Random horizontal spawn position within the camera width.
pX = randomGenerator.nextInt(CAMERA_WIDTH);
// 30x30 sprite placed just below the top edge (y = 1).
Sprite snow = new Sprite (pX, 1, 30, 30, mTextureSnowRegion);
scene.getLastChild().attachChild(snow);
return scene;
}
I am trying to make a snowfall. I was trying to use a MoveModifier, but nothing works.
Please help.
I would suggest using a particle system in AndEngine: http://code.google.com/p/andengineexamples/source/browse/src/org/anddev/andengine/examples/ParticleSystemSimpleExample.java
public Scene onLoadScene() {
Random randomGenerator = new Random();
pX = randomGenerator.nextInt(CAMERA_WIDTH);
Sprite snow = new Sprite (pX, 1, 30, 30, mTextureSnowRegion);
scene.getLastChild().attachChild(snow);
return scene;
This looks good. You just need to use a MoveYModifier instead of the MoveModifier.
You should also use a GenericPool in AndEngine, because it uses a lot of memory to keep creating new Sprite instances and attaching them. Also, don't forget to detach each sprite once it has gone off screen.
Check out this
Using this particle code in my game to create snow. My game uses a 800x480 camera.
final RectangleParticleEmitter particleEmitter = new RectangleParticleEmitter(184.0f,44.0f,340,60);
final ParticleSystem particleSystem = new ParticleSystem(particleEmitter, 100, 200, 360, this.mParticleTextureRegion);
particleSystem.addParticleInitializer(new ColorInitializer(1, 1, 1));
particleSystem.addParticleInitializer(new AlphaInitializer(0));
particleSystem.setBlendFunction(GL10.GL_SRC_ALPHA, GL10.GL_ONE);
particleSystem.addParticleInitializer(new VelocityInitializer(-200, 200, -200, 200));
particleSystem.addParticleInitializer(new RotationInitializer(0.0f, 360.0f));
particleSystem.addParticleModifier(new ScaleModifier(1.0f, 1.2f, 0, 5));
particleSystem.addParticleModifier(new ColorModifier(1, 0.98f, 1, 0.96f, 1, 0.82f, 0, 3));
particleSystem.addParticleModifier(new ColorModifier(1, 1, 0.5f, 1, 0, 1, 4, 6));
particleSystem.addParticleModifier(new org.anddev.andengine.entity.particle.modifier.AlphaModifier(0, 1, 0, 1));
particleSystem.addParticleModifier(new org.anddev.andengine.entity.particle.modifier.AlphaModifier(1, 0, 5, 6));
particleSystem.addParticleModifier(new ExpireModifier(3, 6));
I am using similar particle system settings as @UncleIstvan.
final BatchedPseudoSpriteParticleSystem particleSystem = new BatchedPseudoSpriteParticleSystem(
new RectangleParticleEmitter(CAMERA_WIDTH / 2, CAMERA_HEIGHT, CAMERA_WIDTH, 1),
2, 5, 100, mSnowParticleRegion,
this.getVertexBufferObjectManager()
);
particleSystem.setBlendFunction(GLES20.GL_SRC_ALPHA, GLES20.GL_ONE);
particleSystem.addParticleInitializer(new VelocityParticleInitializer<Entity>(-3, 3, -20, -40));
particleSystem.addParticleInitializer(new AccelerationParticleInitializer<Entity>(-3, 3, -3, -5));
particleSystem.addParticleInitializer(new RotationParticleInitializer<Entity>(0.0f, 360.0f));
particleSystem.addParticleInitializer(new ExpireParticleInitializer<Entity>(10f));
particleSystem.addParticleInitializer(new ScaleParticleInitializer<Entity>(0.2f, 0.5f));
particleSystem.addParticleModifier(new AlphaParticleModifier<Entity>(6f, 10f, 1.0f, 0.0f));
scene.attachChild(particleSystem);
But I added an entity modifier to each particle:
particleSystem.addParticleInitializer(new RegisterXSwingEntityModifierInitializer<Entity>(10f, 0f, (float) Math.PI * 8, 3f, 25f, true));
It needs a custom particle initializer. In the initializer I register a new modifier to each particle:
#Override
public void onInitializeParticle(Particle<T> pParticle) {
pParticle.getEntity().registerEntityModifier(
new PositionXSwingModifier(mDuration,
mFromValue, mToValue,
mFromMagnitude, mToMagnitude));
}
And the last part is the modifier that uses a growing sine wave to create the swinging motion (some parts omitted):
public class PositionXSwingModifier extends SingleValueSpanEntityModifier {
public PositionXSwingModifier(float pDuration, float pFromValue, float pToValue,
float pFromMagnitude, float pToMagnitude) {
// fromValue is usually 0
// toValue means how many times will the sine wave oscillate
// every 2pi is full sin wave
super(pDuration, pFromValue, pToValue);
mFromMagnitude = pFromMagnitude;
mToMagnitude = pToMagnitude;
}
#Override
protected void onSetValue(IEntity pItem, float pPercentageDone, float pValue) {
// current magnitude based on percentage
float currentMagnitude = mFromMagnitude + (mToMagnitude - mFromMagnitude) * pPercentageDone;
// current sine wave value
float currentSinValue = (float) Math.sin(pValue);
// change the x position of the flake
pItem.setX(mInitialX + currentMagnitude * currentSinValue);
}
}
It's based partly on my question here: https://gamedev.stackexchange.com/questions/56475/how-to-simulate-feather-fall-in-box2d
And you can get the full code and the APK to try it out here.
I'm currently developing my own augmented reality app. I'm trying to write my own AR Engine, since all frameworks I've seen so far are just usable with GPS data.
It's going to be used indoors, I'm getting my position data from another system.
What I have so far is:
float[] vector = { 2, 2, 1, 0 };
float transformed[] = new float[4];
float[] R = new float[16];
float[] I = new float[16];
float[] r = new float[16];
float[] S = { 400f, 1, 1, 1, 1, -240f, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
float[] B = { 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1, 400f, 240f, 1, 1 };
float[] temp1 = new float[16];
float[] temp2 = new float[16];
float[] frustumM = {1.5f,0,0,0,0,-1.5f,0,0,0,0,1.16f,1,0,0,-3.24f,0};
//Rotationmatrix to get transformation for device to world coordinates
SensorManager.getRotationMatrix(R, I, accelerometerValues, geomagneticMatrix);
SensorManager.remapCoordinateSystem(R, SensorManager.AXIS_X, SensorManager.AXIS_Z, r);
//invert to get transformation for world to camera
Matrix.invertM(R, 0, r, 0);
Matrix.multiplyMM(temp1, 0, frustumM, 0, R, 0);
Matrix.multiplyMM(temp2, 0, S, 0, temp1, 0);
Matrix.multiplyMM(temp1, 0, B, 0, temp2, 0);
Matrix.multiplyMV(transformed, 0, temp1, 0, vector, 0);
I know its ugly code, but i'm just trying to get the object "vector" get painted correctly with my position being (0,0,0) for now.
My screen size is hardcoded in the matrix S and B (800x480).
The result should be stored in "transformed" and should be in a form like transformed = {x,y,z,w}
For the math I've used this link: http://www.inf.fu-berlin.de/lehre/WS06/19605_Computergrafik/doku/rossbach_siewert/camera.html
Sometimes my graphic gets painted but it jumps around and its not at the correct position. I've logged the rolling with SensorManager.getOrientation and they seem ok and stable.
So I think I'm doing something with the math wrong but I couldn't find better sources about the math to transform my data. Could anyone help me please?
Thanks in advance
martin