author     Justin Berger <j.david.berger@gmail.com>   2018-03-21 01:20:49 -0600
committer  Justin Berger <j.david.berger@gmail.com>   2018-03-21 01:20:49 -0600
commit     3209993da5cd0bed791babf5a45cdb42cd1e6f46 (patch)
tree       ed79526b99a0ab619e65062c45a2f64d40b33f2d
parent     31d9fc358a85fe47dab28f78c396ee1f8d4d6dbb (diff)
parent     be3fa4562f9578472de1ded5588df8dc502898c6 (diff)
Merge branch 'master' into standard_lh_calib
-rw-r--r--  README.md                           16
-rw-r--r--  data_recorder.c                      4
-rw-r--r--  include/libsurvive/poser.h          11
-rw-r--r--  src/poser.c                          4
-rw-r--r--  src/poser_daveortho.c              293
-rw-r--r--  src/poser_sba.c                    174
-rwxr-xr-x  src/survive_cal.c                   37
-rw-r--r--  src/survive_data.c                  46
-rw-r--r--  src/survive_process.c               19
-rw-r--r--  src/survive_sensor_activations.c     3
10 files changed, 456 insertions, 151 deletions
diff --git a/README.md b/README.md
index 6639946..518a985 100644
--- a/README.md
+++ b/README.md
@@ -117,6 +117,8 @@ Poser | [poser_daveortho.c](src/poser_daveortho.c) | A very fast system using or
Poser | [poser_dummy.c](src/poser_dummy.c) | Template for posers | [@cnlohr](https://github.com/cnlohr)
Poser | [poser_octavioradii.c](src/poser_octavioradii.c) | A potentially very fast poser that works by finding the best fit of the distances from the lighthouse to each sensor that matches the known distances between sensors, given the known angles of a lighthouse sweep. Incomplete: distances appear to be found correctly, but more work is needed to turn this into a pose. | [@mwturvey](https://github.com/mwturvey) and [@octavio2895](https://github.com/octavio2895)
Poser | [poser_turveytori.c](src/poser_turveytori.c) | A moderately fast, fairly high precision poser that works by determining the angle at the lighthouse between many sets of two sensors. Using the inscribed angle theorem, each set defines a torus of possible locations of the lighthouse. Multiple sets define multiple tori, and this poser finds the most likely location of the lighthouse using least-squares distance. Best suited for calibration, but it can be used for real-time tracking on a powerful system. | [@mwturvey](https://github.com/mwturvey)
+Poser | [poser_epnp.c](src/poser_epnp.c) | A reasonably fast and accurate poser for calibration and tracking that uses the [EPnP algorithm](https://en.wikipedia.org/wiki/Perspective-n-Point#EPnP) to solve the Perspective-n-Point problem. Suitable for fast tracking, but does best with 5-6 or more sensor readings. | [@jdavidberger](https://github.com/jdavidberger)
+Poser | [poser_sba.c](src/poser_sba.c) | A reasonably fast and accurate poser for calibration and tracking that depends on a 'seed' poser to give it an initial estimate. It then performs [bundle adjustment](https://en.wikipedia.org/wiki/Bundle_adjustment) to minimize reprojection error given the readings from both lighthouses, which greatly increases accuracy by incorporating all of the available light data. Set the 'SBASeedPoser' config option to specify the seed poser; the default is EPNP. | [@jdavidberger](https://github.com/jdavidberger)
Disambiguator | [survive_data.c](src/survive_data.c) (currently #ifdefed out) | The old disambiguator - very fast, but slightly buggy. | [@cnlohr](https://github.com/cnlohr)
Disambiguator | [survive_data.c](src/survive_data.c) (current disambiguator) | More complicated but much more robust disambiguator | [@mwturvey](https://github.com/mwturvey)
Disambiguator | superseded disambiguator | A more sophisticated disambiguator; development was abandoned. Removed from tree. | [@jpicht](https://github.com/jpicht)
@@ -272,12 +274,20 @@ libsurvive has an integrated tool that allows you to record and playback streams
```
make
-./data_recorder my_playback_file
+./data_recorder -o my_playback_file
```
This gives you a file -- my_playback_file -- with all the device configurations and events you need to replay it.
-To actually replay it, put that directory path in the 'PlaybackFile' configuration value in config.json and run libsurvive as usual. Note that this will purposefully stop the USB devices from loading as to not confuse the library with inconsistent data.
+You can also just let it stream to standard output, but this tends to be a lot of information.
+
+To actually replay it, put that path in the 'playbackfile' configuration value in config.json and run libsurvive as usual. Note that this will purposefully stop the USB devices from loading so as not to confuse the library with inconsistent data.
+
+You can also replay it just with command line options:
+
+```
+./calibrate --playbackfile my_playback_file
+```
## Playback speed
@@ -319,7 +329,7 @@ Given an HMD:
* pointed FACE DOWN at the lighthouse
* 1M above the lighthouse
- Will produce a pose of [[0, 0, -1] [0, 0, 0, 1]* ]. NOTE: The quaternion is inverted. This is the pose of the OBJECT in WORLD space. For our example, the lighthouse is at 0,0,0 in world space. In practicality, the lighthouse will be at some other place in the scene.
+ Will produce a pose of [[0, 0, -1] [0, 1, 0, 0] ]. NOTE: The quaternion is inverted, so the rotation axis is arbitrary; the point is that w is 0. This is the pose of the OBJECT in WORLD space. For our example, the lighthouse is at 0,0,0 in world space. In practice, the lighthouse will be at some other place in the scene.
The idea is you should be able to take a coordinate local to the HMD and then use ```ApplyPoseToPoint``` to transform that point into world space, by applying the output of the poser.
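
As an illustrative aside (not part of the patch): applying a pose to a point is just a quaternion rotation followed by a translation. A minimal sketch of that convention, using the linmath helpers that appear elsewhere in this commit and a made-up sensor coordinate:

```
#include <libsurvive/survive.h>
#include <linmath.h>

/* Sketch only: transform a point from HMD/object space into world space by
 * applying an object->world SurvivePose, i.e. out = rotate(Rot, pt) + Pos.
 * The sensor coordinate below is a made-up value for illustration. */
void example_apply_pose(SurvivePose *obj2world) {
	FLT pt_obj[3] = {0.01, 0.02, 0.03}; /* hypothetical sensor location in HMD space */
	FLT pt_world[3];
	quatrotatevector(pt_world, obj2world->Rot, pt_obj);
	add3d(pt_world, pt_world, obj2world->Pos);
	/* Equivalently: ApplyPoseToPoint(pt_world, obj2world->Pos, pt_obj); as used in poser_sba.c below. */
}
```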
diff --git a/data_recorder.c b/data_recorder.c
index a8e012d..7e5384a 100644
--- a/data_recorder.c
+++ b/data_recorder.c
@@ -62,8 +62,8 @@ int my_config_process(SurviveObject *so, char *ct0conf, int len) {
return survive_default_htc_config_process(so, ct0conf, len);
}
-void my_lighthouse_process(SurviveContext *ctx, uint8_t lighthouse, SurvivePose *lh_pose) {
- survive_default_lighthouse_pose_process(ctx, lighthouse, lh_pose);
+void my_lighthouse_process(SurviveContext *ctx, uint8_t lighthouse, SurvivePose *lh_pose, SurvivePose *obj) {
+ survive_default_lighthouse_pose_process(ctx, lighthouse, lh_pose, obj);
write_to_output("%d LH_POSE %0.6f %0.6f %0.6f %0.6f %0.6f %0.6f %0.6f\n", lighthouse, lh_pose->Pos[0],
lh_pose->Pos[1], lh_pose->Pos[2], lh_pose->Rot[0], lh_pose->Rot[1], lh_pose->Rot[2],
lh_pose->Rot[3]);
diff --git a/include/libsurvive/poser.h b/include/libsurvive/poser.h
index 6b5b09d..b0b1a7b 100644
--- a/include/libsurvive/poser.h
+++ b/include/libsurvive/poser.h
@@ -8,13 +8,14 @@
extern "C" {
#endif
-typedef enum PoserType_t
-{
+typedef enum PoserType_t {
POSERDATA_NONE = 0,
POSERDATA_IMU,
- POSERDATA_LIGHT, //Single lighting event.
- POSERDATA_FULL_SCENE, //Full, statified X, Y sweep data for both lighthouses.
- POSERDATA_DISASSOCIATE, //If you get this, it doesn't contain data. It just tells you to please disassociate from the current SurviveObject and delete your poserdata.
+ POSERDATA_LIGHT, // Single lighting event.
+ POSERDATA_FULL_SCENE, // Full, stratified X, Y sweep data for both lighthouses.
+ POSERDATA_DISASSOCIATE, // If you get this, it doesn't contain data. It just tells you to please disassociate from
+ // the current SurviveObject and delete your poserdata.
+ POSERDATA_SYNC, // Sync pulse.
} PoserType;
typedef void (*poser_raw_pose_func)(SurviveObject *so, uint8_t lighthouse, SurvivePose *pose, void *user);
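
The new POSERDATA_SYNC value lets a poser see sync pulses in addition to individual sweep hits. A minimal sketch of a poser consuming it, following the pattern poser_daveortho.c adopts below; the state struct and poser name are hypothetical:

```
#include <stdlib.h>
#include <libsurvive/survive.h>
#include <libsurvive/poser.h>

/* Sketch: react to the new POSERDATA_SYNC event. */
typedef struct {
	int nextaxis; /* which sweep axis the following POSERDATA_LIGHT events belong to */
} ExamplePoserState;

int PoserExampleSync(SurviveObject *so, PoserData *pd) {
	ExamplePoserState *st = so->PoserData;
	if (!st)
		so->PoserData = st = calloc(1, sizeof(ExamplePoserState));

	switch (pd->pt) {
	case POSERDATA_SYNC: {
		PoserDataLight *l = (PoserDataLight *)pd;
		st->nextaxis = l->acode & 1; /* low bit of the acode selects the X vs. Y sweep */
		/* A sync is also a natural point to solve using the angles accumulated
		 * for lighthouse l->lh since the previous sync. */
		break;
	}
	default:
		break;
	}
	return 0;
}
```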
diff --git a/src/poser.c b/src/poser.c
index e52edd2..1ed63da 100644
--- a/src/poser.c
+++ b/src/poser.c
@@ -31,7 +31,9 @@ void PoserData_lighthouse_pose_func(PoserData *poser_data, SurviveObject *so, ui
// We might want to go a step further and affix the first lighthouse in a given pose that preserves up so that
// it doesn't matter where on that surface the object is.
- SurvivePose object2arb = *object_pose;
+ SurvivePose object2arb = {.Rot = {1.}};
+ if (object_pose)
+ object2arb = *object_pose;
SurvivePose lighthouse2arb = *lighthouse_pose;
// Start by just moving from whatever arbitrary space into object space.
diff --git a/src/poser_daveortho.c b/src/poser_daveortho.c
index 2bf57c6..769ce81 100644
--- a/src/poser_daveortho.c
+++ b/src/poser_daveortho.c
@@ -8,7 +8,7 @@
#include <dclapack.h>
#include <linmath.h>
-static int LH_ID;
+// Dave talks about this poser here: https://www.youtube.com/watch?v=nSbEltdH9vM&feature=youtu.be&t=2h29m47s
void OrthoSolve(
FLT T[4][4], // OUTPUT: 4x4 transformation matrix
@@ -20,7 +20,9 @@ void OrthoSolve(
typedef struct
{
- int something;
+ int nextaxis;
+ float lengths[SENSORS_PER_OBJECT][NUM_LIGHTHOUSES][2];
+ float angles[SENSORS_PER_OBJECT][NUM_LIGHTHOUSES][2];
//Stuff
} DummyData;
@@ -30,7 +32,8 @@ int PoserDaveOrtho( SurviveObject * so, PoserData * pd )
SurviveContext * ctx = so->ctx;
DummyData * dd = so->PoserData;
- if( !dd ) so->PoserData = dd = malloc( sizeof( DummyData ) );
+ if (!dd)
+ so->PoserData = dd = calloc(sizeof(DummyData), 1);
switch( pt )
{
@@ -43,13 +46,94 @@ int PoserDaveOrtho( SurviveObject * so, PoserData * pd )
case POSERDATA_LIGHT:
{
PoserDataLight * l = (PoserDataLight*)pd;
- //printf( "LIG:%s %d @ %f rad, %f s (AC %d) (TC %d)\n", so->codename, l->sensor_id, l->angle, l->length, l->acode, l->timecode );
+ if (l->length > dd->lengths[l->sensor_id][l->lh][dd->nextaxis]) {
+ dd->lengths[l->sensor_id][l->lh][dd->nextaxis] = l->length;
+ dd->angles[l->sensor_id][l->lh][dd->nextaxis] = l->angle;
+ }
+ break;
+ }
+ case POSERDATA_SYNC: {
+ PoserDataLight *l = (PoserDataLight *)pd;
+ int lhid = l->lh;
+
+ dd->nextaxis = l->acode & 1;
+
+ if (dd->nextaxis == 0) {
+ int i;
+ int max_hits = 0;
+ FLT S_in[2][SENSORS_PER_OBJECT];
+ FLT X_in[3][SENSORS_PER_OBJECT];
+ for (i = 0; i < SENSORS_PER_OBJECT; i++) {
+ // Load all our valid points into something the LHFinder can use.
+ if (dd->lengths[i][lhid][0] > 0 && dd->lengths[i][lhid][1] > 0) {
+ S_in[0][max_hits] = dd->angles[i][lhid][0];
+ S_in[1][max_hits] = dd->angles[i][lhid][1];
+ dd->lengths[i][lhid][0] = -1;
+ dd->lengths[i][lhid][1] = -1;
+ X_in[0][max_hits] = so->sensor_locations[i * 3 + 0];
+ X_in[1][max_hits] = so->sensor_locations[i * 3 + 1];
+ X_in[2][max_hits] = so->sensor_locations[i * 3 + 2];
+ max_hits++;
+ }
+ }
+ if (max_hits > 2) {
+ // if( lhid == 0 ) { printf( "\033[H\033[2J" ); }
+ // printf( "%d\n", max_hits );
+ FLT tOut[4][4];
+ FLT S_out[2][SENSORS_PER_OBJECT];
+ OrthoSolve(tOut, S_out, S_in, X_in, max_hits);
+
+ // Now, we need to solve where we are as a function of where
+ // the lighthouses are.
+
+ SurvivePose objpose;
+ FLT MT[4][4];
+
+ objpose.Pos[0] = tOut[0][3];
+ objpose.Pos[1] = tOut[1][3];
+ objpose.Pos[2] = tOut[2][3];
+
+ // matrix44transpose( MT, &tOut[0][0] );
+ matrix44copy(&MT[0][0], &tOut[0][0]);
+
+ quatfrommatrix(objpose.Rot, &MT[0][0]);
+
+ // printf( "QUAT: %f %f %f %f = %f\n", quat[0], quat[1], quat[2], quat[3], quatmagnitude(quat) );
+ // quat[2] -= 0.005; //fixes up lh0 in test data set.
+ quatnormalize(objpose.Rot, objpose.Rot);
+
+ SurvivePose poseout;
+ InvertPose(poseout.Pos, objpose.Pos);
+ SurvivePose pose2lh = objpose;
+ // SurvivePose pose2lh = poseout;
+
+ SurvivePose lh2world = so->ctx->bsd[lhid].Pose;
+ SurvivePose world2obj, obj2world;
+ ApplyPoseToPose(world2obj.Pos, lh2world.Pos, pose2lh.Pos);
+ InvertPose(obj2world.Pos, world2obj.Pos);
+ PoserData_poser_raw_pose_func(pd, so, lhid, &obj2world);
+
+ if (0) {
+ printf("INQUAT: %f %f %f %f = %f [%f %f %f]\n", objpose.Rot[0], objpose.Rot[1], objpose.Rot[2],
+ objpose.Rot[3], quatmagnitude(objpose.Rot), objpose.Pos[0], objpose.Pos[1], objpose.Pos[2]);
+ printf("OUQUAT: %f %f %f %f = %f [%f %f %f]\n", poseout.Rot[0], poseout.Rot[1], poseout.Rot[2],
+ poseout.Rot[3], quatmagnitude(poseout.Rot), poseout.Pos[0], poseout.Pos[1], poseout.Pos[2]);
+ }
+ }
+ }
+
+ // if( so->codename[0] == 'H' ) printf( "LIG:%s %d @ %d rad, %f s (AX %d) (TC %d)\n", so->codename,
+ // l->sensor_id, l->lh, l->length, dd->nextaxis, l->timecode );
+
break;
}
case POSERDATA_FULL_SCENE:
{
PoserDataFullScene * fs = (PoserDataFullScene*)pd;
+ int LH_ID;
+ printf("PDFS\n");
+ SurvivePose alignLh0ToXAxis = {};
for( LH_ID = 0; LH_ID < 2; LH_ID++ )
{
int i;
@@ -76,38 +160,54 @@ int PoserDaveOrtho( SurviveObject * so, PoserData * pd )
//Now, we need to solve where we are as a function of where
//the lighthouses are.
- FLT quat[4];
- FLT posoff[3] = { tOut[0][3], tOut[1][3], tOut[2][3] };
+ // FLT quat[4];
+ // FLT posoff[3] = { tOut[0][3], tOut[1][3], tOut[2][3] };
+ SurvivePose objpose;
FLT MT[4][4];
+ objpose.Pos[0] = tOut[0][3];
+ objpose.Pos[1] = tOut[1][3];
+ objpose.Pos[2] = tOut[2][3];
+
//matrix44transpose( MT, &tOut[0][0] );
matrix44copy( &MT[0][0], &tOut[0][0] );
- quatfrommatrix( quat, &MT[0][0] );
-
+ quatfrommatrix(objpose.Rot, &MT[0][0]);
//printf( "QUAT: %f %f %f %f = %f\n", quat[0], quat[1], quat[2], quat[3], quatmagnitude(quat) );
//quat[2] -= 0.005; //fixes up lh0 in test data set.
- quatnormalize( quat, quat );
- printf( "QUAT: %f %f %f %f = %f [%f %f %f]\n", quat[0], quat[1], quat[2], quat[3], quatmagnitude(quat), posoff[0], posoff[1], posoff[2] );
-
- for( i = 0; i < max_hits;i++ )
- {
- FLT pt[3] = { X_in[0][i], X_in[1][i], X_in[2][i] };
- quatrotatevector( pt, quat, pt );
- add3d( pt, pt, posoff );
- printf( "OUT %f %f %f ANGLE %f %f AOUT %f %f\n",
- pt[0], pt[1], pt[2],
- S_in[0][i], S_in[1][i], atan2( pt[0], pt[1] ), atan2( pt[2], pt[1] ) );
+ quatnormalize(objpose.Rot, objpose.Rot);
+ printf("QUAT: %f %f %f %f = %f [%f %f %f]\n", objpose.Rot[0], objpose.Rot[1], objpose.Rot[2],
+ objpose.Rot[3], quatmagnitude(objpose.Rot), objpose.Pos[0], objpose.Pos[1], objpose.Pos[2]);
+
+ if (0)
+ ;
+ for (i = 0; i < max_hits; i++) {
+ FLT pt[3] = {X_in[0][i], X_in[1][i], X_in[2][i]};
+ quatrotatevector(pt, objpose.Rot, pt);
+ add3d(pt, pt, objpose.Pos);
+ printf("OUT %f %f %f ANGLE %f %f AOUT %f %f\n", pt[0], pt[1], pt[2], S_in[0][i], S_in[1][i],
+ atan2(pt[0], pt[1]), atan2(pt[2], pt[1]));
}
- so->FromLHPose[LH_ID].Pos[0] = posoff[0];
- so->FromLHPose[LH_ID].Pos[1] = posoff[1];
- so->FromLHPose[LH_ID].Pos[2] = posoff[2];
- so->FromLHPose[LH_ID].Rot[0] = quat[0];
- so->FromLHPose[LH_ID].Rot[1] = quat[1];
- so->FromLHPose[LH_ID].Rot[2] = quat[2];
- so->FromLHPose[LH_ID].Rot[3] = quat[3];
+ /*
+ so->FromLHPose[LH_ID].Pos[0] = objpose.Pos[0];
+ so->FromLHPose[LH_ID].Pos[1] = objpose.Pos[1];
+ so->FromLHPose[LH_ID].Pos[2] = objpose.Pos[2];
+ so->FromLHPose[LH_ID].Rot[0] = objpose.Rot[0];
+ so->FromLHPose[LH_ID].Rot[1] = objpose.Rot[1];
+ so->FromLHPose[LH_ID].Rot[2] = objpose.Rot[2];
+ so->FromLHPose[LH_ID].Rot[3] = objpose.Rot[3];
+ */
+
+ SurvivePose poseout;
+ InvertPose(poseout.Pos, objpose.Pos);
+ printf("INQUAT: %f %f %f %f = %f [%f %f %f]\n", objpose.Rot[0], objpose.Rot[1], objpose.Rot[2],
+ objpose.Rot[3], quatmagnitude(objpose.Rot), objpose.Pos[0], objpose.Pos[1], objpose.Pos[2]);
+
+ PoserData_lighthouse_pose_func(&fs->hdr, so, LH_ID, &alignLh0ToXAxis, &poseout, 0);
+ printf("OUQUAT: %f %f %f %f = %f [%f %f %f]\n", poseout.Rot[0], poseout.Rot[1], poseout.Rot[2],
+ poseout.Rot[3], quatmagnitude(poseout.Rot), poseout.Pos[0], poseout.Pos[1], poseout.Pos[2]);
}
break;
@@ -146,14 +246,12 @@ REGISTER_LINKTIME( PoserDaveOrtho );
oy=(c)*(x)-(a)*(z); \
oz=(a)*(y)-(b)*(x); }
-void OrthoSolve(
- FLT T[4][4], // OUTPUT: 4x4 transformation matrix
- FLT S_out[2][SENSORS_PER_OBJECT], // OUTPUT: array of screenspace points
- FLT S_in[2][SENSORS_PER_OBJECT], // INPUT: array of screenspace points
- FLT X_in[3][SENSORS_PER_OBJECT], // INPUT: array of offsets
- int nPoints)
-{
- int i,j,k;
+void OrthoSolve(FLT T[4][4], // OUTPUT: 4x4 transformation matrix
+ FLT S_out[2][SENSORS_PER_OBJECT], // OUTPUT: array of screenspace points (May not be populated!!!)
+ FLT S_in[2][SENSORS_PER_OBJECT], // INPUT: array of screenspace points
+ FLT X_in[3][SENSORS_PER_OBJECT], // INPUT: array of offsets
+ int nPoints) {
+ int i,j,k;
FLT R[3][3]; // OUTPUT: 3x3 rotation matrix
FLT trans[3]; // INPUT: x,y,z translation vector
@@ -266,11 +364,11 @@ printf("rhat %f %f (len %f)\n", rhat[0][0], rhat[1][0], rhat_len);
*/
// FLT ydist1 = 1.0 / uhat_len; //0.25*PI / uhat_len;
// FLT ydist2 = 1.0 / rhat_len; //0.25*PI / rhat_len;
- FLT ydist = 1.0 / urhat_len;
- printf("ydist %f\n", ydist);
-// printf("ydist1 %f ydist2 %f ydist %f\n", ydist1, ydist2, ydist);
+ FLT ydist = 1.0 / urhat_len; // TRICKY XXX Dave operates with "y forward" for some reason...
+ // printf("ydist %f\n", ydist);
+ // printf("ydist1 %f ydist2 %f ydist %f\n", ydist1, ydist2, ydist);
- //--------------------
+ //--------------------
// Rescale the axies to be of the proper length
//--------------------
FLT x[3][1] = { {M[0][0]*ydist}, {0.0}, {M[1][0]*ydist} };
@@ -308,9 +406,14 @@ printf("rhat %f %f (len %f)\n", rhat[0][0], rhat[1][0], rhat_len);
// printf("err %f hand %f\n", err, hand);
// If we are the best right-handed frame so far
- //if (hand > 0 && err < bestErr) { x[1][0]=x_y; y[1][0]=y_y; z[1][0]=z_y; bestErr=err; }
- if ( i == 0 && j == 1 && k == 0) { x[1][0]=x_y; y[1][0]=y_y; z[1][0]=z_y; bestErr=err; }
- z_y = -z_y;
+ if (hand > 0 && err < bestErr) {
+ x[1][0] = x_y;
+ y[1][0] = y_y;
+ z[1][0] = z_y;
+ bestErr = err;
+ }
+ // if ( i == 0 && j == 0 && k == 0) { x[1][0]=x_y; y[1][0]=y_y; z[1][0]=z_y; bestErr=err; }
+ z_y = -z_y;
}
y_y = -y_y;
}
@@ -437,60 +540,104 @@ PRINT(ab,2,1);
FLT transdist = sqrt( trans[0]*trans[0] + trans[1]*trans[1] + trans[2]*trans[2] );
//-------------------
- // Pack into the 4x4 transformation matrix
- //-------------------
+// Pack into the 4x4 transformation matrix, and put into correct "world"-space. I.e. where is the object, relative to
+// the lighthouse.
+//-------------------
+
+#if 0
+ //Don't do any transformation to the correct space.
T[0][0]=R[0][0]; T[0][1]=R[0][1]; T[0][2]=R[0][2]; T[0][3]=trans[0];
T[1][0]=R[1][0]; T[1][1]=R[1][1]; T[1][2]=R[1][2]; T[1][3]=trans[1];
T[2][0]=R[2][0]; T[2][1]=R[2][1]; T[2][2]=R[2][2]; T[2][3]=trans[2];
T[3][0]=0.0; T[3][1]=0.0; T[3][2]=0.0; T[3][3]=1.0;
+#else
+
+ /* WRONG: This transposes.
+ T[0][0]=-R[0][0]; T[0][1]=-R[0][1]; T[0][2]=-R[0][2]; T[0][3]=-trans[0];
+ T[2][0]= R[1][0]; T[2][1]= R[1][1]; T[2][2]= R[1][2]; T[2][3]=-trans[1];
+ T[1][0]= R[2][0]; T[1][1]= R[2][1]; T[1][2]= R[2][2]; T[1][3]=trans[2];
+ T[3][0]=0.0; T[3][1]=0.0; T[3][2]=0.0; T[3][3]=1.0;
+ */
+
+ // XXX XXX XXX FOR ANYONE LOOKING TO CONTROL THE COORDINATE FRAME, THESE ARE THE LINES!!!
+
+ /** This is probably all wonky and backwards but "looks" right except with multiple transforms, perhaps it's
+ transposed.
+ T[0][0]=-R[0][0]; T[0][1]= R[0][1]; T[0][2]= R[0][2]; T[0][3]=-trans[0];
+ T[2][0]=-R[1][0]; T[2][1]=-R[1][1]; T[2][2]= R[1][2]; T[2][3]=-trans[1];//This Z axis has problems with getting
+ magnitudes. We can re=make it from the other two axes.
+ T[1][0]=-R[2][0]; T[1][1]= R[2][1]; T[1][2]=-R[2][2]; T[1][3]= trans[2];
+ T[3][0]=0.0; T[3][1]=0.0; T[3][2]=0.0; T[3][3]=1.0;
+ */
+ T[0][0] = -R[0][0];
+ T[0][1] = -R[0][1];
+ T[0][2] = -R[0][2];
+ T[0][3] = -trans[0];
+ T[1][0] = R[2][0];
+ T[1][1] = R[2][1];
+ T[1][2] = R[2][2];
+ T[2][3] = -trans[1];
+ T[2][0] = R[1][0];
+ T[2][1] = R[1][1];
+ T[2][2] = R[1][2];
+ T[1][3] = trans[2];
+ T[3][0] = 0.0;
+ T[3][1] = 0.0;
+ T[3][2] = 0.0;
+ T[3][3] = 1.0;
-
- FLT T2[4][4];
+#endif
//-------------------
// Orthogonalize the matrix
//-------------------
- FLT temp[4][4];
- FLT quat[4], quatNorm[4];
- FLT euler[3];
-
+ // FLT temp[4][4];
+ // FLT quat[4], quatNorm[4];
+ // FLT euler[3];
//-------------------
// Orthogonalize the matrix
//-------------------
- PRINT_MAT(T,4,4);
-#if 1
+ cross3d(&T[2][0], &T[0][0], &T[1][0]); // Generate Z from X/Y because Z is kinda rekt.
+
+// cross3d( &T[1][0], &T[2][0], &T[0][0] ); //Renormalize rotations...
+// cross3d( &T[0][0], &T[1][0], &T[2][0] ); //Renormalize rotations...
+
+// XXX XXX TODO
+// We could further normalize things...
+
+#if 0
// matrix44transpose(T2, T); //Transpose so we are
matrix44copy((FLT*)T2,(FLT*)T);
- cross3d( &T2[1][0], &T2[0][0], &T2[2][0] );
- cross3d( &T2[2][0], &T2[1][0], &T2[0][0] ); //Replace axes in-place.
- matrix44copy((FLT*)T,(FLT*)T2);
+ cross3d( &T2[1][0], &T2[0][0], &T2[2][0] ); //Replace axes in-place.
+ cross3d( &T2[2][0], &T2[1][0], &T2[0][0] );
+// matrix44copy((FLT*)T,(FLT*)T2);
// matrix44transpose(T, T2);
-
+ normalize3d( &T[0][0], &T2[0][0] );
+ normalize3d( &T[1][0], &T2[1][0] );
+ normalize3d( &T[2][0], &T2[2][0] );
+#else
+// normalize3d( &T[0][0], &T[0][0] );
+// normalize3d( &T[1][0], &T[1][0] );
+// normalize3d( &T[2][0], &T[2][0] );
#endif
- normalize3d( &T[0][0], &T[0][0] );
- normalize3d( &T[1][0], &T[1][0] );
- normalize3d( &T[2][0], &T[2][0] );
- //Change handedness
+// printf( " In Axis on headset \n" );
+// printf( " x y z\n" );
+// PRINT_MAT(T,4,4);
- T[1][0]*=-1;
- T[1][1]*=-1;
- T[1][2]*=-1;
+// PRINT_MAT(T,4,4);
-/*
- //Check Orthogonality. Yep. It's orthogonal.
- FLT tmp[3];
- cross3d( tmp, &T[0][0], &T[1][0] );
- printf( "M3: %f\n", magnitude3d( tmp ) );
- cross3d( tmp, &T[2][0], &T[1][0] );
- printf( "M3: %f\n", magnitude3d( tmp ) );
- cross3d( tmp, &T[2][0], &T[0][0] );
- printf( "M3: %f\n", magnitude3d( tmp ) );
-*/
+// PRINT_MAT(T,4,4);
+
+// Fix handedness for rotations...
+// T[1][0]*=-1;
+// T[1][1]*=-1;
+// T[1][2]*=-1;
// PRINT_MAT(T,4,4);
+#if 0
#if 1
@@ -554,6 +701,6 @@ PRINT(ab,2,1);
//S_out[1][i] = Tz;
// printf("point %i Txyz %f %f %f in %f %f out %f %f morph %f %f\n", i, Tx,Ty,Tz, S_in[0][i], S_in[1][i], S_out[0][i], S_out[1][i], S_morph[0][i], S_morph[1][i]);
}
-
+#endif
}
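
For orientation (sketch only, not part of the patch): the core of the OrthoSolve cleanup above is rebuilding the unreliable Z row of the rotation from the other two rows. Standalone, that re-orthogonalization step looks roughly like:

```
#include <linmath.h>

/* Sketch: re-orthogonalize the 3x3 rotation stored in the upper-left of T,
 * as the patched OrthoSolve does: regenerate Z = X x Y.  The per-row
 * renormalization is left commented out in the commit; it is shown here
 * only for completeness. */
static void reorthogonalize(FLT T[4][4]) {
	cross3d(&T[2][0], &T[0][0], &T[1][0]); /* regenerate Z from X and Y */
	normalize3d(&T[0][0], &T[0][0]);
	normalize3d(&T[1][0], &T[1][0]);
	normalize3d(&T[2][0], &T[2][0]);
}
```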
diff --git a/src/poser_sba.c b/src/poser_sba.c
index fa71dd2..b5962f8 100644
--- a/src/poser_sba.c
+++ b/src/poser_sba.c
@@ -24,6 +24,12 @@ typedef struct {
SurvivePose camera_params[2];
} sba_context;
+typedef struct {
+ sba_context hdr;
+ int acode;
+ int lh;
+} sba_context_single_sweep;
+
void metric_function(int j, int i, double *aj, double *xij, void *adata) {
sba_context *ctx = (sba_context *)(adata);
SurviveObject *so = ctx->so;
@@ -56,6 +62,25 @@ size_t construct_input(const SurviveObject *so, PoserDataFullScene *pdfs, char *
return measCount;
}
+size_t construct_input_from_scene_single_sweep(const SurviveObject *so, PoserDataLight *pdl,
+ SurviveSensorActivations *scene, char *vmask, double *meas, int acode,
+ int lh) {
+ size_t rtn = 0;
+
+ for (size_t sensor = 0; sensor < so->sensor_ct; sensor++) {
+ const uint32_t *data_timecode = scene->timecode[sensor][lh];
+ if (pdl->timecode - data_timecode[acode & 1] <= SurviveSensorActivations_default_tolerance) {
+ double *a = scene->angles[sensor][lh];
+ vmask[sensor * NUM_LIGHTHOUSES + lh] = 1;
+ meas[rtn++] = a[acode & 0x1];
+ } else {
+ vmask[sensor * NUM_LIGHTHOUSES + lh] = 0;
+ }
+ }
+
+ return rtn;
+}
+
size_t construct_input_from_scene(const SurviveObject *so, PoserDataLight *pdl, SurviveSensorActivations *scene,
char *vmask, double *meas) {
size_t rtn = 0;
@@ -95,6 +120,30 @@ void sba_set_position(SurviveObject *so, uint8_t lighthouse, SurvivePose *new_po
}
void *GetDriver(const char *name);
+void str_metric_function_single_sweep(int j, int i, double *bi, double *xij, void *adata) {
+ SurvivePose obj = *(SurvivePose *)bi;
+ int sensor_idx = j >> 1;
+
+ sba_context_single_sweep *ctx = (sba_context_single_sweep *)(adata);
+ SurviveObject *so = ctx->hdr.so;
+ int lh = ctx->lh;
+ int acode = ctx->acode;
+
+ assert(lh < 2);
+ assert(sensor_idx < so->sensor_ct);
+
+ quatnormalize(obj.Rot, obj.Rot);
+ FLT xyz[3];
+ ApplyPoseToPoint(xyz, obj.Pos, &so->sensor_locations[sensor_idx * 3]);
+
+ // std::cerr << "Processing " << sensor_idx << ", " << lh << std::endl;
+ SurvivePose *camera = &so->ctx->bsd[lh].Pose;
+
+ FLT out[2];
+ survive_reproject_from_pose_with_config(so->ctx, &ctx->hdr.calibration_config, lh, camera, xyz, out);
+ *xij = out[acode];
+}
+
void str_metric_function(int j, int i, double *bi, double *xij, void *adata) {
SurvivePose obj = *(SurvivePose *)bi;
int sensor_idx = j >> 1;
@@ -115,14 +164,108 @@ void str_metric_function(int j, int i, double *bi, double *xij, void *adata) {
survive_reproject_from_pose_with_config(so->ctx, &ctx->calibration_config, lh, camera, xyz, xij);
}
-// Optimizes for object assuming given LH
+static double run_sba_find_3d_structure_single_sweep(survive_calibration_config options, PoserDataLight *pdl,
+ SurviveObject *so, SurviveSensorActivations *scene, int acode,
+ int lh, int max_iterations /* = 50*/,
+ double max_reproj_error /* = 0.005*/) {
+ double *covx = 0;
+
+ char *vmask = alloca(sizeof(char) * so->sensor_ct);
+ double *meas = alloca(sizeof(double) * so->sensor_ct);
+ size_t meas_size = construct_input_from_scene_single_sweep(so, pdl, scene, vmask, meas, acode, lh);
+
+ static int failure_count = 500;
+ if (so->ctx->bsd[0].PositionSet == 0 || so->ctx->bsd[1].PositionSet == 0 || meas_size < 8) {
+ if (so->ctx->bsd[0].PositionSet && so->ctx->bsd[1].PositionSet && failure_count++ == 500) {
+ SurviveContext *ctx = so->ctx;
+ SV_INFO("Can't solve for position with just %lu measurements", meas_size);
+ failure_count = 0;
+ }
+ return -1;
+ }
+ failure_count = 0;
+
+ SurvivePose soLocation = so->OutPose;
+ bool currentPositionValid = quatmagnitude(&soLocation.Rot[0]);
+
+ {
+ const char *subposer = config_read_str(so->ctx->global_config_values, "SBASeedPoser", "PoserEPNP");
+ PoserCB driver = (PoserCB)GetDriver(subposer);
+ SurviveContext *ctx = so->ctx;
+ if (driver) {
+ PoserData hdr = pdl->hdr;
+ memset(&pdl->hdr, 0, sizeof(pdl->hdr)); // Clear callback functions
+ pdl->hdr.pt = hdr.pt;
+ pdl->hdr.rawposeproc = sba_set_position;
+
+ sba_set_position_t locations = {};
+ pdl->hdr.userdata = &locations;
+ driver(so, &pdl->hdr);
+ pdl->hdr = hdr;
+
+ if (locations.hasInfo == false) {
+
+ return -1;
+ } else if (locations.hasInfo) {
+ soLocation = locations.poses;
+ }
+ } else {
+ SV_INFO("Not using a seed poser for SBA; results will likely be way off");
+ }
+ }
+
+ double opts[SBA_OPTSSZ] = {};
+ double info[SBA_INFOSZ] = {};
+
+ sba_context_single_sweep ctx = {.hdr = {options, &pdl->hdr, so}, .acode = acode, .lh = lh};
+
+ opts[0] = SBA_INIT_MU;
+ opts[1] = SBA_STOP_THRESH;
+ opts[2] = SBA_STOP_THRESH;
+ opts[3] = SBA_STOP_THRESH;
+ opts[3] = SBA_STOP_THRESH; // max_reproj_error * meas.size();
+ opts[4] = 0.0;
+
+ int status = sba_str_levmar(1, // Number of 3d points
+ 0, // Number of 3d points to fix in spot
+ so->sensor_ct, vmask,
+ soLocation.Pos, // Reads as the full pose though
+ 7, // pnp -- SurvivePose
+ meas, // x* -- measurement data
+ 0, // cov data
+ 1, // mnp -- 2 points per image
+ str_metric_function_single_sweep,
+ 0, // jacobia of metric_func
+ &ctx, // user data
+ max_iterations, // Max iterations
+ 0, // verbosity
+ opts, // options
+ info); // info
+
+ if (status > 0) {
+ quatnormalize(soLocation.Rot, soLocation.Rot);
+ PoserData_poser_raw_pose_func(&pdl->hdr, so, 1, &soLocation);
+
+ SurviveContext *ctx = so->ctx;
+ // Docs say info[0] should be divided by meas; I don't buy it really...
+ static int cnt = 0;
+ if (cnt++ > 1000 || meas_size < 8) {
+ SV_INFO("%f original reproj error for %lu meas", (info[0] / meas_size * 2), meas_size);
+ SV_INFO("%f cur reproj error", (info[1] / meas_size * 2));
+ cnt = 0;
+ }
+ }
+
+ return info[1] / meas_size * 2;
+}
+
static double run_sba_find_3d_structure(survive_calibration_config options, PoserDataLight *pdl, SurviveObject *so,
SurviveSensorActivations *scene, int max_iterations /* = 50*/,
double max_reproj_error /* = 0.005*/) {
double *covx = 0;
- char *vmask = malloc(sizeof(char) * so->sensor_ct * NUM_LIGHTHOUSES);
- double *meas = malloc(sizeof(double) * 2 * so->sensor_ct * NUM_LIGHTHOUSES);
+ char *vmask = alloca(sizeof(char) * so->sensor_ct * NUM_LIGHTHOUSES);
+ double *meas = alloca(sizeof(double) * 2 * so->sensor_ct * NUM_LIGHTHOUSES);
size_t meas_size = construct_input_from_scene(so, pdl, scene, vmask, meas);
static int failure_count = 500;
@@ -132,8 +275,6 @@ static double run_sba_find_3d_structure(survive_calibration_config options, Pose
SV_INFO("Can't solve for position with just %lu measurements", meas_size);
failure_count = 0;
}
- free(vmask);
- free(meas);
return -1;
}
failure_count = 0;
@@ -157,8 +298,6 @@ static double run_sba_find_3d_structure(survive_calibration_config options, Pose
pdl->hdr = hdr;
if (locations.hasInfo == false) {
- free(vmask);
- free(meas);
return -1;
} else if (locations.hasInfo) {
@@ -213,9 +352,6 @@ static double run_sba_find_3d_structure(survive_calibration_config options, Pose
}
}
- free(vmask);
- free(meas);
-
return info[1] / meas_size * 2;
}
@@ -224,8 +360,8 @@ static double run_sba(survive_calibration_config options, PoserDataFullScene *pd
int max_iterations /* = 50*/, double max_reproj_error /* = 0.005*/) {
double *covx = 0;
- char *vmask = malloc(sizeof(char) * so->sensor_ct * NUM_LIGHTHOUSES);
- double *meas = malloc(sizeof(double) * 2 * so->sensor_ct * NUM_LIGHTHOUSES);
+ char *vmask = alloca(sizeof(char) * so->sensor_ct * NUM_LIGHTHOUSES);
+ double *meas = alloca(sizeof(double) * 2 * so->sensor_ct * NUM_LIGHTHOUSES);
size_t meas_size = construct_input(so, pdfs, vmask, meas);
sba_context sbactx = {options, &pdfs->hdr, so, .camera_params = {so->ctx->bsd[0].Pose, so->ctx->bsd[1].Pose},
@@ -290,9 +426,6 @@ static double run_sba(survive_calibration_config options, PoserDataFullScene *pd
// Docs say info[0] should be divided by meas; I don't buy it really...
// std::cerr << info[0] / meas.size() * 2 << " original reproj error" << std::endl;
- free(vmask);
- free(meas);
-
{
SurviveContext *ctx = so->ctx;
// Docs say info[0] should be divided by meas; I don't buy it really...
@@ -307,11 +440,18 @@ int PoserSBA(SurviveObject *so, PoserData *pd) {
switch (pd->pt) {
case POSERDATA_LIGHT: {
SurviveSensorActivations *scene = &so->activations;
-
PoserDataLight *lightData = (PoserDataLight *)pd;
-
+ /*
+ static int last_acode = -1;
+ static int last_lh = -1;
+ if(last_lh != lightData->lh || last_acode != lightData->acode) {
+ */
survive_calibration_config config = *survive_calibration_default_config();
FLT error = run_sba_find_3d_structure(config, lightData, so, scene, 50, .5);
+ /*}
+ last_lh = lightData->lh;
+ last_acode = lightData->acode;
+ */
return 0;
}
case POSERDATA_FULL_SCENE: {
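
For context on the single-sweep path added above (illustration only, not part of the patch): SBA consumes a per-sensor visibility mask plus a flat measurement vector. A rough sketch of that packing convention, with the angle source and the staleness check replaced by simplified stand-ins:

```
#include <string.h>
#include <libsurvive/survive.h>

/* Sketch of the vmask/meas layout fed to sba_str_levmar: vmask is indexed
 * [sensor * NUM_LIGHTHOUSES + lh] and meas is packed densely with one sweep
 * angle per visible sensor.  The angles array and the non-zero test below are
 * stand-ins for the real timecode-tolerance check. */
size_t example_pack_single_sweep(const SurviveObject *so, int lh, int axis,
								 double angles[][NUM_LIGHTHOUSES][2], char *vmask, double *meas) {
	size_t n = 0;
	memset(vmask, 0, so->sensor_ct * NUM_LIGHTHOUSES);
	for (size_t sensor = 0; sensor < so->sensor_ct; sensor++) {
		double a = angles[sensor][lh][axis];
		if (a != 0.0) {
			vmask[sensor * NUM_LIGHTHOUSES + lh] = 1;
			meas[n++] = a;
		}
	}
	return n;
}
```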
diff --git a/src/survive_cal.c b/src/survive_cal.c
index cb242ae..3367aa0 100755
--- a/src/survive_cal.c
+++ b/src/survive_cal.c
@@ -80,7 +80,7 @@ void ootx_packet_clbk_d(ootx_decoder_context *ct, ootx_packet* packet)
lighthouses_completed++;
if (lighthouses_completed >= NUM_LIGHTHOUSES) {
- config_save(ctx, "config.json");
+ config_save(ctx, survive_configs(ctx, "configfile", SC_GET, "config.json"));
}
}
@@ -138,9 +138,9 @@ void survive_cal_install( struct SurviveContext * ctx )
// setting the required trackers for calibration to be permissive to make it easier for a newbie to start--
// basically, libsurvive will detect whatever they have plugged in and start using that.
-// const char * RequiredTrackersForCal = config_read_str(ctx->global_config_values, "RequiredTrackersForCal", "HMD,WM0,WM1");
- const char * RequiredTrackersForCal = config_read_str(ctx->global_config_values, "RequiredTrackersForCal", "");
- const uint32_t AllowAllTrackersForCal = config_read_uint32( ctx->global_config_values, "AllowAllTrackersForCal", 1 );
+// const char * RequiredTrackersForCal = config_read_str(ctx->global_config_values, "RequiredTrackersForCal", "HMD,WM0,WM1");
+ const char *RequiredTrackersForCal = survive_configs(ctx, "requiredtrackersforcal", SC_SETCONFIG, "");
+ const uint32_t AllowAllTrackersForCal = survive_configi(ctx, "allowalltrackersforcal", SC_SETCONFIG, 1);
size_t requiredTrackersFound = 0;
for (int j=0; j < ctx->objs_ct; j++)
@@ -334,22 +334,21 @@ void survive_cal_angle( struct SurviveObject * so, int sensor_id, int acode, uin
int i, j, k;
cd->found_common = 1;
for( i = 0; i < cd->numPoseObjects; i++ )
- //for( i = 0; i < MAX_SENSORS_TO_CAL/SENSORS_PER_OBJECT; i++ )
- for( j = 0; j < ctx->activeLighthouses; j++ )
{
- int sensors_visible = 0;
- for( k = 0; k < SENSORS_PER_OBJECT; k++ )
- {
- if( cd->all_counts[k+i*SENSORS_PER_OBJECT][j][0] > NEEDED_COMMON_POINTS &&
- cd->all_counts[k+i*SENSORS_PER_OBJECT][j][1] > NEEDED_COMMON_POINTS )
- sensors_visible++;
- }
- if( sensors_visible < MIN_SENSORS_VISIBLE_PER_LH_FOR_CAL )
- {
- //printf( "Dev %d, LH %d not enough visible points found.\n", i, j );
- reset_calibration( cd );
- cd->found_common = 0;
- return;
+ // for( i = 0; i < MAX_SENSORS_TO_CAL/SENSORS_PER_OBJECT; i++ )
+ for (j = 0; j < ctx->activeLighthouses; j++) {
+ int sensors_visible = 0;
+ for (k = 0; k < SENSORS_PER_OBJECT; k++) {
+ if (cd->all_counts[k + i * SENSORS_PER_OBJECT][j][0] > NEEDED_COMMON_POINTS &&
+ cd->all_counts[k + i * SENSORS_PER_OBJECT][j][1] > NEEDED_COMMON_POINTS)
+ sensors_visible++;
+ }
+ if (sensors_visible < MIN_SENSORS_VISIBLE_PER_LH_FOR_CAL) {
+ // printf( "Dev %d, LH %d not enough visible points found.\n", i, j );
+ reset_calibration(cd);
+ cd->found_common = 0;
+ return;
+ }
}
}
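
The calibration code now goes through the survive_configs/survive_configi accessors rather than config_read_* directly. A brief sketch of that usage pattern as it appears at these call sites; the flag semantics (SC_GET reads, SC_SETCONFIG also records the supplied default) are an assumption inferred from the diff:

```
#include <stdint.h>
#include <libsurvive/survive.h>

/* Sketch: reading configuration values the way the patched survive_cal.c does. */
void example_read_cal_config(SurviveContext *ctx) {
	const char *configfile = survive_configs(ctx, "configfile", SC_GET, "config.json");
	const char *required = survive_configs(ctx, "requiredtrackersforcal", SC_SETCONFIG, "");
	uint32_t allow_all = survive_configi(ctx, "allowalltrackersforcal", SC_SETCONFIG, 1);
	(void)configfile;
	(void)required;
	(void)allow_all;
}
```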
diff --git a/src/survive_data.c b/src/survive_data.c
index befcbe7..c586658 100644
--- a/src/survive_data.c
+++ b/src/survive_data.c
@@ -41,6 +41,7 @@ typedef struct
typedef struct
{
double acode_offset;
+ int sent_out_ootx_bits;
} lightcap2_global_data;
typedef struct
@@ -218,7 +219,22 @@ void handle_lightcap2_process_sweep_data(SurviveObject *so)
//printf("%4d\n", lcd->sweep.sweep_len[i]);
int offset_from = lcd->sweep.sweep_time[i] - lcd->per_sweep.activeSweepStartTime + lcd->sweep.sweep_len[i] / 2;
-// if (offset_from < 380000 && offset_from > 70000)
+ // first, send out the sync pulse data for the last round (for OOTX decoding)
+ if (!lcd->global.sent_out_ootx_bits) {
+ if (lcd->per_sweep.lh_max_pulse_length[0] != 0) {
+ so->ctx->lightproc(
+ so, -1, handle_lightcap2_getAcodeFromSyncPulse(so, lcd->per_sweep.lh_max_pulse_length[0]),
+ lcd->per_sweep.lh_max_pulse_length[0], lcd->per_sweep.lh_start_time[0], 0, 0);
+ }
+ if (lcd->per_sweep.lh_max_pulse_length[1] != 0) {
+ so->ctx->lightproc(
+ so, -2, handle_lightcap2_getAcodeFromSyncPulse(so, lcd->per_sweep.lh_max_pulse_length[1]),
+ lcd->per_sweep.lh_max_pulse_length[1], lcd->per_sweep.lh_start_time[1], 0, 1);
+ }
+ lcd->global.sent_out_ootx_bits = 1;
+ }
+
+ // if (offset_from < 380000 && offset_from > 70000)
{
//if (longest_pulse *10 / 8 < lcd->sweep.sweep_len[i])
{
@@ -329,32 +345,7 @@ void handle_lightcap2_sync(SurviveObject * so, LightcapElement * le )
//this should probably be fixed. Maybe some kind of timing based guess at which lighthouse.
// looks like this is the first sync pulse. Cool!
-
- // first, send out the sync pulse data for the last round (for OOTX decoding
- {
- if (lcd->per_sweep.lh_max_pulse_length[0] != 0)
- {
- so->ctx->lightproc(
- so,
- -1,
- handle_lightcap2_getAcodeFromSyncPulse(so, lcd->per_sweep.lh_max_pulse_length[0]),
- lcd->per_sweep.lh_max_pulse_length[0],
- lcd->per_sweep.lh_start_time[0],
- 0,
- 0);
- }
- if (lcd->per_sweep.lh_max_pulse_length[1] != 0)
- {
- so->ctx->lightproc(
- so,
- -2,
- handle_lightcap2_getAcodeFromSyncPulse(so, lcd->per_sweep.lh_max_pulse_length[1]),
- lcd->per_sweep.lh_max_pulse_length[1],
- lcd->per_sweep.lh_start_time[1],
- 0,
- 1);
- }
- }
+ lcd->global.sent_out_ootx_bits = 0;
//fprintf(stderr, "************************************ Reinitializing Disambiguator!!!\n");
// initialize here.
@@ -384,7 +375,6 @@ void handle_lightcap2_sync(SurviveObject * so, LightcapElement * le )
}
*/
}
-
// printf("%d %d\n", acode, lcd->per_sweep.activeLighthouse );
}
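
Worth noting here: the disambiguator forwards sync pulses with negative sensor ids (-1 for lighthouse 0, -2 for lighthouse 1), which the survive_process.c change below turns into POSERDATA_SYNC events. A minimal sketch of a light callback separating the two cases; the parameter list assumes the standard light_process_func signature:

```
#include <stdint.h>
#include <libsurvive/survive.h>

/* Sketch: distinguish sync pulses (negative sensor ids) from sweep hits. */
void example_light_process(SurviveObject *so, int sensor_id, int acode, int timeinsweep,
						   uint32_t timecode, uint32_t length, uint32_t lh) {
	if (sensor_id < 0) {
		/* sync pulse for lighthouse `lh`; acode carries the axis/OOTX bits */
	} else {
		/* sweep hit on photodiode `sensor_id` */
	}
	survive_default_light_process(so, sensor_id, acode, timeinsweep, timecode, length, lh);
}
```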
diff --git a/src/survive_process.c b/src/survive_process.c
index a49b632..1402dab 100644
--- a/src/survive_process.c
+++ b/src/survive_process.c
@@ -19,7 +19,24 @@ void survive_default_light_process( SurviveObject * so, int sensor_id, int acode
}
//We don't use sync times, yet.
- if( acode < -1 ) return;
+ if (sensor_id <= -1) {
+ if (so->PoserFn) {
+ PoserDataLight l = {
+ .hdr =
+ {
+ .pt = POSERDATA_SYNC,
+ },
+ .sensor_id = sensor_id,
+ .acode = acode,
+ .timecode = timecode,
+ .length = length,
+ .angle = 0,
+ .lh = lh,
+ };
+ so->PoserFn(so, (PoserData *)&l);
+ }
+ return;
+ }
if( base_station > NUM_LIGHTHOUSES ) return;
diff --git a/src/survive_sensor_activations.c b/src/survive_sensor_activations.c
index dce353c..4d1801c 100644
--- a/src/survive_sensor_activations.c
+++ b/src/survive_sensor_activations.c
@@ -26,5 +26,4 @@ void SurviveSensorActivations_add(SurviveSensorActivations *self, struct PoserDa
*data_timecode = lightData->timecode;
}
-// Roughly 31ms at a 48mhz clock rate
-uint32_t SurviveSensorActivations_default_tolerance = 500000;
\ No newline at end of file
+uint32_t SurviveSensorActivations_default_tolerance = (uint32_t)(48000000 /*mhz*/ * (16.7 * 2 /*ms*/) / 1000);
\ No newline at end of file
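
A quick sanity check on the new constant: 48,000,000 ticks/s * (2 * 16.7 ms) / 1000 = 1,603,200 ticks, i.e. two full 16.7 ms sweep periods (about 33.4 ms), versus the previous hard-coded 500,000 ticks (about 10.4 ms at the same 48 MHz clock).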