author	Michael Turvey <mwturvey@users.noreply.github.com>	2017-03-26 11:53:16 -0700
committer	GitHub <noreply@github.com>	2017-03-26 11:53:16 -0700
commit	01c05b1a4df2dd85d9cafb4290616ecfe21304f4 (patch)
tree	149c6be1140118b4cdbe72ccfee032b975be7195
parent	b795afd28b7f7e2c1b9ca3f01e9f5ffeb1c75be8 (diff)
parent	51b8cf7083d712eac730fe90f383abfabc7f2676 (diff)
download	libsurvive-01c05b1a4df2dd85d9cafb4290616ecfe21304f4.tar.gz
	libsurvive-01c05b1a4df2dd85d9cafb4290616ecfe21304f4.tar.bz2
Merge pull request #44 from mwturvey/AddingPosers
Adding posers
-rw-r--r--	include/libsurvive/survive.h	6
-rw-r--r--	src/poser_octavioradii.c	542
-rw-r--r--	src/poser_turveytori.c	871
-rw-r--r--	winbuild/libsurvive/libsurvive.vcxproj	2
-rw-r--r--	winbuild/libsurvive/libsurvive.vcxproj.filters	6
5 files changed, 1424 insertions, 3 deletions
diff --git a/include/libsurvive/survive.h b/include/libsurvive/survive.h
index e13312d..278bbca 100644
--- a/include/libsurvive/survive.h
+++ b/include/libsurvive/survive.h
@@ -32,9 +32,9 @@ struct SurviveObject
PoserCB PoserFn;
//Device-specific information about the location of the sensors. This data will be used by the poser.
- int8_t nr_locations;
- FLT * sensor_locations;
- FLT * sensor_normals;
+ int8_t nr_locations; // sensor count
+ FLT * sensor_locations; // size is nr_locations*3. Contains x,y,z values for each sensor
+ FLT * sensor_normals; // size is nr_locations*3. Contains the normal vector for each sensor
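+ // (For example, sensor i's position is sensor_locations[3*i + 0], [3*i + 1], [3*i + 2],
+ //  and its normal is the corresponding triple in sensor_normals.)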
//Timing sensitive data (mostly for disambiguation)
int32_t timebase_hz; //48,000,000 for normal vive hardware. (checked)
diff --git a/src/poser_octavioradii.c b/src/poser_octavioradii.c
new file mode 100644
index 0000000..18d4026
--- /dev/null
+++ b/src/poser_octavioradii.c
@@ -0,0 +1,542 @@
+#include <survive.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+typedef struct
+{
+ int something;
+ //Stuff
+} OctavioRadiiData;
+
+#include <stdio.h>
+#include <stdlib.h>
+#include "linmath.h"
+#include <string.h>
+#include <stdint.h>
+#include <math.h>
+
+#define PTS 32
+#define MAX_CHECKS 40000
+#define MIN_HITS_FOR_VALID 10
+
+FLT hmd_points[PTS * 3];
+FLT hmd_norms[PTS * 3];
+FLT hmd_point_angles[PTS * 2];
+int hmd_point_counts[PTS * 2];
+int best_hmd_target = 0;
+int LoadData(char Camera, const char * FileData);
+
+//Values used for RunTest()
+FLT LighthousePos[3] = { 0, 0, 0 };
+FLT LighthouseQuat[4] = { 1, 0, 0, 0 };
+
+FLT RunTest(int print);
+void PrintOpti();
+
+#define MAX_POINT_PAIRS 100
+
+typedef struct
+{
+ FLT x;
+ FLT y;
+ FLT z;
+} Point;
+
+typedef struct
+{
+ Point point;   // location of the sensor on the tracked object
+ Point normal;  // unit vector indicating the normal for the sensor
+ double theta;  // "horizontal" angular measurement from the lighthouse, in radians
+ double phi;    // "vertical" angular measurement from the lighthouse, in radians
+} TrackedSensor;
+
+typedef struct
+{
+ size_t numSensors;
+ TrackedSensor sensor[0];
+} TrackedObject;
+
+typedef struct
+{
+ unsigned char index1;
+ unsigned char index2;
+ FLT KnownDistance;
+} PointPair;
+
+static FLT distance(Point a, Point b)
+{
+ FLT x = a.x - b.x;
+ FLT y = a.y - b.y;
+ FLT z = a.z - b.z;
+ return FLT_SQRT(x*x + y*y + z*z);
+}
+
+typedef struct
+{
+ FLT HorizAngle;
+ FLT VertAngle;
+} SensorAngles;
+
+#define SQUARED(x) ((x)*(x))
+
+static FLT calculateFitnessOld(SensorAngles *angles, FLT *radii, PointPair *pairs, size_t numPairs)
+{
+ FLT fitness = 0;
+ for (size_t i = 0; i < numPairs; i++)
+ {
+ FLT estimatedDistanceBetweenPoints =
+ SQUARED(radii[pairs[i].index1])
+ + SQUARED(radii[pairs[i].index2])
+ - 2 * radii[pairs[i].index1] * radii[pairs[i].index2]
+ * FLT_SIN(angles[pairs[i].index1].HorizAngle) * FLT_SIN(angles[pairs[i].index2].HorizAngle)
+ * FLT_COS(angles[pairs[i].index1].VertAngle - angles[pairs[i].index2].VertAngle)
+ + FLT_COS(angles[pairs[i].index1].VertAngle) * FLT_COS(angles[pairs[i].index2].VertAngle);
+
+ fitness += SQUARED(estimatedDistanceBetweenPoints - pairs[i].KnownDistance);
+ }
+
+ return FLT_SQRT(fitness);
+}
+
+// angles is an array of the measured angles for each sensor
+// pairs is an array of sensor index pairs with known physical distances
+// radii is the current guess at the distance from the lighthouse to each sensor
+static FLT calculateFitnessOld2(SensorAngles *angles, FLT *radii, PointPair *pairs, size_t numPairs)
+{
+ FLT fitness = 0;
+ for (size_t i = 0; i < numPairs; i++)
+ {
+ // These are the vectors that represent the direction for the two points.
+ // TODO: optimize by precomputing the tangent.
+ FLT v1[3], v2[3], diff[3];
+
+ v1[0] = 1;
+ v2[0] = 1;
+ v1[1] = tan(angles[pairs[i].index1].HorizAngle); // can be precomputed
+ v2[1] = tan(angles[pairs[i].index2].HorizAngle); // can be precomputed
+ v1[2] = tan(angles[pairs[i].index1].VertAngle); // can be precomputed
+ v2[2] = tan(angles[pairs[i].index2].VertAngle); // can be precomputed
+
+ // Now, normalize the vectors
+ normalize3d(v1, v1); // can be precomputed
+ normalize3d(v2, v2); // can be precomputed
+
+ // Now, given the specified radii, find where the new points are
+ scale3d(v1, v1, radii[pairs[i].index1]);
+ scale3d(v2, v2, radii[pairs[i].index2]);
+
+ // Cool, now find the vector between these two points
+ // TODO: optimize the following two funcs into one.
+ sub3d(diff, v1, v2);
+
+ FLT distance = magnitude3d(diff);
+
+ FLT t1 = magnitude3d(v1);
+ FLT t2 = magnitude3d(v2);
+
+
+
+ FLT estimatedDistanceBetweenPoints =
+
+ SQUARED(radii[pairs[i].index1])
+ + SQUARED(radii[pairs[i].index2])
+ - 2 * radii[pairs[i].index1] * radii[pairs[i].index2]
+ * FLT_SIN(angles[pairs[i].index1].HorizAngle) * FLT_SIN(angles[pairs[i].index2].HorizAngle)
+ * FLT_COS(angles[pairs[i].index1].VertAngle - angles[pairs[i].index2].VertAngle)
+ + FLT_COS(angles[pairs[i].index1].VertAngle) * FLT_COS(angles[pairs[i].index2].VertAngle);
+
+
+ //fitness += SQUARED(estimatedDistanceBetweenPoints - pairs[i].KnownDistance);
+ fitness += SQUARED(distance - pairs[i].KnownDistance);
+ }
+
+ return FLT_SQRT(fitness);
+}
+
+static FLT angleBetweenSensors(SensorAngles *a, SensorAngles *b)
+{
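+ // This is the right-angle spherical law of cosines, cos(c) = cos(a) * cos(b):
+ // the horizontal and vertical angular offsets are treated as perpendicular arcs
+ // on a unit sphere around the lighthouse (an approximation of the sweep geometry).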
+ FLT angle = FLT_ACOS(FLT_COS(a->VertAngle - b->VertAngle)*FLT_COS(a->HorizAngle - b->HorizAngle));
+ //FLT angle2 = FLT_ACOS(FLT_COS(b->phi - a->phi)*FLT_COS(b->theta - a->theta));
+
+ return angle;
+}
+
+// angles is an array of the measured angles for each sensor
+// pairs is an array of sensor index pairs with known physical distances
+// radii is the current guess at the distance from the lighthouse to each sensor
+static FLT calculateFitness(SensorAngles *angles, FLT *radii, PointPair *pairs, size_t numPairs)
+{
+ FLT fitness = 0;
+ for (size_t i = 0; i < numPairs; i++)
+ {
+
+ FLT angle = angleBetweenSensors(&angles[pairs[i].index1], &angles[pairs[i].index2]);
+
+ // now we have a side-angle-side triangle, and we need to find the third side.
+
+ // The Law of Cosines says: a^2 = b^2 + c^2 - 2bc * cosA,
+ // where A is the angle opposite side a.
+
+ // Transform this to:
+ // a = sqrt(b^2 + c^2 - 2bc * cosA) and we know the length of the missing side!
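+ // For illustration (hypothetical numbers): with b = c = 1 and A = 60 degrees,
+ // cos(A) = 0.5, so a = sqrt(1 + 1 - 2*1*1*0.5) = 1 -- the expected equilateral triangle.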
+
+ FLT b2 = (SQUARED(radii[pairs[i].index1]));
+ FLT c2 = (SQUARED(radii[pairs[i].index2]));
+ FLT bc2 = (2 * radii[pairs[i].index1] * radii[pairs[i].index2]);
+ FLT cosA = (FLT_COS(angle));
+
+ FLT angleInDegrees = angle * 180 / LINMATHPI;
+
+ FLT dist = sqrt( (SQUARED(radii[pairs[i].index1])) +
+ (SQUARED(radii[pairs[i].index2])) -
+ ( (2 * radii[pairs[i].index1] * radii[pairs[i].index2]) *
+ (FLT_COS(angle))));
+
+
+ FLT fitnessAdder = SQUARED(dist - pairs[i].KnownDistance);
+
+ if (isnan(fitnessAdder))
+ {
+ int a = 0; // no-op; convenient place for a debugger breakpoint when the fitness goes NaN
+ }
+
+ //printf(" %2d %f\n", i, fitnessAdder);
+
+ //fitness += SQUARED(estimatedDistanceBetweenPoints - pairs[i].KnownDistance);
+ fitness += SQUARED(dist - pairs[i].KnownDistance);
+ }
+
+ //fitness = 1 / fitness;
+ return FLT_SQRT(fitness);
+}
+
+#define MAX_RADII 32
+
+// note: gradientOut must hold at least numRadii elements. The gradient is estimated by
+// forward differences, perturbing each radius by `precision` in turn; the sign is negated
+// so that stepping along gradientOut reduces the fitness.
+static void getGradient(FLT *gradientOut, SensorAngles *angles, FLT *radii, size_t numRadii, PointPair *pairs, size_t numPairs, const FLT precision)
+{
+ FLT baseline = calculateFitness(angles, radii, pairs, numPairs);
+
+ for (size_t i = 0; i < numRadii; i++)
+ {
+ FLT tmpPlus[MAX_RADII];
+ memcpy(tmpPlus, radii, sizeof(*radii) * numRadii);
+ tmpPlus[i] += precision;
+ gradientOut[i] = -(calculateFitness(angles, tmpPlus, pairs, numPairs) - baseline);
+ }
+
+ return;
+}
+
+static void normalizeAndMultiplyVector(FLT *vectorToNormalize, size_t count, FLT desiredMagnitude)
+{
+ FLT distanceIn = 0;
+
+ for (size_t i = 0; i < count; i++)
+ {
+ distanceIn += SQUARED(vectorToNormalize[i]);
+ }
+ distanceIn = FLT_SQRT(distanceIn);
+
+
+ FLT scale = desiredMagnitude / distanceIn;
+
+ for (size_t i = 0; i < count; i++)
+ {
+ vectorToNormalize[i] *= scale;
+ }
+
+ return;
+}
+
+
+static void RefineEstimateUsingGradientDescentRadii(FLT *estimateOut, SensorAngles *angles, FLT *initialEstimate, size_t numRadii, PointPair *pairs, size_t numPairs, FILE *logFile)
+{
+ int i = 0;
+ FLT lastMatchFitness = calculateFitness(angles, initialEstimate, pairs, numPairs);
+ if (estimateOut != initialEstimate)
+ {
+ memcpy(estimateOut, initialEstimate, sizeof(*estimateOut) * numRadii);
+ }
+
+
+ // The values below are somewhat magic, and definitely tunable.
+ // The initial value of g represents the biggest step that the gradient descent can take at first.
+ // Bigger values may be faster, especially when the initial guess is wildly off.
+ // The downside to a bigger starting value is that if we've already picked a good guess near the right
+ // local minimum, and there are other local minima, we may accidentally jump to one of those and get stuck there.
+ // That's fairly unlikely with the lighthouse problem, from experience.
+ // The other downside is that if it's too big, we may have to spend a few iterations before it gets down
+ // to a size that doesn't jump us out of our minimum.
+ // The terminal value of g represents how close we want to get to the local minimum before we're "done".
+ // The decay applied to g each iteration is intentionally very close to 1;
+ // in fact, it could probably be 1 without any issue. The main place where g is decreased
+ // is in the block below, when we've made a jump that results in a worse fitness than we started with.
+ // In those cases, we don't take the jump, and instead lower the value of g and try again.
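+ // For a sense of scale (illustrative arithmetic only): ignoring the success (*1.05) and
+ // failure (*0.7) adjustments below, the baseline *0.9999 decay alone would take roughly
+ // ln(0.4 / 0.00001) / 0.0001, i.e. on the order of 100,000 iterations, to reach the terminal
+ // value -- so in practice the *0.7 back-off in the failure branch does most of the shrinking.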
+ for (FLT g = 0.4; g > 0.00001; g *= 0.9999)
+ {
+ i++;
+
+
+
+ FLT point1[MAX_RADII];
+ memcpy(point1, estimateOut, sizeof(*point1) * numRadii);
+
+ // let's get 3 iterations of gradient descent here.
+ FLT gradient1[MAX_RADII];
+ getGradient(gradient1, angles, point1, numRadii, pairs, numPairs, g / 1000 /*somewhat arbitrary*/);
+ normalizeAndMultiplyVector(gradient1, numRadii, g);
+
+ FLT point2[MAX_RADII];
+ for (size_t i = 0; i < numRadii; i++)
+ {
+ point2[i] = point1[i] + gradient1[i];
+ }
+ FLT gradient2[MAX_RADII];
+ getGradient(gradient2, angles, point2, numRadii, pairs, numPairs, g / 1000 /*somewhat arbitrary*/);
+ normalizeAndMultiplyVector(gradient2, numRadii, g);
+
+ FLT point3[MAX_RADII];
+ for (size_t i = 0; i < numRadii; i++)
+ {
+ point3[i] = point2[i] + gradient2[i];
+ }
+
+ // remember that gradient descent has a tendency to zig-zag when it encounters a narrow valley?
+ // Well, solving the lighthouse problem presents a very narrow valley, and the zig-zag of a basic
+ // gradient descent is kinda horrible here. Instead, think about the shape that a zig-zagging
+ // converging gradient descent makes. Instead of using the gradient as the best indicator of
+ // the direction we should follow, we're looking at one side of the zig-zag pattern, and specifically
+ // following *that* vector. As it turns out, this works *amazingly* well.
+
+ FLT specialGradient[MAX_RADII];
+ for (size_t i = 0; i < numRadii; i++)
+ {
+ specialGradient[i] = point3[i] - point1[i];
+ }
+
+ // The second parameter to this function is very much a tunable parameter. Different values will result
+ // in a different number of iterations before we get to the minimum. Numbers between 3-10 seem to work well
+ // It's not clear what would be optimum here.
+ normalizeAndMultiplyVector(specialGradient, numRadii, g / 4);
+
+
+ FLT point4[MAX_RADII];
+ for (size_t i = 0; i < numRadii; i++)
+ {
+ point4[i] = point3[i] + specialGradient[i];
+ }
+
+
+ FLT newMatchFitness = calculateFitness(angles, point4, pairs, numPairs);
+
+
+ if (newMatchFitness < lastMatchFitness)
+ {
+ //if (logFile)
+ //{
+ // writePoint(logFile, lastPoint.x, lastPoint.y, lastPoint.z, 0xFFFFFF);
+ //}
+
+ lastMatchFitness = newMatchFitness;
+ memcpy(estimateOut, point4, sizeof(*estimateOut) * numRadii);
+
+#ifdef RADII_DEBUG
+ printf("+ %d %0.9f (%0.9f) \n", i, newMatchFitness, g);
+#endif
+ g = g * 1.05;
+ }
+ else
+ {
+//#ifdef RADII_DEBUG
+ // printf("-");
+ //printf("- %d %0.9f (%0.9f) [%0.9f] \n", i, newMatchFitness, g, estimateOut[0]);
+//#endif
+ // if it wasn't a match, back off on the distance we jump
+ g *= 0.7;
+
+ }
+
+#ifdef RADII_DEBUG
+ FLT avg = 0;
+ FLT diffFromAvg[MAX_RADII];
+
+ for (size_t m = 0; m < numRadii; m++)
+ {
+ avg += estimateOut[m];
+ }
+ avg = avg / numRadii;
+
+ for (size_t m = 0; m < numRadii; m++)
+ {
+ diffFromAvg[m] = estimateOut[m] - avg;
+ }
+ printf("[avg:%f] ", avg);
+
+ for (size_t x = 0; x < numRadii; x++)
+ {
+ printf("%f, ", diffFromAvg[x]);
+ //printf("%f, ", estimateOut[x]);
+ }
+ printf("\n");
+
+
+#endif
+
+
+ }
+ printf("\ni=%d\n", i);
+}
+
+void SolveForLighthouseRadii(Point *objPosition, FLT *objOrientation, TrackedObject *obj)
+{
+ FLT estimate[MAX_RADII];
+
+ for (size_t i = 0; i < MAX_RADII; i++)
+ {
+ estimate[i] = 2.38;
+ }
+
+ SensorAngles angles[MAX_RADII];
+ PointPair pairs[MAX_POINT_PAIRS];
+
+ size_t pairCount = 0;
+
+ //obj->numSensors = 5; // TODO: HACK!!!!
+
+ for (size_t i = 0; i < obj->numSensors; i++)
+ {
+ angles[i].HorizAngle = obj->sensor[i].theta;
+ angles[i].VertAngle = obj->sensor[i].phi;
+ }
+
+ for (size_t i = 0; i < obj->numSensors - 1; i++)
+ {
+ for (size_t j = i + 1; j < obj->numSensors; j++)
+ {
+ pairs[pairCount].index1 = i;
+ pairs[pairCount].index2 = j;
+ pairs[pairCount].KnownDistance = distance(obj->sensor[i].point, obj->sensor[j].point);
+ pairCount++;
+ }
+ }
+
+
+ RefineEstimateUsingGradientDescentRadii(estimate, angles, estimate, obj->numSensors, pairs, pairCount, NULL);
+
+ // we should now have an estimate of the radii.
+
+ for (size_t i = 0; i < obj->numSensors; i++)
+ {
+ printf("radius[%d]: %f\n", i, estimate[i]);
+ }
+ // (FLT *estimateOut, SensorAngles *angles, FLT *initialEstimate, size_t numRadii, PointPair *pairs, size_t numPairs, FILE *logFile)
+
+ return;
+}
+
+int PoserOctavioRadii( SurviveObject * so, PoserData * pd )
+{
+ PoserType pt = pd->pt;
+ SurviveContext * ctx = so->ctx;
+ OctavioRadiiData * dd = so->PoserData;
+
+ if( !dd ) so->PoserData = dd = malloc( sizeof(OctavioRadiiData) );
+
+ switch( pt )
+ {
+ case POSERDATA_IMU:
+ {
+ PoserDataIMU * imu = (PoserDataIMU*)pd;
+ //printf( "IMU:%s (%f %f %f) (%f %f %f)\n", so->codename, imu->accel[0], imu->accel[1], imu->accel[2], imu->gyro[0], imu->gyro[1], imu->gyro[2] );
+ break;
+ }
+ case POSERDATA_LIGHT:
+ {
+ PoserDataLight * l = (PoserDataLight*)pd;
+ //printf( "LIG:%s %d @ %f rad, %f s (AC %d) (TC %d)\n", so->codename, l->sensor_id, l->angle, l->length, l->acode, l->timecode );
+ break;
+ }
+ case POSERDATA_FULL_SCENE:
+ {
+ TrackedObject *to;
+
+ PoserDataFullScene * fs = (PoserDataFullScene*)pd;
+
+ to = malloc(sizeof(TrackedObject) + (SENSORS_PER_OBJECT * sizeof(TrackedSensor)));
+
+ //FLT lengths[SENSORS_PER_OBJECT][NUM_LIGHTHOUSES][2];
+ //FLT angles[SENSORS_PER_OBJECT][NUM_LIGHTHOUSES][2]; //2 Axes (Angles in LH space)
+ //FLT synctimes[SENSORS_PER_OBJECT][NUM_LIGHTHOUSES];
+
+ //to->numSensors = so->nr_locations;
+ {
+ int sensorCount = 0;
+
+ for (int i = 0; i < so->nr_locations; i++)
+ {
+ if (fs->lengths[i][0][0] != -1 && fs->lengths[i][0][1] != -1) //lh 0
+ {
+ to->sensor[sensorCount].normal.x = so->sensor_normals[i * 3 + 0];
+ to->sensor[sensorCount].normal.y = so->sensor_normals[i * 3 + 1];
+ to->sensor[sensorCount].normal.z = so->sensor_normals[i * 3 + 2];
+ to->sensor[sensorCount].point.x = so->sensor_locations[i * 3 + 0];
+ to->sensor[sensorCount].point.y = so->sensor_locations[i * 3 + 1];
+ to->sensor[sensorCount].point.z = so->sensor_locations[i * 3 + 2];
+ to->sensor[sensorCount].theta = fs->angles[i][0][0] + LINMATHPI / 2; // lighthouse 0, angle 0 (horizontal)
+ to->sensor[sensorCount].phi = fs->angles[i][0][1] + LINMATHPI / 2; // lighthouse 0, angle 1 (vertical)
+ sensorCount++;
+ }
+ }
+
+ to->numSensors = sensorCount;
+
+ Point position;
+ FLT orientation[4];
+
+ SolveForLighthouseRadii(&position, orientation, to);
+ }
+ {
+ int sensorCount = 0;
+ int lh = 1;
+
+ for (int i = 0; i < so->nr_locations; i++)
+ {
+ if (fs->lengths[i][lh][0] != -1 && fs->lengths[i][lh][1] != -1)
+ {
+ to->sensor[sensorCount].normal.x = so->sensor_normals[i * 3 + 0];
+ to->sensor[sensorCount].normal.y = so->sensor_normals[i * 3 + 1];
+ to->sensor[sensorCount].normal.z = so->sensor_normals[i * 3 + 2];
+ to->sensor[sensorCount].point.x = so->sensor_locations[i * 3 + 0];
+ to->sensor[sensorCount].point.y = so->sensor_locations[i * 3 + 1];
+ to->sensor[sensorCount].point.z = so->sensor_locations[i * 3 + 2];
+ to->sensor[sensorCount].theta = fs->angles[i][lh][0] + LINMATHPI / 2; // lighthouse lh, angle 0 (horizontal)
+ to->sensor[sensorCount].phi = fs->angles[i][lh][1] + LINMATHPI / 2; // lighthouse lh, angle 1 (vertical)
+ sensorCount++;
+ }
+ }
+
+ to->numSensors = sensorCount;
+
+ Point position;
+ FLT orientation[4];
+
+ SolveForLighthouseRadii(&position, orientation, to);
+ }
+ //printf( "Full scene data.\n" );
+ break;
+ }
+ case POSERDATA_DISASSOCIATE:
+ {
+ free( dd );
+ so->PoserData = 0;
+ //printf( "Need to disassociate.\n" );
+ break;
+ }
+ }
+ return 0;
+}
+
+
+REGISTER_LINKTIME( PoserOctavioRadii );
+
diff --git a/src/poser_turveytori.c b/src/poser_turveytori.c
new file mode 100644
index 0000000..4e602f3
--- /dev/null
+++ b/src/poser_turveytori.c
@@ -0,0 +1,871 @@
+#include <survive.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <memory.h>
+#include <assert.h>
+#include "linmath.h"
+#include <stddef.h>
+#include <math.h>
+#include <stdint.h>
+
+
+#define PointToFlts(x) ((FLT*)(x))
+
+typedef struct
+{
+ FLT x;
+ FLT y;
+ FLT z;
+} Point;
+
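+// Debug/visualization output helpers -- stubbed out as no-ops in this build: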
+void writePoint(FILE *file, double x, double y, double z, unsigned int rgb) {}
+void updateHeader(FILE * file) {}
+void writeAxes(FILE * file) {}
+void drawLineBetweenPoints(FILE *file, Point a, Point b, unsigned int color) {}
+void writePcdHeader(FILE * file) {}
+void writePointCloud(FILE *f, Point *pointCloud, unsigned int Color) {}
+void markPointWithStar(FILE *file, Point point, unsigned int color) {}
+
+typedef struct
+{
+ Point point;   // location of the sensor on the tracked object
+ Point normal;  // unit vector indicating the normal for the sensor
+ double theta;  // "horizontal" angular measurement from the lighthouse, in radians
+ double phi;    // "vertical" angular measurement from the lighthouse, in radians
+} TrackedSensor;
+
+typedef struct
+{
+ size_t numSensors;
+ TrackedSensor sensor[0];
+} TrackedObject;
+
+
+#ifndef M_PI
+#define M_PI 3.14159265358979323846264338327
+#endif
+
+#define SQUARED(x) ((x)*(x))
+
+typedef union
+{
+ struct
+ {
+ unsigned char Blue;
+ unsigned char Green;
+ unsigned char Red;
+ unsigned char Alpha;
+ };
+ uint32_t long_value;
+} RGBValue;
+
+static RGBValue RED = { .Red = 255,.Green = 0,.Blue = 0,.Alpha = 125 };
+static RGBValue GREEN = { .Red = 0,.Green = 255,.Blue = 0,.Alpha = 125 };
+static RGBValue BLUE = { .Red = 0,.Green = 0,.Blue = 255,.Alpha = 125 };
+
+static const double WORLD_BOUNDS = 100;
+#define MAX_TRACKED_POINTS 40
+
+static const float DefaultPointsPerOuterDiameter = 60;
+
+typedef struct
+{
+ int something;
+ //Stuff
+} ToriData;
+
+
+
+
+
+
+
+static FLT distance(Point a, Point b)
+{
+ FLT x = a.x - b.x;
+ FLT y = a.y - b.y;
+ FLT z = a.z - b.z;
+ return FLT_SQRT(x*x + y*y + z*z);
+}
+
+Matrix3x3 GetRotationMatrixForTorus(Point a, Point b)
+{
+ Matrix3x3 result;
+ FLT v1[3] = { 0, 0, 1 };
+ FLT v2[3] = { a.x - b.x, a.y - b.y, a.z - b.z };
+
+ normalize3d(v2, v2);
+
+ rotation_between_vecs_to_m3(&result, v1, v2);
+
+ // Useful for debugging...
+ //FLT v2b[3];
+ //rotate_vec(v2b, v1, result);
+
+ return result;
+}
+
+typedef struct
+{
+ Point a;
+ Point b;
+ FLT angle;
+ FLT tanAngle; // tangent of angle
+ Matrix3x3 rotation;
+ Matrix3x3 invRotation; // inverse of rotation
+
+} PointsAndAngle;
+
+
+Point RotateAndTranslatePoint(Point p, Matrix3x3 rot, Point newOrigin)
+{
+ Point q;
+
+ double pf[3] = { p.x, p.y, p.z };
+ q.x = rot.val[0][0] * p.x + rot.val[1][0] * p.y + rot.val[2][0] * p.z + newOrigin.x;
+ q.y = rot.val[0][1] * p.x + rot.val[1][1] * p.y + rot.val[2][1] * p.z + newOrigin.y;
+ q.z = rot.val[0][2] * p.x + rot.val[1][2] * p.y + rot.val[2][2] * p.z + newOrigin.z;
+
+ return q;
+}
+
+double angleFromPoints(Point p1, Point p2, Point center)
+{
+ Point v1, v2, v1norm, v2norm;
+ v1.x = p1.x - center.x;
+ v1.y = p1.y - center.y;
+ v1.z = p1.z - center.z;
+
+ v2.x = p2.x - center.x;
+ v2.y = p2.y - center.y;
+ v2.z = p2.z - center.z;
+
+ double v1mag = sqrt(v1.x * v1.x + v1.y * v1.y + v1.z * v1.z);
+ v1norm.x = v1.x / v1mag;
+ v1norm.y = v1.y / v1mag;
+ v1norm.z = v1.z / v1mag;
+
+ double v2mag = sqrt(v2.x * v2.x + v2.y * v2.y + v2.z * v2.z);
+ v2norm.x = v2.x / v2mag;
+ v2norm.y = v2.y / v2mag;
+ v2norm.z = v2.z / v2mag;
+
+ double res = v1norm.x * v2norm.x + v1norm.y * v2norm.y + v1norm.z * v2norm.z;
+
+ double angle = acos(res);
+
+ return angle;
+}
+
+Point midpoint(Point a, Point b)
+{
+ Point m;
+ m.x = (a.x + b.x) / 2;
+ m.y = (a.y + b.y) / 2;
+ m.z = (a.z + b.z) / 2;
+
+ return m;
+}
+
+// What we're doing here is:
+// * Given a point in space
+// * And points and a lighthouse angle that implicitly define a torus
+// * for that torus, what is the toroidal angle of the plane that will go through that point in space
+// * and given that toroidal angle, what is the poloidal angle that will be directed toward that point in space?
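+// (Terminology: the toroidal angle runs around the torus's central axis -- "around
+// the donut" -- while the poloidal angle runs around the tube's circular cross-section.)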
+void estimateToroidalAndPoloidalAngleOfPoint(
+ PointsAndAngle *pna,
+ Point point,
+ double *toroidalSin,
+ double *toroidalCos,
+ double *poloidalAngle,
+ double *poloidalSin)
+{
+ // We take the inverse of the rotation matrix, and this now defines a rotation matrix that will take us from
+ // the tracked object coordinate system into the "easy" or "default" coordinate system of the torus.
+ // Using this will allow us to derive angles much more simply by being in a "friendly" coordinate system.
+ Matrix3x3 rot = pna->invRotation;
+ Point origin;
+ origin.x = 0;
+ origin.y = 0;
+ origin.z = 0;
+
+ Point m = midpoint(pna->a, pna->b);
+
+ // in this new coordinate system, we'll rename all of the points we care about to have an "F" after them
+ // This will be their representation in the "friendly" coordinate system
+ Point pointF;
+
+ // Okay, I lied a little above. In addition to the rotation matrix that we care about, there was also
+ // a translation that we did to move the origin. If we're going to get to the "friendly" coordinate system
+ // of the torus, we need to first undo the translation, then undo the rotation. Below, we're undoing the translation.
+ pointF.x = point.x - m.x;
+ pointF.y = point.y - m.y;
+ pointF.z = point.z - m.z;
+
+ // now we'll undo the rotation part.
+ pointF = RotateAndTranslatePoint(pointF, rot, origin);
+
+ // hooray, now pointF is in our more-friendly coordinate system.
+
+ // Now, it's time to figure out the toroidal angle to that point. This should be pretty easy.
+ // We will "flatten" the z dimension to only look at the x and y values. Then, we just need to measure the
+ // angle between a vector to pointF and a vector along the x axis.
+
+ FLT toroidalHyp = FLT_SQRT(SQUARED(pointF.y) + SQUARED(pointF.x));
+
+ *toroidalSin = pointF.y / toroidalHyp;
+
+ *toroidalCos = pointF.x / toroidalHyp;
+
+ //*toroidalAngle = atan(pointF.y / pointF.x);
+ //if (pointF.x < 0)
+ //{
+ // *toroidalAngle += M_PI;
+ //}
+
+ //assert(*toroidalSin / FLT_SIN(*toroidalAngle) - 1 < 0.000001);
+ //assert(*toroidalSin / FLT_SIN(*toroidalAngle) - 1 > -0.000001);
+
+ //assert(*toroidalCos / FLT_COS(*toroidalAngle) - 1 < 0.000001);
+ //assert(*toroidalCos / FLT_COS(*toroidalAngle) - 1 > -0.000001);
+
+ // SCORE!! We've got the toroidal angle. We're half done!
+
+ // Okay, what next...? Now, we will need to rotate the torus *again* to make it easy to
+ // figure out the poloidal angle. We should rotate the entire torus by the toroidal angle
+ // so that the point we're focusin on will lie on the x/z plane. We then should translate the
+ // torus so that the center of the poloidal circle is at the origin. At that point, it will
+ // be trivial to determine the poloidal angle-- it will be the angle on the xz plane of a
+ // vector from the origin to the point.
+
+ // okay, instead of rotating the torus & point by the toroidal angle to get the point on
+ // the xz plane, we're going to take advantage of the radial symmetry of the torus
+ // (i.e. it's symmetric about the point we'd want to rotate it, so the rotation wouldn't
+ // change the torus at all). Therefore, we'll leave the torus as is, but we'll rotate the point
+ // This will only impact the x and y coordinates, and we'll use "G" as the postfix to represent
+ // this new coordinate system
+
+ Point pointG;
+ pointG.z = pointF.z;
+ pointG.y = 0;
+ pointG.x = sqrt(SQUARED(pointF.x) + SQUARED(pointF.y));
+
+ // okay, that ended up being easier than I expected. Now that we have the point on the xZ plane,
+ // our next step will be to shift it down so that the center of the poloidal circle is at the origin.
+ // As you may have noticed, y has now gone to zero, and from here on out, we can basically treat
+ // this as a 2D problem. I think we're getting close...
+
+ // I stole these lines from the torus generator. Gonna need the poloidal radius.
+ double distanceBetweenPoints = distance(pna->a, pna->b); // we don't care about the coordinate system of these points because we're just getting distance.
+ double toroidalRadius = distanceBetweenPoints / (2 * pna->tanAngle);
+ double poloidalRadius = sqrt(SQUARED(toroidalRadius) + SQUARED(distanceBetweenPoints / 2));
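+ // For illustration (hypothetical numbers): two sensors 0.1m apart, seen 5 degrees apart
+ // from the lighthouse (tan(5 deg) ~= 0.0875), give toroidalRadius ~= 0.1 / (2 * 0.0875) ~= 0.57m
+ // and poloidalRadius ~= sqrt(0.57^2 + 0.05^2) ~= 0.57m.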
+
+ // The center of the poloidal circle already lies on the z axis at this point, so we won't shift z at all.
+ // The shift along the X axis will be the toroidal radius.
+
+ Point pointH;
+ pointH.z = pointG.z;
+ pointH.y = pointG.y;
+ pointH.x = pointG.x - toroidalRadius;
+
+ // Okay, almost there. If we treat pointH as a vector on the XZ plane, if we get its angle,
+ // that will be the poloidal angle we're looking for. (crosses fingers)
+
+ FLT poloidalHyp = FLT_SQRT(SQUARED(pointH.z) + SQUARED(pointH.x));
+
+ *poloidalSin = pointH.z / poloidalHyp;
+
+
+ *poloidalAngle = atan(pointH.z / pointH.x);
+ if (pointH.x < 0)
+ {
+ *poloidalAngle += M_PI;
+ }
+
+ //assert(*toroidalSin / FLT_SIN(*toroidalAngle) - 1 < 0.000001);
+ //assert(*toroidalSin / FLT_SIN(*toroidalAngle) - 1 > -0.000001);
+
+
+
+ // Wow, that ended up being not so much code, but a lot of interesting trig.
+ // can't remember the last time I spent so much time working through each line of code.
+
+ return;
+}
+
+#define MAX_POINT_PAIRS 100
+
+FLT angleBetweenSensors(TrackedSensor *a, TrackedSensor *b)
+{
+ FLT angle = FLT_ACOS(FLT_COS(a->phi - b->phi)*FLT_COS(a->theta - b->theta));
+ //FLT angle2 = FLT_ACOS(FLT_COS(b->phi - a->phi)*FLT_COS(b->theta - a->theta));
+
+ return angle;
+}
+
+// This provides a pretty good estimate of the angle above, probably better
+// the further away the lighthouse is. But it's not crazy-precise.
+// Its main advantage is speed.
+FLT pythAngleBetweenSensors2(TrackedSensor *a, TrackedSensor *b)
+{
+ FLT p = (a->phi - b->phi);
+ FLT d = (a->theta - b->theta);
+
+ FLT adjd = FLT_SIN((a->phi + b->phi) / 2);
+ FLT adjP = FLT_SIN((a->theta + b->theta) / 2);
+ FLT pythAngle = sqrt(SQUARED(p*adjP) + SQUARED(d*adjd));
+ return pythAngle;
+}
+
+Point calculateTorusPointFromAngles(PointsAndAngle *pna, FLT toroidalSin, FLT toroidalCos, FLT poloidalAngle, FLT poloidalSin)
+{
+ Point result;
+
+ FLT distanceBetweenPoints = distance(pna->a, pna->b);
+ Point m = midpoint(pna->a, pna->b);
+ Matrix3x3 rot = pna->rotation;
+
+ FLT toroidalRadius = distanceBetweenPoints / (2 * pna->tanAngle);
+ FLT poloidalRadius = FLT_SQRT(SQUARED(toroidalRadius) + SQUARED(distanceBetweenPoints / 2));
+
+ result.x = (toroidalRadius + poloidalRadius*cos(poloidalAngle))*toroidalCos;
+ result.y = (toroidalRadius + poloidalRadius*cos(poloidalAngle))*toroidalSin;
+ result.z = poloidalRadius*poloidalSin;
+ result = RotateAndTranslatePoint(result, rot, m);
+
+ return result;
+}
+
+FLT getPointFitnessForPna(Point pointIn, PointsAndAngle *pna)
+{
+
+ double toroidalSin = 0;
+ double toroidalCos = 0;
+ double poloidalAngle = 0;
+ double poloidalSin = 0;
+
+ estimateToroidalAndPoloidalAngleOfPoint(
+ pna,
+ pointIn,
+ &toroidalSin,
+ &toroidalCos,
+ &poloidalAngle,
+ &poloidalSin);
+
+ Point torusPoint = calculateTorusPointFromAngles(pna, toroidalSin, toroidalCos, poloidalAngle, poloidalSin);
+
+ FLT dist = distance(pointIn, torusPoint);
+
+ // This is some voodoo black magic. It is here to solve the problem that the origin
+ // (which is near the center of all the tori) erroneously ranks as a good match.
+ // After a lot of empirical testing of ways to compensate for this, the "fudge factor"
+ // below ended up being the best fit. As simple as it is, I have a strong suspicion
+ // that there's some crazy complex thesis-level math that could be used to derive this,
+ // but it works, so we'll run with it.
+ // Note that this may be skewing the found location by several millimeters.
+ // It is not clear if this is actually removing existing skew (to get a more accurate value)
+ // or if it is introducing an undesirable skew.
+ double fudge = FLT_SIN((poloidalAngle - M_PI) / 2);
+ dist = dist / fudge;
+
+ return dist;
+}
+
+FLT getPointFitness(Point pointIn, PointsAndAngle *pna, size_t pnaCount)
+{
+ FLT fitness;
+
+ FLT resultSum = 0;
+
+ for (size_t i = 0; i < pnaCount; i++)
+ {
+ fitness = getPointFitnessForPna(pointIn, &(pna[i]));
+ resultSum += SQUARED(fitness);
+ }
+
+ return 1 / FLT_SQRT(resultSum);
+}
+
+Point getGradient(Point pointIn, PointsAndAngle *pna, size_t pnaCount, FLT precision)
+{
+ Point result;
+
+ Point tmpXplus = pointIn;
+ Point tmpXminus = pointIn;
+ tmpXplus.x = pointIn.x + precision;
+ tmpXminus.x = pointIn.x - precision;
+ result.x = getPointFitness(tmpXplus, pna, pnaCount) - getPointFitness(tmpXminus, pna, pnaCount);
+
+ Point tmpYplus = pointIn;
+ Point tmpYminus = pointIn;
+ tmpYplus.y = pointIn.y + precision;
+ tmpYminus.y = pointIn.y - precision;
+ result.y = getPointFitness(tmpYplus, pna, pnaCount) - getPointFitness(tmpYminus, pna, pnaCount);
+
+ Point tmpZplus = pointIn;
+ Point tmpZminus = pointIn;
+ tmpZplus.z = pointIn.z + precision;
+ tmpZminus.z = pointIn.z - precision;
+ result.z = getPointFitness(tmpZplus, pna, pnaCount) - getPointFitness(tmpZminus, pna, pnaCount);
+
+ return result;
+}
+
+Point getNormalizedVector(Point vectorIn, FLT desiredMagnitude)
+{
+ FLT distanceIn = sqrt(SQUARED(vectorIn.x) + SQUARED(vectorIn.y) + SQUARED(vectorIn.z));
+
+ FLT scale = desiredMagnitude / distanceIn;
+
+ Point result = vectorIn;
+
+ result.x *= scale;
+ result.y *= scale;
+ result.z *= scale;
+
+ return result;
+}
+
+Point getAvgPoints(Point a, Point b)
+{
+ Point result;
+ result.x = (a.x + b.x) / 2;
+ result.y = (a.y + b.y) / 2;
+ result.z = (a.z + b.z) / 2;
+ return result;
+}
+
+
+// This modifies the basic gradient descent algorithm to better handle the shallow valley case,
+// which appears to be typical of this problem's convergence.
+static Point RefineEstimateUsingModifiedGradientDescent1(Point initialEstimate, PointsAndAngle *pna, size_t pnaCount, FILE *logFile)
+{
+ int i = 0;
+ FLT lastMatchFitness = getPointFitness(initialEstimate, pna, pnaCount);
+ Point lastPoint = initialEstimate;
+
+ // The values below are somewhat magic, and definitely tunable.
+ // The initial value of g represents the biggest step that the gradient descent can take at first.
+ // Bigger values may be faster, especially when the initial guess is wildly off.
+ // The downside to a bigger starting value is that if we've already picked a good guess near the right
+ // local minimum, and there are other local minima, we may accidentally jump to one of those and get stuck there.
+ // That's fairly unlikely with the lighthouse problem, from experience.
+ // The other downside is that if it's too big, we may have to spend a few iterations before it gets down
+ // to a size that doesn't jump us out of our minimum.
+ // The terminal value of g represents how close we want to get to the local minimum before we're "done".
+ // The decay applied to g each iteration is intentionally very close to 1;
+ // in fact, it could probably be 1 without any issue. The main place where g is decreased
+ // is in the block below, when we've made a jump that results in a worse fitness than we started with.
+ // In those cases, we don't take the jump, and instead lower the value of g and try again.
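+ // For a sense of scale (illustrative arithmetic only): ignoring the *0.7 failure back-off
+ // below, the *0.99 decay alone ends the loop after roughly ln(0.2 / 0.00001) / 0.01,
+ // i.e. about 1,000 iterations, which bounds how long this search can run.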
+ for (FLT g = 0.2; g > 0.00001; g *= 0.99)
+ {
+ i++;
+ Point point1 = lastPoint;
+ // let's get 3 iterations of gradient descent here.
+ Point gradient1 = getGradient(point1, pna, pnaCount, g / 1000 /*somewhat arbitrary*/);
+ Point gradientN1 = getNormalizedVector(gradient1, g);
+
+ Point point2;
+ point2.x = point1.x + gradientN1.x;
+ point2.y = point1.y + gradientN1.y;
+ point2.z = point1.z + gradientN1.z;
+
+ Point gradient2 = getGradient(point2, pna, pnaCount, g / 1000 /*somewhat arbitrary*/);
+ Point gradientN2 = getNormalizedVector(gradient2, g);
+
+ Point point3;
+ point3.x = point2.x + gradientN2.x;
+ point3.y = point2.y + gradientN2.y;
+ point3.z = point2.z + gradientN2.z;
+
+ // remember that gradient descent has a tendency to zig-zag when it encounters a narrow valley?
+ // Well, solving the lighthouse problem presents a very narrow valley, and the zig-zag of a basic
+ // gradient descent is kinda horrible here. Instead, think about the shape that a zig-zagging
+ // converging gradient descent makes. Instead of using the gradient as the best indicator of
+ // the direction we should follow, we're looking at one side of the zig-zag pattern, and specifically
+ // following *that* vector. As it turns out, this works *amazingly* well.
+
+ Point specialGradient = { .x = point3.x - point1.x, .y = point3.y - point1.y, .z = point3.z - point1.z };
+
+ // The second parameter to this function is very much a tunable parameter. Different values will result
+ // in a different number of iterations before we get to the minimum. Numbers between 3-10 seem to work well
+ // It's not clear what would be optimum here.
+ specialGradient = getNormalizedVector(specialGradient, g / 4);
+
+ Point point4;
+
+ point4.x = point3.x + specialGradient.x;
+ point4.y = point3.y + specialGradient.y;
+ point4.z = point3.z + specialGradient.z;
+
+ FLT newMatchFitness = getPointFitness(point4, pna, pnaCount);
+
+ if (newMatchFitness > lastMatchFitness)
+ {
+ if (logFile)
+ {
+ writePoint(logFile, lastPoint.x, lastPoint.y, lastPoint.z, 0xFFFFFF);
+ }
+
+ lastMatchFitness = newMatchFitness;
+ lastPoint = point4;
+#ifdef TORI_DEBUG
+ printf("+");
+#endif
+ }
+ else
+ {
+#ifdef TORI_DEBUG
+ printf("-");
+#endif
+ g *= 0.7;
+
+ }
+
+
+ }
+ printf("\ni=%d\n", i);
+
+ return lastPoint;
+}
+
+
+// interesting-- this is one place where we could use any sensors that are only hit by
+// just an x or y axis to make our estimate better. TODO: bring that data to this fn.
+FLT RotationEstimateFitness(Point lhPoint, FLT *quaternion, TrackedObject *obj)
+{
+ for (size_t i = 0; i < obj->numSensors; i++)
+ {
+ // first, get the normal of the plane for the horizontal sweep
+ FLT theta = obj->sensor[i].theta;
+ // make two vectors that lie on the plane
+ FLT t1[3] = { 1, tan(theta), 0 };
+ FLT t2[3] = { 1, tan(theta), 1 };
+
+ FLT tNorm[3];
+
+ // the normal is the cross of two vectors on the plane.
+ cross3d(tNorm, t1, t2);
+
+ // distance for this plane is d= fabs(A*x + B*y)/sqrt(A^2+B^2) (z term goes away since this plane is "vertical")
+ // where A is
+ //FLT d =
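+ // (NOTE: this function is unfinished -- the per-plane distance is never computed
+ // and no fitness value is returned yet.)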
+ }
+}
+
+static Point RefineRotationEstimate(Point initialEstimate, PointsAndAngle *pna, size_t pnaCount, FILE *logFile)
+{
+ int i = 0;
+ FLT lastMatchFitness = getPointFitness(initialEstimate, pna, pnaCount);
+ Point lastPoint = initialEstimate;
+
+ // The values below are somewhat magic, and definitely tunable.
+ // The initial value of g represents the biggest step that the gradient descent can take at first.
+ // Bigger values may be faster, especially when the initial guess is wildly off.
+ // The downside to a bigger starting value is that if we've already picked a good guess near the right
+ // local minimum, and there are other local minima, we may accidentally jump to one of those and get stuck there.
+ // That's fairly unlikely with the lighthouse problem, from experience.
+ // The other downside is that if it's too big, we may have to spend a few iterations before it gets down
+ // to a size that doesn't jump us out of our minimum.
+ // The terminal value of g represents how close we want to get to the local minimum before we're "done".
+ // The decay applied to g each iteration is intentionally very close to 1;
+ // in fact, it could probably be 1 without any issue. The main place where g is decreased
+ // is in the block below, when we've made a jump that results in a worse fitness than we started with.
+ // In those cases, we don't take the jump, and instead lower the value of g and try again.
+ for (FLT g = 0.2; g > 0.00001; g *= 0.99)
+ {
+ i++;
+ Point point1 = lastPoint;
+ // let's get 3 iterations of gradient descent here.
+ Point gradient1 = getGradient(point1, pna, pnaCount, g / 1000 /*somewhat arbitrary*/);
+ Point gradientN1 = getNormalizedVector(gradient1, g);
+
+ Point point2;
+ point2.x = point1.x + gradientN1.x;
+ point2.y = point1.y + gradientN1.y;
+ point2.z = point1.z + gradientN1.z;
+
+ Point gradient2 = getGradient(point2, pna, pnaCount, g / 1000 /*somewhat arbitrary*/);
+ Point gradientN2 = getNormalizedVector(gradient2, g);
+
+ Point point3;
+ point3.x = point2.x + gradientN2.x;
+ point3.y = point2.y + gradientN2.y;
+ point3.z = point2.z + gradientN2.z;
+
+ // remember that gradient descent has a tendency to zig-zag when it encounters a narrow valley?
+ // Well, solving the lighthouse problem presents a very narrow valley, and the zig-zag of a basic
+ // gradient descent is kinda horrible here. Instead, think about the shape that a zig-zagging
+ // converging gradient descent makes. Instead of using the gradient as the best indicator of
+ // the direction we should follow, we're looking at one side of the zig-zag pattern, and specifically
+ // following *that* vector. As it turns out, this works *amazingly* well.
+
+ Point specialGradient = { .x = point3.x - point1.x, .y = point3.y - point1.y, .z = point3.z - point1.z };
+
+ // The second parameter to this function is very much a tunable parameter. Different values will result
+ // in a different number of iterations before we get to the minimum. Numbers between 3-10 seem to work well
+ // It's not clear what would be optimum here.
+ specialGradient = getNormalizedVector(specialGradient, g / 4);
+
+ Point point4;
+
+ point4.x = point3.x + specialGradient.x;
+ point4.y = point3.y + specialGradient.y;
+ point4.z = point3.z + specialGradient.z;
+
+ FLT newMatchFitness = getPointFitness(point4, pna, pnaCount);
+
+ if (newMatchFitness > lastMatchFitness)
+ {
+ if (logFile)
+ {
+ writePoint(logFile, lastPoint.x, lastPoint.y, lastPoint.z, 0xFFFFFF);
+ }
+
+ lastMatchFitness = newMatchFitness;
+ lastPoint = point4;
+#ifdef TORI_DEBUG
+ printf("+");
+#endif
+ }
+ else
+ {
+#ifdef TORI_DEBUG
+ printf("-");
+#endif
+ g *= 0.7;
+
+ }
+
+
+ }
+ printf("\ni=%d\n", i);
+
+ return lastPoint;
+}
+
+void SolveForRotation(FLT rotOut[4], TrackedObject *obj, Point lh)
+{
+
+ // Step 1, create initial quaternion for guess.
+ // This should have the lighthouse directly facing the tracked object.
+ Point trackedObjRelativeToLh = { .x = -lh.x,.y = -lh.y,.z = -lh.z };
+ FLT theta = atan2(-lh.x, -lh.y);
+ FLT zAxis[3] = { 0, 0, 1 };
+ FLT quat1[4];
+ quatfromaxisangle(quat1, zAxis, theta);
+ // not correcting for phi, but that's less important.
+
+ // Step 2, optimize the quaternion to match the data.
+
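+ // (Step 2 is not yet implemented; quat1 above is computed but rotOut is never written.)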
+}
+
+
+Point SolveForLighthouse(TrackedObject *obj, char doLogOutput)
+{
+ PointsAndAngle pna[MAX_POINT_PAIRS];
+
+ volatile size_t sizeNeeded = sizeof(pna); // volatile so the compiler keeps it; handy for checking stack usage in a debugger
+
+ Point avgNorm = { 0 };
+
+ size_t pnaCount = 0;
+ for (unsigned int i = 0; i < obj->numSensors; i++)
+ {
+ for (unsigned int j = 0; j < i; j++)
+ {
+ if (pnaCount < MAX_POINT_PAIRS)
+ {
+ pna[pnaCount].a = obj->sensor[i].point;
+ pna[pnaCount].b = obj->sensor[j].point;
+
+ pna[pnaCount].angle = angleBetweenSensors(&obj->sensor[i], &obj->sensor[j]);
+ //pna[pnaCount].angle = pythAngleBetweenSensors2(&obj->sensor[i], &obj->sensor[j]);
+ pna[pnaCount].tanAngle = FLT_TAN(pna[pnaCount].angle);
+
+ double pythAngle = sqrt(SQUARED(obj->sensor[i].phi - obj->sensor[j].phi) + SQUARED(obj->sensor[i].theta - obj->sensor[j].theta));
+
+ pna[pnaCount].rotation = GetRotationMatrixForTorus(pna[pnaCount].a, pna[pnaCount].b);
+ pna[pnaCount].invRotation = inverseM33(pna[pnaCount].rotation);
+
+
+ pnaCount++;
+ }
+ }
+
+ avgNorm.x += obj->sensor[i].normal.x;
+ avgNorm.y += obj->sensor[i].normal.y;
+ avgNorm.z += obj->sensor[i].normal.z;
+ }
+ avgNorm.x = avgNorm.x / obj->numSensors;
+ avgNorm.y = avgNorm.y / obj->numSensors;
+ avgNorm.z = avgNorm.z / obj->numSensors;
+
+ FLT avgNormF[3] = { avgNorm.x, avgNorm.y, avgNorm.z };
+
+
+ FILE *logFile = NULL;
+ if (doLogOutput)
+ {
+ logFile = fopen("pointcloud2.pcd", "wb");
+ writePcdHeader(logFile);
+ writeAxes(logFile);
+ }
+
+
+ // Point refinedEstimageGd = RefineEstimateUsingModifiedGradientDescent1(initialEstimate, pna, pnaCount, logFile);
+
+
+ // arbitrarily picking a value of 8 meters out to start from.
+ // intentionally picking the direction of the average normal vector of the sensors that see the lighthouse
+ // since this is least likely to pick the incorrect "mirror" point that would send us
+ // back into the search for the correct point (see "if (a1 > M_PI / 2)" below)
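+ // For example (hypothetical numbers): if the sensors' normals average to roughly (0, 0, 1),
+ // the starting point below is (0, 0, 8) -- eight meters straight "out" along that normal.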
+ Point p1 = getNormalizedVector(avgNorm, 8);
+
+ Point refinedEstimateGd = RefineEstimateUsingModifiedGradientDescent1(p1, pna, pnaCount, logFile);
+
+ FLT pf1[3] = { refinedEstimateGd.x, refinedEstimateGd.y, refinedEstimateGd.z };
+
+ FLT a1 = anglebetween3d(pf1, avgNormF);
+
+ if (a1 > M_PI / 2)
+ {
+ Point p2 = { .x = -refinedEstimateGd.x,.y = -refinedEstimateGd.y,.z = -refinedEstimateGd.z };
+ refinedEstimateGd = RefineEstimateUsingModifiedGradientDescent1(p2, pna, pnaCount, logFile);
+
+ //FLT pf2[3] = { refinedEstimageGd2.x, refinedEstimageGd2.y, refinedEstimageGd2.z };
+
+ //FLT a2 = anglebetween3d(pf2, avgNormF);
+
+ }
+
+ FLT fitGd = getPointFitness(refinedEstimateGd, pna, pnaCount);
+
+ FLT distance = FLT_SQRT(SQUARED(refinedEstimateGd.x) + SQUARED(refinedEstimateGd.y) + SQUARED(refinedEstimateGd.z));
+ printf("(%4.4f, %4.4f, %4.4f)\n", refinedEstimateGd.x, refinedEstimateGd.y, refinedEstimateGd.z);
+ printf("Distance is %f, Fitness is %f\n", distance, fitGd);
+
+ if (logFile)
+ {
+ updateHeader(logFile);
+ fclose(logFile);
+ }
+ //fgetc(stdin);
+ return refinedEstimateGd;
+}
+
+
+
+
+
+
+
+
+
+
+
+
+int PoserTurveyTori( SurviveObject * so, PoserData * pd )
+{
+ PoserType pt = pd->pt;
+ SurviveContext * ctx = so->ctx;
+ ToriData * dd = so->PoserData;
+
+ if (!dd)
+ {
+ so->PoserData = dd = malloc(sizeof(ToriData));
+ memset(dd, 0, sizeof(ToriData));
+ }
+
+ switch( pt )
+ {
+ case POSERDATA_IMU:
+ {
+ PoserDataIMU * imu = (PoserDataIMU*)pd;
+ //printf( "IMU:%s (%f %f %f) (%f %f %f)\n", so->codename, imu->accel[0], imu->accel[1], imu->accel[2], imu->gyro[0], imu->gyro[1], imu->gyro[2] );
+ break;
+ }
+ case POSERDATA_LIGHT:
+ {
+ PoserDataLight * l = (PoserDataLight*)pd;
+ //printf( "LIG:%s %d @ %f rad, %f s (AC %d) (TC %d)\n", so->codename, l->sensor_id, l->angle, l->length, l->acode, l->timecode );
+ break;
+ }
+ case POSERDATA_FULL_SCENE:
+ {
+ TrackedObject *to;
+
+ PoserDataFullScene * fs = (PoserDataFullScene*)pd;
+
+ to = malloc(sizeof(TrackedObject) + (SENSORS_PER_OBJECT * sizeof(TrackedSensor)));
+
+ //FLT lengths[SENSORS_PER_OBJECT][NUM_LIGHTHOUSES][2];
+ //FLT angles[SENSORS_PER_OBJECT][NUM_LIGHTHOUSES][2]; //2 Axes (Angles in LH space)
+ //FLT synctimes[SENSORS_PER_OBJECT][NUM_LIGHTHOUSES];
+
+ //to->numSensors = so->nr_locations;
+ {
+ int sensorCount = 0;
+
+ for (int i = 0; i < so->nr_locations; i++)
+ {
+ if (fs->lengths[i][0][0] != -1 && fs->lengths[i][0][1] != -1) //lh 0
+ {
+ to->sensor[sensorCount].normal.x = so->sensor_normals[i * 3 + 0];
+ to->sensor[sensorCount].normal.y = so->sensor_normals[i * 3 + 1];
+ to->sensor[sensorCount].normal.z = so->sensor_normals[i * 3 + 2];
+ to->sensor[sensorCount].point.x = so->sensor_locations[i * 3 + 0];
+ to->sensor[sensorCount].point.y = so->sensor_locations[i * 3 + 1];
+ to->sensor[sensorCount].point.z = so->sensor_locations[i * 3 + 2];
+ to->sensor[sensorCount].theta = fs->angles[i][0][0] + LINMATHPI / 2; // lighthouse 0, angle 0 (horizontal)
+ to->sensor[sensorCount].phi = fs->angles[i][0][1] + LINMATHPI / 2; // lighthouse 0, angle 1 (vertical)
+ sensorCount++;
+ }
+ }
+
+ to->numSensors = sensorCount;
+
+ SolveForLighthouse(to, 0);
+ }
+ {
+ int sensorCount = 0;
+ int lh = 1;
+
+ for (int i = 0; i < so->nr_locations; i++)
+ {
+ if (fs->lengths[i][lh][0] != -1 && fs->lengths[i][lh][1] != -1)
+ {
+ to->sensor[sensorCount].normal.x = so->sensor_normals[i * 3 + 0];
+ to->sensor[sensorCount].normal.y = so->sensor_normals[i * 3 + 1];
+ to->sensor[sensorCount].normal.z = so->sensor_normals[i * 3 + 2];
+ to->sensor[sensorCount].point.x = so->sensor_locations[i * 3 + 0];
+ to->sensor[sensorCount].point.y = so->sensor_locations[i * 3 + 1];
+ to->sensor[sensorCount].point.z = so->sensor_locations[i * 3 + 2];
+ to->sensor[sensorCount].theta = fs->angles[i][lh][0] + LINMATHPI / 2; // lighthouse lh, angle 0 (horizontal)
+ to->sensor[sensorCount].phi = fs->angles[i][lh][1] + LINMATHPI / 2; // lighthouse lh, angle 1 (vertical)
+ sensorCount++;
+ }
+ }
+
+ to->numSensors = sensorCount;
+
+ SolveForLighthouse(to, 0);
+ }
+ //printf( "Full scene data.\n" );
+ break;
+ }
+ case POSERDATA_DISASSOCIATE:
+ {
+ free( dd );
+ so->PoserData = 0;
+ //printf( "Need to disassociate.\n" );
+ break;
+ }
+ }
+ return 0;
+}
+
+
+REGISTER_LINKTIME( PoserTurveyTori );
+
diff --git a/winbuild/libsurvive/libsurvive.vcxproj b/winbuild/libsurvive/libsurvive.vcxproj
index 643cff5..c794382 100644
--- a/winbuild/libsurvive/libsurvive.vcxproj
+++ b/winbuild/libsurvive/libsurvive.vcxproj
@@ -153,6 +153,8 @@
<ClCompile Include="..\..\src\poser_charlesslow.c" />
<ClCompile Include="..\..\src\poser_daveortho.c" />
<ClCompile Include="..\..\src\poser_dummy.c" />
+ <ClCompile Include="..\..\src\poser_octavioradii.c" />
+ <ClCompile Include="..\..\src\poser_turveytori.c" />
<ClCompile Include="..\..\src\survive.c" />
<ClCompile Include="..\..\src\survive_cal.c" />
<ClCompile Include="..\..\src\survive_config.c" />
diff --git a/winbuild/libsurvive/libsurvive.vcxproj.filters b/winbuild/libsurvive/libsurvive.vcxproj.filters
index 0bb9d1b..e7d44e2 100644
--- a/winbuild/libsurvive/libsurvive.vcxproj.filters
+++ b/winbuild/libsurvive/libsurvive.vcxproj.filters
@@ -84,6 +84,12 @@
<ClCompile Include="..\..\redist\CNFGWinDriver.c">
<Filter>Source Files</Filter>
</ClCompile>
+ <ClCompile Include="..\..\src\poser_turveytori.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\src\poser_octavioradii.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
</ItemGroup>
<ItemGroup>
<ClInclude Include="..\..\src\ootx_decoder.h">