aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authormwturvey <michael.w.turvey@intel.com>2017-03-27 12:48:40 -0700
committermwturvey <michael.w.turvey@intel.com>2017-03-27 12:48:40 -0700
commitb9e9c89a46a91cbc69d7832d6f67e571723d11e6 (patch)
tree7818492fa84148f0e5fcbe336c245d4aab34199f
parentf923e3c5ad03e7942eb46b67666807b738a1428a (diff)
downloadlibsurvive-b9e9c89a46a91cbc69d7832d6f67e571723d11e6.tar.gz
libsurvive-b9e9c89a46a91cbc69d7832d6f67e571723d11e6.tar.bz2
Tori puzzle pieces in place, rotation not converging
-rw-r--r--src/poser_turveytori.c197
1 files changed, 149 insertions, 48 deletions
diff --git a/src/poser_turveytori.c b/src/poser_turveytori.c
index 37f79bb..15961c8 100644
--- a/src/poser_turveytori.c
+++ b/src/poser_turveytori.c
@@ -412,7 +412,7 @@ Point getGradient(Point pointIn, PointsAndAngle *pna, size_t pnaCount, FLT preci
return result;
}
-Point getNormalizedVector(Point vectorIn, FLT desiredMagnitude)
+Point getNormalizedAndScaledVector(Point vectorIn, FLT desiredMagnitude)
{
FLT distanceIn = sqrt(SQUARED(vectorIn.x) + SQUARED(vectorIn.y) + SQUARED(vectorIn.z));
@@ -464,7 +464,7 @@ static Point RefineEstimateUsingModifiedGradientDescent1(Point initialEstimate,
Point point1 = lastPoint;
// let's get 3 iterations of gradient descent here.
Point gradient1 = getGradient(point1, pna, pnaCount, g / 1000 /*somewhat arbitrary*/);
- Point gradientN1 = getNormalizedVector(gradient1, g);
+ Point gradientN1 = getNormalizedAndScaledVector(gradient1, g);
Point point2;
point2.x = point1.x + gradientN1.x;
@@ -472,7 +472,7 @@ static Point RefineEstimateUsingModifiedGradientDescent1(Point initialEstimate,
point2.z = point1.z + gradientN1.z;
Point gradient2 = getGradient(point2, pna, pnaCount, g / 1000 /*somewhat arbitrary*/);
- Point gradientN2 = getNormalizedVector(gradient2, g);
+ Point gradientN2 = getNormalizedAndScaledVector(gradient2, g);
Point point3;
point3.x = point2.x + gradientN2.x;
@@ -491,7 +491,7 @@ static Point RefineEstimateUsingModifiedGradientDescent1(Point initialEstimate,
// The second parameter to this function is very much a tunable parameter. Different values will result
// in a different number of iterations before we get to the minimum. Numbers between 3-10 seem to work well
// It's not clear what would be optimum here.
- specialGradient = getNormalizedVector(specialGradient, g / 4);
+ specialGradient = getNormalizedAndScaledVector(specialGradient, g / 4);
Point point4;
@@ -533,6 +533,75 @@ static Point RefineEstimateUsingModifiedGradientDescent1(Point initialEstimate,
// interesting-- this is one place where we could use any sensors that are only hit by
// just an x or y axis to make our estimate better. TODO: bring that data to this fn.
+FLT RotationEstimateFitnessOld(Point lhPoint, FLT *quaternion, TrackedObject *obj)
+{
+ FLT fitness = 0;
+ for (size_t i = 0; i < obj->numSensors; i++)
+ {
+ // first, get the normal of the plane for the horizonal sweep
+ FLT theta = obj->sensor[i].theta;
+ // make two vectors that lie on the plane
+ FLT t1H[3] = { 1, tan(theta-LINMATHPI/2), 0 };
+ FLT t2H[3] = { 1, tan(theta-LINMATHPI/2), 1 };
+
+ FLT tNormH[3];
+
+ // the normal is the cross of two vectors on the plane.
+ cross3d(tNormH, t1H, t2H);
+
+ normalize3d(tNormH, tNormH);
+
+ // Now do the same for the vertical sweep
+
+ // first, get the normal of the plane for the horizonal sweep
+ FLT phi = obj->sensor[i].phi;
+ // make two vectors that lie on the plane
+ FLT t1V[3] = { 0, 1, tan(phi-LINMATHPI/2)};
+ FLT t2V[3] = { 1, 1, tan(phi-LINMATHPI/2)};
+
+ FLT tNormV[3];
+
+ // the normal is the cross of two vectors on the plane.
+ cross3d(tNormV, t1V, t2V);
+
+ normalize3d(tNormV, tNormV);
+
+
+ // First, where is the sensor in the object's reference frame?
+ FLT sensor_in_obj_reference_frame[3] = {obj->sensor->point.x, obj->sensor->point.y, obj->sensor->point.z};
+ // Where is the point, in the reference frame of the lighthouse?
+ // This has two steps, first we translate from the object's location being the
+ // origin to the lighthouse being the origin.
+ // And second, we apply the quaternion to rotate into the proper reference frame for the lighthouse.
+
+ FLT sensor_in_lh_reference_frame[3];
+ sub3d(sensor_in_lh_reference_frame, sensor_in_obj_reference_frame, (FLT[3]){lhPoint.x, lhPoint.y, lhPoint.z});
+
+ quatrotatevector(sensor_in_lh_reference_frame, quaternion, sensor_in_lh_reference_frame);
+
+ // now the we've got the location of the sensor in the lighthouses's reference frame, given lhPoint and quaternion inputs.
+
+ // We need an arbitrary vector from the plane to the point.
+ // Since the plane goes through the origin, this is trivial.
+ // The sensor point itself is such a vector!
+
+ // And go calculate the distances!
+ // TODO: don't need to ABS these because we square them below.
+ FLT dH = FLT_FABS(dot3d(sensor_in_lh_reference_frame, tNormH));
+ FLT dV = FLT_FABS(dot3d(sensor_in_lh_reference_frame, tNormV));
+
+
+ fitness += SQUARED(dH);
+ fitness += SQUARED(dV);
+ }
+
+ fitness = FLT_SQRT(fitness);
+
+ return fitness;
+}
+
+// interesting-- this is one place where we could use any sensors that are only hit by
+// just an x or y axis to make our estimate better. TODO: bring that data to this fn.
FLT RotationEstimateFitness(Point lhPoint, FLT *quaternion, TrackedObject *obj)
{
FLT fitness = 0;
@@ -541,8 +610,8 @@ FLT RotationEstimateFitness(Point lhPoint, FLT *quaternion, TrackedObject *obj)
        // first, get the normal of the plane for the horizontal sweep
FLT theta = obj->sensor[i].theta;
// make two vectors that lie on the plane
- FLT t1H[3] = { 1, tan(theta), 0 };
- FLT t2H[3] = { 1, tan(theta), 1 };
+ FLT t1H[3] = { 1, tan(theta-LINMATHPI/2), 0 };
+ FLT t2H[3] = { 1, tan(theta-LINMATHPI/2), 1 };
FLT tNormH[3];
@@ -556,8 +625,8 @@ FLT RotationEstimateFitness(Point lhPoint, FLT *quaternion, TrackedObject *obj)
        // first, get the normal of the plane for the vertical sweep
FLT phi = obj->sensor[i].phi;
// make two vectors that lie on the plane
- FLT t1V[3] = { 0, 1, tan(phi)};
- FLT t2V[3] = { 1, 1, tan(phi)};
+ FLT t1V[3] = { 0, 1, tan(phi-LINMATHPI/2)};
+ FLT t2V[3] = { 1, 1, tan(phi-LINMATHPI/2)};
FLT tNormV[3];
@@ -600,11 +669,43 @@ FLT RotationEstimateFitness(Point lhPoint, FLT *quaternion, TrackedObject *obj)
return fitness;
}
-static Point RefineRotationEstimate(Point initialEstimate, PointsAndAngle *pna, size_t pnaCount, FILE *logFile)
+void getRotationGradient(FLT *gradientOut, Point lhPoint, FLT *quaternion, TrackedObject *obj, FLT precision)
+{
+
+ FLT baseFitness = RotationEstimateFitness(lhPoint, quaternion, obj);
+
+ FLT tmp0plus[4];
+ quatadd(tmp0plus, quaternion, (FLT[4]){precision, 0, 0, 0});
+ gradientOut[0] = RotationEstimateFitness(lhPoint, tmp0plus, obj) - baseFitness;
+
+ FLT tmp1plus[4];
+ quatadd(tmp1plus, quaternion, (FLT[4]){0, precision, 0, 0});
+ gradientOut[1] = RotationEstimateFitness(lhPoint, tmp1plus, obj) - baseFitness;
+
+ FLT tmp2plus[4];
+ quatadd(tmp2plus, quaternion, (FLT[4]){0, 0, precision, 0});
+ gradientOut[2] = RotationEstimateFitness(lhPoint, tmp2plus, obj) - baseFitness;
+
+ FLT tmp3plus[4];
+ quatadd(tmp3plus, quaternion, (FLT[4]){0, 0, 0, precision});
+ gradientOut[3] = RotationEstimateFitness(lhPoint, tmp3plus, obj) - baseFitness;
+
+ return;
+}
+
+void getNormalizedAndScaledRotationGradient(FLT *vectorToScale, FLT desiredMagnitude)
+{
+ quatnormalize(vectorToScale, vectorToScale);
+ quatscale(vectorToScale, vectorToScale, desiredMagnitude);
+ return;
+}
+
+static void RefineRotationEstimate(FLT *rotOut, Point lhPoint, FLT *initialEstimate, TrackedObject *obj)
{
int i = 0;
- FLT lastMatchFitness = getPointFitness(initialEstimate, pna, pnaCount);
- Point lastPoint = initialEstimate;
+ FLT lastMatchFitness = RotationEstimateFitness(lhPoint, initialEstimate, obj);
+
+ quatcopy(rotOut, initialEstimate);
// The values below are somewhat magic, and definitely tunable
    // The initial value of g will represent the biggest step that the gradient descent can take at first.
@@ -622,23 +723,25 @@ static Point RefineRotationEstimate(Point initialEstimate, PointsAndAngle *pna,
for (FLT g = 0.2; g > 0.00001; g *= 0.99)
{
i++;
- Point point1 = lastPoint;
+ FLT point1[3];
+ copy3d(point1, rotOut);
// let's get 3 iterations of gradient descent here.
- Point gradient1 = getGradient(point1, pna, pnaCount, g / 1000 /*somewhat arbitrary*/);
- Point gradientN1 = getNormalizedVector(gradient1, g);
+ FLT gradient1[4];
+
+ getRotationGradient(gradient1, lhPoint, point1, obj, g/1000);
+ getNormalizedAndScaledRotationGradient(gradient1,g);
- Point point2;
- point2.x = point1.x + gradientN1.x;
- point2.y = point1.y + gradientN1.y;
- point2.z = point1.z + gradientN1.z;
+ FLT point2[4];
+ quatadd(point2, gradient1, point1);
+ quatnormalize(point2,point2);
- Point gradient2 = getGradient(point2, pna, pnaCount, g / 1000 /*somewhat arbitrary*/);
- Point gradientN2 = getNormalizedVector(gradient2, g);
+ FLT gradient2[4];
+ getRotationGradient(gradient2, lhPoint, point2, obj, g/1000);
+ getNormalizedAndScaledRotationGradient(gradient2,g);
- Point point3;
- point3.x = point2.x + gradientN2.x;
- point3.y = point2.y + gradientN2.y;
- point3.z = point2.z + gradientN2.z;
+ FLT point3[4];
+ quatadd(point3, gradient2, point2);
+ quatnormalize(point3,point3);
// remember that gradient descent has a tendency to zig-zag when it encounters a narrow valley?
// Well, solving the lighthouse problem presents a very narrow valley, and the zig-zag of a basic
@@ -647,48 +750,41 @@ static Point RefineRotationEstimate(Point initialEstimate, PointsAndAngle *pna,
// the direction we should follow, we're looking at one side of the zig-zag pattern, and specifically
// following *that* vector. As it turns out, this works *amazingly* well.
- Point specialGradient = { .x = point3.x - point1.x,.y = point3.y - point1.y,.z = point3.y - point1.y };
+ FLT specialGradient[4];
+ quatsub(specialGradient,point3,point1);
// The second parameter to this function is very much a tunable parameter. Different values will result
// in a different number of iterations before we get to the minimum. Numbers between 3-10 seem to work well
// It's not clear what would be optimum here.
- specialGradient = getNormalizedVector(specialGradient, g / 4);
+ getNormalizedAndScaledRotationGradient(specialGradient,g/4);
- Point point4;
+ FLT point4[4];
+ quatadd(point4, specialGradient, point3);
+ quatnormalize(point4,point4);
- point4.x = point3.x + specialGradient.x;
- point4.y = point3.y + specialGradient.y;
- point4.z = point3.z + specialGradient.z;
-
- FLT newMatchFitness = getPointFitness(point4, pna, pnaCount);
+ FLT newMatchFitness = RotationEstimateFitness(lhPoint, point4, obj);
if (newMatchFitness > lastMatchFitness)
{
- if (logFile)
- {
- writePoint(logFile, lastPoint.x, lastPoint.y, lastPoint.z, 0xFFFFFF);
- }
lastMatchFitness = newMatchFitness;
- lastPoint = point4;
-#ifdef TORI_DEBUG
+ quatcopy(rotOut, point4);
+//#ifdef TORI_DEBUG
printf("+");
-#endif
+//#endif
}
else
{
-#ifdef TORI_DEBUG
+//#ifdef TORI_DEBUG
printf("-");
-#endif
+//#endif
g *= 0.7;
}
}
- printf("\ni=%d\n", i);
-
- return lastPoint;
+ printf("\nRi=%d\n", i);
}
void SolveForRotation(FLT rotOut[4], TrackedObject *obj, Point lh)
@@ -698,12 +794,14 @@ void SolveForRotation(FLT rotOut[4], TrackedObject *obj, Point lh)
// This should have the lighthouse directly facing the tracked object.
Point trackedObjRelativeToLh = { .x = -lh.x,.y = -lh.y,.z = -lh.z };
FLT theta = atan2(-lh.x, -lh.y);
- FLT zAxis[3] = { 0, 0, 1 };
- FLT quat1[4];
- quatfromaxisangle(quat1, zAxis, theta);
+ FLT zAxis[4] = { 0, 0, 1 ,0};
+ //FLT quat1[4];
+ //quatfromaxisangle(quat1, zAxis, theta);
// not correcting for phi, but that's less important.
+
// Step 2, optimize the quaternion to match the data.
+ RefineRotationEstimate(rotOut, lh, zAxis, obj);
}
@@ -767,7 +865,7 @@ Point SolveForLighthouse(TrackedObject *obj, char doLogOutput)
// intentionally picking the direction of the average normal vector of the sensors that see the lighthouse
// since this is least likely to pick the incorrect "mirror" point that would send us
// back into the search for the correct point (see "if (a1 > M_PI / 2)" below)
- Point p1 = getNormalizedVector(avgNorm, 8);
+ Point p1 = getNormalizedAndScaledVector(avgNorm, 8);
Point refinedEstimateGd = RefineEstimateUsingModifiedGradientDescent1(p1, pna, pnaCount, logFile);
@@ -792,6 +890,9 @@ Point SolveForLighthouse(TrackedObject *obj, char doLogOutput)
printf("(%4.4f, %4.4f, %4.4f)\n", refinedEstimateGd.x, refinedEstimateGd.y, refinedEstimateGd.z);
printf("Distance is %f, Fitness is %f\n", distance, fitGd);
+ FLT rot[4];
+ SolveForRotation(rot, obj, refinedEstimateGd);
+
if (logFile)
{
updateHeader(logFile);