From ef039bb3ee8ce95f25e86baddbc4b3d5f21e6743 Mon Sep 17 00:00:00 2001 From: mwturvey Date: Mon, 27 Mar 2017 10:20:05 -0700 Subject: Fixing compiler warnings --- src/poser_octavioradii.c | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) (limited to 'src') diff --git a/src/poser_octavioradii.c b/src/poser_octavioradii.c index 18d4026..3893085 100644 --- a/src/poser_octavioradii.c +++ b/src/poser_octavioradii.c @@ -24,14 +24,11 @@ FLT hmd_norms[PTS * 3]; FLT hmd_point_angles[PTS * 2]; int hmd_point_counts[PTS * 2]; int best_hmd_target = 0; -int LoadData(char Camera, const char * FileData); //Values used for RunTest() FLT LighthousePos[3] = { 0, 0, 0 }; FLT LighthouseQuat[4] = { 1, 0, 0, 0 }; -FLT RunTest(int print); -void PrintOpti(); #define MAX_POINT_PAIRS 100 @@ -410,9 +407,9 @@ void SolveForLighthouseRadii(Point *objPosition, FLT *objOrientation, TrackedObj angles[i].VertAngle = obj->sensor[i].phi; } - for (size_t i = 0; i < obj->numSensors - 1; i++) + for (unsigned char i = 0; i < obj->numSensors - 1; i++) { - for (size_t j = i + 1; j < obj->numSensors; j++) + for (unsigned char j = i + 1; j < obj->numSensors; j++) { pairs[pairCount].index1 = i; pairs[pairCount].index2 = j; @@ -426,7 +423,7 @@ void SolveForLighthouseRadii(Point *objPosition, FLT *objOrientation, TrackedObj // we should now have an estimate of the radii. - for (size_t i = 0; i < obj->numSensors; i++) + for (int i = 0; i < obj->numSensors; i++) { printf("radius[%d]: %f\n", i, estimate[i]); } @@ -494,7 +491,7 @@ int PoserOctavioRadii( SurviveObject * so, PoserData * pd ) Point position; FLT orientation[4]; - SolveForLighthouseRadii(&position, &orientation, to); + SolveForLighthouseRadii(&position, orientation, to); } { int sensorCount = 0; @@ -521,7 +518,7 @@ int PoserOctavioRadii( SurviveObject * so, PoserData * pd ) Point position; FLT orientation[4]; - SolveForLighthouseRadii(&position, &orientation, to); + SolveForLighthouseRadii(&position, orientation, to); } //printf( "Full scene data.\n" ); break; -- cgit v1.2.3 From f923e3c5ad03e7942eb46b67666807b738a1428a Mon Sep 17 00:00:00 2001 From: mwturvey Date: Mon, 27 Mar 2017 10:59:26 -0700 Subject: completed fitness func for tori poser rotation --- src/poser_turveytori.c | 60 ++++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 53 insertions(+), 7 deletions(-) (limited to 'src') diff --git a/src/poser_turveytori.c b/src/poser_turveytori.c index 4e602f3..37f79bb 100644 --- a/src/poser_turveytori.c +++ b/src/poser_turveytori.c @@ -535,23 +535,69 @@ static Point RefineEstimateUsingModifiedGradientDescent1(Point initialEstimate, // just an x or y axis to make our estimate better. TODO: bring that data to this fn. FLT RotationEstimateFitness(Point lhPoint, FLT *quaternion, TrackedObject *obj) { + FLT fitness = 0; for (size_t i = 0; i < obj->numSensors; i++) { // first, get the normal of the plane for the horizonal sweep FLT theta = obj->sensor[i].theta; // make two vectors that lie on the plane - FLT t1[3] = { 1, tan(theta), 0 }; - FLT t2[3] = { 1, tan(theta), 1 }; + FLT t1H[3] = { 1, tan(theta), 0 }; + FLT t2H[3] = { 1, tan(theta), 1 }; - FLT tNorm[3]; + FLT tNormH[3]; // the normal is the cross of two vectors on the plane. 
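			// (For reference: with t1 = (1, tan(theta), 0) and t2 = (1, tan(theta), 1), the
			// cross product below works out to be proportional to (tan(theta), -1, 0) -- the
			// normal of the vertical sweep plane x*tan(theta) - y = 0 through the lighthouse
			// origin.)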
-			cross3d(tNorm, t1, t2);
+			cross3d(tNormH, t1H, t2H);
 
-			// distance for this plane is d= fabs(A*x + B*y)/sqrt(A^2+B^2)  (z term goes away since this plane is "vertical")
-			// where A is 
-			//FLT d = 
+			normalize3d(tNormH, tNormH);
+
+			// Now do the same for the vertical sweep
+
+			// get the normal of the plane for the vertical sweep
+			FLT phi = obj->sensor[i].phi;
+			// make two vectors that lie on the plane
+			FLT t1V[3] = { 0, 1, tan(phi)};
+			FLT t2V[3] = { 1, 1, tan(phi)};
+
+			FLT tNormV[3];
+
+			// the normal is the cross of two vectors on the plane.
+			cross3d(tNormV, t1V, t2V);
+
+			normalize3d(tNormV, tNormV);
+
+
+			// First, where is the sensor in the object's reference frame?
+			FLT sensor_in_obj_reference_frame[3] = {obj->sensor->point.x, obj->sensor->point.y, obj->sensor->point.z};
+			// Where is the point, in the reference frame of the lighthouse?
+			// This has two steps, first we translate from the object's location being the
+			// origin to the lighthouse being the origin.
+			// And second, we apply the quaternion to rotate into the proper reference frame for the lighthouse.
+
+			FLT sensor_in_lh_reference_frame[3];
+			sub3d(sensor_in_lh_reference_frame, sensor_in_obj_reference_frame, (FLT[3]){lhPoint.x, lhPoint.y, lhPoint.z});
+
+			quatrotatevector(sensor_in_lh_reference_frame, quaternion, sensor_in_lh_reference_frame);
+
+			// now that we've got the location of the sensor in the lighthouse's reference frame, given lhPoint and quaternion inputs.
+
+			// We need an arbitrary vector from the plane to the point.
+			// Since the plane goes through the origin, this is trivial.
+			// The sensor point itself is such a vector!
+
+			// And go calculate the distances!
+			// TODO: don't need to ABS these because we square them below.
+			FLT dH = FLT_FABS(dot3d(sensor_in_lh_reference_frame, tNormH));
+			FLT dV = FLT_FABS(dot3d(sensor_in_lh_reference_frame, tNormV));
+
+
+			fitness += SQUARED(dH);
+			fitness += SQUARED(dV);
 	}
+
+	fitness = FLT_SQRT(fitness);
+
+	return fitness;
 }
 
 static Point RefineRotationEstimate(Point initialEstimate, PointsAndAngle *pna, size_t pnaCount, FILE *logFile)
-- 
cgit v1.2.3


From b9e9c89a46a91cbc69d7832d6f67e571723d11e6 Mon Sep 17 00:00:00 2001
From: mwturvey
Date: Mon, 27 Mar 2017 12:48:40 -0700
Subject: Tori puzzle pieces in place, rotation not converging

---
 src/poser_turveytori.c | 197 +++++++++++++++++++++++++++++++++++++------------
 1 file changed, 149 insertions(+), 48 deletions(-)

(limited to 'src')

diff --git a/src/poser_turveytori.c b/src/poser_turveytori.c
index 37f79bb..15961c8 100644
--- a/src/poser_turveytori.c
+++ b/src/poser_turveytori.c
@@ -412,7 +412,7 @@ Point getGradient(Point pointIn, PointsAndAngle *pna, size_t pnaCount, FLT preci
 	return result;
 }
 
-Point getNormalizedVector(Point vectorIn, FLT desiredMagnitude)
+Point getNormalizedAndScaledVector(Point vectorIn, FLT desiredMagnitude)
 {
 	FLT distanceIn = sqrt(SQUARED(vectorIn.x) + SQUARED(vectorIn.y) + SQUARED(vectorIn.z));
 
@@ -464,7 +464,7 @@ static Point RefineEstimateUsingModifiedGradientDescent1(Point initialEstimate,
 		Point point1 = lastPoint;
 		// let's get 3 iterations of gradient descent here.
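 		// (getGradient(), defined earlier in this file, appears to estimate the gradient
 		// numerically -- nudge the point, re-evaluate the fitness, and difference -- in the
 		// same spirit as the getRotationGradient() helper introduced below; g / 1000 is that
 		// finite-difference step.)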
 		Point gradient1 = getGradient(point1, pna, pnaCount, g / 1000 /*somewhat arbitrary*/);
-		Point gradientN1 = getNormalizedVector(gradient1, g);
+		Point gradientN1 = getNormalizedAndScaledVector(gradient1, g);
 
 		Point point2;
 		point2.x = point1.x + gradientN1.x;
@@ -472,7 +472,7 @@ static Point RefineEstimateUsingModifiedGradientDescent1(Point initialEstimate,
 		point2.z = point1.z + gradientN1.z;
 
 		Point gradient2 = getGradient(point2, pna, pnaCount, g / 1000 /*somewhat arbitrary*/);
-		Point gradientN2 = getNormalizedVector(gradient2, g);
+		Point gradientN2 = getNormalizedAndScaledVector(gradient2, g);
 
 		Point point3;
 		point3.x = point2.x + gradientN2.x;
@@ -491,7 +491,7 @@ static Point RefineEstimateUsingModifiedGradientDescent1(Point initialEstimate,
 		// The second parameter to this function is very much a tunable parameter. Different values will result
 		// in a different number of iterations before we get to the minimum. Numbers between 3-10 seem to work well
 		// It's not clear what would be optimum here.
-		specialGradient = getNormalizedVector(specialGradient, g / 4);
+		specialGradient = getNormalizedAndScaledVector(specialGradient, g / 4);
 
 		Point point4;
 
@@ -531,6 +531,75 @@ static Point RefineEstimateUsingModifiedGradientDescent1(Point initialEstimate,
 
 }
 
+// interesting-- this is one place where we could use any sensors that are only hit by
+// just an x or y axis to make our estimate better. TODO: bring that data to this fn.
+FLT RotationEstimateFitnessOld(Point lhPoint, FLT *quaternion, TrackedObject *obj)
+{
+	FLT fitness = 0;
+	for (size_t i = 0; i < obj->numSensors; i++)
+	{
+		// first, get the normal of the plane for the horizontal sweep
+		FLT theta = obj->sensor[i].theta;
+		// make two vectors that lie on the plane
+		FLT t1H[3] = { 1, tan(theta-LINMATHPI/2), 0 };
+		FLT t2H[3] = { 1, tan(theta-LINMATHPI/2), 1 };
+
+		FLT tNormH[3];
+
+		// the normal is the cross of two vectors on the plane.
+		cross3d(tNormH, t1H, t2H);
+
+		normalize3d(tNormH, tNormH);
+
+		// Now do the same for the vertical sweep
+
+		// get the normal of the plane for the vertical sweep
+		FLT phi = obj->sensor[i].phi;
+		// make two vectors that lie on the plane
+		FLT t1V[3] = { 0, 1, tan(phi-LINMATHPI/2)};
+		FLT t2V[3] = { 1, 1, tan(phi-LINMATHPI/2)};
+
+		FLT tNormV[3];
+
+		// the normal is the cross of two vectors on the plane.
+		cross3d(tNormV, t1V, t2V);
+
+		normalize3d(tNormV, tNormV);
+
+
+		// First, where is the sensor in the object's reference frame?
+		FLT sensor_in_obj_reference_frame[3] = {obj->sensor->point.x, obj->sensor->point.y, obj->sensor->point.z};
+		// Where is the point, in the reference frame of the lighthouse?
+		// This has two steps, first we translate from the object's location being the
+		// origin to the lighthouse being the origin.
+		// And second, we apply the quaternion to rotate into the proper reference frame for the lighthouse.
+
+		FLT sensor_in_lh_reference_frame[3];
+		sub3d(sensor_in_lh_reference_frame, sensor_in_obj_reference_frame, (FLT[3]){lhPoint.x, lhPoint.y, lhPoint.z});
+
+		quatrotatevector(sensor_in_lh_reference_frame, quaternion, sensor_in_lh_reference_frame);
+
+		// now that we've got the location of the sensor in the lighthouse's reference frame, given lhPoint and quaternion inputs.
+
+		// We need an arbitrary vector from the plane to the point.
+		// Since the plane goes through the origin, this is trivial.
+		// The sensor point itself is such a vector!
+
+		// And go calculate the distances!
+		// TODO: don't need to ABS these because we square them below.
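+		// (For a plane through the origin with unit normal n, the distance from a point p
+		// to the plane is |dot(p, n)| -- which is exactly what dH and dV compute below.)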
+		FLT dH = FLT_FABS(dot3d(sensor_in_lh_reference_frame, tNormH));
+		FLT dV = FLT_FABS(dot3d(sensor_in_lh_reference_frame, tNormV));
+
+
+		fitness += SQUARED(dH);
+		fitness += SQUARED(dV);
+	}
+
+	fitness = FLT_SQRT(fitness);
+
+	return fitness;
+}
+
 // interesting-- this is one place where we could use any sensors that are only hit by
 // just an x or y axis to make our estimate better. TODO: bring that data to this fn.
 FLT RotationEstimateFitness(Point lhPoint, FLT *quaternion, TrackedObject *obj)
@@ -541,8 +610,8 @@ FLT RotationEstimateFitness(Point lhPoint, FLT *quaternion, TrackedObject *obj)
 		// first, get the normal of the plane for the horizontal sweep
 		FLT theta = obj->sensor[i].theta;
 		// make two vectors that lie on the plane
-		FLT t1H[3] = { 1, tan(theta), 0 };
-		FLT t2H[3] = { 1, tan(theta), 1 };
+		FLT t1H[3] = { 1, tan(theta-LINMATHPI/2), 0 };
+		FLT t2H[3] = { 1, tan(theta-LINMATHPI/2), 1 };
 
 		FLT tNormH[3];
 
@@ -556,8 +625,8 @@ FLT RotationEstimateFitness(Point lhPoint, FLT *quaternion, TrackedObject *obj)
 		// get the normal of the plane for the vertical sweep
 		FLT phi = obj->sensor[i].phi;
 		// make two vectors that lie on the plane
-		FLT t1V[3] = { 0, 1, tan(phi)};
-		FLT t2V[3] = { 1, 1, tan(phi)};
+		FLT t1V[3] = { 0, 1, tan(phi-LINMATHPI/2)};
+		FLT t2V[3] = { 1, 1, tan(phi-LINMATHPI/2)};
 
 		FLT tNormV[3];
 
@@ -600,11 +669,43 @@ FLT RotationEstimateFitness(Point lhPoint, FLT *quaternion, TrackedObject *obj)
 	return fitness;
 }
 
-static Point RefineRotationEstimate(Point initialEstimate, PointsAndAngle *pna, size_t pnaCount, FILE *logFile)
+void getRotationGradient(FLT *gradientOut, Point lhPoint, FLT *quaternion, TrackedObject *obj, FLT precision)
+{
+
+	FLT baseFitness = RotationEstimateFitness(lhPoint, quaternion, obj);
+
+	FLT tmp0plus[4];
+	quatadd(tmp0plus, quaternion, (FLT[4]){precision, 0, 0, 0});
+	gradientOut[0] = RotationEstimateFitness(lhPoint, tmp0plus, obj) - baseFitness;
+
+	FLT tmp1plus[4];
+	quatadd(tmp1plus, quaternion, (FLT[4]){0, precision, 0, 0});
+	gradientOut[1] = RotationEstimateFitness(lhPoint, tmp1plus, obj) - baseFitness;
+
+	FLT tmp2plus[4];
+	quatadd(tmp2plus, quaternion, (FLT[4]){0, 0, precision, 0});
+	gradientOut[2] = RotationEstimateFitness(lhPoint, tmp2plus, obj) - baseFitness;
+
+	FLT tmp3plus[4];
+	quatadd(tmp3plus, quaternion, (FLT[4]){0, 0, 0, precision});
+	gradientOut[3] = RotationEstimateFitness(lhPoint, tmp3plus, obj) - baseFitness;
+
+	return;
+}
+
+void getNormalizedAndScaledRotationGradient(FLT *vectorToScale, FLT desiredMagnitude)
+{
+	quatnormalize(vectorToScale, vectorToScale);
+	quatscale(vectorToScale, vectorToScale, desiredMagnitude);
+	return;
+}
+
+static void RefineRotationEstimate(FLT *rotOut, Point lhPoint, FLT *initialEstimate, TrackedObject *obj)
 {
 	int i = 0;
-	FLT lastMatchFitness = getPointFitness(initialEstimate, pna, pnaCount);
-	Point lastPoint = initialEstimate;
+	FLT lastMatchFitness = RotationEstimateFitness(lhPoint, initialEstimate, obj);
+
+	quatcopy(rotOut, initialEstimate);
 
 	// The values below are somewhat magic, and definitely tunable
 	// The initial value of g will represent the biggest step that the gradient descent can take at first.
 	// bigger values may be faster, especially when the initial guess is wildly off.
 	// The downside to a bigger starting guess is that if we've picked a good guess at the local minimum
 	// and there are other local minima, we may accidentally jump to such a local minimum and get stuck there.
 	// That's fairly unlikely with the lighthouse problem, from experience.
 	// The other downside is that if it's too big, we may have to spend a few iterations before it gets down
 	// to a size that doesn't jump us out of our minima.
 	// The terminal value of g represents how close we want to get to the local minima before we're "done"
 	// The change in value of g for each iteration is intentionally very close to 1.
 	// in fact, it could probably be 1 without any issue. The main place where g is decremented
 	// is in the block below when we've made a jump that results in a worse fitness than we're starting at.
 	// In those cases, we don't take the jump, and instead lower the value of g and try again.
 	for (FLT g = 0.2; g > 0.00001; g *= 0.99)
 	{
 		i++;
-		Point point1 = lastPoint;
+		FLT point1[3];
+		copy3d(point1, rotOut);
 		// let's get 3 iterations of gradient descent here.
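 		// (Note: this loop accepts a step when newMatchFitness > lastMatchFitness, i.e. it
 		// maximizes fitness -- but at this commit RotationEstimateFitness() still returns a
 		// raw residual where smaller is better, which may be why the subject line says the
 		// rotation is not converging; the next commit flips the return value to 1/fitness.)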
- Point gradient1 = getGradient(point1, pna, pnaCount, g / 1000 /*somewhat arbitrary*/); - Point gradientN1 = getNormalizedVector(gradient1, g); + FLT gradient1[4]; + + getRotationGradient(gradient1, lhPoint, point1, obj, g/1000); + getNormalizedAndScaledRotationGradient(gradient1,g); - Point point2; - point2.x = point1.x + gradientN1.x; - point2.y = point1.y + gradientN1.y; - point2.z = point1.z + gradientN1.z; + FLT point2[4]; + quatadd(point2, gradient1, point1); + quatnormalize(point2,point2); - Point gradient2 = getGradient(point2, pna, pnaCount, g / 1000 /*somewhat arbitrary*/); - Point gradientN2 = getNormalizedVector(gradient2, g); + FLT gradient2[4]; + getRotationGradient(gradient2, lhPoint, point2, obj, g/1000); + getNormalizedAndScaledRotationGradient(gradient2,g); - Point point3; - point3.x = point2.x + gradientN2.x; - point3.y = point2.y + gradientN2.y; - point3.z = point2.z + gradientN2.z; + FLT point3[4]; + quatadd(point3, gradient2, point2); + quatnormalize(point3,point3); // remember that gradient descent has a tendency to zig-zag when it encounters a narrow valley? // Well, solving the lighthouse problem presents a very narrow valley, and the zig-zag of a basic @@ -647,48 +750,41 @@ static Point RefineRotationEstimate(Point initialEstimate, PointsAndAngle *pna, // the direction we should follow, we're looking at one side of the zig-zag pattern, and specifically // following *that* vector. As it turns out, this works *amazingly* well. - Point specialGradient = { .x = point3.x - point1.x,.y = point3.y - point1.y,.z = point3.y - point1.y }; + FLT specialGradient[4]; + quatsub(specialGradient,point3,point1); // The second parameter to this function is very much a tunable parameter. Different values will result // in a different number of iterations before we get to the minimum. Numbers between 3-10 seem to work well // It's not clear what would be optimum here. - specialGradient = getNormalizedVector(specialGradient, g / 4); + getNormalizedAndScaledRotationGradient(specialGradient,g/4); - Point point4; + FLT point4[4]; + quatadd(point4, specialGradient, point3); + quatnormalize(point4,point4); - point4.x = point3.x + specialGradient.x; - point4.y = point3.y + specialGradient.y; - point4.z = point3.z + specialGradient.z; - - FLT newMatchFitness = getPointFitness(point4, pna, pnaCount); + FLT newMatchFitness = RotationEstimateFitness(lhPoint, point4, obj); if (newMatchFitness > lastMatchFitness) { - if (logFile) - { - writePoint(logFile, lastPoint.x, lastPoint.y, lastPoint.z, 0xFFFFFF); - } lastMatchFitness = newMatchFitness; - lastPoint = point4; -#ifdef TORI_DEBUG + quatcopy(rotOut, point4); +//#ifdef TORI_DEBUG printf("+"); -#endif +//#endif } else { -#ifdef TORI_DEBUG +//#ifdef TORI_DEBUG printf("-"); -#endif +//#endif g *= 0.7; } } - printf("\ni=%d\n", i); - - return lastPoint; + printf("\nRi=%d\n", i); } void SolveForRotation(FLT rotOut[4], TrackedObject *obj, Point lh) @@ -698,12 +794,14 @@ void SolveForRotation(FLT rotOut[4], TrackedObject *obj, Point lh) // This should have the lighthouse directly facing the tracked object. Point trackedObjRelativeToLh = { .x = -lh.x,.y = -lh.y,.z = -lh.z }; FLT theta = atan2(-lh.x, -lh.y); - FLT zAxis[3] = { 0, 0, 1 }; - FLT quat1[4]; - quatfromaxisangle(quat1, zAxis, theta); + FLT zAxis[4] = { 0, 0, 1 ,0}; + //FLT quat1[4]; + //quatfromaxisangle(quat1, zAxis, theta); // not correcting for phi, but that's less important. + // Step 2, optimize the quaternion to match the data. 
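+	// (zAxis doubles as the initial rotation estimate handed to the optimizer; theta is
+	// computed above but goes unused while the quatfromaxisangle() call stays commented out.)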
+ RefineRotationEstimate(rotOut, lh, zAxis, obj); } @@ -767,7 +865,7 @@ Point SolveForLighthouse(TrackedObject *obj, char doLogOutput) // intentionally picking the direction of the average normal vector of the sensors that see the lighthouse // since this is least likely to pick the incorrect "mirror" point that would send us // back into the search for the correct point (see "if (a1 > M_PI / 2)" below) - Point p1 = getNormalizedVector(avgNorm, 8); + Point p1 = getNormalizedAndScaledVector(avgNorm, 8); Point refinedEstimateGd = RefineEstimateUsingModifiedGradientDescent1(p1, pna, pnaCount, logFile); @@ -792,6 +890,9 @@ Point SolveForLighthouse(TrackedObject *obj, char doLogOutput) printf("(%4.4f, %4.4f, %4.4f)\n", refinedEstimateGd.x, refinedEstimateGd.y, refinedEstimateGd.z); printf("Distance is %f, Fitness is %f\n", distance, fitGd); + FLT rot[4]; + SolveForRotation(rot, obj, refinedEstimateGd); + if (logFile) { updateHeader(logFile); -- cgit v1.2.3 From 9ead7de95621f1d7d59fed26fc7431344fdd9db4 Mon Sep 17 00:00:00 2001 From: mwturvey Date: Mon, 27 Mar 2017 13:58:04 -0700 Subject: Something is converging. --- src/poser_turveytori.c | 29 +++++++++++++++++++---------- 1 file changed, 19 insertions(+), 10 deletions(-) (limited to 'src') diff --git a/src/poser_turveytori.c b/src/poser_turveytori.c index 15961c8..824fabb 100644 --- a/src/poser_turveytori.c +++ b/src/poser_turveytori.c @@ -646,7 +646,8 @@ FLT RotationEstimateFitness(Point lhPoint, FLT *quaternion, TrackedObject *obj) FLT sensor_in_lh_reference_frame[3]; sub3d(sensor_in_lh_reference_frame, sensor_in_obj_reference_frame, (FLT[3]){lhPoint.x, lhPoint.y, lhPoint.z}); - quatrotatevector(sensor_in_lh_reference_frame, quaternion, sensor_in_lh_reference_frame); + //quatrotatevector(sensor_in_lh_reference_frame, quaternion, sensor_in_lh_reference_frame); + rotatearoundaxis(sensor_in_lh_reference_frame, sensor_in_lh_reference_frame, quaternion, quaternion[3]); // now the we've got the location of the sensor in the lighthouses's reference frame, given lhPoint and quaternion inputs. @@ -666,7 +667,7 @@ FLT RotationEstimateFitness(Point lhPoint, FLT *quaternion, TrackedObject *obj) fitness = FLT_SQRT(fitness); - return fitness; + return 1/fitness; } void getRotationGradient(FLT *gradientOut, Point lhPoint, FLT *quaternion, TrackedObject *obj, FLT precision) @@ -693,10 +694,18 @@ void getRotationGradient(FLT *gradientOut, Point lhPoint, FLT *quaternion, Track return; } +//void getNormalizedAndScaledRotationGradient(FLT *vectorToScale, FLT desiredMagnitude) +//{ +// quatnormalize(vectorToScale, vectorToScale); +// quatscale(vectorToScale, vectorToScale, desiredMagnitude); +// return; +//} void getNormalizedAndScaledRotationGradient(FLT *vectorToScale, FLT desiredMagnitude) { quatnormalize(vectorToScale, vectorToScale); quatscale(vectorToScale, vectorToScale, desiredMagnitude); + //vectorToScale[3] = desiredMagnitude; + return; } @@ -723,8 +732,8 @@ static void RefineRotationEstimate(FLT *rotOut, Point lhPoint, FLT *initialEstim for (FLT g = 0.2; g > 0.00001; g *= 0.99) { i++; - FLT point1[3]; - copy3d(point1, rotOut); + FLT point1[4]; + quatcopy(point1, rotOut); // let's get 3 iterations of gradient descent here. 
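 		// (The switch from "FLT point1[3]; copy3d(...)" to "FLT point1[4]; quatcopy(...)"
 		// just above fixes a real problem: quatadd() below reads four components, so the
 		// three-element version left point1[3] -- the angle term -- uninitialized.)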
FLT gradient1[4]; @@ -733,7 +742,7 @@ static void RefineRotationEstimate(FLT *rotOut, Point lhPoint, FLT *initialEstim FLT point2[4]; quatadd(point2, gradient1, point1); - quatnormalize(point2,point2); + //quatnormalize(point2,point2); FLT gradient2[4]; getRotationGradient(gradient2, lhPoint, point2, obj, g/1000); @@ -741,7 +750,7 @@ static void RefineRotationEstimate(FLT *rotOut, Point lhPoint, FLT *initialEstim FLT point3[4]; quatadd(point3, gradient2, point2); - quatnormalize(point3,point3); + //quatnormalize(point3,point3); // remember that gradient descent has a tendency to zig-zag when it encounters a narrow valley? // Well, solving the lighthouse problem presents a very narrow valley, and the zig-zag of a basic @@ -760,7 +769,7 @@ static void RefineRotationEstimate(FLT *rotOut, Point lhPoint, FLT *initialEstim FLT point4[4]; quatadd(point4, specialGradient, point3); - quatnormalize(point4,point4); + //quatnormalize(point4,point4); FLT newMatchFitness = RotationEstimateFitness(lhPoint, point4, obj); @@ -770,13 +779,13 @@ static void RefineRotationEstimate(FLT *rotOut, Point lhPoint, FLT *initialEstim lastMatchFitness = newMatchFitness; quatcopy(rotOut, point4); //#ifdef TORI_DEBUG - printf("+"); + printf("+ %8.8f, %f\n", newMatchFitness, point4[3]); //#endif } else { //#ifdef TORI_DEBUG - printf("-"); + printf("- , %f\n", point4[3]); //#endif g *= 0.7; @@ -794,7 +803,7 @@ void SolveForRotation(FLT rotOut[4], TrackedObject *obj, Point lh) // This should have the lighthouse directly facing the tracked object. Point trackedObjRelativeToLh = { .x = -lh.x,.y = -lh.y,.z = -lh.z }; FLT theta = atan2(-lh.x, -lh.y); - FLT zAxis[4] = { 0, 0, 1 ,0}; + FLT zAxis[4] = { 0, 0, 1 , theta}; //FLT quat1[4]; //quatfromaxisangle(quat1, zAxis, theta); // not correcting for phi, but that's less important. -- cgit v1.2.3 From e5c15af3af93356bb056624726e0b6068354690f Mon Sep 17 00:00:00 2001 From: mwturvey Date: Mon, 27 Mar 2017 14:20:29 -0700 Subject: Getting very close --- src/poser_turveytori.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) (limited to 'src') diff --git a/src/poser_turveytori.c b/src/poser_turveytori.c index 824fabb..d737723 100644 --- a/src/poser_turveytori.c +++ b/src/poser_turveytori.c @@ -779,8 +779,10 @@ static void RefineRotationEstimate(FLT *rotOut, Point lhPoint, FLT *initialEstim lastMatchFitness = newMatchFitness; quatcopy(rotOut, point4); //#ifdef TORI_DEBUG - printf("+ %8.8f, %f\n", newMatchFitness, point4[3]); + printf("+ %8.8f, (%8.8f, %8.8f, %8.8f) %f\n", newMatchFitness, point4[0], point4[1], point4[2], point4[3]); //#endif + g *= 1.03; + } else { @@ -796,6 +798,17 @@ static void RefineRotationEstimate(FLT *rotOut, Point lhPoint, FLT *initialEstim printf("\nRi=%d\n", i); } +static void WhereIsTheTrackedObject(FLT *rotation, Point lhPoint) +{ + FLT reverseRotation[4] = {rotation[0], rotation[1], rotation[2], -rotation[3]}; + FLT objPoint[3] = {lhPoint.x, lhPoint.y, lhPoint.z}; + + rotatearoundaxis(objPoint, objPoint, reverseRotation, reverseRotation[3]); + + printf("The tracked object is at location (%f, %f, %f)\n", objPoint[0], objPoint[1], objPoint[2]); +} + + void SolveForRotation(FLT rotOut[4], TrackedObject *obj, Point lh) { @@ -812,6 +825,8 @@ void SolveForRotation(FLT rotOut[4], TrackedObject *obj, Point lh) // Step 2, optimize the quaternion to match the data. 
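 	// (WhereIsTheTrackedObject() below inverts the estimated rotation simply by negating the
 	// angle: for an axis-angle pair, rotating by -angle about the same axis is the exact
 	// inverse, so the lighthouse position maps back into the object's frame.)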
RefineRotationEstimate(rotOut, lh, zAxis, obj); + WhereIsTheTrackedObject(rotOut, lh); + } -- cgit v1.2.3 From e557b5128a32c99dcda0e6a9784a507258342301 Mon Sep 17 00:00:00 2001 From: mwturvey Date: Mon, 27 Mar 2017 14:36:54 -0700 Subject: Small tweak. Using axis/angle --- src/poser_turveytori.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'src') diff --git a/src/poser_turveytori.c b/src/poser_turveytori.c index d737723..62deccf 100644 --- a/src/poser_turveytori.c +++ b/src/poser_turveytori.c @@ -729,7 +729,7 @@ static void RefineRotationEstimate(FLT *rotOut, Point lhPoint, FLT *initialEstim // in fact, it probably could probably be 1 without any issue. The main place where g is decremented // is in the block below when we've made a jump that results in a worse fitness than we're starting at. // In those cases, we don't take the jump, and instead lower the value of g and try again. - for (FLT g = 0.2; g > 0.00001; g *= 0.99) + for (FLT g = 0.2; g > 0.000000001; g *= 0.99) { i++; FLT point1[4]; @@ -781,7 +781,7 @@ static void RefineRotationEstimate(FLT *rotOut, Point lhPoint, FLT *initialEstim //#ifdef TORI_DEBUG printf("+ %8.8f, (%8.8f, %8.8f, %8.8f) %f\n", newMatchFitness, point4[0], point4[1], point4[2], point4[3]); //#endif - g *= 1.03; + g *= 1.02; } else -- cgit v1.2.3 From f86a7a9927c484f46768b14a5944a03863beee34 Mon Sep 17 00:00:00 2001 From: mwturvey Date: Mon, 27 Mar 2017 15:30:54 -0700 Subject: sucky angle estimators (quat & angle axis) --- src/poser_turveytori.c | 265 ++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 242 insertions(+), 23 deletions(-) (limited to 'src') diff --git a/src/poser_turveytori.c b/src/poser_turveytori.c index 62deccf..da64850 100644 --- a/src/poser_turveytori.c +++ b/src/poser_turveytori.c @@ -602,7 +602,7 @@ FLT RotationEstimateFitnessOld(Point lhPoint, FLT *quaternion, TrackedObject *ob // interesting-- this is one place where we could use any sensors that are only hit by // just an x or y axis to make our estimate better. TODO: bring that data to this fn. -FLT RotationEstimateFitness(Point lhPoint, FLT *quaternion, TrackedObject *obj) +FLT RotationEstimateFitnessAxisAngle(Point lhPoint, FLT *quaternion, TrackedObject *obj) { FLT fitness = 0; for (size_t i = 0; i < obj->numSensors; i++) @@ -670,26 +670,121 @@ FLT RotationEstimateFitness(Point lhPoint, FLT *quaternion, TrackedObject *obj) return 1/fitness; } -void getRotationGradient(FLT *gradientOut, Point lhPoint, FLT *quaternion, TrackedObject *obj, FLT precision) +// interesting-- this is one place where we could use any sensors that are only hit by +// just an x or y axis to make our estimate better. TODO: bring that data to this fn. +FLT RotationEstimateFitnessQuaternion(Point lhPoint, FLT *quaternion, TrackedObject *obj) { + FLT fitness = 0; + for (size_t i = 0; i < obj->numSensors; i++) + { + // first, get the normal of the plane for the horizonal sweep + FLT theta = obj->sensor[i].theta; + // make two vectors that lie on the plane + FLT t1H[3] = { 1, tan(theta-LINMATHPI/2), 0 }; + FLT t2H[3] = { 1, tan(theta-LINMATHPI/2), 1 }; + + FLT tNormH[3]; + + // the normal is the cross of two vectors on the plane. 
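+		// (The -LINMATHPI/2 offset used above re-centers the sweep angle: at theta = pi/2
+		// the plane becomes y = 0, containing the forward axis. Note the identity
+		// tan(theta - pi/2) = -1/tan(theta), so this tilts the plane by 90 degrees rather
+		// than merely shifting it.)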
+ cross3d(tNormH, t1H, t2H); + + normalize3d(tNormH, tNormH); + + // Now do the same for the vertical sweep + + // first, get the normal of the plane for the horizonal sweep + FLT phi = obj->sensor[i].phi; + // make two vectors that lie on the plane + FLT t1V[3] = { 0, 1, tan(phi-LINMATHPI/2)}; + FLT t2V[3] = { 1, 1, tan(phi-LINMATHPI/2)}; + + FLT tNormV[3]; + + // the normal is the cross of two vectors on the plane. + cross3d(tNormV, t1V, t2V); + + normalize3d(tNormV, tNormV); + + + // First, where is the sensor in the object's reference frame? + FLT sensor_in_obj_reference_frame[3] = {obj->sensor->point.x, obj->sensor->point.y, obj->sensor->point.z}; + // Where is the point, in the reference frame of the lighthouse? + // This has two steps, first we translate from the object's location being the + // origin to the lighthouse being the origin. + // And second, we apply the quaternion to rotate into the proper reference frame for the lighthouse. + + FLT sensor_in_lh_reference_frame[3]; + sub3d(sensor_in_lh_reference_frame, sensor_in_obj_reference_frame, (FLT[3]){lhPoint.x, lhPoint.y, lhPoint.z}); + + quatrotatevector(sensor_in_lh_reference_frame, quaternion, sensor_in_lh_reference_frame); + //rotatearoundaxis(sensor_in_lh_reference_frame, sensor_in_lh_reference_frame, quaternion, quaternion[3]); - FLT baseFitness = RotationEstimateFitness(lhPoint, quaternion, obj); + // now the we've got the location of the sensor in the lighthouses's reference frame, given lhPoint and quaternion inputs. + + // We need an arbitrary vector from the plane to the point. + // Since the plane goes through the origin, this is trivial. + // The sensor point itself is such a vector! + + // And go calculate the distances! + // TODO: don't need to ABS these because we square them below. 
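+		// (One way to act on that TODO, since squaring already discards the sign:
+		//      fitness += SQUARED(dot3d(sensor_in_lh_reference_frame, tNormH));
+		// and likewise for the vertical term, dropping FLT_FABS entirely.)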
+ FLT dH = FLT_FABS(dot3d(sensor_in_lh_reference_frame, tNormH)); + FLT dV = FLT_FABS(dot3d(sensor_in_lh_reference_frame, tNormV)); + + + fitness += SQUARED(dH); + fitness += SQUARED(dV); + } + + fitness = FLT_SQRT(fitness); + + return 1/fitness; +} + + +void getRotationGradientQuaternion(FLT *gradientOut, Point lhPoint, FLT *quaternion, TrackedObject *obj, FLT precision) +{ + + FLT baseFitness = RotationEstimateFitnessQuaternion(lhPoint, quaternion, obj); FLT tmp0plus[4]; quatadd(tmp0plus, quaternion, (FLT[4]){precision, 0, 0, 0}); - gradientOut[0] = RotationEstimateFitness(lhPoint, tmp0plus, obj) - baseFitness; + gradientOut[0] = RotationEstimateFitnessQuaternion(lhPoint, tmp0plus, obj) - baseFitness; FLT tmp1plus[4]; quatadd(tmp1plus, quaternion, (FLT[4]){0, precision, 0, 0}); - gradientOut[1] = RotationEstimateFitness(lhPoint, tmp1plus, obj) - baseFitness; + gradientOut[1] = RotationEstimateFitnessQuaternion(lhPoint, tmp1plus, obj) - baseFitness; FLT tmp2plus[4]; quatadd(tmp2plus, quaternion, (FLT[4]){0, 0, precision, 0}); - gradientOut[2] = RotationEstimateFitness(lhPoint, tmp2plus, obj) - baseFitness; + gradientOut[2] = RotationEstimateFitnessQuaternion(lhPoint, tmp2plus, obj) - baseFitness; FLT tmp3plus[4]; quatadd(tmp3plus, quaternion, (FLT[4]){0, 0, 0, precision}); - gradientOut[3] = RotationEstimateFitness(lhPoint, tmp3plus, obj) - baseFitness; + gradientOut[3] = RotationEstimateFitnessQuaternion(lhPoint, tmp3plus, obj) - baseFitness; + + return; +} + +void getRotationGradientAxisAngle(FLT *gradientOut, Point lhPoint, FLT *quaternion, TrackedObject *obj, FLT precision) +{ + + FLT baseFitness = RotationEstimateFitnessAxisAngle(lhPoint, quaternion, obj); + + FLT tmp0plus[4]; + quatadd(tmp0plus, quaternion, (FLT[4]){precision, 0, 0, 0}); + gradientOut[0] = RotationEstimateFitnessAxisAngle(lhPoint, tmp0plus, obj) - baseFitness; + + FLT tmp1plus[4]; + quatadd(tmp1plus, quaternion, (FLT[4]){0, precision, 0, 0}); + gradientOut[1] = RotationEstimateFitnessAxisAngle(lhPoint, tmp1plus, obj) - baseFitness; + + FLT tmp2plus[4]; + quatadd(tmp2plus, quaternion, (FLT[4]){0, 0, precision, 0}); + gradientOut[2] = RotationEstimateFitnessAxisAngle(lhPoint, tmp2plus, obj) - baseFitness; + + FLT tmp3plus[4]; + quatadd(tmp3plus, quaternion, (FLT[4]){0, 0, 0, precision}); + gradientOut[3] = RotationEstimateFitnessAxisAngle(lhPoint, tmp3plus, obj) - baseFitness; return; } @@ -709,10 +804,20 @@ void getNormalizedAndScaledRotationGradient(FLT *vectorToScale, FLT desiredMagni return; } -static void RefineRotationEstimate(FLT *rotOut, Point lhPoint, FLT *initialEstimate, TrackedObject *obj) +static void WhereIsTheTrackedObjectAxisAngle(FLT *rotation, Point lhPoint) +{ + FLT reverseRotation[4] = {rotation[0], rotation[1], rotation[2], -rotation[3]}; + FLT objPoint[3] = {lhPoint.x, lhPoint.y, lhPoint.z}; + + rotatearoundaxis(objPoint, objPoint, reverseRotation, reverseRotation[3]); + + printf("The tracked object is at location (%f, %f, %f)\n", objPoint[0], objPoint[1], objPoint[2]); +} + +static void RefineRotationEstimateAxisAngle(FLT *rotOut, Point lhPoint, FLT *initialEstimate, TrackedObject *obj) { int i = 0; - FLT lastMatchFitness = RotationEstimateFitness(lhPoint, initialEstimate, obj); + FLT lastMatchFitness = RotationEstimateFitnessAxisAngle(lhPoint, initialEstimate, obj); quatcopy(rotOut, initialEstimate); @@ -729,7 +834,7 @@ static void RefineRotationEstimate(FLT *rotOut, Point lhPoint, FLT *initialEstim // in fact, it probably could probably be 1 without any issue. 
The main place where g is decremented // is in the block below when we've made a jump that results in a worse fitness than we're starting at. // In those cases, we don't take the jump, and instead lower the value of g and try again. - for (FLT g = 0.2; g > 0.000000001; g *= 0.99) + for (FLT g = 0.1; g > 0.000000001; g *= 0.99) { i++; FLT point1[4]; @@ -737,19 +842,26 @@ static void RefineRotationEstimate(FLT *rotOut, Point lhPoint, FLT *initialEstim // let's get 3 iterations of gradient descent here. FLT gradient1[4]; - getRotationGradient(gradient1, lhPoint, point1, obj, g/1000); + normalize3d(point1, point1); + + getRotationGradientAxisAngle(gradient1, lhPoint, point1, obj, g/10000); getNormalizedAndScaledRotationGradient(gradient1,g); FLT point2[4]; quatadd(point2, gradient1, point1); //quatnormalize(point2,point2); + normalize3d(point1, point1); + FLT gradient2[4]; - getRotationGradient(gradient2, lhPoint, point2, obj, g/1000); + getRotationGradientAxisAngle(gradient2, lhPoint, point2, obj, g/10000); getNormalizedAndScaledRotationGradient(gradient2,g); FLT point3[4]; quatadd(point3, gradient2, point2); + + normalize3d(point1, point1); + //quatnormalize(point3,point3); // remember that gradient descent has a tendency to zig-zag when it encounters a narrow valley? @@ -770,8 +882,9 @@ static void RefineRotationEstimate(FLT *rotOut, Point lhPoint, FLT *initialEstim FLT point4[4]; quatadd(point4, specialGradient, point3); //quatnormalize(point4,point4); + normalize3d(point1, point1); - FLT newMatchFitness = RotationEstimateFitness(lhPoint, point4, obj); + FLT newMatchFitness = RotationEstimateFitnessAxisAngle(lhPoint, point4, obj); if (newMatchFitness > lastMatchFitness) { @@ -797,18 +910,117 @@ static void RefineRotationEstimate(FLT *rotOut, Point lhPoint, FLT *initialEstim } printf("\nRi=%d\n", i); } - -static void WhereIsTheTrackedObject(FLT *rotation, Point lhPoint) +static void WhereIsTheTrackedObjectQuaternion(FLT *rotation, Point lhPoint) { FLT reverseRotation[4] = {rotation[0], rotation[1], rotation[2], -rotation[3]}; FLT objPoint[3] = {lhPoint.x, lhPoint.y, lhPoint.z}; - rotatearoundaxis(objPoint, objPoint, reverseRotation, reverseRotation[3]); - + //rotatearoundaxis(objPoint, objPoint, reverseRotation, reverseRotation[3]); + quatrotatevector(objPoint, rotation, objPoint); printf("The tracked object is at location (%f, %f, %f)\n", objPoint[0], objPoint[1], objPoint[2]); } + +static void RefineRotationEstimateQuaternion(FLT *rotOut, Point lhPoint, FLT *initialEstimate, TrackedObject *obj) +{ + int i = 0; + FLT lastMatchFitness = RotationEstimateFitnessQuaternion(lhPoint, initialEstimate, obj); + + quatcopy(rotOut, initialEstimate); + + // The values below are somewhat magic, and definitely tunable + // The initial vlue of g will represent the biggest step that the gradient descent can take at first. + // bigger values may be faster, especially when the initial guess is wildly off. + // The downside to a bigger starting guess is that if we've picked a good guess at the local minima + // if there are other local minima, we may accidentally jump to such a local minima and get stuck there. + // That's fairly unlikely with the lighthouse problem, from expereince. + // The other downside is that if it's too big, we may have to spend a few iterations before it gets down + // to a size that doesn't jump us out of our minima. 
+ // The terminal value of g represents how close we want to get to the local minima before we're "done" + // The change in value of g for each iteration is intentionally very close to 1. + // in fact, it probably could probably be 1 without any issue. The main place where g is decremented + // is in the block below when we've made a jump that results in a worse fitness than we're starting at. + // In those cases, we don't take the jump, and instead lower the value of g and try again. + for (FLT g = 0.1; g > 0.000000001; g *= 0.99) + { + i++; + FLT point1[4]; + quatcopy(point1, rotOut); + // let's get 3 iterations of gradient descent here. + FLT gradient1[4]; + + //normalize3d(point1, point1); + + getRotationGradientQuaternion(gradient1, lhPoint, point1, obj, g/10000); + getNormalizedAndScaledRotationGradient(gradient1,g); + + FLT point2[4]; + quatadd(point2, gradient1, point1); + quatnormalize(point2,point2); + + //normalize3d(point1, point1); + + FLT gradient2[4]; + getRotationGradientQuaternion(gradient2, lhPoint, point2, obj, g/10000); + getNormalizedAndScaledRotationGradient(gradient2,g); + + FLT point3[4]; + quatadd(point3, gradient2, point2); + + //normalize3d(point1, point1); + + quatnormalize(point3,point3); + + // remember that gradient descent has a tendency to zig-zag when it encounters a narrow valley? + // Well, solving the lighthouse problem presents a very narrow valley, and the zig-zag of a basic + // gradient descent is kinda horrible here. Instead, think about the shape that a zig-zagging + // converging gradient descent makes. Instead of using the gradient as the best indicator of + // the direction we should follow, we're looking at one side of the zig-zag pattern, and specifically + // following *that* vector. As it turns out, this works *amazingly* well. + + FLT specialGradient[4]; + quatsub(specialGradient,point3,point1); + + // The second parameter to this function is very much a tunable parameter. Different values will result + // in a different number of iterations before we get to the minimum. Numbers between 3-10 seem to work well + // It's not clear what would be optimum here. + getNormalizedAndScaledRotationGradient(specialGradient,g/4); + + FLT point4[4]; + quatadd(point4, specialGradient, point3); + quatnormalize(point4,point4); + //normalize3d(point1, point1); + + FLT newMatchFitness = RotationEstimateFitnessQuaternion(lhPoint, point4, obj); + + if (newMatchFitness > lastMatchFitness) + { + + lastMatchFitness = newMatchFitness; + quatcopy(rotOut, point4); +//#ifdef TORI_DEBUG + //printf("+ %8.8f, (%8.8f, %8.8f, %8.8f) %f\n", newMatchFitness, point4[0], point4[1], point4[2], point4[3]); +//#endif + g *= 1.02; + printf("+"); + WhereIsTheTrackedObjectQuaternion(rotOut, lhPoint); + } + else + { +//#ifdef TORI_DEBUG + //printf("- , %f\n", point4[3]); +//#endif + g *= 0.7; + printf("-"); + } + + + } + printf("\nRi=%d Fitness=%3f\n", i, lastMatchFitness); +} + + void SolveForRotation(FLT rotOut[4], TrackedObject *obj, Point lh) { @@ -816,16 +1028,23 @@ void SolveForRotation(FLT rotOut[4], TrackedObject *obj, Point lh) // This should have the lighthouse directly facing the tracked object. 
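 	// (atan2(-lh.x, -lh.y) below should be the yaw from the lighthouse back toward the
 	// object, per the comment above; seeding the axis-angle estimate with roughly that
 	// angle about z starts the descent from an orientation that already faces the object
 	// instead of starting from identity.)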
Point trackedObjRelativeToLh = { .x = -lh.x,.y = -lh.y,.z = -lh.z }; FLT theta = atan2(-lh.x, -lh.y); - FLT zAxis[4] = { 0, 0, 1 , theta}; - //FLT quat1[4]; - //quatfromaxisangle(quat1, zAxis, theta); + FLT zAxis[4] = { 0, 0, 1 , theta-LINMATHPI/2}; + FLT quat1[4]; + quatfromaxisangle(quat1, zAxis, theta); + + //quatfrom2vectors(0,0) // not correcting for phi, but that's less important. - // Step 2, optimize the quaternion to match the data. - RefineRotationEstimate(rotOut, lh, zAxis, obj); + // Step 2, optimize the axis/ angle to match the data. + RefineRotationEstimateAxisAngle(rotOut, lh, zAxis, obj); + + WhereIsTheTrackedObjectAxisAngle(rotOut, lh); + + //// Step 2, optimize the quaternion to match the data. + //RefineRotationEstimateQuaternion(rotOut, lh, quat1, obj); - WhereIsTheTrackedObject(rotOut, lh); + //WhereIsTheTrackedObjectQuaternion(rotOut, lh); } -- cgit v1.2.3 From d8c4b23789fd3aedb150144bed6beb286d1504a9 Mon Sep 17 00:00:00 2001 From: mwturvey Date: Mon, 27 Mar 2017 16:21:52 -0700 Subject: Reliable detection of orientation of lighthouse Lots of cruft. Lots of room for improving performance. --- src/poser_turveytori.c | 44 +++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 41 insertions(+), 3 deletions(-) (limited to 'src') diff --git a/src/poser_turveytori.c b/src/poser_turveytori.c index da64850..5c3350b 100644 --- a/src/poser_turveytori.c +++ b/src/poser_turveytori.c @@ -600,9 +600,47 @@ FLT RotationEstimateFitnessOld(Point lhPoint, FLT *quaternion, TrackedObject *ob return fitness; } +FLT RotationEstimateFitnessAxisAngle(Point lh, FLT *AxisAngle, TrackedObject *obj) +{ + // For this fitness calculator, we're going to use the rotation information to figure out where + // we expect to see the tracked object sensors, and we'll do a sum of squares to grade + // the quality of the guess formed by the AxisAngle; + + FLT fitness = 0; + + // for each point in the tracked object + for (int i=0; i< obj->numSensors; i++) + { + + + + // let's see... we need to figure out where this sensor should be in the LH reference frame. + FLT sensorLocation[3] = {obj->sensor[i].point.x-lh.x, obj->sensor[i].point.y-lh.y, obj->sensor[i].point.z-lh.z}; + + // And this puppy needs to be rotated... + + rotatearoundaxis(sensorLocation, sensorLocation, AxisAngle, AxisAngle[3]); + + // Now, the vector indicating the position of the sensor, as seen by the lighthouse is: + FLT realVectFromLh[3] = {1, tan(obj->sensor[i].theta - LINMATHPI/2), tan(obj->sensor[i].phi - LINMATHPI/2)}; + + // and the vector we're calculating given the rotation passed in is the same as the sensor location: + FLT calcVectFromLh[3] = {sensorLocation[0], sensorLocation[1], sensorLocation[2]}; + + FLT angleBetween = anglebetween3d( realVectFromLh, calcVectFromLh ); + + fitness += SQUARED(angleBetween); + } + + return 1/FLT_SQRT(fitness); +} + +// This figures out how far away from the scanned planes each point is, then does a sum of squares +// for the fitness. +// // interesting-- this is one place where we could use any sensors that are only hit by // just an x or y axis to make our estimate better. TODO: bring that data to this fn. 
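 // (The angle-based fitness above compares the direction the lighthouse actually measured,
 // (1, tan(theta - pi/2), tan(phi - pi/2)), against the direction of the rotated sensor
 // position and sums squared angular errors, while the plane-distance formulation kept
 // below as "...Original" sums squared point-to-plane distances. The angle version is the
 // one this commit's message credits with reliable orientation detection.)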
-FLT RotationEstimateFitnessAxisAngle(Point lhPoint, FLT *quaternion, TrackedObject *obj) +FLT RotationEstimateFitnessAxisAngleOriginal(Point lhPoint, FLT *quaternion, TrackedObject *obj) { FLT fitness = 0; for (size_t i = 0; i < obj->numSensors; i++) @@ -892,7 +930,7 @@ static void RefineRotationEstimateAxisAngle(FLT *rotOut, Point lhPoint, FLT *ini lastMatchFitness = newMatchFitness; quatcopy(rotOut, point4); //#ifdef TORI_DEBUG - printf("+ %8.8f, (%8.8f, %8.8f, %8.8f) %f\n", newMatchFitness, point4[0], point4[1], point4[2], point4[3]); + //printf("+ %8.8f, (%8.8f, %8.8f, %8.8f) %f\n", newMatchFitness, point4[0], point4[1], point4[2], point4[3]); //#endif g *= 1.02; @@ -900,7 +938,7 @@ static void RefineRotationEstimateAxisAngle(FLT *rotOut, Point lhPoint, FLT *ini else { //#ifdef TORI_DEBUG - printf("- , %f\n", point4[3]); + //printf("- , %f\n", point4[3]); //#endif g *= 0.7; -- cgit v1.2.3
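For readers who want to try the optimizer's control flow in isolation: every refine function in this series reduces to the same pattern -- a numeric, finite-difference gradient over the rotation parameters, a step normalized to magnitude g, and a g that grows about 2% on an accepted step and shrinks 30% on a rejected one. The sketch below is a minimal, self-contained distillation of that schedule, not libsurvive code: toy_fitness is a hypothetical one-parameter objective standing in for RotationEstimateFitnessAxisAngle, and plain double stands in for FLT.

#include <stdio.h>

/* Hypothetical stand-in for RotationEstimateFitnessAxisAngle: a smooth
 * one-parameter objective whose maximum sits at angle = 0.7. */
static double toy_fitness(double angle)
{
	double d = angle - 0.7;
	return 1.0 / (1.0 + d * d);
}

int main(void)
{
	double angle = 0.0;                /* initial estimate, like zAxis[3] */
	double best  = toy_fitness(angle);

	/* Same schedule as RefineRotationEstimateAxisAngle: g decays slowly each
	 * pass, grows 2% on an accepted step, and shrinks 30% on a rejected one;
	 * the rejections are what really drive g down to the terminal value. */
	for (double g = 0.1; g > 1e-9; g *= 0.99)
	{
		/* one-sided finite difference, as in getRotationGradientAxisAngle */
		double grad = toy_fitness(angle + g / 10000.0) - best;

		/* "normalize" the gradient to magnitude g (just its sign, in 1-D) */
		double candidate = angle + (grad > 0 ? g : -g);
		double fit = toy_fitness(candidate);

		if (fit > best) { best = fit; angle = candidate; g *= 1.02; }
		else            { g *= 0.7; }
	}

	printf("angle = %f, fitness = %f\n", angle, best);
	return 0;
}

Built with any C compiler (cc sketch.c), this walks angle to the toy optimum at 0.7 through the same accept/grow, reject/shrink sequence that produces the "+" and "-" traces printed by the posers above.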