/**
 * Algebraic epipolar residual for a single associated pair: the dot product of
 * (F^T * p2) with p1 in homogeneous form. Zero for a perfect correspondence;
 * the sign and magnitude are algebraic, not a geometric distance.
 */
@Override public double computeResidual(AssociatedPair observation) {
	// temp = F^T * p2
	GeometryMath_F64.multTran(F, observation.p2, temp);
	// evaluate the resulting 3-vector against the homogeneous point (p1.x, p1.y, 1)
	double residual = temp.x*observation.p1.x;
	residual += temp.y*observation.p1.y;
	residual += temp.z;
	return residual;
} }
/**
 * Computes the algebraic epipolar constraint error p1_h . (F^T * p2) where
 * p1_h = (p1.x, p1.y, 1). Returns exactly zero for an ideal correspondence.
 */
@Override public double computeResidual(AssociatedPair observation) {
	GeometryMath_F64.multTran(F, observation.p2, temp);
	final double x1 = observation.p1.x;
	final double y1 = observation.p1.y;
	return temp.x*x1 + temp.y*y1 + temp.z;
} }
/**
 * Normalized epipolar residual for an essential matrix. The algebraic error
 * p1_h . (E^T * p2) is divided by the sum of squared x-y components of the two
 * projected epipolar lines (computed with K2E and EK1), reducing the bias of the
 * raw algebraic error. Returns Double.MAX_VALUE when the normalizer is exactly zero.
 */
@Override public double computeResidual(AssociatedPair observation) {
	// accumulate the squared x-y line components from both images
	GeometryMath_F64.mult(K2E, observation.p1, temp);
	double norm = temp.x*temp.x + temp.y*temp.y;
	GeometryMath_F64.multTran(EK1, observation.p2, temp);
	norm += temp.x*temp.x + temp.y*temp.y;

	// degenerate configuration: avoid division by zero
	if( norm == 0 )
		return Double.MAX_VALUE;

	// algebraic error scaled by the normalization term
	GeometryMath_F64.multTran(E, observation.p2, temp);
	return (temp.x*observation.p1.x + temp.y*observation.p1.y + temp.z)/norm;
} }
/**
 * Normalized epipolar residual for a fundamental matrix. The algebraic error
 * p1_h . (F^T * p2) is divided by the sum of squared x-y components of
 * F*p1 and F^T*p2. Returns Double.MAX_VALUE when the normalizer is exactly zero.
 */
@Override public double computeResidual(AssociatedPair observation) {
	GeometryMath_F64.mult(F, observation.p1, temp);
	double bottom = temp.x*temp.x + temp.y*temp.y;
	GeometryMath_F64.multTran(F, observation.p2, temp);
	bottom += temp.x*temp.x + temp.y*temp.y;

	if( bottom == 0 )
		return Double.MAX_VALUE;

	// 'temp' still holds F^T * p2 from the call above; the original recomputed it redundantly
	return (temp.x*observation.p1.x + temp.y*observation.p1.y + temp.z)/bottom;
} }
/**
 * Normalized epipolar residual for a fundamental matrix, with the normalizer taken
 * as the square root of the summed squared x-y components of F*p1 and F^T*p2.
 * Returns Double.MAX_VALUE when the normalizer is at or below machine epsilon.
 */
@Override public double computeResidual(AssociatedPair observation) {
	GeometryMath_F64.mult(F, observation.p1, temp);
	double bottom = temp.x*temp.x + temp.y*temp.y;
	GeometryMath_F64.multTran(F, observation.p2, temp);
	bottom += temp.x*temp.x + temp.y*temp.y;
	bottom = Math.sqrt(bottom);

	if( bottom <= UtilEjml.EPS )
		return Double.MAX_VALUE;

	// 'temp' still holds F^T * p2 from the call above; the original recomputed it redundantly
	return (temp.x*observation.p1.x + temp.y*observation.p1.y + temp.z)/bottom;
} }
/**
 * Transforms the point in place: p = T * (R^T * p).
 *
 * @param T matrix applied second
 * @param R matrix whose transpose is applied first
 * @param p (Input/Output) point transformed in place
 */
void originalCoordinates( DMatrixRMaj T , DMatrixRMaj R , Point3D_F64 p ) {
	// undo the rotation first: p = R^T * p
	GeometryMath_F64.multTran(R, p, p);
	// then apply T: p = T * p
	GeometryMath_F64.mult(T, p, p);
}
GeometryMath_F64.multTran(F, p, left);
GeometryMath_F64.multTran(F, p, left);
/**
 * Transfer a point to the second view given its observed location in views one and three.
 *
 * @param x1 (Input) Observation in view 1. pixels.
 * @param y1 (Input) Observation in view 1. pixels.
 * @param x3 (Input) Observation in view 3. pixels.
 * @param y3 (Input) Observation in view 3. pixels.
 * @param p2 (Output) Estimated location in view 2.
 */
public void transfer_1_to_2(double x1 , double y1 , double x3 , double y3 , Point3D_F64 p2) {
	// Adjust the observations so that they lie on the epipolar lines exactly
	adjuster.process(F31,x1,y1,x3,y3,pa,pb);

	// la = F31^T * pa — epipolar line induced by the adjusted view-1 point
	// NOTE(review): which image the line lives in depends on F31's sign convention — verify
	GeometryMath_F64.multTran(F31,pa,la);

	// line through pb and perpendicular to la
	l.x = la.y;
	l.y = -la.x;
	l.z = -pb.x*la.y + pb.y*la.x;

	// point-line transfer with the trifocal tensor yields the view-2 location
	MultiViewOps.transfer_1_to_2(tensor,pa,l,p2);
} }
/**
 * Applies the scale and median-point translation to every view's world-to-view transform
 * by moving each camera center and re-encoding the translation vector.
 * Relies on medianPoint, medianDistancePoint, and desiredDistancePoint fields being set.
 */
private void applyScaleTranslation3D(SceneStructureMetric structure) {
	double scale = desiredDistancePoint / medianDistancePoint;
	Point3D_F64 c = new Point3D_F64();
	for (int i = 0; i < structure.views.length; i++) {
		SceneStructureMetric.View view = structure.views[i];
		// X_w = R'*(X_c - T) let X_c = 0 then X_w = -R'*T is center of camera in world
		// here c = R'*T, i.e. the NEGATED camera center
		GeometryMath_F64.multTran(view.worldToView.R,view.worldToView.T,c);
		// Apply transform: c now holds the transformed camera center, scale*(center - medianPoint)
		c.x = -scale*(c.x + medianPoint.x);
		c.y = -scale*(c.y + medianPoint.y);
		c.z = -scale*(c.z + medianPoint.z);
		// -R*T — re-encode the translation as T = -R*c for the new center c
		GeometryMath_F64.mult(view.worldToView.R,c,view.worldToView.T);
		view.worldToView.T.scale(-1);
	}
}
/**
 * <p>
 * Given two observations of the same point from two views and a known motion between the
 * two views, triangulate the point's 3D position in camera 'a' reference frame.
 * </p>
 *
 * @param a Observation from camera view 'a' in normalized coordinates. Not modified.
 * @param b Observation from camera view 'b' in normalized coordinates. Not modified.
 * @param fromAtoB Transformation from camera view 'a' to 'b' Not modified.
 * @param foundInA (Output) Found 3D position of the point in reference frame 'a'. Modified.
 */
public void triangulate( Point2D_F64 a , Point2D_F64 b ,
						 Se3_F64 fromAtoB ,
						 Point3D_F64 foundInA ) {
	// camera B's center in A's frame is -R^T*T; start with -T then rotate below
	Vector3D_F64 t = fromAtoB.getT();
	rayB.p.set(-t.x, -t.y, -t.z);
	// rotate observation in B into camera A's view
	// (multTran overwrites rayB.p in place with R^T * rayB.p)
	GeometryMath_F64.multTran(fromAtoB.getR(),rayB.p,rayB.p);
	GeometryMath_F64.multTran(fromAtoB.getR(),b,rayB.slope);
	// ray A starts at the origin of frame 'a' pointing through the normalized observation
	rayA.slope.set(a.x,a.y,1);
	// intersect (closest point of) the two rays to triangulate the 3D point
	ClosestPoint3D_F64.closestPoint(rayA,rayB,foundInA);
} }
/**
 * Undoes scale transform for metric.
 *
 * Inverse of the forward scale/translation: divides by the scale and adds the median
 * point back, then re-encodes each view's translation. No-op for homogenous structures.
 *
 * @param structure scene's structure
 * @param observations observations of the scene
 */
public void undoScale( SceneStructureMetric structure , SceneObservations observations ) {
	if( structure.homogenous )
		return;
	double scale = desiredDistancePoint / medianDistancePoint;
	undoNormPoints3D(structure, scale);
	Point3D_F64 c = new Point3D_F64();
	for (int i = 0; i < structure.views.length; i++) {
		SceneStructureMetric.View view = structure.views[i];
		// X_w = R'*(X_c - T) let X_c = 0 then X_w = -R'*T is center of camera in world
		// here c = R'*T, i.e. the NEGATED camera center
		GeometryMath_F64.multTran(view.worldToView.R,view.worldToView.T,c);
		// Apply transform: c now holds the restored camera center, center/scale + medianPoint
		c.x = (-c.x/scale + medianPoint.x);
		c.y = (-c.y/scale + medianPoint.y);
		c.z = (-c.z/scale + medianPoint.z);
		// -R*T — re-encode the translation as T = -R*c for the restored center c
		GeometryMath_F64.mult(view.worldToView.R,c,view.worldToView.T);
		view.worldToView.T.scale(-1);
	}
}
/**
 * <p>
 * Trifocal tensor with point-line-point correspondence:<br>
 * (l2<sup>T</sup>(sum p1<sup>i</sup>*T<sub>i</sub>)[p3]<sub>x</sub> = 0
 * </p>
 *
 * @param tensor Trifocal tensor
 * @param p1 A point in the first view.
 * @param l2 A line in the second view.
 * @param p3 A point in the third view.
 * @param ret Storage for the result. If null a new instance is created.
 * @return Result of applying the constraint. With perfect inputs will be zero.
 */
public static Vector3D_F64 constraint(TrifocalTensor tensor,
									  Point2D_F64 p1, Vector3D_F64 l2, Point2D_F64 p3,
									  Vector3D_F64 ret) {
	if( ret == null )
		ret = new Vector3D_F64();

	// sum = p1.x*T1 + p1.y*T2 + T3, i.e. p1 in homogeneous coordinates contracted with the tensor
	DenseMatrix64F sum = new DenseMatrix64F(3,3);

	CommonOps.add(p1.x,tensor.T1,sum,sum);
	CommonOps.add(p1.y,tensor.T2,sum,sum);
	CommonOps.add(tensor.T3,sum,sum);

	// tempV = sum^T * l2, then cross with p3 in homogeneous form to apply [p3]_x
	Vector3D_F64 tempV = new Vector3D_F64();
	GeometryMath_F64.multTran(sum, l2, tempV);
	GeometryMath_F64.cross(tempV, new Vector3D_F64(p3.x, p3.y, 1), ret);

	return ret;
}
/**
 * <p>
 * Trifocal tensor with point-line-point correspondence:<br>
 * (l2<sup>T</sup>(sum p1<sup>i</sup>*T<sub>i</sub>)[p3]<sub>x</sub> = 0
 * </p>
 *
 * @param tensor Trifocal tensor
 * @param p1 A point in the first view.
 * @param l2 A line in the second view.
 * @param p3 A point in the third view.
 * @param ret Storage for the result. If null a new instance is created.
 * @return Result of applying the constraint. With perfect inputs will be zero.
 */
public static Vector3D_F64 constraint(TrifocalTensor tensor,
									  Point2D_F64 p1, Vector3D_F64 l2, Point2D_F64 p3,
									  Vector3D_F64 ret) {
	if( ret == null )
		ret = new Vector3D_F64();

	// sum = p1.x*T1 + p1.y*T2 + T3, i.e. p1 in homogeneous coordinates contracted with the tensor
	DMatrixRMaj sum = new DMatrixRMaj(3,3);

	CommonOps_DDRM.add(p1.x,tensor.T1,sum,sum);
	CommonOps_DDRM.add(p1.y,tensor.T2,sum,sum);
	CommonOps_DDRM.add(tensor.T3,sum,sum);

	// tempV = sum^T * l2, then cross with p3 in homogeneous form to apply [p3]_x
	Vector3D_F64 tempV = new Vector3D_F64();
	GeometryMath_F64.multTran(sum, l2, tempV);
	GeometryMath_F64.cross(tempV, new Vector3D_F64(p3.x, p3.y, 1), ret);

	return ret;
}
F21.set(2,i,column.z); GeometryMath_F64.multTran(T,e2,temp0); GeometryMath_F64.cross(e3,temp0,column);
F2.set(2,i,column.z); GeometryMath_F64.multTran(T,e2,temp0); GeometryMath_F64.cross(e3,temp0,column);
P2.set(i, 3, e2.getIdx(i)); GeometryMath_F64.multTran(T, e2, temp0); GeometryMath_F64.mult(temp1, temp0, column);