  @Override
  public void decode(double[] param, CalibratedPoseAndPoint outputModel) {

    int paramIndex = 0;
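    // parameter layout, used identically by decode() and encode():
    // [ rodX, rodY, rodZ, Tx, Ty, Tz ] for every view that is not already known,
    // followed by [ x, y, z ] for every 3D point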

    // first decode the transformation
    for (int i = 0; i < numViews; i++) {
      // don't decode if it is already known
      if (knownView[i]) continue;

      Se3_F64 se = outputModel.getWorldToCamera(i);

      rotation.setParamVector(param[paramIndex++], param[paramIndex++], param[paramIndex++]);

      RotationMatrixGenerator.rodriguesToMatrix(rotation, se.getR());

      Vector3D_F64 T = se.getT();
      T.x = param[paramIndex++];
      T.y = param[paramIndex++];
      T.z = param[paramIndex++];
    }

    // now decode the points
    for (int i = 0; i < numPoints; i++) {
      Point3D_F64 p = outputModel.getPoint(i);
      p.x = param[paramIndex++];
      p.y = param[paramIndex++];
      p.z = param[paramIndex++];
    }
  }

  @Override
  public void encode(CalibratedPoseAndPoint model, double[] param) {
    int paramIndex = 0;

    // first encode the transformation
    for (int i = 0; i < numViews; i++) {
      // don't encode if it is already known
      if (knownView[i]) continue;

      Se3_F64 se = model.getWorldToCamera(i);

      // force the "rotation matrix" to be an exact rotation matrix
      // otherwise Rodrigues will have issues with the noise
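      // with R ~ U*S*V' from the SVD, U*V' is the closest true rotation matrix in the
      // Frobenius norm (assuming the noise is small enough that det(U*V') = +1)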
      if (!svd.decompose(se.getR())) throw new RuntimeException("SVD failed");

      DenseMatrix64F U = svd.getU(null, false);
      DenseMatrix64F V = svd.getV(null, false);

      CommonOps.multTransB(U, V, R);

      // extract Rodrigues coordinates
      RotationMatrixGenerator.matrixToRodrigues(R, rotation);

      param[paramIndex++] = rotation.unitAxisRotation.x * rotation.theta;
      param[paramIndex++] = rotation.unitAxisRotation.y * rotation.theta;
      param[paramIndex++] = rotation.unitAxisRotation.z * rotation.theta;

      Vector3D_F64 T = se.getT();

      param[paramIndex++] = T.x;
      param[paramIndex++] = T.y;
      param[paramIndex++] = T.z;
    }

    for (int i = 0; i < numPoints; i++) {
      Point3D_F64 p = model.getPoint(i);

      param[paramIndex++] = p.x;
      param[paramIndex++] = p.y;
      param[paramIndex++] = p.z;
    }
  }
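
A minimal usage sketch for the codec above, assuming a populated CalibratedPoseAndPoint named model; the element counts (6 per unknown view, 3 per point) are read directly off the loops in encode()/decode(), not taken from any library documentation.

  // assumed caller code, not part of the codec itself
  int numUnknownViews = 0;
  for (int i = 0; i < numViews; i++)
    if (!knownView[i]) numUnknownViews++;

  // 3 Rodrigues + 3 translation parameters per unknown view, 3 coordinates per point
  double[] param = new double[6 * numUnknownViews + 3 * numPoints];

  encode(model, param);  // write the model into the parameter vector
  decode(param, model);  // read it back; known views are left untouched
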
Example #3
  public void process(CalibratedPoseAndPoint model, double[] output) {
    int outputIndex = 0;

    for (int view = 0; view < model.getNumViews(); view++) {
      Se3_F64 worldToCamera = model.getWorldToCamera(view);

      FastQueue<PointIndexObservation> observedPts = observations.get(view).getPoints();

      for (int i = 0; i < observedPts.size; i++) {
        PointIndexObservation o = observedPts.data[i];

        Point3D_F64 worldPt = model.getPoint(o.pointIndex);

        SePointOps_F64.transform(worldToCamera, worldPt, cameraPt);

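        // residual: predicted pinhole projection (x/z, y/z) minus the observed location,
        // both in normalized image coordinates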
        output[outputIndex++] = cameraPt.x / cameraPt.z - o.obs.x;
        output[outputIndex++] = cameraPt.y / cameraPt.z - o.obs.y;
      }
    }
  }
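
To put process() in context, here is a small assumed driver that sizes the residual array (two entries per observation, since the inner loop above writes an x and a y residual) and evaluates the sum-of-squares cost a least-squares optimizer would minimize; model and observations are the same objects used inside process().

  // assumed driver code, not part of the class above
  int totalObservations = 0;
  for (int view = 0; view < model.getNumViews(); view++)
    totalObservations += observations.get(view).getPoints().size;

  double[] residuals = new double[2 * totalObservations];
  process(model, residuals);

  // sum-of-squares reprojection error in normalized image coordinates
  double cost = 0;
  for (double r : residuals)
    cost += r * r;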