//calibrate.cpp: a reimplementation of Zhang's calibration method, written so it is easy to experiment with
//
//input: a model file, plus any number of data input files (one per image) with the (x, y) coords of the detected
//corners, one point per line
//--all files must list their points in the same order
//
//the first line of the model file should contain the number of points to read
//
//output: a calibration info file of the format:
//a c b u0 v0
//k1 k2
//where a is the horizontal focal length, b is the vertical focal length, c is the skew, (u0, v0) is the principal
//point, and k1 and k2 are the lens distortion parameters
//
//R0
//t0
//
//R1
//t1
//etc.
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////// CURRENTLY WORKING ON calculatePointEstimateErrorsWithCompleteModel ////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////

#include <iostream>
#include <cstdio>
#include <cstdlib>
#include <cmath>

using std::cout; //import only what this file uses from std, so std::vector
using std::endl; //does not collide with the vector class from matrivec.h

#include "matrivec.h"
#include "quaternion.h"
#include "lmdif.c"

double distance(vector& a, vector& b, int numDims)
{
	if(a.length < numDims || b.length < numDims)
	{
		cout << "Error in distance(" << numDims << "): vectors have lengths " << a.length << " and " << b.length << "!\n";
		exit(0);
	}
	double sum = 0;
	for(int i = 0; i < numDims; i++)
		sum += (a[i] - b[i]) * (a[i] - b[i]);
	return sqrt(sum);
}

vector* modelCoords, **inputCoords; //must be global to be used by calculatePointEstimateErrorsWithSimplifiedHomography()
vector** normalizedInputCoords;     //filled by normalizeInputCoordinates()
vector* normalizedModelCoords;
matrix* imageNormalizations;        //the Ts of normalizeInputCoordinates()
matrix modelNormalization;
int curImgNum;

#define modelPts normalizedModelCoords
#define imagePts normalizedInputCoords

void normalizeInputCoordinates(int numInputFiles, int numPoints);
void estimateImageHomographies(int numInputFiles, int numPoints, matrix* imageHomographies);
void estimateIntrinsicParameters(int numInputFiles, int numPoints, matrix* imageHomographies, vector& intrinsicParameters);
void estimateImageExtrinsicParameters(matrix& A, int numInputFiles, matrix* imageHomographies, matrix* imageRotations, vector* imageTranslations);
void optimizeAllEstimatedParameters(int numInputFiles, int numPoints, matrix& A, matrix* imageRotations, vector* imageTranslations);
void estimateDistortionParameters(int numInputFiles, int numPoints, double* distortionParameters, vector& intrinsicParameters, matrix* imageHomographies);
void optimizeAll(int numInputFiles, int numPoints, matrix& A, matrix* imageRotations, vector* imageTranslations, double* distortionParameters);

//subroutines to lmdif():
//with estimateImageHomographies()
void calculatePointEstimateErrorsWithSimplifiedHomography(int* numPoints, int* estimateVectorSize, double* homographyMatrixEstimates, double* calculatedErrors, int* flag);
//with optimizeAllEstimatedParameters()
void calculatePointEstimateErrorsWithNonsimplifiedHomography(int* numValues, int* estimateVectorSize, double* parameterEstimates, double* calculatedErrors, int* flag);
//with optimizeAll()
void calculatePointEstimateErrorsWithCompleteModel(int* numValues, int* estimateVectorSize, double* parameterEstimates, double* calculatedErrors, int* flag);

int main(int argc, char* argv[])
{
	//argv layout: [1] model file, [2]..[argc - 2] one corner file per image, [argc - 1] output file
	if(argc < 5)
	{
		cout << "Error in calibrate: not enough inputs!\n";
		exit(0);
	}

	//open and read model file
	const int numImages = argc - 3;
	int numPoints;
	FILE* modelFile = fopen(argv[1], "r");
	if(!modelFile)
	{
		cout << "could not open model file\n";
		exit(0);
	}
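	//for reference, a minimal 4-point model file would look like this (hypothetical values;
	//the first line is the point count, then one "x y" pair per line, matching the fscanf
	//calls below):
	//	4
	//	0.0 0.0
	//	1.0 0.0
	//	1.0 1.0
	//	0.0 1.0
	//each image's input file lists the detected corner pixels in the same order, also one
	//"x y" pair per line (with no leading count)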
fscanf(modelFile, "%d\n", &numPoints); modelCoords = new vector[numPoints]; normalizedModelCoords = new vector[numPoints]; for(int i = 0; i < numPoints; i++) { modelCoords[i].resize(3); normalizedModelCoords[i].resize(3); fscanf(modelFile, "%lf %lf\n", &modelCoords[i][0], &modelCoords[i][1]); modelCoords[i][2] = 1; } fclose(modelFile); //open and read input files; make coordinate vectors of length 3 and homogeneous inputCoords = new vector*[numImages]; normalizedInputCoords = new vector*[numImages]; FILE* inputFile; for(int i = 0; i < numImages; i++) { inputFile = fopen(argv[2 + i], "r"); if(!inputFile) { cout << "could not open input file " << argv[2 + i] << endl; exit(0); } inputCoords[i] = new vector[numPoints]; normalizedInputCoords[i] = new vector[numPoints]; for(int j = 0; j < numPoints; j++) { inputCoords[i][j].resize(3); normalizedInputCoords[i][j].resize(3); fscanf(inputFile, "%lf %lf\n", &inputCoords[i][j][0], &inputCoords[i][j][1]); inputCoords[i][j][2] = 1; } } //normalize coord estimates for each image in turn so that avg. distance from each image center to each point is 1.0 normalizeInputCoordinates(numImages, numPoints); //fills normalizedInputCoords, normalizedModelCoords //estimate homographies from the normalized model plane to each normalized image plane cout << "estimating image homographies\n"; matrix imageHomographies[numImages]; for(int i = 0; i < numImages; i++) imageHomographies[i].resize(3, 3); estimateImageHomographies(numImages, numPoints, imageHomographies); cout << "estimating intrinsic parameters\n"; //calculate intrinsic parameters: alpha, beta, gamma, lambda, lensCenterX, lensCenterY (lambda is an arbitrary scalar) vector intrinsicParameters(6); estimateIntrinsicParameters(numImages, numPoints, imageHomographies, intrinsicParameters); printf("intrinsic parameters are:\nalpha: %lf\nbeta: %lf\ngamma: %lf\nlambda: %lf\nu0: %lf\nv0: %lf\n\n", intrinsicParameters[0], intrinsicParameters[1], intrinsicParameters[2], intrinsicParameters[3], intrinsicParameters[4], intrinsicParameters[5]); //fill matrix A (camera intrinsic matrix) from intrinsic parameter values matrix A(3, 3); A[0][0] = intrinsicParameters[0]; A[0][1] = intrinsicParameters[2]; A[0][2] = intrinsicParameters[4]; A[1][0] = 0; A[1][1] = intrinsicParameters[1]; A[1][2] = intrinsicParameters[5]; A[2][0] = 0; A[2][1] = 0; A[2][2] = 1; //calculate extrinsic parameters for each image matrix imageRotations[numImages]; vector imageTranslations[numImages]; for(int i = 0; i < numImages; i++) { imageRotations[i].resize(3, 3); imageTranslations[i].resize(3); } estimateImageExtrinsicParameters(A, numImages, imageHomographies, imageRotations, imageTranslations); double sinTheta, theta; optimizeAllEstimatedParameters(numImages, numPoints, A, imageRotations, imageTranslations); //calculate distortion parameters double distortionParameters[2]; //k1 and k2 estimateDistortionParameters(numImages, numPoints, distortionParameters, intrinsicParameters, imageHomographies); //Zhang section 3.3: calculate k1 and k2 //optimize everything together optimizeAll(numImages, numPoints, A, imageRotations, imageTranslations, distortionParameters); matrix temp(3, 3); for(int i = 0; i < numImages; i++) { cout << "\nfinal homography #" << i + 1 << ":\n"; for(int j = 0; j < 3; j++) { temp[j][0] = imageRotations[i][j][0]; temp[j][1] = imageRotations[i][j][1]; temp[j][2] = imageTranslations[i][j]; } temp = A * temp; temp.scale(1 / temp[2][2]); temp.print(); } getchar(); //write output FILE* outputFile = fopen(argv[argc - 1], "w"); 
fprintf(outputFile, "%lf %lf %lf %lf %lf\n\n%lf %lf\n\n", intrinsicParameters[0], intrinsicParameters[2], intrinsicParameters[1], intrinsicParameters[4], intrinsicParameters[5], distortionParameters[0], distortionParameters[1]); printf("%lf %lf %lf %lf %lf\n\n%lf %lf\n\n", intrinsicParameters[0], intrinsicParameters[2], intrinsicParameters[1], intrinsicParameters[4], intrinsicParameters[5], distortionParameters[0], distortionParameters[1]); for(int i = 0; i < numImages; i++) { for(int j = 0; j < 3; j++) { for(int k = 0; k < 3; k++) { fprintf(outputFile, "%lf ", imageRotations[i][j][k]); printf("%lf ", imageRotations[i][j][k]); } fprintf(outputFile, "\n"); printf("\n"); } for(int j = 0; j < 3; j++) { fprintf(outputFile, "%lf ", imageTranslations[i][j]); printf("%lf ", imageTranslations[i][j]); } fprintf(outputFile, "\n\n"); printf("\n\n"); } fclose(outputFile); cout << "Done writing output\n"; return 0; } /////////////// end main /////////////// void normalizeInputCoordinates(int numInputFiles, int numPoints) //normalizes inputCoords to avg. distance of 1 from image center, { //puts results in normalizedInputCoords vector centroid(3); double avgPointMagnitude, temp, tempC[2]; //create normalization for model file modelNormalization.resize(3, 3); #define mNorm modelNormalization mNorm.resize(3, 3); //fill transformation matrix for file i centroid[0] = centroid[1] = avgPointMagnitude = 0; //calculate average x-, y- coords for(int j = 0; j < numPoints; j++) centroid += modelCoords[j]; centroid.scale(1.0 / numPoints); //move points so centroid is origin mNorm[0][2] = -centroid[0]; mNorm[1][2] = -centroid[1]; //calculate average distance for(int j = 0; j < numPoints; j++) avgPointMagnitude += distance(centroid, modelCoords[j], 2); avgPointMagnitude /= numPoints; //scale points so avg. dist. is 1 mNorm[0][0] = mNorm[1][1] = 1 / avgPointMagnitude; //simulate matrix multiplication (T = s * t) mNorm[0][2] *= mNorm[0][0]; mNorm[1][2] *= mNorm[1][1]; //finish creating transformation matrix mNorm[2][2] = 1; mNorm[0][1] = mNorm[1][0] = mNorm[2][0] = mNorm[2][1] = 0; //transform points for(int j = 0; j < numPoints; j++) modelPts[j] = mNorm * modelCoords[j]; #undef mNorm //create normalizations for input files #define T imageNormalizations T = new matrix[numInputFiles]; for(int i = 0; i < numInputFiles; i++) { T[i].resize(3, 3); //fill transformation matrix for file i centroid[0] = centroid[1] = avgPointMagnitude = 0; //calculate average x-, y- coords for(int j = 0; j < numPoints; j++) centroid += inputCoords[i][j]; centroid.scale(1.0 / numPoints); //cout << "average coord is (" << centroid[0] << ", " << centroid[1] << ")\n"; //move points so centroid is origin T[i][0][2] = -centroid[0]; T[i][1][2] = -centroid[1]; //calculate average distance for(int j = 0; j < numPoints; j++) avgPointMagnitude += distance(centroid, inputCoords[i][j], 2); avgPointMagnitude /= numPoints; //cout << "avg point magnitude is " << avgPointMagnitude[i] << endl; //scale points so avg. dist. 
is 1 T[i][0][0] = T[i][1][1] = 1 / avgPointMagnitude; //simulate matrix multiplication (T = s * t) T[i][0][2] *= T[i][0][0]; T[i][1][2] *= T[i][1][1]; //finish creating transformation matrix T[i][2][2] = 1; T[i][0][1] = T[i][1][0] = T[i][2][0] = T[i][2][1] = 0; //transform points temp = tempC[0] = tempC[1] = 0; for(int j = 0; j < numPoints; j++) { imagePts[i][j] = T[i] * inputCoords[i][j]; temp += sqrt(imagePts[i][j][0] * imagePts[i][j][0] + imagePts[i][j][1] * imagePts[i][j][1]); tempC[0] += imagePts[i][j][0]; tempC[1] += imagePts[i][j][1]; } } #undef T } //implement Zhang Appendix A, estimating image homographies from model plane to image planes //each homography matrix (one per image) is 3 x 3 //return estimates of homographies in imageHomographies void estimateImageHomographies(int numInputFiles, int numPoints, matrix* imageHomographies) { int minValueIndex, lmdifCode; matrix L(2 * numPoints, 9), lsvL(2 * numPoints, 2 * numPoints), rsvL(9, 9), temp(3, 3); vector x(9); //homography matrix values vector svL(9); //assume we have at least 5 points detected //parameters for lmdif() double calculatedErrors[numPoints], tempVec[9], fwdJacobian[numPoints * 9], outputData[9], tempVec2[9], tempVec3[9], tempVec4[9], tempVec5[numPoints], maxFuncError = .001, maxRelativeError = 1e-7, maxCosineError = 1e-4, stepLength = 1e-14, factor = 100; int flag, numCalculatorFunctionCalls, permutationMatrixInfo[9], numParameters = 9, mode = 1, maxFuncEvals = 100000, printInfo = 0; matrix imageNormalizationInverse; //the inv(E) of finding H for(int i = 0; i < numInputFiles; i++) { //obtain initial homography guess x, where Lx = 0 //fill up L //each two rows are: | ~M^T 0^T -u~M^T | // | 0^T ~M^T -v~M^T | //where ~M is the model point (x, y, 1) in world coords and (u, v) is the image point in pixels for(int j = 0; j < numPoints; j++) { L[2 * j][0] = modelPts[j][0]; L[2 * j][1] = modelPts[j][1]; L[2 * j][2] = 1; L[2 * j][3] = L[2 * j][4] = L[2 * j][5] = 0; L[2 * j][6] = -imagePts[i][j][0] * modelPts[j][0]; L[2 * j][7] = -imagePts[i][j][0] * modelPts[j][1]; L[2 * j][8] = -imagePts[i][j][0]; L[2 * j + 1][0] = L[2 * j + 1][1] = L[2 * j + 1][2] = 0; L[2 * j + 1][3] = modelPts[j][0]; L[2 * j + 1][4] = modelPts[j][1]; L[2 * j + 1][5] = 1; L[2 * j + 1][6] = -imagePts[i][j][1] * modelPts[j][0]; L[2 * j + 1][7] = -imagePts[i][j][1] * modelPts[j][1]; L[2 * j + 1][8] = -imagePts[i][j][1]; } //find right singular vector of L assoc. 
		//with the smallest singular value: this is x
		L.svd(lsvL, svL, rsvL); //columns of rsvL are the right singular vectors, with corresponding singular values in svL

		//find index of the smallest singular value
		minValueIndex = 0;
		for(int j = 1; j < 9; j++)
			if(svL[j] < svL[minValueIndex]) minValueIndex = j;
		rsvL.transpose(); //so its rows, rather than columns, can be used as vectors; rsvL[minValueIndex] is now Zhang's vector x

		//minimize sum of squares of distances between detected image points and model points mapped through the homography
		//number of estimated parameters (n) is 9 (the entries of the homography matrix)
		//number of functions (m) is the number of points (one distance per point): thus we must have at least 9 points
		curImgNum = i; //for calculatePointEstimateErrorsWithSimplifiedHomography()
		lmdifCode = lmdif(calculatePointEstimateErrorsWithSimplifiedHomography, &numPoints, &numParameters, rsvL[minValueIndex].values,
		                  calculatedErrors, &maxFuncError, &maxRelativeError, &maxCosineError, &maxFuncEvals, &stepLength, tempVec,
		                  &mode, &factor, &printInfo, &flag, &numCalculatorFunctionCalls, fwdJacobian, &numPoints,
		                  permutationMatrixInfo, outputData, tempVec2, tempVec3, tempVec4, tempVec5);
		cout << "lmdif returns " << lmdifCode << endl;
		cout << "info is " << flag << endl;

		//insert values of x into imageHomographies (x is the concatenated rows of H)
		for(int j = 0; j < 3; j++)
			for(int k = 0; k < 3; k++)
				imageHomographies[i][j][k] = rsvL[minValueIndex][j * 3 + k] / rsvL[minValueIndex][8];

		//find H = inv(E) * H * D, where E is the image normalization and D is the model normalization
		imageNormalizationInverse = imageNormalizations[i];
		imageNormalizationInverse.invert();
		imageHomographies[i] = imageNormalizationInverse * (imageHomographies[i] * modelNormalization);
		imageHomographies[i].scale(1 / imageHomographies[i][2][2]);

		cout << "end estimate of homography matrix " << i + 1 << ":\n";
		imageHomographies[i].print();
	}
	getchar();
}

//form matrix V in Zhang section 3.1, SVD it,
//use elements of b to find six useful parameters (Appendix B)
void estimateIntrinsicParameters(int numInputFiles, int numPoints, matrix* imageHomographies, vector& intrinsicParameters)
{
	matrix V(2 * numInputFiles, 6);

	//fill V with functions of the homography matrices
#define h(a, i, j) imageHomographies[a][j - 1][i - 1]
	for(int i = 0; i < numInputFiles; i++)
	{
		//h(a, i, j) is element (row j, column i) of homography a, i.e. column is indexed before row; indices range from 1 to 3:
		//v_ij = [ h_i1*h_j1, h_i1*h_j2 + h_i2*h_j1, h_i2*h_j2, h_i3*h_j1 + h_i1*h_j3, h_i3*h_j2 + h_i2*h_j3, h_i3*h_j3 ]
		//each two rows of V are:
		//	[ v_12 ]
		//	[ v_11 - v_22 ]
		V[2 * i][0] = h(i, 1, 1) * h(i, 2, 1);
		V[2 * i][1] = h(i, 1, 1) * h(i, 2, 2) + h(i, 1, 2) * h(i, 2, 1);
		V[2 * i][2] = h(i, 1, 2) * h(i, 2, 2);
		V[2 * i][3] = h(i, 1, 3) * h(i, 2, 1) + h(i, 1, 1) * h(i, 2, 3);
		V[2 * i][4] = h(i, 1, 3) * h(i, 2, 2) + h(i, 1, 2) * h(i, 2, 3);
		V[2 * i][5] = h(i, 1, 3) * h(i, 2, 3);
		V[2 * i + 1][0] = h(i, 1, 1) * h(i, 1, 1) - h(i, 2, 1) * h(i, 2, 1);
		V[2 * i + 1][1] = h(i, 1, 1) * h(i, 1, 2) + h(i, 1, 2) * h(i, 1, 1) - h(i, 2, 1) * h(i, 2, 2) - h(i, 2, 2) * h(i, 2, 1);
		V[2 * i + 1][2] = h(i, 1, 2) * h(i, 1, 2) - h(i, 2, 2) * h(i, 2, 2);
		V[2 * i + 1][3] = h(i, 1, 3) * h(i, 1, 1) + h(i, 1, 1) * h(i, 1, 3) - h(i, 2, 3) * h(i, 2, 1) - h(i, 2, 1) * h(i, 2, 3);
		V[2 * i + 1][4] = h(i, 1, 3) * h(i, 1, 2) + h(i, 1, 2) * h(i, 1, 3) - h(i, 2, 3) * h(i, 2, 2) - h(i, 2, 2) * h(i, 2, 3);
		V[2 * i + 1][5] = h(i, 1, 3) * h(i, 1, 3) - h(i, 2, 3) * h(i, 2, 3);
	}
#undef h
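	//per Zhang section 3.1, b holds (up to an arbitrary scale, the lambda computed below) the six
	//distinct entries of the symmetric matrix B = A^-T * A^-1, packed as b = [ B11 B12 B22 B13 B23 B33 ];
	//each homography H = [ h_1 h_2 h_3 ] contributes the two constraints
	//	h_1^T B h_2 = 0
	//	h_1^T B h_1 = h_2^T B h_2
	//and the rows v_12 and (v_11 - v_22) built above are exactly those constraints written as dot
	//products with b, so V * b = 0 and b lies in the null space of V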
	//find the right singular vector of V associated with the smallest singular value: this is b
	vector w(6);
	matrix lsv(2 * numInputFiles, 2 * numInputFiles), rsv(6, 6);
	V.svd(lsv, w, rsv); //columns of rsv are the right singular vectors, with corresponding singular values in w

	/* cout << "singular values of V:\n";
	for(int j = 0; j < 6; j++) cout << w[j] << ' ';
	cout << "\nright singular vectors of V:\n";
	for(int j = 0; j < 6; j++)
	{
		for(int k = 0; k < 6; k++) cout << rsv[k][j] << ' ';
		cout << endl;
	}
	getchar(); */

	//find index of the smallest singular value
	int minValueIndex = 0;
	for(int i = 1; i < 6; i++)
		if(w[i] < w[minValueIndex]) minValueIndex = i;

#define b(i) rsv[i][minValueIndex] //use notation b for the singular vector corresponding to the smallest singular value

	/*cout << "printing estimated b\n";
	for(int i = 0; i < 6; i++) cout << b(i) << ' ';
	cout << endl << endl;
	getchar();*/

	//calculate intrinsic parameters from vector b (Zhang Appendix B)
	//intrinsic parameters: alpha, beta, gamma, lambda, lensCenterX (u0), lensCenterY (v0)
	/* v0 */     intrinsicParameters[5] = (b(1) * b(3) - b(0) * b(4)) / (b(0) * b(2) - b(1) * b(1));
	/* lambda */ intrinsicParameters[3] = b(5) - (b(3) * b(3) + intrinsicParameters[5] * (b(1) * b(3) - b(0) * b(4))) / b(0);
	/* alpha */  intrinsicParameters[0] = sqrt(intrinsicParameters[3] / b(0));
	/* beta */   intrinsicParameters[1] = sqrt(intrinsicParameters[3] * b(0) / (b(0) * b(2) - b(1) * b(1)));
	/* gamma */  intrinsicParameters[2] = -b(1) * intrinsicParameters[0] * intrinsicParameters[0] * intrinsicParameters[1] / intrinsicParameters[3];
	/* u0 */     intrinsicParameters[4] = intrinsicParameters[2] * intrinsicParameters[5] / intrinsicParameters[0] - b(3) * intrinsicParameters[0] * intrinsicParameters[0] / intrinsicParameters[3];
#undef b
}

//Zhang section 3.1: get R, t from A, H
void estimateImageExtrinsicParameters(matrix& A, int numInputFiles, matrix* imageHomographies, matrix* imageRotations, vector* imageTranslations)
{
	cout << "estimating R, t:\n";
	matrix invA;
	invA = A;
	invA.invert();

	//for SVDing R
	matrix U(3, 3), V(3, 3);
	vector w(3);

	for(int i = 0; i < numInputFiles; i++)
	{
		//obtain initial guesses at the rotation and translation for each input file
		imageRotations[i] = invA * imageHomographies[i]; //assign [ r_1 r_2 t ] = A^-1 * H
		imageRotations[i].transpose();
		//cout << "norm magnitudes in estimateExtrinsics: \n" << imageRotations[i][0].norm() << ' ' << imageRotations[i][1].norm() << endl;
		imageRotations[i].scale(1 / imageRotations[i][0].norm());
		imageTranslations[i] = imageRotations[i][2];
		imageRotations[i][2] = imageRotations[i][0].crossProduct(imageRotations[i][1]); //assign r_3 = r_1 x r_2
		imageRotations[i].transpose();

		cout << "image # " << i + 1 << " translation:\n";
		imageTranslations[i].print();

		///NOTE: the procedure in Zhang appendix C appears to make the rotation matrices worse, not better
		//change R to a true rotation matrix (Zhang appendix C): take the SVD R = U*s*V^T and set R to U*V^T
		//imageRotations[i].svd(U, w, V);
		//U.multiply(V, imageRotations[i]);

		cout << "\nimage # " << i + 1 << " rotation:\n";
		imageRotations[i].print();
		//getchar();
	}
}

//Zhang section 3.2: minimize the sum of squares over all points to refine the estimates of A and the Rs and ts
void optimizeAllEstimatedParameters(int numInputFiles, int numPoints, matrix& A, matrix* imageRotations, vector* imageTranslations)
{
	cout << "optimizing all parameters without distortion\n";

	//parameters to lmdif()
	int numValues = numPoints * numInputFiles, numParameters = 5 + 6 * numInputFiles;
	double initialEstimate[numParameters],
calculatedErrors[numValues], tempVec[numParameters], fwdJacobian[numValues * numParameters], outputData[numParameters], tempVec2[numParameters], tempVec3[numParameters], tempVec4[numParameters], tempVec5[numValues], maxFuncError = .001, maxRelativeError = 1e-7, maxCosineError = 1e-4, stepLength = 1e-14, factor = 100; int flag, numCalculatorFunctionCalls, permutationMatrixInfo[numParameters], mode = 1, maxFuncEvals = 100000, printInfo = 0, lmdifCode; //load values from structures into lmdif array initialEstimate[0] = A[0][0]; initialEstimate[1] = A[1][1]; initialEstimate[2] = A[0][1]; initialEstimate[3] = A[0][2]; initialEstimate[4] = A[1][2]; quaternion q; double theta, sinTheta; //to be used with q for(int i = 0; i < numInputFiles; i++) { //glean three parameters related to the rotation axis and angle from a quaternion taken from the rotation matrix cout << "in optimizeAll: image rotation #" << i + 1 << ":\n"; imageRotations[i].print(); q.createFromRotationMatrix(imageRotations[i]); //q is [v sin theta, cos theta] sinTheta = sqrt(q[X] * q[X] + q[Y] * q[Y] + q[Z] * q[Z]); theta = atan2(sinTheta, q[W]) * 2; for(int j = 0; j < 3; j++) { initialEstimate[5 + i * 6 + j] = q[j] * theta / sinTheta; //Rodrigues parameters: components of axis of rotation, scaled by angle initialEstimate[5 + i * 6 + 3 + j] = imageTranslations[i][j]; } cout << "\nRodrigues #" << i + 1 << ": " << initialEstimate[5 + i * 6] << ' ' << initialEstimate[5 + i * 6 + 1] << ' ' << initialEstimate[5 + i * 6 + 2]; //getchar(); } //minimize sum of squares of distances between detected image points and model points moved by using A, R, t as follows: //s * ~m = A * [ r1 r2 t ] * ~M, where s is an arbitrary scalar //number of estimated parameters (n) is 5 (intrinsic parameters) + 6 * number of files (3 for R, 3 for t) //number of functions (m) is number of points times number of files (one distance per point) cout << "calling lmdif...\n"; /*for(int i = 0; i < numParameters; i++) cout << initialEstimate[i] << ' '; getchar();*/ lmdifCode = lmdif(calculatePointEstimateErrorsWithNonsimplifiedHomography, &numValues, &numParameters, initialEstimate, calculatedErrors, &maxFuncError, &maxRelativeError, &maxCosineError, &maxFuncEvals, &stepLength, tempVec, &mode, &factor, &printInfo, &flag, &numCalculatorFunctionCalls, fwdJacobian, &numValues, permutationMatrixInfo, outputData, tempVec2, tempVec3, tempVec4, tempVec5); cout << "lmdif returns " << lmdifCode << endl; cout << "function called " << numCalculatorFunctionCalls << " times\n"; cout << "info is " << flag << endl; //put optimized values back into structures A[0][0] = initialEstimate[0]; A[1][1] = initialEstimate[1]; A[0][1] = initialEstimate[2]; A[0][2] = initialEstimate[3]; A[1][2] = initialEstimate[4]; for(int i = 0; i < numInputFiles; i++) { for(int j = 0; j < 3; j++) { q[j] = initialEstimate[5 + i * 6 + j]; imageTranslations[i][j] = initialEstimate[5 + i * 6 + 3 + j]; } //find rotation matrix from angle and axis of rotation by using a quaternion theta = sqrt(q[X] * q[X] + q[Y] * q[Y] + q[Z] * q[Z]) / 2; q[W] = cos(theta); for(int j = 0; j < 3; j++) q[j] *= sin(theta) / (2 * theta); imageRotations[i] = q.createRotationMatrix(); } } void estimateDistortionParameters(int numInputFiles, int numPoints, double* distortionParameters, vector& intrinsicParameters, matrix* imageHomographies) { cout << "\nestimating distortion parameters\n"; matrix D(2 * numInputFiles * numPoints, 2), dTrans, dTransDInv; vector k(2), d(2 * numInputFiles * numPoints), tempPoint(3), tempPoint2(3); #define 
square(x) (x) * (x) for(int i = 0; i < numInputFiles; i++) { //(u0, v0) are cameraIntrinsicParameters [4] and [5] //(~u, ~v) are inputCoords //fill D, d for(int j = 0; j < numPoints; j++) { tempPoint = imageHomographies[i] * modelCoords[j]; //tempPoint is (u, v, ~) tempPoint.scale(1 / tempPoint[2]); //cout << "(u, v) = " << tempPoint[0] << ", " << tempPoint[1] << "; " << tempPoint[2] << endl; tempPoint2 = imageNormalizations[i] * tempPoint; //tempPoint2 is (x, y, ~) //cout << "(x, y) = " << tempPoint2[0] << ", " << tempPoint2[1] << "; " << tempPoint2[2] << endl; D[i * numPoints * 2 + j * 2][0] = (tempPoint[0] - intrinsicParameters[4]) * (tempPoint2[0] * tempPoint2[0] + tempPoint2[1] * tempPoint2[1]); D[i * numPoints * 2 + j * 2][1] = (tempPoint[0] - intrinsicParameters[4]) * square(tempPoint2[0] * tempPoint2[0] + tempPoint2[1] * tempPoint2[1]); D[i * numPoints * 2 + j * 2 + 1][0] = (tempPoint[1] - intrinsicParameters[5]) * (tempPoint2[0] * tempPoint2[0] + tempPoint2[1] * tempPoint2[1]); D[i * numPoints * 2 + j * 2 + 1][1] = (tempPoint[1] - intrinsicParameters[5]) * square(tempPoint2[0] * tempPoint2[0] + tempPoint2[1] * tempPoint2[1]); d[i * numPoints * 2 + j * 2] = inputCoords[i][j][0] - tempPoint[0]; d[i * numPoints * 2 + j * 2 + 1] = inputCoords[i][j][1] - tempPoint[1]; } //getchar(); } #undef square(x) dTrans = D; dTrans.transpose(); dTransDInv = dTrans * D; dTransDInv.invert(); k = dTransDInv * (dTrans * d); cout << "distortionParameters initial estimate: "; k.print(); cout << endl; getchar(); distortionParameters[0] = k[0]; distortionParameters[1] = k[1]; } //Zhang section 3.3: final optimization of all parameters including distortion void optimizeAll(int numInputFiles, int numPoints, matrix& A, matrix* imageRotations, vector* imageTranslations, double* distortionParameters) { cout << "optimizing all parameters with distortion\n"; //parameters to lmdif() int numValues = numPoints * numInputFiles, numParameters = 7 + 6 * numInputFiles; double initialEstimate[numParameters], calculatedErrors[numValues], tempVec[numParameters], fwdJacobian[numValues * numParameters], outputData[numParameters], tempVec2[numParameters], tempVec3[numParameters], tempVec4[numParameters], tempVec5[numValues], maxFuncError = .001, maxRelativeError = 1e-7, maxCosineError = 1e-4, stepLength = 1e-14, factor = 100; int flag, numCalculatorFunctionCalls, permutationMatrixInfo[numParameters], mode = 1, maxFuncEvals = 100000, printInfo = 0, lmdifCode; //load values from structures into lmdif array initialEstimate[0] = A[0][0]; initialEstimate[1] = A[1][1]; initialEstimate[2] = A[0][1]; initialEstimate[3] = A[0][2]; initialEstimate[4] = A[1][2]; initialEstimate[5] = distortionParameters[0]; initialEstimate[6] = distortionParameters[1]; quaternion q; double theta, sinTheta; //to be used with q for(int i = 0; i < numInputFiles; i++) { //glean three parameters related to the rotation axis and angle from a quaternion taken from the rotation matrix cout << "in optimizeAll: image rotation #" << i + 1 << ":\n"; imageRotations[i].print(); q.createFromRotationMatrix(imageRotations[i]); //q is [v sin theta, cos theta] sinTheta = sqrt(q[X] * q[X] + q[Y] * q[Y] + q[Z] * q[Z]); theta = atan2(sinTheta, q[W]) * 2; for(int j = 0; j < 3; j++) { initialEstimate[7 + i * 6 + j] = q[j] * theta / sinTheta; //Rodrigues parameters: components of axis of rotation, scaled by angle initialEstimate[7 + i * 6 + 3 + j] = imageTranslations[i][j]; } cout << "\nRodrigues #" << i + 1 << ": " << initialEstimate[7 + i * 6] << ' ' << initialEstimate[7 + i 
* 6 + 1] << ' ' << initialEstimate[7 + i * 6 + 2]; getchar(); } //minimize sum of squares of distances between detected image points and model points moved by using A, R, t as follows: //s * ~m = A * [ r1 r2 t ] * ~M, where s is an arbitrary scalar //number of estimated parameters (n) is 5 (intrinsic parameters) + 6 * number of files (3 for R, 3 for t) //number of functions (m) is number of points times number of files (one distance per point) cout << "calling lmdif...\n"; lmdifCode = lmdif(calculatePointEstimateErrorsWithCompleteModel, &numValues, &numParameters, initialEstimate, calculatedErrors, &maxFuncError, &maxRelativeError, &maxCosineError, &maxFuncEvals, &stepLength, tempVec, &mode, &factor, &printInfo, &flag, &numCalculatorFunctionCalls, fwdJacobian, &numValues, permutationMatrixInfo, outputData, tempVec2, tempVec3, tempVec4, tempVec5); cout << "lmdif returns " << lmdifCode << endl; cout << "function called " << numCalculatorFunctionCalls << " times\n"; cout << "info is " << flag << endl; //put optimized values back into structures A[0][0] = initialEstimate[0]; A[1][1] = initialEstimate[1]; A[0][1] = initialEstimate[2]; A[0][2] = initialEstimate[3]; A[1][2] = initialEstimate[4]; distortionParameters[0] = initialEstimate[5]; distortionParameters[1] = initialEstimate[6]; for(int i = 0; i < numInputFiles; i++) { for(int j = 0; j < 3; j++) { q[j] = initialEstimate[7 + i * 6 + j]; imageTranslations[i][j] = initialEstimate[7 + i * 6 + 3 + j]; } //find rotation matrix from angle and axis of rotation by using a quaternion theta = sqrt(q[X] * q[X] + q[Y] * q[Y] + q[Z] * q[Z]) / 2; q[W] = cos(theta); for(int j = 0; j < 3; j++) q[j] *= sin(theta) / (2 * theta); imageRotations[i] = q.createRotationMatrix(); } } /*subroutine fcn(m,n,x,fvec,iflag) */ /* integer m,n,iflag */ /* double precision x(n),fvec(m) */ /* ---------- */ /* calculate the functions at x and */ /* return this vector in fvec. */ /* ---------- */ /* return */ /* end */ /* the value of iflag should not be changed by fcn unless */ /* the user wants to terminate execution of lmdif. */ /* in this case set iflag to a negative integer. */ /* m is a positive integer input variable set to the number */ /* of functions. */ /* n is a positive integer input variable set to the number */ /* of variables. n must not exceed m. */ /* x is an array of length n. on input x must contain */ /* an initial estimate of the solution vector. on output x */ /* contains the final estimate of the solution vector. */ /* fvec is an output array of length m which contains */ /* the functions evaluated at the output x. 
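       the remaining lmdif() arguments used by the calls in this file (assuming
       lmdif.c is the usual C translation of MINPACK's lmdif, which matches the
       argument count used here) map onto the local names roughly as follows:
         ftol / xtol / gtol     -> maxFuncError / maxRelativeError / maxCosineError
         maxfev, epsfcn         -> maxFuncEvals, stepLength
         diag, mode, factor     -> tempVec, mode, factor
         nprint, info, nfev     -> printInfo, flag, numCalculatorFunctionCalls
         fjac, ldfjac           -> fwdJacobian and its leading dimension (m)
         ipvt, qtf, wa1..wa4    -> permutationMatrixInfo, outputData, tempVec2..tempVec5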
*/ //given: double** modelCoords; double*** inputCoords; int curImgNum //compute value of first equation in Zhang appendix A (for use by estimateImageHomographies()) void calculatePointEstimateErrorsWithSimplifiedHomography(int* numPoints, int* estimateVectorSize, double* homographyMatrixEstimates, double* calculatedErrors, int* flag) { //homographyMatrixEstimates are the params being manipulated; calculatedErrors are the values being minimized vector homographizedModelPoint(3); matrix estimatedHomographyMatrix(3, 3); //fill homography matrix from estimates for(int i = 0; i < 3; i++) for(int j = 0; j < 3; j++) estimatedHomographyMatrix[i][j] = homographyMatrixEstimates[i * 3 + j]; //calculate errors #define square(a) a * a for(int i = 0; i < (*numPoints); i++) { //inputCoords[curImgNum][i] is m_i //modelCoords[i] is M_i //homographizedModelPoint is ^m_i //^m_i = 1 / (h_3 * M_i) * [[ h_1 * M_i ];[ h_2 * M_i ] [ unnec.]] homographizedModelPoint = estimatedHomographyMatrix * normalizedModelCoords[i]; homographizedModelPoint.scale(1 / homographizedModelPoint[2]); //make z = 1 calculatedErrors[i] = square(distance(normalizedInputCoords[curImgNum][i], homographizedModelPoint, 2)); /*cout << "\ninput coords: "; normalizedInputCoords[curImgNum][i].print(); cout << "\nH * model coords: "; homographizedModelPoint.print();*/ } //getchar(); #undef square(a) } //for use by optimizeAllEstimatedParameters() //calculate sum of squares of distances between detected image points and model points moved by using A, R, t as follows: //s * ~m = A * [ r1 r2 t ] * ~M, where s is an arbitrary scalar void calculatePointEstimateErrorsWithNonsimplifiedHomography(int* numValues, int* estimateVectorSize, double* parameterEstimates, double* calculatedErrors, int* flag) { cout << "in cPEEWNH... 
"; int numImages = ((*estimateVectorSize) - 5) / 6, numPoints = (*numValues) / numImages; matrix A(3, 3), imageTransformations[numImages], constructedHomographies[numImages], rotationT; //iT = [ r_1 r_2 t ] vector homographizedModelPoint(3); quaternion q; double theta, temp; //fill A A[0][0] = parameterEstimates[0]; A[1][1] = parameterEstimates[1]; A[0][1] = parameterEstimates[2]; A[0][2] = parameterEstimates[3]; A[1][2] = parameterEstimates[4]; A[2][2] = 1; A[1][0] = A[2][0] = A[2][1] = 0; //cout << "A:"; //A.print(); #define square(a) a * a for(int i = 0; i < numImages; i++) { imageTransformations[i].resize(3, 3); constructedHomographies[i].resize(3, 3); //find rotation matrix from angle and axis of rotation by using a quaternion //cout << "Rodrigues:\n"; for(int j = 0; j < 3; j++) { q[j] = parameterEstimates[5 + i * 6 + j]; //cout << q[j] << ' '; imageTransformations[i][j][2] = parameterEstimates[5 + i * 6 + 3 + j]; } //cout << "\n"; theta = sqrt(q[X] * q[X] + q[Y] * q[Y] + q[Z] * q[Z]) / 2; q[W] = cos(theta); for(int j = 0; j < 3; j++) q[j] *= sin(theta) / (2 * theta); //cout << "quaternion #" << i + 1 << ":\n"; //q.print(); rotationT = q.createRotationMatrix(); //cout << "\nrotation #" << i + 1 << ":\n"; //rotationT.print(); //getchar(); for(int j = 0; j < 2; j++) for(int k = 0; k < 3; k++) imageTransformations[i][k][j] = rotationT[k][j]; constructedHomographies[i] = A * imageTransformations[i]; //cout << "homography #" << i + 1 << ":"; //constructedHomographies[i].print(); //getchar(); temp = 0; //cout << "points and calculated points:\n"; for(int j = 0; j < numPoints; j++) { homographizedModelPoint = constructedHomographies[i] * modelCoords[j]; homographizedModelPoint.scale(1 / homographizedModelPoint[2]); //move to same xy-plane as inputCoords //homographizedModelPoint.print(); //cout << "\n; "; //inputCoords[i][j].print(); //getchar(); calculatedErrors[i * numPoints + j] = square(distance(homographizedModelPoint, inputCoords[i][j], 2)); temp += calculatedErrors[i * numPoints + j]; } } cout << "total error: " << temp << endl; //getchar(); #undef square(a) } //called by optimizeAll() before main() writes output file //calculates points allowing for distortion void calculatePointEstimateErrorsWithCompleteModel(int* numValues, int* estimateVectorSize, double* parameterEstimates, double* calculatedErrors, int* flag) { cout << "in cPEEWCM... 
"; int numImages = ((*estimateVectorSize) - 7) / 6, numPoints = (*numValues) / numImages; matrix A(3, 3), imageTransformations[numImages], constructedHomographies[numImages], rotationT; //iT = [ r_1 r_2 t ] vector homographizedModelPoint(3), normalizedImagePoint(3); quaternion q; double theta, temp, distortionParameters[2]; //fill A A[0][0] = parameterEstimates[0]; A[1][1] = parameterEstimates[1]; A[0][1] = parameterEstimates[2]; A[0][2] = parameterEstimates[3]; A[1][2] = parameterEstimates[4]; A[2][2] = 1; A[1][0] = A[2][0] = A[2][1] = 0; //extract distortion parameters distortionParameters[0] = parameterEstimates[5]; distortionParameters[1] = parameterEstimates[6]; //cout << "distortion: " << distortionParameters[0] << ", " << distortionParameters[1] << endl; //cout << "A:"; //A.print(); #define square(a) (a) * (a) for(int i = 0; i < numImages; i++) { imageTransformations[i].resize(3, 3); constructedHomographies[i].resize(3, 3); //find rotation matrix from angle and axis of rotation by using a quaternion //cout << "Rodrigues:\n"; for(int j = 0; j < 3; j++) { q[j] = parameterEstimates[7 + i * 6 + j]; //cout << q[j] << ' '; imageTransformations[i][j][2] = parameterEstimates[7 + i * 6 + 3 + j]; } //cout << "\n"; theta = sqrt(q[X] * q[X] + q[Y] * q[Y] + q[Z] * q[Z]) / 2; q[W] = cos(theta); for(int j = 0; j < 3; j++) q[j] *= sin(theta) / (2 * theta); //cout << "quaternion #" << i + 1 << ":\n"; //q.print(); rotationT = q.createRotationMatrix(); //cout << "\nrotation #" << i + 1 << ":\n"; //rotationT.print(); //getchar(); for(int j = 0; j < 2; j++) for(int k = 0; k < 3; k++) imageTransformations[i][k][j] = rotationT[k][j]; constructedHomographies[i] = A * imageTransformations[i]; //cout << "homography #" << i + 1 << ":"; //constructedHomographies[i].print(); //getchar(); temp = 0; //cout << "points and calculated points:\n"; for(int j = 0; j < numPoints; j++) /// getting wrong numbers in homographizedModelPoint below where marked { homographizedModelPoint = constructedHomographies[i] * modelCoords[j]; homographizedModelPoint.scale(1 / homographizedModelPoint[2]); //move to same xy-plane as inputCoords //cout << "\nideal: "; //homographizedModelPoint.print(); normalizedImagePoint = imageNormalizations[i] * (constructedHomographies[i] * modelCoords[j]); normalizedImagePoint.scale(1 / normalizedImagePoint[2]); //cout << "; norm ideal: "; //normalizedImagePoint.print(); //cout << endl; for(int k = 0; k < 2; k++) //turn ideal coords into distorted ideal coords to compare them to observed coords { //cout << " u - u0: " << homographizedModelPoint[k] - parameterEstimates[3 + k] << endl; //cout << " k1 term = " << parameterEstimates[5] << " * " << (square(normalizedImagePoint[0]) + square(normalizedImagePoint[1])) << endl; //cout << " k2 term = " << parameterEstimates[6] << " * " << square(square(normalizedImagePoint[0]) + square(normalizedImagePoint[1])) << endl; homographizedModelPoint[k] += (homographizedModelPoint[k] - parameterEstimates[3 + k]) * (parameterEstimates[5] * (square(normalizedImagePoint[0]) + square(normalizedImagePoint[1])) + parameterEstimates[6] * square(square(normalizedImagePoint[0]) + square(normalizedImagePoint[1]))); } //inputCoords[i][j].print(); //cout << " : "; //homographizedModelPoint.print(); //////////////////// errors here //cout << "\n"; //getchar(); calculatedErrors[i * numPoints + j] = square(distance(homographizedModelPoint, inputCoords[i][j], 2)); //find distance between the sides of this equation: temp += calculatedErrors[i * numPoints + j]; //~u = u + (u - u0) * 
			//     (k1(x^2 + y^2) + k2(x^2 + y^2)^2), where (~u, ~v) is inputCoords and (u, v) is the right side
		}
	}
	cout << "total error: " << temp << endl;
	//getchar();
#undef square
}
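
//Illustrative, standalone sketch (not called anywhere above): the radial distortion step that
//calculatePointEstimateErrorsWithCompleteModel() applies point by point, pulled out into one
//helper. (u, v) is the ideal (distortion-free) pixel projection, (x, y) is the corresponding
//point in normalized image coordinates, (u0, v0) is the principal point, and k1, k2 are the
//radial coefficients. The function name and parameter names are hypothetical; only the formula
//~u = u + (u - u0) * (k1*(x^2 + y^2) + k2*(x^2 + y^2)^2) (and the analogous ~v) comes from the
//code and comments above.
void distortIdealPixel(double u, double v, double x, double y,
                       double u0, double v0, double k1, double k2,
                       double& uDistorted, double& vDistorted)
{
	double r2 = x * x + y * y;              //squared radius in normalized coordinates
	double radial = k1 * r2 + k2 * r2 * r2; //radial distortion factor
	uDistorted = u + (u - u0) * radial;
	vDistorted = v + (v - v0) * radial;
}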