안녕하세요. 프로그래밍 초보자입니다. OpenCV 질문을 좀 해도 될까요??
DevilsTears
// restart_1.cpp : Defines the entry point for the console application.
//
#include <cv.h>
#include <cxcore.h>
#include <highgui.h>
#include <stdio.h>
// Function prototypes
void doWork_A();
void doWork_B();
void on_qulity_level_A(int pos);
void on_qulity_level_B(int pos);
void on_min_distance_A(int pos);
void on_min_distance_B(int pos);
void CalibrateCamera(CvPoint3D32f* m_3DWorldPoint, CvPoint2D32f* m_ImgCoord, CvMat* m_ProjectionMatrix);
void LoadCoordinate(void);
// Global variables
const int MAX_CORNERS = 500;
IplImage *imgA;
IplImage *imgB;
int ql=1; // quality level as an integer slider value (not referenced in this chunk)
float fql=0.1f; // quality level passed conceptually to cvGoodFeaturesToTrack
int md=5; // minimum distance as an integer slider value (not referenced in this chunk)
double dmd=5; // minimum distance between detected corners
CvPoint2D32f m_LeftFeatures[6], m_RightFeatures[6]; // hand-measured pixel coords in each image
CvPoint3D32f _3DWorldPoint[6]; // matching 3D corner points of the box
CvMat* m_ProMat = cvCreateMat(3,4,CV_32FC1); // estimated 3x4 projection matrix
//main() 메인
// Entry point: loads the two calibration photographs, opens one window per
// image, runs the feature/calibration pass for each, and waits for a key.
int main()
{
    // Fill the global 3D world points and 2D feature coordinates.
    LoadCoordinate();

    /* Image 1 (left view) */
    imgA = cvLoadImage("test1111.jpg", CV_LOAD_IMAGE_GRAYSCALE);
    if (imgA == NULL)
    {
        fprintf(stderr, "Failed to load test1111.jpg\n");
        return 1;
    }
    cvNamedWindow("Left Image");

    /* Image 2 (right view) */
    imgB = cvLoadImage("test2222.jpg", CV_LOAD_IMAGE_GRAYSCALE);
    if (imgB == NULL)
    {
        fprintf(stderr, "Failed to load test2222.jpg\n");
        cvReleaseImage(&imgA);
        return 1;
    }
    cvNamedWindow("Right Image");

    doWork_A();
    doWork_B();

    cvWaitKey(0);

    cvReleaseImage(&imgB);
    cvReleaseImage(&imgA);
    return 0;
}
// fql, dmd 값에 의해서 cvGoodFeaturesToTrack 함수를 수행
// ...결과를 화면에 그림
void doWork_A()
{
//원본이미지복사
IplImage *img = cvCloneImage(imgA);
//cvGoodFeaturesToTrack을수행하기위해서변수값설정
//
int corner_count = MAX_CORNERS;
CvPoint2D32f* cornersA = new CvPoint2D32f[ MAX_CORNERS ];
//cvGoodFeaturesToTrack 함수를위한버퍼이미지생성
IplImage* eig_image = cvCreateImage( cvGetSize(img), IPL_DEPTH_32F, 1 );
IplImage* tmp_image = cvCreateImage( cvGetSize(img), IPL_DEPTH_32F, 1 );
//변수값화면에출력
printf(Qulity : %g, MinDist:%g\n, fql, dmd);
//함수수행
cvGoodFeaturesToTrack( img, eig_image, tmp_image,cornersA, &corner_count,0.08, 93);
//수행결과를그림에표시한다.
for(int i=0;icorner_count;i++)
{
CvPoint tmp[2];
tmp[0].x = cornersA[i].x-3;
tmp[0].y = cornersA[i].y-3;
tmp[1].x = cornersA[i].x+3;
tmp[1].y = cornersA[i].y+3;
// cvRectangle(img, tmp[0], tmp[1], cvScalarAll(0));
tmp[0].x = m_LeftFeatures[i].x;
tmp[0].y = m_LeftFeatures[i].y;
cvCircle(img, tmp[0], 5, cvScalarAll(0), 1);
}
CalibrateCamera(_3DWorldPoint, m_LeftFeatures, m_ProMat);
printf(%.3lf\t%.3lf\t%.3lf\t%.3lf\n, m_ProMat-data.fl[0], m_ProMat-data.fl[1], m_ProMat-data.fl[2], m_ProMat-data.fl[3] );
printf(%.3lf\t%.3lf\t%.3lf\t%.3lf\n, m_ProMat-data.fl[4], m_ProMat-data.fl[5], m_ProMat-data.fl[6], m_ProMat-data.fl[7] );
printf(%.3lf\t%.3lf\t%.3lf\t%.3lf\n, m_ProMat-data.fl[8], m_ProMat-data.fl[9], m_ProMat-data.fl[10], m_ProMat-data.fl[11] );
//윈도우창에출력
cvShowImage(Left Image, img);
}
void doWork_B()
{
//원본이미지복사
IplImage *img = cvCloneImage(imgB);
//cvGoodFeaturesToTrack을수행하기위해서변수값설정
int corner_count = MAX_CORNERS;
CvPoint2D32f* cornersA = new CvPoint2D32f[ MAX_CORNERS ];
//cvGoodFeaturesToTrack 함수를위한버퍼이미지생성
IplImage* eig_image = cvCreateImage( cvGetSize(img), IPL_DEPTH_32F, 1 );
IplImage* tmp_image = cvCreateImage( cvGetSize(img), IPL_DEPTH_32F, 1 );
//변수값화면에출력
printf(Qulity : %g, MinDist:%g\n, fql, dmd);
//함수수행
cvGoodFeaturesToTrack( img, eig_image, tmp_image,cornersA, &corner_count,0.06, 27);
//수행결과를그림에표시한다.
corner_count = 6;
for(int i=0;icorner_count;i++)
{
CvPoint tmp[2];
tmp[0].x = cornersA[i].x-3;
tmp[0].y = cornersA[i].y-3;
tmp[1].x = cornersA[i].x+3;
tmp[1].y = cornersA[i].y+3;
// cvRectangle(img, tmp[0], tmp[1], cvScalarAll(0));
tmp[0].x = m_RightFeatures[i].x;
tmp[0].y = m_RightFeatures[i].y;
cvCircle(img, tmp[0], 5, cvScalarAll(0), 1);
}
//윈도우창에출력
cvShowImage(Right Image, img);
}
// Trackbar callback: converts the slider position (percent) into the global
// quality level and redraws the left image.
void on_qulity_level_A(int pos)
{
    // A quality level of 0 would be invalid, so substitute a small
    // positive value instead.
    fql = (pos == 0) ? 0.01f : (float)pos / 100;
    doWork_A();
}
// Trackbar callback: converts the slider position (percent) into the global
// quality level and redraws the right image.
void on_qulity_level_B(int pos)
{
    // A quality level of 0 would be invalid, so substitute a small
    // positive value instead.
    fql = (pos == 0) ? 0.01f : (float)pos / 100;
    doWork_B();
}
//min_distance 이벤트핸들러
// Trackbar callback: stores the slider position as the global minimum
// corner distance and redraws the left image.
void on_min_distance_A(int pos)
{
    // A minimum distance of 0 would be invalid, so substitute 0.1 instead.
    dmd = (pos == 0) ? 0.1 : (double)pos;
    doWork_A();
}
// Trackbar callback: stores the slider position as the global minimum
// corner distance and redraws the right image.
void on_min_distance_B(int pos)
{
    // A minimum distance of 0 would be invalid, so substitute 0.1 instead.
    dmd = (pos == 0) ? 0.1 : (double)pos;
    doWork_B();
}
// Estimates a 3x4 camera projection matrix P from six 3D<->2D point
// correspondences using the Direct Linear Transform (DLT): each
// correspondence contributes two rows to a 12x12 system A p = 0, and p is
// the right singular vector of A associated with the smallest singular
// value. The result is normalized so that P(2,3) == 1 and written into
// m_ProjectionMatrix (3x4, CV_32FC1).
//
// Fixes relative to the original:
//  - loop condition had lost its '<' (paste damage) and a stray "cbr /"
//    token broke one statement;
//  - rows were written at i and i+1 (overlapping) instead of 2i and 2i+1;
//  - every point was indexed [0] instead of [i], so only the first
//    correspondence was ever used;
//  - the SVD was taken of A*A^T, whose eigenvectors are the LEFT singular
//    vectors of A — the null vector must come from the SVD of A itself;
//  - two debug lines overwrote P(0,0)=5 and P(2,3)=10 before normalization;
//  - all intermediate matrices were leaked.
void CalibrateCamera(CvPoint3D32f* m_3DWorldPoint, CvPoint2D32f* m_ImgCoord, CvMat* m_ProjectionMatrix)
{
    CvMat* A = cvCreateMat(12, 12, CV_32FC1);
    CvMat* U = cvCreateMat(12, 12, CV_32FC1);
    CvMat* D = cvCreateMat(12, 12, CV_32FC1);
    CvMat* V = cvCreateMat(12, 12, CV_32FC1);
    cvSetZero(A);

    // Two DLT rows per correspondence.
    for (int i = 0; i < 6; i++)
    {
        const int r = 2 * i;
        const double X = m_3DWorldPoint[i].x;
        const double Y = m_3DWorldPoint[i].y;
        const double Z = m_3DWorldPoint[i].z;
        const double u = m_ImgCoord[i].x;
        const double v = m_ImgCoord[i].y;

        // Row for the u (x-pixel) equation.
        cvmSet(A, r, 0, X);
        cvmSet(A, r, 1, Y);
        cvmSet(A, r, 2, Z);
        cvmSet(A, r, 3, 1.);
        cvmSet(A, r, 4, 0.);
        cvmSet(A, r, 5, 0.);
        cvmSet(A, r, 6, 0.);
        cvmSet(A, r, 7, 0.);
        cvmSet(A, r, 8, -u * X);
        cvmSet(A, r, 9, -u * Y);
        cvmSet(A, r, 10, -u * Z);
        cvmSet(A, r, 11, -u);

        // Row for the v (y-pixel) equation.
        cvmSet(A, r + 1, 0, 0.);
        cvmSet(A, r + 1, 1, 0.);
        cvmSet(A, r + 1, 2, 0.);
        cvmSet(A, r + 1, 3, 0.);
        cvmSet(A, r + 1, 4, X);
        cvmSet(A, r + 1, 5, Y);
        cvmSet(A, r + 1, 6, Z);
        cvmSet(A, r + 1, 7, 1.);
        cvmSet(A, r + 1, 8, -v * X);
        cvmSet(A, r + 1, 9, -v * Y);
        cvmSet(A, r + 1, 10, -v * Z);
        cvmSet(A, r + 1, 11, -v);
    }

    // A = U D V^T; with CV_SVD_V_T the matrix V is returned transposed,
    // so the null vector of A is the LAST ROW of V.
    cvSVD(A, D, U, V, CV_SVD_U_T | CV_SVD_V_T);

    // Reshape the 12-vector into the 3x4 projection matrix.
    for (int k = 0; k < 12; k++)
        cvmSet(m_ProjectionMatrix, k / 4, k % 4, cvmGet(V, 11, k));

    // Normalize so that P(2,3) == 1 (skip if that entry is exactly zero).
    const double scale = cvmGet(m_ProjectionMatrix, 2, 3);
    if (scale != 0.)
        cvConvertScale(m_ProjectionMatrix, m_ProjectionMatrix, 1. / scale);

    cvReleaseMat(&V);
    cvReleaseMat(&D);
    cvReleaseMat(&U);
    cvReleaseMat(&A);
}
// Fills the global correspondence tables with the hand-measured calibration
// data: six 3D corner points of the box and their pixel coordinates in the
// left and right photographs.
void LoadCoordinate(void)
{
    static const float world[6][3] = {
        {   0.f,   0.f,  0.f },
        { 150.f,   0.f,  0.f },
        { 150.f, 230.f,  0.f },
        { 150.f, 230.f, 60.f },
        { 150.f,   0.f, 60.f },
        {   0.f,   0.f, 60.f },
    };
    static const float left[6][2] = {
        { 143.f, 312.f },
        { 403.f, 430.f },
        { 532.f, 269.f },
        { 549.f, 165.f },
        { 420.f, 287.f },
        { 133.f, 191.f },
    };
    static const float right[6][2] = {
        { 128.f, 233.f },
        { 275.f, 366.f },
        { 546.f, 252.f },
        { 567.f, 136.f },
        { 282.f, 214.f },
        { 114.f, 112.f },
    };

    for (int i = 0; i < 6; i++)
    {
        _3DWorldPoint[i].x = world[i][0];
        _3DWorldPoint[i].y = world[i][1];
        _3DWorldPoint[i].z = world[i][2];
        m_LeftFeatures[i].x = left[i][0];
        m_LeftFeatures[i].y = left[i][1];
        m_RightFeatures[i].x = right[i][0];
        m_RightFeatures[i].y = right[i][1];
    }
}
안녕하세요~ 영상처리 입문자입니다.
제가 초보다 보니까 인터넷에 돌아다니는 소스를 이것저것 조합해서 이미지(직육면체)의 각 모서리를 잡고 투영행렬을 구하는
소스를 만들어 보았습니다. 그런데 한눈에도 보이듯이 소스가 매우 복잡합니다. 제가 원하는 것은 2장의 이미지를 불러와서,
(이미지상의 사각형의 각 꼭짓점 좌표 x, y를 알고 실제 박스의 3차원 좌표를 알고 있을 때) 좌표를 미리 다 정해 놓고 투영행렬을 구하는 것입니다. 위 소스를 간단하게 정리 좀 해주실 분 없나요 ㅠㅠㅠ 제가 아직 소스 분석이 약한지라 너무 어렵네요. 또한 그레이스케일이 아니라 컬러 이미지로 표현되게끔도 부탁드립니다. CV_LOAD_IMAGE_COLOR로 소스를 바꾸면 에러가 자꾸 나서요. 부탁드립니다, 고수님들!