Commit 6964789c authored by email's avatar email
Browse files

cleaning code iteraton one

parent ec8221eb
# Top-level build script: wires the aruco, OSLabDetector and ccalib
# sub-projects together and builds the experimental executable against them.
cmake_minimum_required(VERSION 3.8)
# Include helper macros (presumably setting up Boost and nlohmann/json
# environments — confirm against cmake/ directory contents).
include(cmake/SetBoostEnv.cmake)
include(cmake/SetNlohmannEnv.cmake)
# Sub-projects; each exports its *_INCLUDE / *_LIBRARY variables to this scope.
add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/aruco)
add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/OSLabDetector)
add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/ccalib)
# Collect all experimental sources into a single executable target.
file(GLOB EXPERIMENTAL_SOURCES experimental/*.cpp experimental/*.h)
set (TARGET EXPERIMENTAL_EXEC)
add_executable(${TARGET} ${EXPERIMENTAL_SOURCES})
# Diagnostic output: show the include paths exported by the sub-projects.
message(STATUS ${CCALIB_INCLUDE})
message(STATUS ${ARUCO_INCLUDE})
target_include_directories(${TARGET} PRIVATE ${CCALIB_INCLUDE} ${ARUCO_INCLUDE} ${OPENCV_INCLUDE} ${OSLabDetector_INCLUDE})
target_link_libraries(${TARGET} ${OpenCV_LIBS} ${CCALIB_LIBRARY} ${ARUCO_LIBRARY})
\ No newline at end of file
add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/ccalibTool)
......@@ -4,6 +4,7 @@
#include <opencv2/highgui.hpp>
#include <opencv2/features2d.hpp>
#include "opencv2/core/types.hpp"
#include <opencv2/aruco/charuco.hpp>
#include <opencv2/aruco/dictionary.hpp>
......@@ -35,6 +36,8 @@ class CharucoGridCornerDetector : public cv::FeatureDetector
bool m_showRejected = true;
bool m_refindStrategy = false;
//TODO put it into configFile
cv::Ptr<cv::aruco::Dictionary> m_dictionary = nullptr;
cv::Ptr<cv::aruco::CharucoBoard> m_charucoboard = nullptr;
......
......@@ -3,34 +3,19 @@
/// Default constructor.
///
/// Builds the detector's working objects from the configured member
/// parameters: default ArUco detector parameters, the predefined marker
/// dictionary selected by m_dictionaryId, and the ChArUco board described
/// by m_squaresX/m_squaresY/m_squareLength/m_markerLength.
CharucoGridCornerDetector::CharucoGridCornerDetector()
{
    m_detectorParams = cv::aruco::DetectorParameters::create();
    // Resolve the predefined ArUco dictionary chosen by m_dictionaryId.
    m_dictionary = cv::aruco::getPredefinedDictionary(
        cv::aruco::PREDEFINED_DICTIONARY_NAME(m_dictionaryId));
    // Create the ChArUco board object used later for corner interpolation.
    // (Removed an unused local `axisLength`; it was computed but never read.)
    m_charucoboard = cv::aruco::CharucoBoard::create(
        m_squaresX, m_squaresY, m_squareLength, m_markerLength, m_dictionary);
    // Keep a base-class handle to the board for generic aruco APIs.
    m_board = m_charucoboard.staticCast<cv::aruco::Board>();
}
// Out-of-line defaulted destructor: the class owns no raw resources, so the
// compiler-generated cleanup (releasing the cv::Ptr members) is sufficient.
CharucoGridCornerDetector::~CharucoGridCornerDetector() = default;
void CharucoGridCornerDetector::detect(cv::InputArray image, CV_OUT std::vector<cv::KeyPoint>& keypoints, cv::InputArray mask)
{
cv::Mat img = image.getMat();
......@@ -40,8 +25,6 @@ void CharucoGridCornerDetector::detect(cv::InputArray image, CV_OUT std::vector<
void CharucoGridCornerDetector::detectMat( cv::Mat &image, std::vector< cv::KeyPoint > &keypoints, const cv::Mat &mask)
{
std::vector< int > markerIds, charucoIds;
std::vector< std::vector< cv::Point2f > > markerCorners, rejectedMarkers;
std::vector< cv::Point2f > charucoCorners;
......@@ -50,7 +33,6 @@ void CharucoGridCornerDetector::detectMat( cv::Mat &image, std::vector< cv::KeyP
// detect markers
cv::aruco::detectMarkers(image, m_dictionary, markerCorners, markerIds, m_detectorParams,
rejectedMarkers);
// interpolate charuco corners
int interpolatedCorners = 0;
......@@ -59,8 +41,6 @@ void CharucoGridCornerDetector::detectMat( cv::Mat &image, std::vector< cv::KeyP
cv::aruco::interpolateCornersCharuco(markerCorners, markerIds, image, m_charucoboard,
charucoCorners, charucoIds, camMatrix, distCoeffs);
cv::Mat imageCopy;
// draw results
image.copyTo(imageCopy);
......@@ -75,8 +55,6 @@ void CharucoGridCornerDetector::detectMat( cv::Mat &image, std::vector< cv::KeyP
cv::aruco::drawDetectedCornersCharuco(imageCopy, charucoCorners, charucoIds, color);
}
//if (validPose)
//cv::aruco::drawAxis(imageCopy, camMatrix, distCoeffs, rvec, tvec, m_axisLength);
keypoints.clear();
for (size_t i = 0; i < charucoCorners.size(); i++)
{
......@@ -85,12 +63,5 @@ void CharucoGridCornerDetector::detectMat( cv::Mat &image, std::vector< cv::KeyP
kp.pt = charucoCorners[i];
keypoints.push_back(kp);
}
//TO DO :
// implement a chessgridcorners filter
// implement net ?
//cv::imshow("out", imageCopy);
//char key = (char)cv::waitKey(10);
}
# aruco sub-project: builds the ARUCO static/shared library and exports its
# include path and target name to the parent scope.
set(ARUCO_INCLUDE ${CMAKE_CURRENT_LIST_DIR}/include)
set(ARUCO_LIBRARY ARUCO_LIBRARY)
# Export to the parent scope so sibling targets can consume the library.
set(ARUCO_INCLUDE ${ARUCO_INCLUDE} PARENT_SCOPE)
set(ARUCO_LIBRARY ${ARUCO_LIBRARY} PARENT_SCOPE)
add_library(${ARUCO_LIBRARY} ${ARUCO_SOURCES})
target_include_directories(${ARUCO_LIBRARY} PRIVATE ${ARUCO_INCLUDE} ${OPENCV_INCLUDE})
# Deduplicated: target_link_libraries and the PARENT_SCOPE exports were each
# listed twice (diff-merge residue).
target_link_libraries(${ARUCO_LIBRARY} ${OpenCV_LIBS})
# ccalib sub-project: builds the CCALIB library (camera calibration) on top of
# OpenCV, the aruco and OSLabDetector sub-projects, and nlohmann/json.
file(GLOB CCALIB_SOURCES include/*.h include/*.hpp src/*.cpp)
find_package(OpenCV REQUIRED)
set(CCALIB_INCLUDE ${CMAKE_CURRENT_LIST_DIR}/include)
set(CCALIB_LIBRARY CCALIB_LIBRARY)
# Export to the parent scope so sibling targets can consume the library.
set(CCALIB_INCLUDE ${CCALIB_INCLUDE} PARENT_SCOPE)
set(CCALIB_LIBRARY ${CCALIB_LIBRARY} PARENT_SCOPE)
# Deduplicated: add_library, target_include_directories and
# target_link_libraries were each invoked twice (diff-merge residue); the
# ARUCO/NLOHMANN-aware variants are kept.
add_library(${CCALIB_LIBRARY} ${CCALIB_SOURCES})
target_include_directories(${CCALIB_LIBRARY} PRIVATE ${CCALIB_INCLUDE} ${OPENCV_INCLUDE} ${ARUCO_INCLUDE} ${OSLabDetector_INCLUDE} ${NLOHMANN_INCLUDE})
target_link_libraries(${CCALIB_LIBRARY} ${OpenCV_LIBS} ${OSLabDetector_LIBRARY} ${ARUCO_LIBRARY})
......@@ -124,7 +124,7 @@ namespace cv {
return l;
}
void MultiCameraCalibration::loadImages()
void MultiCameraCalibration::loadImages( )
{
std::vector<std::string> file_list;
file_list = readStringList();
......@@ -199,26 +199,9 @@ namespace cv {
{
rms = cv::calibrateCamera(_objectPointsForEachCamera[camera], _imagePointsForEachCamera[camera],
image.size(), _cameraMatrix[camera], _distortCoeffs[camera], _omEachCamera[camera],
_tEachCamera[camera],
CALIB_FIX_K3 + CALIB_FIX_K4 + CALIB_FIX_K5 + CALIB_FIX_K6 +
CALIB_FIX_S1_S2_S3_S4 + CALIB_ZERO_TANGENT_DIST);
// CALIB_FIX_K3 set k3 to 0
// CALIB_FIX_K4 set k4 to 0
// CALIB_FIX_K5 set k5 to 0
// CALIB_FIX_K6 set k6 to 0
// CALIB_FIX_S1_S2_S3_S4 set s1 s2 s3 s4 to 0
// CALIB_ZERO_TANGENT_DIST set p1 and p2 to 0
// CALIB_RATIONAL_MODEL enables k3 k4 k5 k6
// CALIB_FIX_PRINCIPAL_POINT The principal point is not changed during the global optimization.
// CALIB_FIX_ASPECT_RATIO The functions considers only fy as a free parameter. The ratio fx/fy stays the same as in the input cameraMatrix .
//
//
_tEachCamera[camera], _flags);
idx = Mat(1, (int)_omEachCamera[camera].size(), CV_32S);
for (int i = 0; i < (int)idx.total(); ++i)
{
......@@ -653,6 +636,10 @@ namespace cv {
Mat objectPoints, imagePoints, proImagePoints;
objectPoints = this->_objectPointsForEachCamera[cameraVertex][PhotoIndex];
imagePoints = this->_imagePointsForEachCamera[cameraVertex][PhotoIndex];
if (this->_camType == PINHOLE)
{
......
This diff is collapsed.
This diff is collapsed.
Multi-camera Calibration {#tutorial_multi_camera_main}
====================================
This tutorial shows how to use the multiple-camera calibration toolbox. The toolbox is based on the use of a "random" pattern calibration object, so the tutorial has two main parts: an introduction to the "random" pattern, and multiple-camera calibration.
Random Pattern Calibration Object
-------------------------------
The random pattern is an image that is randomly generated. It is "random" so that it has many feature points. After generating it, one prints it out and uses it as a calibration object. The following two images are the random pattern and a photo taken of it.
![image](img/random_pattern.jpg)
![image](img/pattern_img.jpg)
To generate a random pattern, use the class ```cv::randpattern::RandomPatternGenerator``` in ```ccalib``` module. Run it as
```
cv::randpattern::RandomPatternGenerator generator(width, height);
generator.generatePattern();
pattern = generator.getPattern();
```
Here ```width``` and ```height``` are the width and height of the pattern image. After getting the pattern, print it out and take some photos of it.
Now we can use these images to calibrate camera. First, ```objectPoints``` and ```imagePoints``` need to be detected. Use class ```cv::randpattern::RandomPatternCornerFinder``` to detect them. A sample code can be
```
cv::randpattern::RandomPatternCornerFinder finder(patternWidth, patternHeight, nMiniMatches);
finder.loadPattern(pattern);
finder.computeObjectImagePoints(vecImg);
vector<Mat> objectPoints = finder.getObjectPoints();
vector<Mat> imagePoints = finder.getImagePoints();
```
Here the variables ```patternWidth``` and ```patternHeight``` are the physical pattern width and height in some user-defined unit. ```vecImg``` is a vector of images that stores the calibration images.
Second, use calibration functions like ```cv::calibrateCamera``` or ```cv::omnidir::calibrate``` to calibrate camera.
Multiple Cameras Calibration
-------------------------------
Now we move to multiple camera calibration, so far this toolbox must use random pattern object.
To calibrate multiple cameras, we first need to take some photos of the random pattern. Of course, to calibrate the extrinsic parameters, one pattern needs to be viewed by multiple cameras (at least two) at the same time. Another thing is that, to help the program know which camera took the photo and which pattern it shows, the image files should be named as "cameraIdx-timestamp.*". Photos with the same timestamp are of the same object, taken by several cameras. In addition, cameraIdx should start from 0. Some examples of file names are "0-129.png", "0-187.png", "1-187.png", "2-129.png".
Then, we can run multiple cameras calibration as
```
cv::multicalib::MultiCameraCalibration multiCalib(cameraType, nCamera, inputFilename,patternWidth, patternHeight, showFeatureExtraction, nMiniMatches);
multiCalib.run();
multiCalib.writeParameters(outputFilename);
```
Here ```cameraType``` indicates the camera type; ```multicalib::MultiCameraCalibration::PINHOLE``` and ```multicalib::MultiCameraCalibration::OMNIDIRECTIONAL``` are supported. For omnidirectional cameras, you can refer to the ```cv::omnidir``` module for details. ```nCamera``` is the number of cameras. ```inputFilename``` is the name of a file generated by ```imagelist_creator``` from ```opencv/sample```. It stores the names of the random pattern and calibration images; the first file name is the name of the random pattern. ```patternWidth``` and ```patternHeight``` are the physical width and height of the pattern. ```showFeatureExtraction``` is a flag that indicates whether to show the feature extraction process. ```nMiniMatches``` is the minimal number of points that should be detected in each frame; otherwise the frame is abandoned. ```outputFilename``` is an XML file name used to store the parameters.
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment