OpenCV ORB not finding matches once rotation/scale invariances are introduced
I don't think there is anything wrong with your code. From my experience, OpenCV's ORB is sensitive to scale variations.
You can probably confirm this with a small test: make some images with rotation only and some with scale variations only. The rotation-only ones will probably match fine, but the scale-only ones won't (I believe decreasing scale is the worst case).
I also advise you to try the OpenCV version from trunk (see the OpenCV site for compile instructions); ORB has been updated since 2.3.1 and works a little better, but it still has those scale problems.
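A minimal sketch of that test (not part of the original answer; the file name, rotation angle, and scale factor are just placeholders): generate a rotation-only copy and a scale-only copy of one image, then run the same detect/extract/match pipeline on each pair and compare how many matches survive.
cv::Mat img = cv::imread("test.png", CV_LOAD_IMAGE_GRAYSCALE); // placeholder file name
// rotation-only copy: 30 degrees about the image centre, scale kept at 1.0
cv::Mat rot = cv::getRotationMatrix2D(cv::Point2f(img.cols / 2.0f, img.rows / 2.0f), 30, 1.0);
cv::Mat rotated;
cv::warpAffine(img, rotated, rot, img.size());
// scale-only copy: shrink to 60% in both directions
cv::Mat scaled;
cv::resize(img, scaled, cv::Size(), 0.6, 0.6);
// run the ORB detect/compute/knnMatch pipeline from the question on (img, rotated)
// and on (img, scaled); the rotated pair should keep far more matches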
I am working on a project using the ORB feature detector in OpenCV 2.3.1. I am finding matches between 8 different images, 6 of which are very similar (20 cm difference in camera position along a linear slider, so no scale or rotational variance), and then 2 images taken from about a 45 degree angle to either side. My code finds plenty of accurate matches between the very similar images, but few to none for the images taken from the more different perspective. I have included what I think are the pertinent parts of my code. Please let me know if you need more information.
// set parameters
int numKeyPoints = 1500;
float distThreshold = 15.0;
//instantiate detector, extractor, matcher
cv::Ptr<cv::FeatureDetector> detector = new cv::OrbFeatureDetector(numKeyPoints);
cv::Ptr<cv::DescriptorExtractor> extractor = new cv::OrbDescriptorExtractor;
cv::Ptr<cv::DescriptorMatcher> matcher = new cv::BruteForceMatcher<cv::HammingLUT>;
//Load input image detect keypoints
cv::Mat img1;
std::vector<cv::KeyPoint> img1_keypoints;
cv::Mat img1_descriptors;
cv::Mat img2;
std::vector<cv::KeyPoint> img2_keypoints;
cv::Mat img2_descriptors;
img1 = cv::imread(fList[0].string(), CV_LOAD_IMAGE_GRAYSCALE);
img2 = cv::imread(fList[1].string(), CV_LOAD_IMAGE_GRAYSCALE);
detector->detect(img1, img1_keypoints);
detector->detect(img2, img2_keypoints);
extractor->compute(img1, img1_keypoints, img1_descriptors);
extractor->compute(img2, img2_keypoints, img2_descriptors);
//Match keypoints using knnMatch to find the single best match for each keypoint
//Then cull results that fall below given distance threshold
std::vector<std::vector<cv::DMatch> > matches;
matcher->knnMatch(img1_descriptors, img2_descriptors, matches, 1);
int matchCount=0;
for (size_t n = 0; n < matches.size(); ++n) {
    if (matches[n].size() > 0) {
        if (matches[n][0].distance > distThreshold) {
            matches[n].erase(matches[n].begin());
        } else {
            ++matchCount;
        }
    }
}
I ended up getting enough useful matches by changing how I filter the matches. My previous method was discarding a lot of good matches based solely on their distance value. This RobustMatcher class, which I found in the OpenCV2 Computer Vision Application Programming Cookbook, ended up working wonders. Now that all of my matches are accurate, I have been able to get good enough results by bumping up the number of keypoints the ORB detector looks for. Using the RobustMatcher with SIFT or SURF still gives much better results (a sketch of swapping SURF in follows the usage code below), but the RobustMatcher now gets usable data out of ORB.
//RobustMatcher class taken from OpenCV2 Computer Vision Application Programming Cookbook Ch 9
class RobustMatcher {
private:
// pointer to the feature point detector object
cv::Ptr<cv::FeatureDetector> detector;
// pointer to the feature descriptor extractor object
cv::Ptr<cv::DescriptorExtractor> extractor;
// pointer to the matcher object
cv::Ptr<cv::DescriptorMatcher > matcher;
float ratio; // max ratio between 1st and 2nd NN
bool refineF; // if true will refine the F matrix
double distance; // min distance to epipolar
double confidence; // confidence level (probability)
public:
RobustMatcher() : ratio(0.65f), refineF(true),
confidence(0.99), distance(3.0) {
// ORB is the default feature
detector= new cv::OrbFeatureDetector();
extractor= new cv::OrbDescriptorExtractor();
matcher= new cv::BruteForceMatcher<cv::HammingLUT>;
}
// Set the feature detector
void setFeatureDetector(
cv::Ptr<cv::FeatureDetector>& detect) {
detector= detect;
}
// Set the descriptor extractor
void setDescriptorExtractor(
cv::Ptr<cv::DescriptorExtractor>& desc) {
extractor= desc;
}
// Set the matcher
void setDescriptorMatcher(
cv::Ptr<cv::DescriptorMatcher>& match) {
matcher= match;
}
// Set confidence level
void setConfidenceLevel(
double conf) {
confidence= conf;
}
//Set MinDistanceToEpipolar
void setMinDistanceToEpipolar(
double dist) {
distance= dist;
}
//Set ratio
void setRatio(
float rat) {
ratio= rat;
}
// Clear matches for which NN ratio is > than threshold
// return the number of removed points
// (corresponding entries being cleared,
// i.e. size will be 0)
int ratioTest(std::vector<std::vector<cv::DMatch> >
&matches) {
int removed=0;
// for all matches
for (std::vector<std::vector<cv::DMatch> >::iterator
matchIterator= matches.begin();
matchIterator!= matches.end(); ++matchIterator) {
// if 2 NN have been identified
if (matchIterator->size() > 1) {
// check distance ratio
if ((*matchIterator)[0].distance/
(*matchIterator)[1].distance > ratio) {
matchIterator->clear(); // remove match
removed++;
}
} else { // does not have 2 neighbours
matchIterator->clear(); // remove match
removed++;
}
}
return removed;
}
// Insert symmetrical matches in symMatches vector
void symmetryTest(
const std::vector<std::vector<cv::DMatch> >& matches1,
const std::vector<std::vector<cv::DMatch> >& matches2,
std::vector<cv::DMatch>& symMatches) {
// for all matches image 1 -> image 2
for (std::vector<std::vector<cv::DMatch> >::
const_iterator matchIterator1= matches1.begin();
matchIterator1!= matches1.end(); ++matchIterator1) {
// ignore deleted matches
if (matchIterator1->size() < 2)
continue;
// for all matches image 2 -> image 1
for (std::vector<std::vector<cv::DMatch> >::
const_iterator matchIterator2= matches2.begin();
matchIterator2!= matches2.end();
++matchIterator2) {
// ignore deleted matches
if (matchIterator2->size() < 2)
continue;
// Match symmetry test
if ((*matchIterator1)[0].queryIdx ==
(*matchIterator2)[0].trainIdx &&
(*matchIterator2)[0].queryIdx ==
(*matchIterator1)[0].trainIdx) {
// add symmetrical match
symMatches.push_back(
cv::DMatch((*matchIterator1)[0].queryIdx,
(*matchIterator1)[0].trainIdx,
(*matchIterator1)[0].distance));
break; // next match in image 1 -> image 2
}
}
}
}
// Identify good matches using RANSAC
// Return fundamental matrix
cv::Mat ransacTest(
const std::vector<cv::DMatch>& matches,
const std::vector<cv::KeyPoint>& keypoints1,
const std::vector<cv::KeyPoint>& keypoints2,
std::vector<cv::DMatch>& outMatches) {
// Convert keypoints into Point2f
std::vector<cv::Point2f> points1, points2;
cv::Mat fundemental;
for (std::vector<cv::DMatch>::
const_iterator it= matches.begin();
it!= matches.end(); ++it) {
// Get the position of left keypoints
float x= keypoints1[it->queryIdx].pt.x;
float y= keypoints1[it->queryIdx].pt.y;
points1.push_back(cv::Point2f(x,y));
// Get the position of right keypoints
x= keypoints2[it->trainIdx].pt.x;
y= keypoints2[it->trainIdx].pt.y;
points2.push_back(cv::Point2f(x,y));
}
// Compute F matrix using RANSAC
std::vector<uchar> inliers(points1.size(),0);
if (points1.size()>0&&points2.size()>0){
fundemental= cv::findFundamentalMat(
cv::Mat(points1),cv::Mat(points2), // matching points
inliers, // match status (inlier or outlier)
CV_FM_RANSAC, // RANSAC method
distance, // distance to epipolar line
confidence); // confidence probability
// extract the surviving (inliers) matches
std::vector<uchar>::const_iterator
itIn= inliers.begin();
std::vector<cv::DMatch>::const_iterator
itM= matches.begin();
// for all matches
for ( ;itIn!= inliers.end(); ++itIn, ++itM) {
if (*itIn) { // it is a valid match
outMatches.push_back(*itM);
}
}
if (refineF) {
// The F matrix will be recomputed with
// all accepted matches
// Convert keypoints into Point2f
// for final F computation
points1.clear();
points2.clear();
for (std::vector<cv::DMatch>::
const_iterator it= outMatches.begin();
it!= outMatches.end(); ++it) {
// Get the position of left keypoints
float x= keypoints1[it->queryIdx].pt.x;
float y= keypoints1[it->queryIdx].pt.y;
points1.push_back(cv::Point2f(x,y));
// Get the position of right keypoints
x= keypoints2[it->trainIdx].pt.x;
y= keypoints2[it->trainIdx].pt.y;
points2.push_back(cv::Point2f(x,y));
}
// Compute 8-point F from all accepted matches
if (points1.size()>0&&points2.size()>0){
fundemental= cv::findFundamentalMat(
cv::Mat(points1),cv::Mat(points2), // matches
CV_FM_8POINT); // 8-point method
}
}
}
return fundemental;
}
// Match feature points using symmetry test and RANSAC
// returns fundamental matrix
cv::Mat match(cv::Mat& image1,
cv::Mat& image2, // input images
// output matches and keypoints
std::vector<cv::DMatch>& matches,
std::vector<cv::KeyPoint>& keypoints1,
std::vector<cv::KeyPoint>& keypoints2) {
// 1a. Detection of the features
detector->detect(image1,keypoints1);
detector->detect(image2,keypoints2);
// 1b. Extraction of the descriptors
cv::Mat descriptors1, descriptors2;
extractor->compute(image1,keypoints1,descriptors1);
extractor->compute(image2,keypoints2,descriptors2);
// 2. Match the two image descriptors
// Construction of the matcher
//cv::BruteForceMatcher<cv::L2<float>> matcher;
// from image 1 to image 2
// based on k nearest neighbours (with k=2)
std::vector<std::vector<cv::DMatch> > matches1;
matcher->knnMatch(descriptors1,descriptors2,
matches1, // vector of matches (up to 2 per entry)
2); // return 2 nearest neighbours
// from image 2 to image 1
// based on k nearest neighbours (with k=2)
std::vector<std::vector<cv::DMatch> > matches2;
matcher->knnMatch(descriptors2,descriptors1,
matches2, // vector of matches (up to 2 per entry)
2); // return 2 nearest neighbours
// 3. Remove matches for which NN ratio is
// > than threshold
// clean image 1 -> image 2 matches
int removed= ratioTest(matches1);
// clean image 2 -> image 1 matches
removed= ratioTest(matches2);
// 4. Remove non-symmetrical matches
std::vector<cv::DMatch> symMatches;
symmetryTest(matches1,matches2,symMatches);
// 5. Validate matches using RANSAC
cv::Mat fundemental= ransacTest(symMatches,
keypoints1, keypoints2, matches);
// return the found fundamental matrix
return fundemental;
}
};
// set parameters
int numKeyPoints = 1500;
//Instantiate robust matcher
RobustMatcher rmatcher;
//instantiate detector, extractor, matcher
cv::Ptr<cv::FeatureDetector> detector = new cv::OrbFeatureDetector(numKeyPoints);
cv::Ptr<cv::DescriptorExtractor> extractor = new cv::OrbDescriptorExtractor;
cv::Ptr<cv::DescriptorMatcher> matcher = new cv::BruteForceMatcher<cv::HammingLUT>;
rmatcher.setFeatureDetector(detector);
rmatcher.setDescriptorExtractor(extractor);
rmatcher.setDescriptorMatcher(matcher);
//Load input image detect keypoints
cv::Mat img1;
std::vector<cv::KeyPoint> img1_keypoints;
cv::Mat img1_descriptors;
cv::Mat img2;
std::vector<cv::KeyPoint> img2_keypoints;
cv::Mat img2_descriptors;
std::vector<cv::DMatch> matches;
img1 = cv::imread(fList[0].string(), CV_LOAD_IMAGE_GRAYSCALE);
img2 = cv::imread(fList[1].string(), CV_LOAD_IMAGE_GRAYSCALE);
rmatcher.match(img1, img2, matches, img1_keypoints, img2_keypoints);
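As noted above, the RobustMatcher still does better with SIFT or SURF. A minimal sketch of swapping SURF in through the setters, assuming the OpenCV 2.3.x SURF classes (SurfFeatureDetector / SurfDescriptorExtractor; the Hessian threshold of 400 is only a placeholder):
cv::Ptr<cv::FeatureDetector> surfDetector = new cv::SurfFeatureDetector(400.0); // placeholder threshold
cv::Ptr<cv::DescriptorExtractor> surfExtractor = new cv::SurfDescriptorExtractor;
// SURF descriptors are float vectors, so match with L2 distance instead of Hamming
cv::Ptr<cv::DescriptorMatcher> surfMatcher = new cv::BruteForceMatcher<cv::L2<float> >;
rmatcher.setFeatureDetector(surfDetector);
rmatcher.setDescriptorExtractor(surfExtractor);
rmatcher.setDescriptorMatcher(surfMatcher);
std::vector<cv::DMatch> surfMatches;
std::vector<cv::KeyPoint> surfKeypoints1, surfKeypoints2;
rmatcher.match(img1, img2, surfMatches, surfKeypoints1, surfKeypoints2);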
I had a similar problem with OpenCV in Python and came here via Google.
To solve my problem I wrote the Python code below for filtering matches, based on @KLowes' solution. I will share it here in case anyone else has the same problem:
import cv2
import numpy as np

# tunable parameters (the values here are assumptions, adjust to your data)
ratio = 0.65                   # max allowed distance relative to the mean match distance
minimalCountForHomography = 8  # minimum number of matches needed for the RANSAC step

def filter_distance(matches):
    """ Clear matches for which NN ratio is > than threshold """
    dist = [m.distance for m in matches]
    thres_dist = (sum(dist) / len(dist)) * ratio
    sel_matches = [m for m in matches if m.distance < thres_dist]
    # print '#selected matches:%d (out of %d)' % (len(sel_matches), len(matches))
    return sel_matches

def filter_asymmetric(matches, matches2, k_scene, k_ftr):
    """ keep only symmetric matches """
    sel_matches = []
    for match1 in matches:
        for match2 in matches2:
            if match1.queryIdx < len(k_ftr) and match2.queryIdx < len(k_scene) and \
               match2.trainIdx < len(k_ftr) and match1.trainIdx < len(k_scene) and \
               k_ftr[match1.queryIdx] == k_ftr[match2.trainIdx] and \
               k_scene[match1.trainIdx] == k_scene[match2.queryIdx]:
                sel_matches.append(match1)
                break
    return sel_matches

def filter_ransac(matches, kp_scene, kp_ftr, countIterations=2):
    if countIterations < 1 or len(kp_scene) < minimalCountForHomography:
        return matches
    p_scene = []
    p_ftr = []
    for m in matches:
        p_scene.append(kp_scene[m.queryIdx].pt)
        p_ftr.append(kp_ftr[m.trainIdx].pt)
    if len(p_scene) < minimalCountForHomography:
        return None
    F, mask = cv2.findFundamentalMat(np.float32(p_ftr), np.float32(p_scene), cv2.FM_RANSAC)
    sel_matches = []
    for m, status in zip(matches, mask):
        if status:
            sel_matches.append(m)
    # print '#ransac selected matches:%d (out of %d)' % (len(sel_matches), len(matches))
    return filter_ransac(sel_matches, kp_scene, kp_ftr, countIterations-1)

def filter_matches(matches, matches2, k_scene, k_ftr):
    matches = filter_distance(matches)
    matches2 = filter_distance(matches2)
    matchesSym = filter_asymmetric(matches, matches2, k_scene, k_ftr)
    if len(k_scene) >= minimalCountForHomography:
        return filter_ransac(matchesSym, k_scene, k_ftr)
To filter the matches, filter_matches(matches, matches2, k_scene, k_ftr) has to be called, where matches and matches2 are the matches obtained by the ORB matcher and k_scene and k_ftr are the corresponding keypoints.