(OpenCV Python)在BFMatcher中匹配后如何获取关键点?

2024-09-27 00:23:24 发布

您现在位置:Python中文网/ 问答频道 /正文

我正在用python编写一个项目,需要从描述符中获取匹配项。我无法从图像中获取存储点坐标的关键点。这些坐标实际上是需要的。我无法通过使用DMatch类的trainIdx和queryIdx属性获得它们,DMatch类是BFMatcher()中match函数的输出。

我的逻辑如下:

  1. 使用distance属性按升序对匹配项进行排序
  2. 对于选定的匹配项,获取queryIdx和trainIdx
  3. queryIdx和trainIdx对应于关键点list中的关键点索引
  4. 使用索引queryIdx和trainIdx从关键点列表中获取关键点

我在第四步失败了。我的结论是,这是因为缺乏OpenCV的经验。你能帮我吗

如何从使用BFMatcher()中的匹配函数后选择的描述符中获取关键点?

这是我的代码:

import cv2
import os
import numpy as np
import sys


# Directory holding the stereo task images (left_*.png / right_*.png).
path = os.path.join(os.path.expanduser('~'), 'catkin_ws/src/cse598a2/images/task_3_and_4')
entries = os.listdir(path)

# 3-D object points for a 9x6 chessboard grid lying on the z = 0 plane:
# (0,0,0), (1,0,0), ... (8,5,0).
object_points = np.zeros((6 * 9, 3), np.float32)
object_points[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2)

objectt_points = []   # 3-D points in real-world space
image_points_L = []   # 2-D points in the left image plane
image_points_R = []   # 2-D points in the right image plane
image_points = []

size_image = (6, 9)

# Load one stereo pair (left and right view of the same scene).
image_L = cv2.imread(os.path.join(path, 'left_0.png'))
image_R = cv2.imread(os.path.join(path, 'right_0.png'))


# Load the left camera's calibration from its YAML file: the "new"
# (optimal) camera matrix, the distortion coefficients, and the raw
# intrinsic matrix.
object_file_storage = cv2.FileStorage('camera_L.yml', flags=cv2.FILE_STORAGE_READ)
newcameramtx = object_file_storage.getNode('cam_L_ncmat').mat()
dist = object_file_storage.getNode('cam_L_dispar').mat()
matrix_L = object_file_storage.getNode('cam_L_matrix').mat()
object_file_storage.release()

# Load the right camera's intrinsic matrix.
# BUG FIX: this previously assigned the right camera matrix back into
# matrix_L, clobbering the left intrinsics that are used to build the
# undistortion maps below.
object_file_storage = cv2.FileStorage('camera_R.yml', flags=cv2.FILE_STORAGE_READ)
matrix_R = object_file_storage.getNode('cam_R_matrix').mat()
object_file_storage.release()

image_L_gray = cv2.cvtColor(image_L, cv2.COLOR_BGR2GRAY)
image_R_gray = cv2.cvtColor(image_R, cv2.COLOR_BGR2GRAY)
height, width = image_L_gray.shape[:2]

# Build undistortion maps from the LEFT calibration and remap both
# grayscale images.
# NOTE(review): the right image is remapped with the left camera's maps —
# confirm this is intended; normally each camera needs its own maps built
# from its own intrinsics/distortion.
map_1, map_2 = cv2.initUndistortRectifyMap(matrix_L, dist, None, newcameramtx, (width, height), cv2.CV_32FC1)
image_undistorted_L = cv2.remap(image_L_gray, map_1, map_2, cv2.INTER_LINEAR)
image_undistorted_R = cv2.remap(image_R_gray, map_1, map_2, cv2.INTER_LINEAR)



# Detect ORB keypoints and compute their binary descriptors in both
# undistorted images, capped at 200 features per image.
orb = cv2.ORB_create(nfeatures=200)
keypoints_L, des_L = orb.detectAndCompute(image_undistorted_L, None)
keypoints_R, des_R = orb.detectAndCompute(image_undistorted_R, None)

# Brute-force matcher with Hamming distance (the appropriate norm for
# ORB's binary descriptors).  crossCheck=True keeps only mutual best
# matches: left->right and right->left must agree.
matcher_BF = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = matcher_BF.match(des_L, des_R)

#for each in matches:
#    print each
#    print each.distance
#sys.exit(0)
# Keep a shallow copy of the raw matches (the original list is left
# untouched by the sort below).
matches_copy = list(matches)

print(len(keypoints_L))
print(len(keypoints_R))

# Sort matches by ascending descriptor distance (best matches first).
# Replaces a hand-rolled O(n^2) selection sort (with an additional O(n)
# membership test per step) with the built-in O(n log n) stable sort —
# strict `<` selection picks the first minimum, so a stable sort yields
# the identical ordering.
matches_sorted = sorted(matches, key=lambda m: m.distance)


# Collect the index pairs and the actual matched keypoints for every
# sufficiently good match (Hamming distance below 50).
indexes = list()
locs = list()

for each in matches_sorted:
    if each.distance < 50:
        indexes.append([each.queryIdx, each.trainIdx])
        print('idxs:     %d %d' % (each.queryIdx, each.trainIdx))
        print('lens:     %d %d' % (len(keypoints_L), len(keypoints_R)))
        # BUG FIX: queryIdx indexes the *query* keypoints (left image —
        # the first argument to match()) and trainIdx indexes the *train*
        # keypoints (right image).  The original used queryIdx for both,
        # which raises IndexError whenever queryIdx >= len(keypoints_R)
        # and otherwise pairs the wrong points.
        # BUG FIX: `temporary` was created once before the loop, so every
        # locs entry accumulated all previous pairs; build a fresh
        # [left_kp, right_kp] pair per match instead.
        locs.append([keypoints_L[each.queryIdx], keypoints_R[each.trainIdx]])

Tags: inimagemapforobjectstoragecv2points

热门问题