OpenCV stereo fisheye calibration

Published 2024-10-03 04:36:56


I am using an Insta360 Evo, but I use it as a stereo camera and would like to calibrate it. For this I want to use the OpenCV fisheye module. Calibrating each camera individually works, but the stereoCalibrate function does not. There I always get the following error message:

cv2.error: OpenCV(4.5.1) C:\Users\appveyor\AppData\Local\Temp\1\pip-req-build-wvn_it83\opencv\modules\calib3d\src\fisheye.cpp:1051: error: (-215:Assertion failed) abs_max < threshold in function 'cv::fisheye::stereoCalibrate'

When I look at fisheye.cpp, the error should only occur when the calibration data are bad, but I have already swapped the data several times and it still does not work. Does OpenCV simply not work with a 180-degree field of view? If it is possible at all, is there another way to calibrate a stereo camera?
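For reference, before the stereo step I sanity-check the single-camera result by undistorting one frame. Below is a minimal sketch of that check (the .npy and image paths are placeholders for my actual files):

import cv2
import numpy as np

# Intrinsics saved by the single-camera script below (placeholder prefix).
K = np.load("paramsLeft_k.npy")
D = np.load("paramsLeft_d.npy")

# Any frame grabbed from the left video (placeholder path).
img = cv2.imread("left_frame.png")
h, w = img.shape[:2]

# Undistort with the identity rotation and the original K; if K and D are
# sane, straight edges in the scene should come out roughly straight.
map1, map2 = cv2.fisheye.initUndistortRectifyMap(K, D, np.eye(3), K, (w, h), cv2.CV_16SC2)
undistorted = cv2.remap(img, map1, map2, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)
cv2.imwrite("left_frame_undistorted.png", undistorted)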

Here is my code:

鱼眼校准.py->;一个鱼眼照相机

import cv2
assert cv2.__version__[0] >= '3', 'The fisheye module requires opencv version >= 3.0.0'

from VideoReader import VideoReader

import os
import numpy as np


class calibrateFisheye(object):
    def __init__(self, videoIn, parameterOut, CHECKERBOARD=(5,7)):
        self.video = VideoReader(videoIn)
        self.parameterOut = parameterOut
        self.CHECKERBOARD = CHECKERBOARD

        #self.subpix_criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1)
        self.calibration_flags = cv2.fisheye.CALIB_RECOMPUTE_EXTRINSIC + cv2.fisheye.CALIB_CHECK_COND + cv2.fisheye.CALIB_FIX_SKEW

        #objp = np.zeros((1, CHECKERBOARD[0] * CHECKERBOARD[1], 3), np.float32)
        #objp[0, :, :2] = np.mgrid[0:CHECKERBOARD[0], 0:CHECKERBOARD[1]].T.reshape(-1, 2)

        self.subpix_criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1)
        objp = np.zeros((CHECKERBOARD[0] * CHECKERBOARD[1], 1, 3), np.float64)
        objp[:, 0, :2] = np.mgrid[0:CHECKERBOARD[0], 0:CHECKERBOARD[1]].T.reshape(-1, 2)
        self.objp = objp

        self.objpoints = []  # 3d point in real world space
        self.imgpoints = []  # 2d points in image plane.

        self.computePoints()
        self.calibrate()
        self.saveNp()


    def _skip(self, count):
        for i in range(count):
            retVideo, _ = self.video.read()
            if not retVideo:
                return

    def computePoints(self):
        retVideo, f = self.video.read()

        while retVideo:
            print("Process-ID: {}\t\tCalibration finished to {:.2f} %".format(os.getpid(), self.video.progress()))
            gray = self.video.getGrayFrame()
            # Find the chess board corners
            ret, corners = cv2.findChessboardCorners(gray, self.CHECKERBOARD,
                                                     cv2.CALIB_CB_ADAPTIVE_THRESH + cv2.CALIB_CB_FAST_CHECK + cv2.CALIB_CB_NORMALIZE_IMAGE)
            if ret:
                self.objpoints.append(self.objp)
                cv2.cornerSubPix(gray, corners, (3, 3), (-1, -1), self.subpix_criteria)
                self.imgpoints.append(corners)

            self._skip(0)
            retVideo, f = self.video.read()
            # = False

    def calibrate(self):
        self.objpoints = np.array(self.objpoints)
        self.imgpoints = np.array(self.imgpoints)

        N_OK = len(self.objpoints)
        self.K = np.zeros((3, 3))
        self.D = np.zeros((4, 1))
        self.rvecs = [np.zeros((1, 1, 3), dtype=np.float64) for i in range(N_OK)]
        self.tvecs = [np.zeros((1, 1, 3), dtype=np.float64) for i in range(N_OK)]
        self.rms, _, _, _, _ = \
            cv2.fisheye.calibrate(
                self.objpoints,
                self.imgpoints,
                (self.video.width, self.video.height),
                self.K,
                self.D,
                self.rvecs,
                self.tvecs,
                self.calibration_flags,
                (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 1e-6)
            )

    def saveNp(self):
        np.save(self.parameterOut + "k", self.K)
        np.save(self.parameterOut + "d", self.D)
        np.save(self.parameterOut + "rvecs", self.rvecs)
        np.save(self.parameterOut + "tvecs", self.tvecs)
        np.save(self.parameterOut + "rms", self.rms)

立体鱼眼振动.py->;用于立体校准

from VideoReader import VideoReader

import cv2
assert cv2.__version__[0] >= '3', 'The fisheye module requires opencv version >= 3.0.0'

import numpy as np

class calibrationFisheyeStereo(object):
    def __init__(self, videoIn, parameterIn, parameterOut, CHECKERBOARD=(5,7), skip=0):
        print(skip)
        self.skip = skip
        self.CHECKERBOARD = CHECKERBOARD
        self.leftVideo = VideoReader(videoIn[0])
        self.rightVideo = VideoReader(videoIn[1])

        self.parameterLeft = parameterIn[0]
        self.parameterRight = parameterIn[1]
        self.parameterOut = parameterOut

        self.subpix_criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1)
        objp = np.zeros((CHECKERBOARD[0] * CHECKERBOARD[1], 1, 3), np.float64)
        objp[:, 0, :2] = np.mgrid[0:CHECKERBOARD[0], 0:CHECKERBOARD[1]].T.reshape(-1, 2)
        self.objp = objp

        self.objpoints = []  # 3d point in real world space
        self.imgpointsLeft = []  # 2d points in image plane.
        self.imgpointsRight = []  # 2d points in image plane.

        self.computePoints()
        self.calibrate()
        self.saveNp()

    def _skip(self, count):
        for i in range(count):
            retVideoL, _ = self.leftVideo.read()
            retVideoR, _ = self.rightVideo.read()
            if not retVideoR or not retVideoL:
                return

    def computePoints(self):
        retVideoL, f1 = self.leftVideo.read()
        retVideoR, f2 = self.rightVideo.read()

        i = 0

        while retVideoL and retVideoR:
            print("Calibration finished to {:.2f} %".format(self.leftVideo.progress()))
            grayL = self.leftVideo.getGrayFrame()
            grayR = self.rightVideo.getGrayFrame()

            retL, cornersL = cv2.findChessboardCorners(grayL, self.CHECKERBOARD, cv2.CALIB_CB_ADAPTIVE_THRESH + cv2.CALIB_CB_FAST_CHECK + cv2.CALIB_CB_NORMALIZE_IMAGE)
            retR, cornersR = cv2.findChessboardCorners(grayR, self.CHECKERBOARD, cv2.CALIB_CB_ADAPTIVE_THRESH + cv2.CALIB_CB_FAST_CHECK + cv2.CALIB_CB_NORMALIZE_IMAGE)

            if retL and retR:
                i += 1
                print(i)
                #img = cv2.drawChessboardCorners(grayL, self.CHECKERBOARD, cornersL, retL)
                #img = cv2.resize(img, (self.leftVideo.width//2, self.leftVideo.height//2), interpolation=cv2.INTER_AREA)
                #if cv2.waitKey(1) & 0xFF == ord('q'):
                    #exit()
                #cv2.imshow("windowTxt", img)
                self.objpoints.append(self.objp)

                cv2.cornerSubPix(grayL, cornersL, (3, 3), (-1, -1), self.subpix_criteria)
                self.imgpointsLeft.append(cornersL)

                cv2.cornerSubPix(grayR, cornersR, (3, 3), (-1, -1), self.subpix_criteria)
                self.imgpointsRight.append(cornersR)

            self._skip(self.skip)
            retVideoL, _ = self.leftVideo.read()
            retVideoR, _ = self.rightVideo.read()
        print(i)

    def calibrate(self):
        self.D_left = np.load(self.parameterLeft + "d.npy")
        self.K_left = np.load(self.parameterLeft + "k.npy")

        self.D_right = np.load(self.parameterRight + "d.npy")
        self.K_right = np.load(self.parameterRight + "k.npy")

        self.R = np.zeros((1, 1, 3), dtype=np.float64)
        self.T = np.zeros((1, 1, 3), dtype=np.float64)

        TERMINATION_CRITERIA = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1)
        FLAG = cv2.fisheye.CALIB_FIX_INTRINSIC

        N_OK = len(self.imgpointsLeft)

        self.objpoints = np.array(self.objpoints, dtype=np.float64)
        #self.imgpointsLeft = np.asarray(self.imgpointsLeft, dtype=np.float64)
        #self.imgpointsRight = np.asarray(self.imgpointsRight, dtype=np.float64)

        self.imgpointsLeft = np.array(self.imgpointsLeft, dtype=np.float64)
        self.imgpointsRight = np.array(self.imgpointsRight, dtype=np.float64)

        """self.objpoints = np.reshape(self.objpoints, (N_OK, 1, self.CHECKERBOARD[0] * self.CHECKERBOARD[1], 3))
        self.imgpointsLeft = np.reshape(self.imgpointsLeft, (N_OK, 1, self.CHECKERBOARD[0] * self.CHECKERBOARD[1], 2))
        self.imgpointsRight = np.reshape(self.imgpointsRight, (N_OK, 1, self.CHECKERBOARD[0] * self.CHECKERBOARD[1], 2))"""

        self.rms, self.K_left, self.D_left, self.K_right, self.D_right, self.R, self.T = \
            cv2.fisheye.stereoCalibrate(
                self.objpoints,
                self.imgpointsLeft, self.imgpointsRight,
                self.K_left, self.D_left,
                self.K_right, self.D_right,
                (self.leftVideo.width, self.leftVideo.height), self.R, self.T,
                FLAG, TERMINATION_CRITERIA)

    def saveNp(self):
        np.save(self.parameterOut + "K_left", self.K_left)
        np.save(self.parameterOut + "K_right", self.K_right)

        np.save(self.parameterOut + "D_left", self.D_left)
        np.save(self.parameterOut + "D_right", self.D_right)

        np.save(self.parameterOut + "R", self.R)
        np.save(self.parameterOut + "T", self.T)
        np.save(self.parameterOut + "rms", self.rms)

VideoReader.py->;我的助手班

import cv2

class VideoReader(object):
    def __init__(self, path):
        self.video = cv2.VideoCapture(path)
        self.fps = self.video.get(cv2.CAP_PROP_FPS)
        self.total_frames = self.video.get(cv2.CAP_PROP_FRAME_COUNT)
        self.current_frames = self.video.get(cv2.CAP_PROP_POS_FRAMES)

        self.frame = None
        self.ret = False
        self.windowNames = {}
        self.width = None
        self.height = None

    def __del__(self):
        self.video.release()
        for name in self.windowNames:
            cv2.destroyWindow(name)

    def read(self):
        self.ret, self.frame = self.video.read()
        self.current_frames = self.video.get(cv2.CAP_PROP_POS_FRAMES)
        if self.ret:
            self.height, self.width, _ = self.frame.shape
        return self.ret, self.frame

    def progress(self):
        percentage = self.current_frames / self.total_frames * 100
        return percentage

    def duration(self):
        # Length of the video in seconds: frame count divided by frames per second.
        duration = self.total_frames / self.fps
        return duration

    def display(self, windowTxt):
        if self.ret:
            self.windowNames[windowTxt]=0
            if cv2.waitKey(1) & 0xFF == ord('q'):
                exit()
            cv2.imshow(windowTxt, self.frame)

    def getGrayFrame(self):
        return cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)

    def __version__(self):
        return cv2.__version__
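And a quick test of the helper class (the video path is a placeholder):

reader = VideoReader("left.mp4")
ret, frame = reader.read()
while ret:
    print("Progress: {:.2f} %".format(reader.progress()))
    gray = reader.getGrayFrame()  # grayscale copy of the last frame read
    ret, frame = reader.read()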
