Template-Based Image Stitching with Python OpenCV
Updated: 2022-10-25 08:50:29  Author: 天人合一peng
As discussed previously, when feature-point-based stitching is applied to multiple images, every computed transformation matrix carries some error, so by the time all the images are stitched together the result can be badly distorted. A template-matching-based approach avoids this problem.
import cv2
import numpy as np


def matchStitch(imageLeft, imageRight):
    ImageLeft_gray = cv2.cvtColor(imageLeft, cv2.COLOR_BGR2GRAY)
    ImageRight_gray = cv2.cvtColor(imageRight, cv2.COLOR_BGR2GRAY)
    # cv2.imshow("gray", ImageLeft_gray)
    # cv2.waitKey()

    # Image dimensions
    height_Left, width_left = ImageLeft_gray.shape[:2]
    height_Right, width_Right = ImageRight_gray.shape[:2]

    # Template region: the top half of the right-hand quarter of the left image
    left_width_begin = int(3 * width_left / 4)
    left_height_begin = 0
    template_left = imageLeft[left_height_begin:int(height_Left / 2), left_width_begin:width_left]
    drawLeftRect = imageLeft.copy()
    cv2.rectangle(drawLeftRect, (left_width_begin, left_height_begin),
                  (width_left, int(height_Left / 2)), (0, 0, 255), 1)
    cv2.imshow("template_left", drawLeftRect)
    # cv2.waitKey()

    # Search region in the right image
    match_right = imageRight[0:height_Right, 0:int(2 * width_Right / 3)]
    # cv2.imshow("match_right", match_right)
    # cv2.waitKey()

    # Template matching with cv2.TM_CCOEFF_NORMED
    matchResult = cv2.matchTemplate(match_right, template_left, cv2.TM_CCOEFF_NORMED)
    # Normalize the result
    cv2.normalize(matchResult, matchResult, 0, 1, cv2.NORM_MINMAX, -1)
    # Find the minimum and maximum match values and their locations
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(matchResult)

    # Size of the final stitched image
    dstStitch = np.zeros((height_Left, width_Right + left_width_begin - max_loc[0], 3), imageLeft.dtype)
    # print(imageLeft.dtype)
    height_dst, width_dst = dstStitch.shape[:2]

    # Copy the left image onto the canvas
    dstStitch[0:height_Left, 0:width_left] = imageLeft.copy()
    # cv2.imshow("src", dstStitch)

    # The cropped right image must have the same height as the destination region
    matchRight_H = height_Right - max_loc[1] + left_height_begin
    dst_y_start = 0
    if height_dst == matchRight_H:
        matchRight = imageRight[max_loc[1] - left_height_begin:height_Right, max_loc[0]:width_Right]
    elif height_dst < matchRight_H:
        matchRight = imageRight[max_loc[1] - left_height_begin:height_Right - 1, max_loc[0]:width_Right]
    else:
        matchRight = imageRight[max_loc[1] - left_height_begin:height_Right, max_loc[0]:width_Right]
        dst_y_start = height_dst - matchRight_H

    # Copy the right image onto the canvas
    # matchRight = imageRight[max_loc[1] - left_height_begin: height_Right, max_loc[0]:width_Right]
    drawRightRect = imageRight.copy()
    h, w = template_left.shape[:2]
    cv2.rectangle(drawRightRect, (max_loc[0], max_loc[1]), (max_loc[0] + w, max_loc[1] + h), (0, 0, 255), 1)
    # cv2.imshow("drawRightRect", drawRightRect)
    # cv2.imshow("matchRight", matchRight)
    # print("height_Right " + str(height_Right - max_loc[1] + left_height_begin))
    # print("matchRight" + str(matchRight.shape))
    height_mr, width_mr = matchRight.shape[:2]
    # print("dstStitch" + str(dstStitch.shape))
    dstStitch[dst_y_start:height_dst, left_width_begin:width_mr + left_width_begin] = matchRight.copy()

    # A per-pixel blending loop over the seam was tried here but is left out
    # because the result was poor.

    # cv2.imwrite("fineFlower04.jpg", dstStitch)
    cv2.imshow("dstStitch", dstStitch)
    cv2.waitKey()


if __name__ == "__main__":
    # imageLeft = cv2.imread("Images/Scan/2.jpg")
    # imageRight = cv2.imread("Images/Scan/3.jpg")
    imageLeft = cv2.imread("Images/Scan/flower05.jpg")
    imageRight = cv2.imread("Images/Scan/flower06.jpg")
    if imageLeft is None or imageRight is None:
        print("NOTICE: No images")
    else:
        # cv2.imshow("image", imageLeft)
        # cv2.waitKey()
        matchStitch(imageLeft, imageRight)
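To make the size arithmetic easier to follow: the template is cut from the left image starting at column left_width_begin, and matchTemplate reports where that same patch sits in the right image as max_loc, so the right image must be shifted by the difference between the two. The helper below is only an illustration of that geometry; it is not part of the original script, and its parameter names simply mirror the variables in matchStitch.

def compute_offsets(left_width_begin, left_height_begin, max_loc, width_Right):
    """Illustrative helper (not in the original script): how far the right image
    is shifted relative to the left one, derived from the template position."""
    x_offset = left_width_begin - max_loc[0]   # horizontal shift of the right image
    y_offset = left_height_begin - max_loc[1]  # vertical shift (negative means the right image sits higher)
    canvas_width = width_Right + x_offset      # matches the width used to allocate dstStitch above
    return x_offset, y_offset, canvas_width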
Note that the template region must be present in both of the images being stitched. If you are careless and the template taken from the left image is too large, while the search region chosen in the right image does not contain the complete template, the match will land in the wrong place and the stitch will fail. For example, if the search region is reduced to only half of the right image:
# Search region covering only the left half of the right image
match_right = imageRight[0:height_Right, 0: int(width_Right/2)]
then part of the template no longer lies inside the search region, and the stitched result turns out poorly.
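One way to guard against that mistake is to check the sizes before stitching: cv2.matchTemplate requires the template to be no larger than the search image, and a low TM_CCOEFF_NORMED peak usually means the template is not fully contained in the search region. The helper below is a sketch of such a check, my addition rather than part of the original code, and the 0.5 threshold is an arbitrary example value.

import cv2


def check_template_fits(template_left, match_right, score_threshold=0.5):
    """Sketch of a pre-stitch sanity check (hypothetical helper, not in the
    original script). Returns the raw TM_CCOEFF_NORMED peak score."""
    th, tw = template_left.shape[:2]
    sh, sw = match_right.shape[:2]
    if th > sh or tw > sw:
        raise ValueError("template is larger than the search region")
    # Score taken before any normalization, so it stays in [-1, 1]
    result = cv2.matchTemplate(match_right, template_left, cv2.TM_CCOEFF_NORMED)
    _, max_val, _, _ = cv2.minMaxLoc(result)
    if max_val < score_threshold:
        print("Warning: weak match (%.2f); the template may not lie fully inside the search region" % max_val)
    return max_val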
The seam region still has room for improvement; I will write about that when I have time.
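For reference, one common way to soften the seam is a linear feather blend across the overlap strip. The sketch below is my own illustration of that idea, not the author's implementation; overlap_start and overlap_end are assumed to be the canvas columns where the two images overlap, roughly left_width_begin to width_left in the script above, and any vertical offset between the images is ignored for simplicity.

import numpy as np


def feather_blend_overlap(dstStitch, imageLeft, overlap_start, overlap_end):
    """Linear feather blend over the overlap columns (illustrative sketch).
    Assumes dstStitch already holds the pasted right image over
    [overlap_start, overlap_end) and that imageLeft covers those columns."""
    h = min(dstStitch.shape[0], imageLeft.shape[0])
    span = max(overlap_end - overlap_start, 1)
    # Per-column weights: 0 = pure left image at the start of the strip,
    # 1 = pure right image at the seam
    alpha = np.linspace(0.0, 1.0, span).reshape(1, span, 1)
    left = imageLeft[:h, overlap_start:overlap_end].astype(np.float32)
    right = dstStitch[:h, overlap_start:overlap_end].astype(np.float32)
    dstStitch[:h, overlap_start:overlap_end] = ((1.0 - alpha) * left + alpha * right).astype(dstStitch.dtype)
    return dstStitch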
This concludes this article on template-based image stitching with Python OpenCV. For more on image stitching with Python OpenCV, please search 腳本之家's earlier articles, and thank you for your continued support of 腳本之家!