暴力匹配
FLANN匹配法
一、SIFT Difference of Gaussian(DoG) 对同一图像使用不同高斯滤波器,DoG得到感兴趣区域(关键点),通过SIFT进行描述
# 1. SIFT keypoint detection: Difference of Gaussians (DoG) filters the same
#    image with Gaussians of different sizes to find regions of interest
#    (keypoints); SIFT then describes each keypoint.
import cv2
import sys
import numpy as np

imgpath = sys.argv[1]
img = cv2.imread(imgpath)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

sift = cv2.xfeatures2d.SIFT_create()
# detectAndCompute returns the keypoints and their descriptors.
keypoints, descriptor = sift.detectAndCompute(gray, None)

# DRAW_RICH_KEYPOINTS draws a circle (size) and an orientation line for each
# keypoint.  Fixed: the keyword is outImage (not outImg) and the constant is
# ...DRAW_RICH_KEYPOINTS (the original ...KEYPOINT does not exist in cv2).
img = cv2.drawKeypoints(image=img, outImage=img, keypoints=keypoints,
                        flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS,
                        color=(51, 163, 236))

cv2.imshow('sift_keypoints', img)
# Poll at ~12 fps until the user presses "c".  Integer division keeps
# waitKey's argument an int on Python 3 as well.
while True:
    if cv2.waitKey(1000 // 12) & 0xff == ord("c"):
        break
cv2.destroyAllWindows()  # fixed: cv2.destoryAllWindow() does not exist

# Keypoint attributes:
#   1. pt:       (x, y) coordinates of the point
#   2. size:     feature diameter
#   3. angle:    feature orientation
#   4. response: feature strength, used to rate keypoint quality
#   5. octave:   pyramid layer where the feature was found
#   6. class_id: keypoint id
二、SURF
# 2. SURF keypoint detection.  The constructor argument is the Hessian
#    threshold: the lower it is, the more keypoints are detected.
surf = cv2.xfeatures2d.SURF_create(8000)
# detectAndCompute returns the keypoints and their descriptors.
keypoints, descriptor = surf.detectAndCompute(gray, None)
# flags value 4 is cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS (circle +
# orientation per keypoint) — spelled out for clarity.  Fixed: the keyword
# argument is outImage, not outImg.
img = cv2.drawKeypoints(image=img, outImage=img, keypoints=keypoints,
                        flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS,
                        color=(51, 163, 236))
三、ORB特征检测与特征匹配
# 3. ORB feature detection with brute-force matching.
import numpy as np
import cv2
from matplotlib import pyplot as plt

cv2.ocl.setUseOpenCL(False)

# Load the query and the test image as grayscale.
query_img = cv2.imread('../images/manowar_logo.png', 0)
test_img = cv2.imread('../images/manowar_single.jpg', 0)

# Create the ORB detector and describe both images.
orb = cv2.ORB_create()
query_kp, query_des = orb.detectAndCompute(query_img, None)
test_kp, test_des = orb.detectAndCompute(test_img, None)

# Brute-force matching on binary descriptors uses Hamming distance;
# crossCheck keeps only mutually-best pairs.
matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = matcher.match(query_des, test_des)
# matches = matcher.knnMatch(query_des, test_des, k=2)  # knn alternative

# Sort by distance so the best matches come first.
matches = sorted(matches, key=lambda m: m.distance)

result = cv2.drawMatches(query_img, query_kp, test_img, test_kp,
                         matches[:25], test_img, flags=2)
# result = cv2.drawMatchesKnn(query_img, query_kp, test_img, test_kp,
#                             matches[:25], test_img, flags=2)  # knn variant
plt.imshow(result), plt.show()

# 4. FLANN feature matching:
#      flann = cv2.FlannBasedMatcher(indexParams, searchParams)
#    The FLANN matcher takes two parameters (Python dicts; C++ structs):
#      indexParams:  LinearIndex, KDTreeIndex, KMeansIndex, CompositeIndex,
#                    AutotunedIndex
#      searchParams: checks=50 — number of index-tree traversals
# 4. FLANN-based matching of SIFT descriptors, filtered with Lowe's ratio test.
import numpy as np
import cv2
from matplotlib import pyplot as plt

queryImage = cv2.imread('../images/bathory_album.jpg', 0)
trainingImage = cv2.imread('../images/bathory_vinyls.jpg', 0)

# Create SIFT and compute keypoints/descriptors for both images.
sift = cv2.xfeatures2d.SIFT_create()
kp1, des1 = sift.detectAndCompute(queryImage, None)
kp2, des2 = sift.detectAndCompute(trainingImage, None)

# FLANN matcher parameters (algorithm 0 == FLANN_INDEX_KDTREE).
indexParams = dict(algorithm=0, trees=5)
searchParams = dict(checks=50)  # number of index-tree traversals

flann = cv2.FlannBasedMatcher(indexParams, searchParams)
matches = flann.knnMatch(des1, des2, k=2)

# Mask saying which matches to draw; filled in by the ratio test below.
# Fixed: range replaces the Python-2-only xrange (identical iteration).
matchesMask = [[0, 0] for i in range(len(matches))]

# David G. Lowe's ratio test: keep a match only when the best neighbour is
# markedly closer (here, < 0.7x the distance) than the second-best one —
# the 0.7 factor is a ratio between the two distances, not an absolute cap.
for i, (m, n) in enumerate(matches):
    if m.distance < 0.7 * n.distance:
        matchesMask[i] = [1, 0]

drawParams = dict(matchColor=(0, 255, 0),
                  singlePointColor=(255, 0, 0),
                  matchesMask=matchesMask,
                  flags=0)
# **drawParams unpacks the dict as keyword arguments (* unpacks a tuple).
resultImage = cv2.drawMatchesKnn(queryImage, kp1, trainingImage, kp2,
                                 matches, None, **drawParams)
plt.imshow(resultImage), plt.show()

# 5. FLANN homography matching.  Homography: the condition under which two
#    images still match each other even when one of them carries a
#    projective distortion.
# 5. FLANN matching + homography: locate a (possibly projectively distorted)
#    copy of img1 inside img2 and outline it.
import numpy as np
import cv2
from matplotlib import pyplot as plt

# findHomography needs at least 4 matches; demand more for robustness.
MIN_MATCH_COUNT = 10

img1 = cv2.imread('images/bb.jpg', 0)
img2 = cv2.imread('images/color2_small.jpg', 0)

# Initiate the SIFT detector, then find keypoints and descriptors with SIFT.
sift = cv2.xfeatures2d.SIFT_create()
kp1, des1 = sift.detectAndCompute(img1, None)
kp2, des2 = sift.detectAndCompute(img2, None)

FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1, des2, k=2)

# Store all the good matches as per Lowe's ratio test.
good = []
for m, n in matches:
    if m.distance < 0.7 * n.distance:
        good.append(m)

if len(good) > MIN_MATCH_COUNT:
    # m.queryIdx / m.trainIdx index the keypoints of the query / train image.
    # reshape(-1, 1, 2): N rows of one (x, y) point, N computed automatically.
    src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    matchesMask = mask.ravel().tolist()  # ravel(): flatten to 1-D
    h, w = img1.shape
    # Project img1's corners through the homography and outline the object.
    pts = np.float32([[0, 0], [0, h - 1],
                      [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
    dst = cv2.perspectiveTransform(pts, M)
    img2 = cv2.polylines(img2, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)
else:
    # print() works on both Python 2 and 3 (the original py2-only statement
    # fails on Python 3).
    print("Not enough matches are found - %d/%d" % (len(good), MIN_MATCH_COUNT))
    matchesMask = None

draw_params = dict(matchColor=(0, 255, 0),      # draw matches in green color
                   singlePointColor=None,
                   matchesMask=matchesMask,     # draw only inliers
                   flags=2)
img3 = cv2.drawMatches(img1, kp1, img2, kp2, good, None, **draw_params)
plt.imshow(img3, 'gray'), plt.show()

# 6. Tattoo-forensics application.
#    Step 1: save every image's descriptors to an .npy file.
# 6.1 Save a SIFT descriptor file (.npy) for every image in a folder.
import os
import cv2
import numpy as np
from os import walk
from os.path import join
import sys


def create_descriptors(folder):
    """Compute and save SIFT descriptors for every file in *folder*."""
    files = []
    for (dirpath, dirnames, filenames) in walk(folder):
        files.extend(filenames)
    for f in files:
        save_descriptor(folder, f, cv2.xfeatures2d.SIFT_create())


def save_descriptor(folder, image_path, feature_detector):
    """Detect keypoints in one image and save its descriptors as .npy."""
    print("reading %s" % image_path)
    if image_path.endswith("npy"):
        # Skip descriptor files produced by a previous run.
        return
    img = cv2.imread(join(folder, image_path), 0)
    keypoints, descriptors = feature_detector.detectAndCompute(img, None)
    # NOTE(review): only "jpg" filenames get the .npy extension — other
    # image types keep their name; confirm the folder only holds jpgs.
    descriptor_file = image_path.replace("jpg", "npy")
    np.save(join(folder, descriptor_file), descriptors)


# Renamed from `dir`, which shadows the builtin of the same name.
directory = sys.argv[1]
create_descriptors(directory)


# 6.2 Scan the saved descriptors for matches against a seed (query) image.
from os.path import join
from os import walk
import numpy as np
import cv2
from sys import argv

# Folder holding the images/descriptors, and the query ("seed") image.
folder = argv[1]
query = cv2.imread(join(folder, "tattoo_seed.jpg"), 0)

# Collect every saved descriptor file except the seed's own.
files = []
descriptors = []
for (dirpath, dirnames, filenames) in walk(folder):
    files.extend(filenames)
for f in files:
    if f.endswith("npy") and f != "tattoo_seed.npy":
        descriptors.append(f)
print(descriptors)

# SIFT keypoints/descriptors for the query image.
sift = cv2.xfeatures2d.SIFT_create()
query_kp, query_ds = sift.detectAndCompute(query, None)

# FLANN matcher.
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)

# Minimum number of good matches to report a hit.
MIN_MATCH_COUNT = 10
potential_culprits = {}

print(">> Initiating picture scan...")
for d in descriptors:
    print("--------- analyzing %s for matches ------------" % d)
    matches = flann.knnMatch(query_ds, np.load(join(folder, d)), k=2)
    # Lowe's ratio test.
    good = []
    for m, n in matches:
        if m.distance < 0.7 * n.distance:
            good.append(m)
    if len(good) > MIN_MATCH_COUNT:
        # Fixed: the original string literal was broken across a line,
        # which is a syntax error; this is the intended message.
        print("%s is a match! (%d)" % (d, len(good)))
    else:
        print("%s is not a match" % d)
    # Record the score for every candidate, match or not.
    potential_culprits[d] = len(good)

# Pick the candidate with the most good matches.  items() replaces the
# Python-2-only iteritems(); the loop variable is renamed so it no longer
# shadows `matches` above.
max_matches = None
potential_suspect = None
for culprit, match_count in potential_culprits.items():
    if max_matches is None or match_count > max_matches:
        max_matches = match_count
        potential_suspect = culprit

print("potential suspect is %s" % potential_suspect.replace("npy", "").upper())
