Template matching with transparency using cv2.matchTemplate in Python OpenCV.


OpenCV 3.0.0 added the ability to specify a mask when performing template matching. When I specify a mask, I get the following error: error: (-215) (depth == CV_8U || depth == CV_32F) && type == _templ.type() && _img.dims() <= 2 in function matchTemplateMask

Template image (PNG with transparency):


Source image:


Code:

# read the template emoji with the alpha channel
template = cv2.imread(imagePath, cv2.IMREAD_UNCHANGED)
channels = cv2.split(template)
zero_channel = np.zeros_like(channels[0])
mask = np.array(channels[3])

# all elements in alpha_channel that have value 0 are set to 1 in the mask matrix
mask[channels[3] == 0] = 1

# all elements in alpha_channel that have value 100 are set to 0 in the mask matrix
mask[channels[3] == 100] = 0

transparent_mask = cv2.merge([zero_channel, zero_channel, zero_channel, mask])

print image.shape, image.dtype  # (72, 232, 3) uint8
print template.shape, template.dtype  # (40, 40, 4) uint8
print transparent_mask.shape, transparent_mask.dtype    # (40, 40, 4) uint8

# find the matches
res = cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED, mask=transparent_mask)

Is there a problem with the image types? I can't find any examples (in Python) that use the new mask parameter of the matchTemplate method. Does anyone know how to create the mask?


Did you load image? - Miki
Yes, all of the images load correctly. Judging from the error that is thrown, I think the image format may be wrong? - jaredrada
I get IndexError: list index out of range at mask = np.array(channels[3]), which makes sense because [3] is the alpha channel and IMREAD_COLOR does not read the alpha channel. - jaredrada
Of course, I hadn't thought of that. Get the depth and type of image and template so we know which one is wrong. - Miki
Try the Python equivalent of cvtColor(image, image, COLOR_BGR2BGRA); to convert image to BGRA. - Miki
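
For reference, a minimal Python sketch of that suggestion ('source.png' is a placeholder for the questioner's source image, not a file from the question):

import cv2

# placeholder file name; load the source image as 3-channel BGR
image = cv2.imread('source.png', cv2.IMREAD_COLOR)
# cvtColor returns a new array in Python (there is no in-place overload), so reassign;
# this appends a fully opaque alpha channel, matching the 4-channel template
# loaded with IMREAD_UNCHANGED
image = cv2.cvtColor(image, cv2.COLOR_BGR2BGRA)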
4 Answers


I got this working with Python 2.7.13 and opencv-python==3.1.0.4.

Here is the code.

import cv2
import numpy as np
import sys

if len(sys.argv) < 3:
    print 'Usage: python match.py <template.png> <image.png>'
    sys.exit()

template_path = sys.argv[1]
template = cv2.imread(template_path, cv2.IMREAD_UNCHANGED)
channels = cv2.split(template)
zero_channel = np.zeros_like(channels[0])
mask = np.array(channels[3])

image_path = sys.argv[2]
image = cv2.imread(image_path, cv2.IMREAD_UNCHANGED)

mask[channels[3] == 0] = 1
mask[channels[3] == 100] = 0

# transparent_mask = None
# According to http://www.devsplanet.com/question/35658323, we can only use
# cv2.TM_SQDIFF or cv2.TM_CCORR_NORMED
# All methods can be seen here:
# http://docs.opencv.org/2.4/doc/tutorials/imgproc/histograms/template_matching/template_matching.html#which-are-the-matching-methods-available-in-opencv
method = cv2.TM_SQDIFF  # R(x,y) = \sum _{x',y'} (T(x',y')-I(x+x',y+y'))^2 (essentially, sum of squared differences)

transparent_mask = cv2.merge([zero_channel, zero_channel, zero_channel, mask])
result = cv2.matchTemplate(image, template, method, mask=transparent_mask)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
print 'Lowest squared difference WITH mask', min_val

# Now we'll try it without the mask (should give a much larger error)
transparent_mask = None
result = cv2.matchTemplate(image, template, method, mask=transparent_mask)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
print 'Lowest squared difference WITHOUT mask', min_val

Here is a snippet of it.

Basically, you need to make sure that you are using the correct matching method.
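
A minimal sketch of that point, reusing image, template, zero_channel and mask from the code above: with a mask, only TM_SQDIFF and TM_CCORR_NORMED are supported, and which extreme counts as the best match differs between them.

# rebuild the 4-channel mask as in the code above
transparent_mask = cv2.merge([zero_channel, zero_channel, zero_channel, mask])
method = cv2.TM_CCORR_NORMED  # or cv2.TM_SQDIFF; other methods reject a mask
result = cv2.matchTemplate(image, template, method, mask=transparent_mask)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
if method in (cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED):
    best_loc, best_val = min_loc, min_val  # squared difference: lower is better
else:
    best_loc, best_val = max_loc, max_val  # correlation: higher is better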




My environment uses opencv 3.1.0 and python 2.7.11.

Below is the code for finding an image with a transparent (alpha-channel) template. I hope this helps.

import cv2
import numpy as np
import logging
import time
from ConfigParser import SafeConfigParser  # Python 2

def getMultiFullInfo(all_matches,w,h):
    #This function will rearrange the data and calculate the tuple
    #   for the square and the center and the tolerance for each point
    result = []
    for match in all_matches:
        tlx = match[0]
        tly = match[1]
        top_left = (tlx,tly)
        brx = match[0] + w
        bry = match[1] + h 
        bottom_right = (brx,bry)     
        centerx = match[0] + w/2
        centery = match[1] + h/2
        center = [centerx,centery]
        result.append({'top_left':top_left,'bottom_right':bottom_right,'center':center,'tolerance':match[2]})
    return result

def getMulti(res, tolerance,w,h):
    #We get an opencv image in the form of a numpy array and we need to
    #   find all the occurances in there knowing that 2 squares cannot intersect
    #This will give us exactly the matches that are unique

    #First we need to get all the points where value is >= tolerance
    #This wil get sometimes some squares that vary only from some pixels and that are overlapping
    all_matches_full = np.where (res >= tolerance)
    logging.debug('*************Start of getMulti function')
    logging.debug('All >= tolerance')
    logging.debug(all_matches_full)
    #Now we need to arrange it in x,y coordinates
    all_matches_coords = []
    for pt in zip(*all_matches_full[::-1]):
        all_matches_coords.append([pt[0],pt[1],res[pt[1]][pt[0]]])
    logging.debug('In coords form')
    logging.debug(all_matches_coords)
    #Let's sort the new array
    all_matches_coords = sorted(all_matches_coords)
    logging.debug('Sorted')
    logging.debug(all_matches_coords)

    #This function will be called only when there is at least one match so if matchtemplate returns something
    #This means we have found at least one record so we can prepare the analysis and loop through each records 
    all_matches = [[all_matches_coords[0][0],all_matches_coords[0][1],all_matches_coords[0][2]]]
    i=1
    for pt in all_matches_coords:
        found_in_existing = False
        logging.debug('%s)',i)
        for match in all_matches:
            logging.debug(match)
            #This is the test to make sure that the square we analyse doesn't overlap with one of the squares already found
            if pt[0] >= (match[0]-w) and pt[0] <= (match[0]+w) and pt[1] >= (match[1]-h) and pt[1] <= (match[1]+h):
                found_in_existing = True
                if pt[2] > match[2]:
                    match[0] = pt[0]
                    match[1] = pt[1]
                    match[2] = res[pt[1]][pt[0]]
        if not found_in_existing:
            all_matches.append([pt[0],pt[1],res[pt[1]][pt[0]]])
        i += 1
    logging.debug('Final')
    logging.debug(all_matches)
    logging.debug('Final with all info')
    #Before returning the result, we will arrange it with data easily accessible
    all_matches = getMultiFullInfo(all_matches,w,h)
    logging.debug(all_matches)   
    logging.debug('*************End of getMulti function')
    return all_matches

def checkPicture(screenshot,templateFile, tolerance, multiple = False):
    #This is an intermediary function so that the actual function doesn't include too much specific arguments
    #We open the config file

    configFile = 'test.cfg'

    config = SafeConfigParser()

    config.read(configFile)
    basepics_dir = config.get('general', 'basepics_dir')

    debug_dir = config.get('general', 'debug_dir')

    font = cv2.FONT_HERSHEY_PLAIN

    #The value -1 means we keep the file as is meaning with color and alpha channel if any
    #   btw, 0 means grayscale and 1 is color
    template = cv2.imread(basepics_dir+templateFile,-1)

    #Now we search in the picture
    result = findPicture(screenshot,template, tolerance, multiple)
    #If it didn't get any result, we log the best value

    if not result['res']:
        logging.debug('Best value found for %s is: %f',templateFile,result['best_val'])  

    elif logging.getLogger().getEffectiveLevel() == 10:
        screenshot_with_rectangle = screenshot.copy()
        for pt in result['points']:
            cv2.rectangle(screenshot_with_rectangle, pt['top_left'], pt['bottom_right'], 255, 2)
            fileName_top_left = (pt['top_left'][0],pt['top_left'][1]-10)
            cv2.putText(screenshot_with_rectangle,str(pt['tolerance'])[:4],fileName_top_left, font, 1,(255,255,255),2)
            #Now we save to the file if needed
            filename = time.strftime("%Y%m%d-%H%M%S") + '_' + templateFile[:-4] + '.jpg'
            cv2.imwrite(debug_dir + filename, screenshot_with_rectangle)

    result['name']=templateFile

    return result

def extractAlpha(img, hardedge = True):
    if img.shape[2]>3:
        logging.debug('Mask detected')
        channels = cv2.split(img)

        mask = np.array(channels[3])
        if hardedge:
            # threshold the whole alpha channel: anything <= 128 is treated as
            # fully transparent (ignored by matchTemplate), everything else as opaque
            mask[mask <= 128] = 0
            mask[mask > 128] = 255


        mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
        img = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)


        return {'res':True,'image':img,'mask':mask}
    else:
        return {'res':False,'image':img}


def findPicture(screenshot,template, tolerance, multiple = False):
    #This function will work with color images 3 channels minimum
    #The template can have an alpha channel and we will extract it to have the mask
    logging.debug('Looking for %s' , template)
    logging.debug('Tolerance to check is %f' , tolerance)
    logging.debug('*************Start of findPicture')

    h = template.shape[0]
    w = template.shape[1]

    #We will now extract the alpha channel
    tmpl = extractAlpha(template)

    logging.debug('Image width: %d - Image height: %d',w,h)

    # the method used for comparison, can be ['cv2.TM_CCOEFF', 'cv2.TM_CCOEFF_NORMED', 'cv2.TM_CCORR','cv2.TM_CCORR_NORMED', 'cv2.TM_SQDIFF', 'cv2.TM_SQDIFF_NORMED']
    # but with a mask only TM_SQDIFF and TM_CCORR_NORMED are supported
    method = cv2.TM_CCORR_NORMED

    # Apply template Matching
    if tmpl['res']:
        res = cv2.matchTemplate(screenshot,tmpl['image'],method, mask = tmpl['mask'])
    else:
        res = cv2.matchTemplate(screenshot,tmpl['image'],method)

    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)

    # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum
    if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
        top_left = min_loc
        best_val = 1 - min_val
    else:
        top_left = max_loc
        best_val = max_val

    #We need to ensure we found at least one match otherwise we return false
    if best_val >= tolerance:
        if multiple:
            #We need to find all the times the image is found
            all_matches = getMulti(res, float(tolerance),int(w),int(h))
        else:
            bottom_right = (top_left[0] + w, top_left[1] + h)
            center = (top_left[0] + (w/2), top_left[1] + (h/2))
            all_matches = [{'top_left':top_left,'bottom_right':bottom_right,'center':center,'tolerance':best_val}]

        #points will be in the form: [{'tolerance': 0.9889718890190125, 'center': (470, 193), 'bottom_right': (597, 215), 'top_left': (343, 172)}]
        logging.debug('The points found will be:')
        logging.debug(all_matches)
        logging.debug('*************End of findPicture')
        return {'res': True,'points':all_matches}
    else:
        logging.debug('Could not find a value above tolerance')
        logging.debug('*************End of findPicture')
        return {'res': False,'best_val':best_val}
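
A hedged usage sketch (not part of the original answer; the file names are placeholders, and checkPicture still reads basepics_dir and debug_dir from test.cfg):

import logging
import cv2

logging.basicConfig(level=logging.DEBUG)

# load the screenshot as a plain 3-channel BGR image (placeholder file name)
screenshot = cv2.imread('screenshot.png', cv2.IMREAD_COLOR)
# the template file name is resolved by checkPicture relative to basepics_dir
result = checkPicture(screenshot, 'template_with_alpha.png', 0.95, multiple=True)
if result['res']:
    for match in result['points']:
        logging.info('match at %s with tolerance %f', match['center'], match['tolerance'])
else:
    logging.info('best value found was %f', result['best_val'])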

Is it really this complicated just to do template matching with an alpha mask? - michaelsnowden

With OpenCV 4.2.0, the first two suggested code samples produced the following error for me:
cv2.error: OpenCV(4.2.0) C:\projects\opencv-python\opencv\modules\imgproc\src\templmatch.cpp:766: error: (-215:Assertion failed) (depth == CV_8U || depth == CV_32F) && type == _templ.type() && _img.dims() <= 2 in function 'cv::matchTemplateMask'
It looks like things have become much easier since then. Here is my Python code, simplified as much as possible. The file "crowncap_85x85_mask.png" is a black-and-white image; during matching, all black pixels in the mask are ignored.
Only the matching methods TM_SQDIFF and TM_CCORR_NORMED are supported when using a mask.
import cv2 as cv
    
img = cv.imread("fridge_zoomed.png", cv.IMREAD_COLOR)
templ = cv.imread("crowncap_85x85.png", cv.IMREAD_COLOR)
mask = cv.imread( "crowncap_85x85_mask.png", cv.IMREAD_COLOR )

result = cv.matchTemplate(img, templ, cv.TM_CCORR_NORMED, None, mask)
cv.imshow("Matching with mask", result)
min_val, max_val, min_loc, max_loc = cv.minMaxLoc(result)
print('Highest correlation WITH mask', max_val)

result = cv.matchTemplate(img, templ, cv.TM_CCORR_NORMED)
cv.imshow("Matching without mask", result)
min_val, max_val, min_loc, max_loc = cv.minMaxLoc(result)
print('Highest correlation without mask', max_val)

while True:
    if cv.waitKey(10) == 27:
        break

cv.destroyAllWindows()

If you want to generate the mask from the template's alpha channel instead, you can do it like this:
import cv2 as cv
import numpy as np

img = cv.imread("fridge_zoomed.png", cv.IMREAD_COLOR)
templ = cv.imread("crowncap_85x85_transp.png", cv.IMREAD_COLOR)
templ_incl_alpha_ch = cv.imread("crowncap_85x85_transp.png", cv.IMREAD_UNCHANGED)

channels = cv.split(templ_incl_alpha_ch)
#extract "transparency" channel from image
alpha_channel = np.array(channels[3]) 
#generate mask image, all black dots will be ignored during matching
mask = cv.merge([alpha_channel,alpha_channel,alpha_channel])
cv.imshow("Mask", mask)

result = cv.matchTemplate(img, templ, cv.TM_CCORR_NORMED, None, mask)
cv.imshow("Matching with mask", result)
min_val, max_val, min_loc, max_loc = cv.minMaxLoc(result)
print('Highest correlation WITH mask', max_val)

result = cv.matchTemplate(img, templ, cv.TM_CCORR_NORMED)
cv.imshow("Matching without mask", result)
min_val, max_val, min_loc, max_loc = cv.minMaxLoc(result)
print('Highest correlation without mask', max_val)

while True:
    if cv.waitKey(10) == 27:
        break

cv.destroyAllWindows()
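
As a small follow-up sketch (not part of the original answer, reusing img, templ and mask from the block above), the best match location returned by cv.minMaxLoc can be turned into a bounding box and drawn on the source image:

# re-run the masked match and draw the best hit
result = cv.matchTemplate(img, templ, cv.TM_CCORR_NORMED, None, mask)
min_val, max_val, min_loc, max_loc = cv.minMaxLoc(result)
h, w = templ.shape[:2]
top_left = max_loc  # TM_CCORR_NORMED: higher correlation is better
bottom_right = (top_left[0] + w, top_left[1] + h)
cv.rectangle(img, top_left, bottom_right, (0, 255, 0), 2)
cv.imshow("Best match", img)
cv.waitKey(0)
cv.destroyAllWindows()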
