How to detect a game board in an image


I need to find a Go board and detect the stones on a photo using OpenCV in Python, but right now I'm stuck on detecting the board itself: the same contour contains strange stray points and I can't figure out how to get rid of them. This is what I have so far:

from skimage import exposure
import numpy as np
import argparse
import imutils
import cv2

ap = argparse.ArgumentParser()
ap.add_argument("-r", required = True,
    help = "ratio", type=int, default = 800)
args = vars(ap.parse_args())

img = cv2.imread('3.jpg') # load the image and change its resolution
ratio = img.shape[0] / args["r"]
orig = img.copy()
img = imutils.resize(img, height = args["r"])

gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) 
gray = cv2.bilateralFilter(gray, 11, 17, 17)
edged = cv2.Canny(gray, 30, 200)

cnts = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE) # find contours and sort them
cnts = imutils.grab_contours(cnts)
cnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:10]
screenCnt = None

for cnt in cnts:

    rect = cv2.minAreaRect(cnt) # fit a minimum-area rectangle around each contour
    box = cv2.boxPoints(rect)
    box = np.int0(box)

    area = int(rect[1][0]*rect[1][1]) # calculating contour area

    if (area > 300000):
        print(area)
        cv2.drawContours(img, cnt, -1, (255, 0, 0), 4) # draws individual contour points (cnt is not wrapped in a list)
        hull = cv2.convexHull(cnt) # calculating convex hull
        cv2.drawContours(img, [hull], -1, (0, 0, 255), 3)

cv2.imshow("death", img)
cv2.waitKey(0)

Source image: [image]

Result: [image]
Try sorting the contours by area and then picking the first one to draw (it will be the largest one). That might help. – Rick M.
@Rick That's already done in cnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:10]. – zteffi
@zteffi You're right, my mistake, thanks for pointing it out. Then I don't see the problem; the OP could draw the first contour and check. – Rick M.
@Georgy Are you asking what the blue dots are, why Canny detects edges outside the board, or for a suggestion on how to filter out the edges outside the board? – zteffi
@Georgy Could you add your original input image? – nathancy
@nathancy Updated, thanks. – Georgy
2 Answers


Here's an approach to detect the board:

  • Convert the image to grayscale and blur it with a bilateral filter
  • Threshold to obtain a binary image
  • Find contours
  • Filter using contour area and contour shape approximation
  • Perform a perspective transform to extract the board ROI (region of interest)

Thresholded image: [image]

Find contours, then filter using cv2.contourArea() with a minimum threshold area. In addition, use contour approximation as a second filter with cv2.approxPolyDP(): if a contour can be approximated with four vertices, it is most likely a square or a rectangle, i.e. the board.
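As a quick illustration (a minimal, self-contained sketch, not part of the original answer), here is how cv2.approxPolyDP() collapses a rectangular contour to exactly four vertices, which is what makes the four-vertex check work:

import cv2
import numpy as np

# Draw a filled rectangle and recover its outline as a contour
canvas = np.zeros((400, 400), dtype=np.uint8)
cv2.rectangle(canvas, (50, 50), (350, 300), 255, -1)
cnts = cv2.findContours(canvas, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
c = max(cnts, key=cv2.contourArea)

# Approximate the contour; a clean rectangle collapses to its 4 corners
peri = cv2.arcLength(c, True)
approx = cv2.approxPolyDP(c, 0.015 * peri, True)
print(len(approx))  # prints 4 -> a board candidate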

We can also extract the bounding box of the board and place it on a mask.

Finally, if we want a top-down view of the board, we can perform a perspective transform.

import cv2
import numpy as np

def perspective_transform(image, corners):
    def order_corner_points(corners):
        # Separate corners into individual points
        # Index 0 - top-right
        #       1 - top-left
        #       2 - bottom-left
        #       3 - bottom-right
        corners = [(corner[0][0], corner[0][1]) for corner in corners]
        top_r, top_l, bottom_l, bottom_r = corners[0], corners[1], corners[2], corners[3]
        return (top_l, top_r, bottom_r, bottom_l)

    # Order points in clockwise order
    ordered_corners = order_corner_points(corners)
    top_l, top_r, bottom_r, bottom_l = ordered_corners

    # Determine width of new image which is the max distance between 
    # (bottom right and bottom left) or (top right and top left) x-coordinates
    width_A = np.sqrt(((bottom_r[0] - bottom_l[0]) ** 2) + ((bottom_r[1] - bottom_l[1]) ** 2))
    width_B = np.sqrt(((top_r[0] - top_l[0]) ** 2) + ((top_r[1] - top_l[1]) ** 2))
    width = max(int(width_A), int(width_B))

    # Determine height of new image which is the max distance between 
    # (top right and bottom right) or (top left and bottom left) y-coordinates
    height_A = np.sqrt(((top_r[0] - bottom_r[0]) ** 2) + ((top_r[1] - bottom_r[1]) ** 2))
    height_B = np.sqrt(((top_l[0] - bottom_l[0]) ** 2) + ((top_l[1] - bottom_l[1]) ** 2))
    height = max(int(height_A), int(height_B))

    # Construct new points to obtain top-down view of image in 
    # top_r, top_l, bottom_l, bottom_r order
    dimensions = np.array([[0, 0], [width - 1, 0], [width - 1, height - 1], 
                    [0, height - 1]], dtype = "float32")

    # Convert to Numpy format
    ordered_corners = np.array(ordered_corners, dtype="float32")

    # Find perspective transform matrix
    matrix = cv2.getPerspectiveTransform(ordered_corners, dimensions)

    # Return the transformed image
    return cv2.warpPerspective(image, matrix, (width, height))

image = cv2.imread('1.jpg')
original = image.copy()
blur = cv2.bilateralFilter(image,9,75,75)
gray = cv2.cvtColor(blur, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray,40,255, cv2.THRESH_BINARY_INV)[1]

cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]

mask = np.zeros(image.shape, dtype=np.uint8)
for c in cnts:
    area = cv2.contourArea(c)
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.015 * peri, True)

    if area > 150000 and len(approx) == 4:
        cv2.drawContours(image,[c], 0, (36,255,12), 3)
        cv2.drawContours(mask,[c], 0, (255,255,255), -1)
        transformed = perspective_transform(original, approx)

mask = cv2.bitwise_and(mask, original)

cv2.imshow('thresh', thresh)
cv2.imshow('image', image)
cv2.imshow('mask', mask)
cv2.imshow('transformed', transformed)
cv2.waitKey()
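The binary threshold (40) and the minimum contour area (150000) are tuned to this particular photo. A possible way to make the area filter resolution-independent (my assumption, not part of the original answer) is to derive it from the image size:

# Hypothetical helper (not from the original answer): minimum contour area
# expressed as a fraction of the image area, assuming the board fills a
# substantial part of the frame
def min_board_area(image, min_frac=0.2):
    h, w = image.shape[:2]
    return min_frac * h * w

The area check in the loop above would then read if area > min_board_area(original) and len(approx) == 4:.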

I once worked on a similar task of detecting a chessboard. I used two different approaches. The first one is similar to nathancy's answer, so I don't think I need to post it again; the second one is a template-matching approach (I used SIFT). Here is an example:

Template image: [image]
Example query image: [image]
Result: [image]

Code:
import numpy as np
import cv2
from matplotlib import pyplot as plt
import os


MIN_MATCH_COUNT = 5


template_image = cv2.imread('go_board_template.png')
template_image_gray = cv2.cvtColor(template_image, cv2.COLOR_BGR2GRAY)


# Initiate SIFT detector
#sift = cv2.SIFT()
sift = cv2.xfeatures2d.SIFT_create()


# find the keypoints and descriptors with SIFT in template image
kp_template, des_template = sift.detectAndCompute(template_image_gray, None)


FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
search_params = dict(checks = 50)

flann = cv2.FlannBasedMatcher(index_params, search_params)


img = cv2.imread("1.jpg")  #  use second parameter 0 for auto gray conversion?


#  convert image to gray
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

#  find the keypoints and descriptors with SIFT in query image
kp_img, des_img = sift.detectAndCompute(img_gray, None)

#  get image dimension info
img_height, img_width = img_gray.shape
print("Image height:{}, image width:{}".format(img_height, img_width))


matches = flann.knnMatch(des_template,des_img,k=2)


# store all the good matches as per Lowe's ratio test.
good = []
for m,n in matches:
    if m.distance < 0.7*n.distance:
        good.append(m)


if len(good)>MIN_MATCH_COUNT:
    src_pts = np.float32([ kp_template[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
    dst_pts = np.float32([ kp_img[m.trainIdx].pt for m in good ]).reshape(-1,1,2)

    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0)
    matchesMask = mask.ravel().tolist()

    h,w = template_image_gray.shape
    pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)
    dst = cv2.perspectiveTransform(pts,M)

    img_board = img.copy()
    cv2.polylines(img_board,[np.int32(dst)],True,255,10, cv2.LINE_AA)
    """
    draw_params = dict(matchColor = (0,255,0), # draw matches in green color
                   singlePointColor = None,
                   matchesMask = matchesMask, # draw only inliers
                   flags = 2)

    img3 = cv2.drawMatches(template_image,kp_template,img,kp_img,good,None,**draw_params)
    """
    plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    plt.show()

    #  get axis aligned bounding box for chessboard in input image
    x,y,w,h = cv2.boundingRect(dst)
    img_crop = img.copy()
    cv2.rectangle(img_crop,(x,y),(x+w,y+h),(0,0,255),5)


    #  draw OBB and AABB
    fig = plt.figure()
    ax1 = fig.add_subplot(121)
    ax2 = fig.add_subplot(122)
    ax1.axis("off")
    ax2.axis("off")
    ax1.set_title('OBB')
    ax2.set_title('AABB')
    ax1.imshow(cv2.cvtColor(img_board, cv2.COLOR_BGR2RGB))
    ax2.imshow(cv2.cvtColor(img_crop, cv2.COLOR_BGR2RGB))
    plt.show()


    #  crop board
    cropped_img = img[y:y+h, x:x+w].copy()
    plt.imshow(cv2.cvtColor(cropped_img, cv2.COLOR_BGR2RGB))
    plt.show()

    #  convert cropped area to gray
    cropped_img_gray = cv2.cvtColor(cropped_img, cv2.COLOR_BGR2GRAY)
    plt.imshow(cropped_img_gray, cmap="gray")
    plt.show()

else:
    print("Not enough match")
