Save extracted face, left_eye, right_eye as numpy arrays (in dlib detection loop/iteration) - python

I am unable to save the detected face, left_eye and right_eye as .npy files; the resulting arrays are empty. The face, left_eye and right_eye were detected using dlib facial landmarks and should each be saved as a .npy file, but my numpy array is empty.
# loop over the individual facial parts
for (name, (i, j)) in FACIAL_LANDMARKS_IDXS.items():
    visual_check = image.copy()
    # draw a rectangle on the detected facial part
    (x, y, w, h) = cv2.boundingRect(np.array([shape[i:j]]))
    cv2.rectangle(visual_check, (x, y), (x + w, y + h), (0, 255, 0), 2)  # green box
    # clone the original image and
    # display the name of the face part on the image
    extracted_data = image.copy()
    cv2.putText(visual_check, name, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 210, 255), 2)
    # loop over the subset of facial landmarks, drawing the specific face part
    for (u, v) in shape[i:j]:
        cv2.circle(visual_check, (u, v), 1, (0, 0, 255), -1)
    # extract the ROI of the face region as a separate image
    (u, v, w, h) = cv2.boundingRect(np.array([shape[i:j]]))
    roi = extracted_data[v - 20: v + h + 20, u - 20: u + w + 20]
    roi_frame = imutils.resize(roi, 64, 64, inter=cv2.INTER_CUBIC)
    right_eye_data = np.array(extracted_data)
    # left_eye_data = np.array(left_eye)
    # face_data = np.array(face)
    # right_eye_data.append(right_eye)  # declare array of detected right eye
    # left_eye_data.append(left_eye)
    # face_data.append(face)
    np.save('./train_right_eye.npy + d%', extracted_data)
    # np.save('./train_left_eye.npy', left_eye_data)
    # np.save('./train_face.npy', face_data)
    print(right_eye_data)
    print(left_eye_data)
    print(face_data)
Expected result/output:
# left_eye.npy (1000000, 64, 64, 3)
# right_eye.npy (1000000, 64, 64, 3)
# face.npy (1000000, 64, 64, 3)
# train_y.npy (1000000, 64, 64, 1)
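A likely cause: nothing is ever accumulated across iterations. right_eye_data = np.array(extracted_data) is overwritten on every pass, the .append(...) calls are commented out, and './train_right_eye.npy + d%' is a single malformed filename rather than a formatted string. A minimal sketch of the accumulate-then-save pattern, assuming the loop variables from the code above (the part names, including the "face" key, are assumptions, and cv2.resize is used instead of imutils.resize to force exact 64x64 patches):

import cv2
import numpy as np

# accumulators must live OUTSIDE the image/detection loops
part_rois = {"right_eye": [], "left_eye": [], "face": []}  # hypothetical part names

# inside the landmark loop, after computing (u, v, w, h) for the part `name`:
#     roi = extracted_data[v - 20: v + h + 20, u - 20: u + w + 20]
#     if roi.size and name in part_rois:
#         part_rois[name].append(cv2.resize(roi, (64, 64), interpolation=cv2.INTER_CUBIC))

# after ALL images have been processed, stack and save once per part
for name, rois in part_rois.items():
    if rois:  # np.stack raises on an empty list
        np.save("./train_{}.npy".format(name), np.stack(rois))  # shape (N, 64, 64, 3)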

Related

findContours() detects unintended internal edges and computes their areas strangely - OpenCV Python

I'm not sure what is going on here, but when I use the findContours() function with cv2.RETR_EXTERNAL on this image:
it still seems to detect inner contours and computes their areas strangely, which prevents me from filtering out the unwanted contours.
Any clue why that is?
Here are the original and dilated threshold images:
Here's the code so far:
import cv2
import PIL
import numpy as np
import imutils

imgAddr = "ADisplay2.jpg"
cropX = 20
cropY = 200
cropAngle = 2
CropIndex = (cropX, cropY, cropAngle)

img = cv2.imread(imgAddr)
cv2.imshow("original image", img)

# grab the dimensions of the image and calculate its center
(h, w) = img.shape[:2]
(cX, cY) = (w / 2, h / 2)

# rotate the image by -1.2 degrees about its center
M = cv2.getRotationMatrix2D((cX, cY), -1.2, 1.0)
rotated = cv2.warpAffine(img, M, (w, h))
#cv2.imshow("Rotated", rotated)

cropedImg = rotated[300:700, 100:1500]
#cv2.imshow("croped img", cropedImg)

grayImg = cv2.cvtColor(cropedImg, cv2.COLOR_BGR2GRAY)
#cv2.imshow("gray scale image", grayImg)

blurredImg = cv2.GaussianBlur(grayImg, (9, 9), 0)
cv2.imshow("Blurred_Img", blurredImg)

(T, threshInvImg) = cv2.threshold(blurredImg, 0, 255,
    cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
cv2.imshow("ThresholdInvF.jpg", threshInvImg)

kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (7, 19))
#opening = cv2.morphologyEx(threshInvImg, cv2.MORPH_OPEN, kernel)
#cv2.imshow("openingImg", opening)

dialeteImg = cv2.morphologyEx(threshInvImg, cv2.MORPH_DILATE, kernel)
cv2.imshow("erodeImg", dialeteImg)

cannyImg = cv2.Canny(dialeteImg, 100, 200)
cv2.imshow("Canny_img", cannyImg)

_, cntsImg, hierarchy = cv2.findContours(cannyImg, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
#print("Img cnts: {}".format(cntsImg))
#print("Img hierarchy: {}".format(hierarchy))

txtOffset = (25, 50)
for cntIdx, cnt in enumerate(cntsImg):
    cntArea = cv2.contourArea(cnt)
    print("Area of contour #{} = {}".format(cntIdx, cntArea))
    (x, y, w, h) = cv2.boundingRect(cnt)
    cv2.rectangle(cropedImg, (x, y), (x + w, y + h), (0, 255, 0), 2)
    txtIdxPos = [x, y]
    txtPos = ((txtIdxPos[0] + txtOffset[0]), (txtIdxPos[1] + txtOffset[1]))
    cv2.putText(cropedImg, "#{}".format(cntIdx), txtPos, cv2.FONT_HERSHEY_SIMPLEX, 1.25, (0, 0, 255), 4)
cv2.imshow("drawCntsImg.jpg", cropedImg)
cv2.waitKey(0)
Thanks for helping :D
What you could do is use a contour only if it falls within a certain size range. For this you could use contourArea(); you already compute it anyhow.
For example:
for cntIdx, cnt in enumerate(cntsImg):
    cntArea = cv2.contourArea(cnt)
    #########################
    # Skip iteration if area is too big or small to filter out non-digits
    if cntArea < 50 or cntArea > 100: continue  # Need to fiddle with these values
    #########################
    print("Area of contour #{} = {}".format(cntIdx, cntArea))
    (x, y, w, h) = cv2.boundingRect(cnt)
    cv2.rectangle(cropedImg, (x, y), (x + w, y + h), (0, 255, 0), 2)
    txtIdxPos = [x, y]
    txtPos = ((txtIdxPos[0] + txtOffset[0]), (txtIdxPos[1] + txtOffset[1]))
    cv2.putText(cropedImg, "#{}".format(cntIdx), txtPos, cv2.FONT_HERSHEY_SIMPLEX, 1.25, (0, 0, 255), 4)
You are already printing out each contour's area; you could use that to get an idea of which sizes to let through.
If the size of the digits varies between images, fixed thresholds could still be a problem. In that case you could, for example, calculate the average contour area, which should be close to the typical digit area, and require each contour to be reasonably close to that average (see the sketch below).
Note: just remember to keep the minimum area small enough to let a 1 through.
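A minimal sketch of that average-area idea, continuing from the loop variables above; the 0.5-1.5 band is a guess to tune:

# filter contours by closeness to the average contour area
areas = [cv2.contourArea(c) for c in cntsImg]
avg_area = sum(areas) / len(areas) if areas else 0

digit_cnts = []
for cnt, area in zip(cntsImg, areas):
    # keep the lower bound generous enough to let a narrow "1" through
    if 0.5 * avg_area <= area <= 1.5 * avg_area:
        digit_cnts.append(cnt)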
Update:
If you would rather use the aspect ratio, the check is easy to change, as you already calculate the height and width.
# If height is smaller than 1.5*w or larger than 2.5*w, then skip
if not 1.5 < h/w < 2.5: continue # Need to fiddle with these values
You could even use the bounding box to calculate the area; it may well differ from contourArea(). For example:
cntArea = w*h

Size of detected region of interest is not square

My problem is that the detected regions of interest (face, eye and nose) are not square, even after I resize them:
width = height = 64
# filename = label
# label =
# load all images in traincroppedlabelledimage_inputPath
for n, img in enumerate(TrainCroppedLabelledImage[:5]):
    image = cv2.imread(img)
    image = imutils.resize(image, width=500)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # detect faces in the grayscale image
    rects = detector(gray, 1)
    # loop over the face detections
    for (r, rect) in enumerate(rects):
        # determine the facial landmarks for the face region and
        # convert the landmark (x, y)-coordinates to a numpy array
        shape = predictor(gray, rect)
        shape = face_utils.shape_to_np(shape)
        # # draw box on detected face (blue)
        # (x, y, w, h) = face_utils.rect_to_bb(rect)
        # cv2.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 2)
        # loop over the individual facial parts
        for (name, (i, j)) in FACIAL_LANDMARKS_IDXS.items():
            visual_check = image.copy()
            # draw a rectangle on the detected facial part
            (x, y, w, h) = cv2.boundingRect(np.array([shape[i:j]]))
            cv2.rectangle(visual_check, (x, y), (x + w, y + h), (0, 255, 0), 2)  # green box
            # clone the original image and
            # display the name of the face part on the image
            extracted_data = image.copy()
            cv2.putText(visual_check, name, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 210, 255), 2)
            # loop over the subset of facial landmarks, drawing the specific face part
            for (u, v) in shape[i:j]:
                cv2.circle(visual_check, (u, v), 1, (0, 0, 255), -1)
            # extract the ROI of the face region as a separate image
            (u, v, w, h) = cv2.boundingRect(np.array([shape[i:j]]))
            roi = extracted_data[v - 32: v + h + 32, u - 32: u + w + 32]
            roi_frame = imutils.resize(roi, width=height, inter=cv2.INTER_CUBIC)
The detected eye, face and nose regions need to be square, 64x64.
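A hedged explanation: imutils.resize preserves the aspect ratio (it honours only one of width/height and scales the other accordingly), so a non-square ROI stays non-square. Forcing both dimensions with cv2.resize should give exact 64x64 patches:

# cv2.resize takes an explicit (width, height) and does not preserve aspect ratio
roi_frame = cv2.resize(roi, (width, height), interpolation=cv2.INTER_CUBIC)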

OpenCV: join nearest Rect contours

My input image named "img" is as follows:
I have the following code to detect contours on this image:
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
grad = cv2.morphologyEx(img, cv2.MORPH_GRADIENT, kernel)
_, bw = cv2.threshold(grad, 0.0, 255.0, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (9, 1))
connected = cv2.morphologyEx(bw, cv2.MORPH_CLOSE, kernel)
contours, hierarchy = cv2.findContours(connected.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
mask = np.zeros(bw.shape, dtype=np.uint8)
for idx in range(len(contours)):
    x, y, w, h = cv2.boundingRect(contours[idx])
    cv2.rectangle(img, (x, y), (x + w - 1, y + h - 1), (255, 255, 255), 2)
    print(w, x, y)
I am getting the following output:
My question is: how do I join the nearest contours on the last 3 lines of the image? In the output I want 3 rectangular boxes covering the 3 lines of the MRZ. I've referred to https://dsp.stackexchange.com/questions/2564/opencv-c-connect-nearby-contours-based-on-distance-between-them/2618#2618, but that method seems computationally expensive; I want something simple.
Below a relatively simple solution. The comments explain the idea behind it.
import cv2, numpy as np

img = cv2.imread("test.jpg", cv2.IMREAD_GRAYSCALE)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
grad = cv2.morphologyEx(img, cv2.MORPH_GRADIENT, kernel)
_, bw = cv2.threshold(grad, 0.0, 255.0, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (9, 1))
connected = cv2.morphologyEx(bw, cv2.MORPH_CLOSE, kernel)
contours, hierarchy = cv2.findContours(connected.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2:]

# y-coordinate of midline of rectangle
def ymid(y, h): return y + int(h / 2)

# identify lines (l = 0, 1, ...) based on ymid() and estimate line width
ym2l, l, l2w, rects = {}, 0, {}, []
for cont in contours:
    x, y, w, h = cv2.boundingRect(cont)
    rects.append([x, y, w, h])
    ym = ymid(y, h)
    if ym not in ym2l:
        for i in range(-2, 3):  # range of ymid() values allowed for the same line
            if ym + i not in ym2l:
                ym2l[ym + i] = l
        l2w[l] = w
        l += 1
    else:
        l2w[ym2l[ym]] += w

# combine rectangles for "good" lines (those close to the maximum width)
maxw, l2r = max(l2w.values()), {}
for x, y, w, h in rects:
    l = ym2l[ymid(y, h)]
    if l2w[l] > .9 * maxw:
        if l not in l2r:
            l2r[l] = [x, y, x + w, y + h]
        else:
            x1, y1, X1, Y1 = l2r[l]
            l2r[l] = [min(x, x1), min(y, y1), max(x + w, X1), max(y + h, Y1)]
for x, y, X, Y in l2r.values():
    cv2.rectangle(img, (x, y), (X - 1, Y - 1), (255, 255, 255), 2)
cv2.imshow("img", img)
cv2.waitKey(0)
Here is the result:
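For completeness, a cruder but simpler alternative (my sketch, not part of the answer above): close the binary image with a wide horizontal kernel so that boxes on the same line merge into one blob, then take the bounding boxes of the merged blobs. The kernel width of 25 is a guess to tune against the gaps in your MRZ:

# merge horizontally nearby components into one blob per text line
wide_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (25, 1))
merged = cv2.morphologyEx(bw, cv2.MORPH_CLOSE, wide_kernel)
cnts, _ = cv2.findContours(merged, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2:]
for c in cnts:
    x, y, w, h = cv2.boundingRect(c)
    cv2.rectangle(img, (x, y), (x + w - 1, y + h - 1), (255, 255, 255), 2)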

Unable to use sort_contors for building seven segment OCR

I am trying to build an OCR for recognising a seven-segment display, as shown below.
Using OpenCV's preprocessing tools I got it to this point.
Now I am trying to follow this tutorial - https://www.pyimagesearch.com/2017/02/13/recognizing-digits-with-opencv-and-python/
But on the part
digitCnts = contours.sort_contours(digitCnts,
    method="left-to-right")[0]
digits = []
I am getting this error:
File "/Users/ms/anaconda3/lib/python3.6/site-packages/imutils/contours.py", line 25, in sort_contours
    key=lambda b: b[1][i], reverse=reverse))
ValueError: not enough values to unpack (expected 2, got 0)
Update: the error was solved using THRESH_BINARY_INV, but the OCR is still not working; any fix would be great.
Any idea how to solve this and make my OCR a working model?
My whole code is:
# import the necessary packages
from imutils.perspective import four_point_transform
from imutils import contours
import numpy as np
import imutils
import cv2

# define the dictionary of digit segments so we can identify
# each digit on the thermostat
DIGITS_LOOKUP = {
    (1, 1, 1, 0, 1, 1, 1): 0,
    (0, 0, 1, 0, 0, 1, 0): 1,
    (1, 0, 1, 1, 1, 1, 0): 2,
    (1, 0, 1, 1, 0, 1, 1): 3,
    (0, 1, 1, 1, 0, 1, 0): 4,
    (1, 1, 0, 1, 0, 1, 1): 5,
    (1, 1, 0, 1, 1, 1, 1): 6,
    (1, 0, 1, 0, 0, 1, 0): 7,
    (1, 1, 1, 1, 1, 1, 1): 8,
    (1, 1, 1, 1, 0, 1, 1): 9
}

# load image
image = cv2.imread('d4.jpg')
# create hsv
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
# set lower and upper color limits
low_val = (60, 180, 160)
high_val = (179, 255, 255)
# threshold the HSV image
mask = cv2.inRange(hsv, low_val, high_val)
# find contours in mask
ret, cont, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# select the largest contour
largest_area = 0
for cnt in cont:
    if cv2.contourArea(cnt) > largest_area:
        cont = cnt
        largest_area = cv2.contourArea(cnt)
# get the parameters of the bounding box
x, y, w, h = cv2.boundingRect(cont)
# create and show subimage
roi = image[y:y + h, x:x + w]
cv2.imshow("Result", roi)
# draw box on original image and show image
cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 2)
cv2.imshow("Image", image)

grayscaled = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
retval, threshold = cv2.threshold(grayscaled, 10, 255, cv2.THRESH_BINARY)
retval2, threshold2 = cv2.threshold(grayscaled, 125, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
cv2.imshow('threshold', threshold2)
cv2.waitKey(0)
cv2.destroyAllWindows()

# find contours in the thresholded image, then initialize the
# digit contours lists
cnts = cv2.findContours(threshold2.copy(), cv2.RETR_EXTERNAL,
    cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
digitCnts = []
# loop over the digit area candidates
for c in cnts:
    # compute the bounding box of the contour
    (x, y, w, h) = cv2.boundingRect(c)
    # if the contour is sufficiently large, it must be a digit
    if w >= 15 and (h >= 30 and h <= 40):
        digitCnts.append(c)
# sort the contours from left-to-right, then initialize the
# actual digits themselves
digitCnts = contours.sort_contours(digitCnts,
    method="left-to-right")[0]
digits = []

# loop over each of the digits
for c in digitCnts:
    # extract the digit ROI
    (x, y, w, h) = cv2.boundingRect(c)
    roi = thresh[y:y + h, x:x + w]
    # compute the width and height of each of the 7 segments
    # we are going to examine
    (roiH, roiW) = roi.shape
    (dW, dH) = (int(roiW * 0.25), int(roiH * 0.15))
    dHC = int(roiH * 0.05)
    # define the set of 7 segments
    segments = [
        ((0, 0), (w, dH)),                           # top
        ((0, 0), (dW, h // 2)),                      # top-left
        ((w - dW, 0), (w, h // 2)),                  # top-right
        ((0, (h // 2) - dHC), (w, (h // 2) + dHC)),  # center
        ((0, h // 2), (dW, h)),                      # bottom-left
        ((w - dW, h // 2), (w, h)),                  # bottom-right
        ((0, h - dH), (w, h))                        # bottom
    ]
    on = [0] * len(segments)
    # loop over the segments
    for (i, ((xA, yA), (xB, yB))) in enumerate(segments):
        # extract the segment ROI, count the total number of
        # thresholded pixels in the segment, and then compute
        # the area of the segment
        segROI = roi[yA:yB, xA:xB]
        total = cv2.countNonZero(segROI)
        area = (xB - xA) * (yB - yA)
        # if the total number of non-zero pixels is greater than
        # 50% of the area, mark the segment as "on"
        if total / float(area) > 0.5:
            on[i] = 1
    # lookup the digit and draw it on the image
    digit = DIGITS_LOOKUP[tuple(on)]
    digits.append(digit)
    cv2.rectangle(output, (x, y), (x + w, y + h), (0, 255, 0), 1)
    cv2.putText(output, str(digit), (x - 10, y - 10),
        cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 255, 0), 2)

# display the digits
print(u"{}{}.{}{}.{}{} \u00b0C".format(*digits))
cv2.imshow("Input", image)
cv2.imshow("Output", output)
cv2.waitKey(0)
Any help in fixing my OCR would be great.
So, as I said in the comments, there were two problems:
1. You were trying to find black contours on a white background, which is the opposite of what OpenCV expects (white objects on a black background); with no contours found, digitCnts stays empty, which is what made sort_contours raise the unpack error. This was solved using the THRESH_BINARY_INV flag instead of THRESH_BINARY.
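For reference, the fix is just the threshold flag; a reconstruction based on the question's code (my guess at the exact line):

retval2, threshold2 = cv2.threshold(grayscaled, 125, 255,
    cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)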
2. Due to the numbers not being connected, a full contour for each number couldn't be found. So I tried some morphological operations. Following are the steps:
2a) Opening on the above image with the following code:
threshold2 = cv2.morphologyEx(threshold, cv2.MORPH_OPEN, np.ones((3,3), np.uint8))
2b) Dilation on the previous image:
threshold2 = cv2.dilate(threshold2, np.ones((5,1), np.uint8), iterations=1)
2c) Crop the top part of the image to separate the numbers, since dilation ran them into the top border:
height, width = threshold2.shape[:2]
threshold2 = threshold2[5:height,5:width]
Note: somehow the images are being displayed here without the white border that I am talking about. Try opening the image in a new window and you will see what I mean.
So, after solving these issues, the contours were pretty good and as they were supposed to be, as seen here:
cnts = cv2.findContours(threshold2.copy(), cv2.RETR_EXTERNAL,
    cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
digitCnts = []
# loop over the digit area candidates
for c in cnts:
    # compute the bounding box of the contour
    (x, y, w, h) = cv2.boundingRect(c)
    # if the contour is sufficiently large, it must be a digit
    if w <= width * 0.5 and (h >= height * 0.2):
        digitCnts.append(c)
# draw the digit contours for a visual check
cv2.drawContours(image2, digitCnts, -1, (0, 0, 255))
cv2.imwrite("cnts-sort.jpg", image2)
As you can see below, the contours are drawn in red.
Now, for deciding which digit each ROI represents, this part somehow doesn't work, and I blame the look-up table for it. As you can see from the images below, the bounding rects for all the numbers are correctly cropped, but the lookup table fails to recognise them.
# loop over each of the digits
j = 0
for c in digitCnts:
    # extract the digit ROI
    (x, y, w, h) = cv2.boundingRect(c)
    roi = threshold2[y:y + h, x:x + w]
    cv2.imwrite("roi" + str(j) + ".jpg", roi)
    j += 1
    # compute the width and height of each of the 7 segments
    # we are going to examine
    (roiH, roiW) = roi.shape
    (dW, dH) = (int(roiW * 0.25), int(roiH * 0.15))
    dHC = int(roiH * 0.05)
    # define the set of 7 segments
    segments = [
        ((0, 0), (w, dH)),                           # top
        ((0, 0), (dW, h // 2)),                      # top-left
        ((w - dW, 0), (w, h // 2)),                  # top-right
        ((0, (h // 2) - dHC), (w, (h // 2) + dHC)),  # center
        ((0, h // 2), (dW, h)),                      # bottom-left
        ((w - dW, h // 2), (w, h)),                  # bottom-right
        ((0, h - dH), (w, h))                        # bottom
    ]
    on = [0] * len(segments)
    # loop over the segments
    for (i, ((xA, yA), (xB, yB))) in enumerate(segments):
        # extract the segment ROI, count the total number of
        # thresholded pixels in the segment, and then compute
        # the area of the segment
        segROI = roi[yA:yB, xA:xB]
        total = cv2.countNonZero(segROI)
        area = (xB - xA) * (yB - yA)
        # if the total number of non-zero pixels is greater than
        # 50% of the area, mark the segment as "on"
        if area != 0:
            if total / float(area) > 0.5:
                on[i] = 1
    # lookup the digit and draw it on the image
    try:
        digit = DIGITS_LOOKUP[tuple(on)]
        digits.append(digit)
        cv2.rectangle(roi, (x, y), (x + w, y + h), (0, 255, 0), 1)
        cv2.putText(roi, str(digit), (x - 10, y - 10),
            cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 255, 0), 2)
    except KeyError:
        continue
I read through the website you mentioned in the question and from the comments it seems some of the entries in the LUT might be wrong. So I am going to leave it to you to figure that out. Following are the individual digits found (but not recognised):
Alternatively, you could use tesseract instead to recognise these detected digits.
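A minimal sketch of that tesseract route, assuming the pytesseract wrapper is installed and roi is one of the cropped digit images saved above; the --psm 10 (single character) mode and the digit whitelist are tuning guesses, and stock tesseract models may still struggle with seven-segment fonts:

import pytesseract

# treat each ROI as a single character and restrict the alphabet to digits
config = "--psm 10 -c tessedit_char_whitelist=0123456789"
digit_text = pytesseract.image_to_string(roi, config=config).strip()
print(digit_text)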
Hope it helps!
I think the lookup table you created is for a standard seven-segment display, not for this display's font. As the size of the display is fixed, I think you can try to segment it into separate regions and recognise each one using template matching or k-means.
These are my preprocessing steps:
(1) Find the light-green display in HSV:
mask = cv2.inRange(hsv, (50, 100, 180), (70, 255, 255))
(2) Try to separate the digits by projection and recognise standard seven-segment digits using the LUT:
(3) Try it on the detected green display:
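A minimal sketch of the template-matching idea, assuming one grayscale template image per digit cropped from a reference display (the filenames are hypothetical):

import cv2

def match_digit(roi_gray, templates):
    # return the template digit with the highest normalized correlation score
    best_digit, best_score = None, -1.0
    for digit, tmpl in templates.items():
        resized = cv2.resize(roi_gray, (tmpl.shape[1], tmpl.shape[0]))
        score = float(cv2.matchTemplate(resized, tmpl, cv2.TM_CCOEFF_NORMED).max())
        if score > best_score:
            best_digit, best_score = digit, score
    return best_digit

# hypothetical template files "0.png" ... "9.png"
templates = {d: cv2.imread("{}.png".format(d), cv2.IMREAD_GRAYSCALE) for d in range(10)}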

Python file write all the bounding box coordinates using OpenCV

My task is to extract the bounding box coordinates from the following image:
I have the following code. I am trying to get the coordinates via roi, but I am not sure how to extract them.
import cv2
import numpy as np

large = cv2.imread('1.jpg')
small = cv2.cvtColor(large, cv2.COLOR_BGR2GRAY)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
grad = cv2.morphologyEx(small, cv2.MORPH_GRADIENT, kernel)
_, bw = cv2.threshold(grad, 0.0, 255.0, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (9, 1))
connected = cv2.morphologyEx(bw, cv2.MORPH_CLOSE, kernel)
contours, hierarchy = cv2.findContours(connected.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
mask = np.zeros(bw.shape, dtype=np.uint8)
for idx in range(len(contours)):
    x, y, w, h = cv2.boundingRect(contours[idx])
    mask[y:y + h, x:x + w] = 0
    cv2.drawContours(mask, contours, idx, (255, 255, 255), -1)
    r = float(cv2.countNonZero(mask[y:y + h, x:x + w])) / (w * h)
    if r > 0.45 and w > 8 and h > 8:
        cv2.rectangle(large, (x, y), (x + w - 1, y + h - 1), (0, 255, 0), 1)
        roi = large[y:y + h, x:x + w]
        print(roi)
Result should be something like this:
1675,1335,2338,1338,2337,1455,1674,1452. :Box1
3067,519,3604,521,3603,651,3066,648 :Box2
1017,721,1729,726,1728,857,1016,852 :Box3
I have referred to Extract all bounding boxes using OpenCV Python. In that link they extract the images inside bounding boxes, starting from an image whose rectangles were already annotated with a GUI. I instead want to write the detected regions' coordinates to a text file. How do I do it?
x, y, w, h = cv2.boundingRect(contours[idx]) already gives the coordinates you want; then write them to a txt file:
...
with open("coords.txt", "w+") as file:
    for idx in range(len(contours)):
        x, y, w, h = cv2.boundingRect(contours[idx])
        mask[y:y + h, x:x + w] = 0
        # note the trailing \n so each box lands on its own line
        file.write("Box {0}: ({1},{2}), ({3},{4}), ({5},{6}), ({7},{8})\n".format(
            idx, x, y, x + w, y, x + w, y + h, x, y + h))
        cv2.drawContours(mask, contours, idx, (255, 255, 255), -1)
        r = float(cv2.countNonZero(mask[y:y + h, x:x + w])) / (w * h)
...
The result will contain 4 points for each box, like this.
Box 0: (360,259), (364,259), (364,261), (360,261)
Box 1: (380,258), (385,258), (385,262), (380,262)
Box 2: (365,258), (370,258), (370,262), (365,262)
Box 3: (386,256), (393,256), (393,260), (386,260)
Box 4: (358,256), (361,256), (361,258), (358,258)
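If you need the exact comma-separated layout from the question (x1,y1,...,y4 :BoxN), only the format string changes; a hedged variant of the write above:

# e.g. "1675,1335,2338,1338,2337,1455,1674,1452 :Box1"
file.write("{0},{1},{2},{3},{4},{5},{6},{7} :Box{8}\n".format(
    x, y, x + w, y, x + w, y + h, x, y + h, idx + 1))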
