在 opencv 中创建一个蒙版并删除内部轮廓 python
create a mask and delete inside contour in opencv python
这是我的代码的结果:
enter image description here
我使用轮廓在脸上制作了这个面具,如下面的代码所示。
该项目的最终结果是删除面部并显示背景我还没有定义它 .
我的问题是: 有没有什么方法可以用这个轮廓(contour)制作一个蒙版,这样我就可以使用类似这样的东西 cv2.imshow('My Image',cmb(foreground,background,mask))
只在背景的蒙版下显示前景? (这里的问题是我必须以这种形式将面具作为视频,但我希望它是实时的)
或者换一种方式:我能以某种方式删除轮廓内部(或下方)的帧像素吗?
这是我的代码:
from imutils.video import VideoStream
from imutils import face_utils
import datetime
import argparse
import imutils
import time
import dlib
import cv2
import numpy as np

# Command-line arguments: path to dlib's facial landmark predictor, and an
# optional flag selecting the Raspberry Pi camera instead of a USB webcam.
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--shape-predictor", required=True,
                help="path to facial landmark predictor")
# BUG FIX: the original referenced args["picamera"] but never declared the
# option, and never called parse_args(), so `args` was undefined at runtime.
ap.add_argument("-r", "--picamera", type=int, default=-1,
                help="whether or not the Raspberry Pi camera should be used")
args = vars(ap.parse_args())

print("[INFO] loading facial landmark predictor...")
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(args["shape_predictor"])

# Grab the landmark index ranges for the regions that outline the face.
(lebStart, lebEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eyebrow"]
(rebStart, rebEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eyebrow"]
(jawStart, jawEnd) = face_utils.FACIAL_LANDMARKS_IDXS["jaw"]

# Initialize the video stream and allow the camera sensor to warm up.
print("[INFO] camera sensor warming up...")
vs = VideoStream(usePiCamera=args["picamera"] > 0).start()
time.sleep(2.0)

# Loop over the frames from the video stream.
while True:
    # Grab the frame from the threaded video stream, resize it to have a
    # maximum width of 400 pixels, and convert it to grayscale.
    frame = vs.read()
    frame = imutils.resize(frame, width=400)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Detect faces in the grayscale frame.
    rects = detector(gray, 0)

    # Loop over the face detections.
    for rect in rects:
        shape = predictor(gray, rect)
        shape = face_utils.shape_to_np(shape)

        # Landmarks from the jaw through the left eyebrow outline the face.
        faceline = shape[jawStart:lebEnd]

        # Compute the convex hull for the face, then visualize it:
        # black-fill the face region and draw its green outline.
        facelineHull = cv2.convexHull(faceline)
        mask = np.zeros(frame.shape, dtype='uint8')
        cv2.drawContours(frame, [facelineHull], -1, (0, 0, 0),
                         thickness=cv2.FILLED)
        cv2.drawContours(frame, [facelineHull], -1, (0, 255, 0))

    # Show the frame.
    cv2.imshow("Frame", frame)
    # cv2.imshow("Frame", mask)
    key = cv2.waitKey(1) & 0xFF

    # If the `q` key was pressed, break from the loop.
    if key == ord("q"):
        break

# Do a bit of cleanup.
cv2.destroyAllWindows()
vs.stop()
假设您的掩码是二进制掩码,您可以执行以下操作:
def cmb(foreground, background, mask):
    """Composite two images: take pixels from *foreground* wherever *mask*
    selects them, and keep *background* everywhere else.

    The inputs are never mutated; a fresh copy of *background* is returned.
    """
    combined = background.copy()
    combined[mask] = foreground[mask]
    return combined
我没有测试这段代码,但我希望你能理解这个想法。您反转背景的蒙版,而单独保留前景的蒙版。你将它应用到每一帧,瞧,你有你的蒙版图像。
编辑:
根据评论调整代码。当然,这个解决方案比我最初写的要清晰得多。不过,功能保持不变。
这是我从框架中删除人脸的解决方案(速度更快,但再次感谢@meetaig 的帮助)
# Build a binary mask of the face region: start from an all-black image
# the same size as the frame.
mask = np.zeros(frame.shape,dtype='uint8')
# Fill the convex hull of the face with white.
mask = cv2.drawContours(mask, [facelineHull], -1, (255 , 255 , 255),thickness=cv2.FILLED)
# Invert: the face becomes black, everything else white.
mask = cv2.bitwise_not(mask)
# Collapse the 3-channel mask to a single channel, then threshold to a
# strict 0/255 binary image as required by cv2.bitwise_and's `mask` arg.
img2gray = cv2.cvtColor(mask,cv2.COLOR_BGR2GRAY)
ret, mask = cv2.threshold(img2gray, 10, 255, cv2.THRESH_BINARY)
# Keep the frame wherever the mask is white, i.e. erase the face pixels.
result= cv2.bitwise_and(frame,frame,mask=mask)
如果我现在显示结果,它将起作用。
# Display the face-removed frame in the preview window.
cv2.imshow("Frame", result)
这是我的代码的结果: enter image description here
我使用轮廓在脸上制作了这个面具,如下面的代码所示。
该项目的最终结果是删除面部并显示背景我还没有定义它 .
我的问题是: 有没有什么方法可以用这个轮廓(contour)制作一个蒙版,这样我就可以使用类似这样的东西 cv2.imshow('My Image',cmb(foreground,background,mask))
只在背景的蒙版下显示前景? (这里的问题是我必须以这种形式将面具作为视频,但我希望它是实时的)
或者换一种方式:我能以某种方式删除轮廓内部(或下方)的帧像素吗?
这是我的代码:
from imutils.video import VideoStream
from imutils import face_utils
import datetime
import argparse
import imutils
import time
import dlib
import cv2
import numpy as np

# Command-line arguments: path to dlib's facial landmark predictor, and an
# optional flag selecting the Raspberry Pi camera instead of a USB webcam.
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--shape-predictor", required=True,
                help="path to facial landmark predictor")
# BUG FIX: the original referenced args["picamera"] but never declared the
# option, and never called parse_args(), so `args` was undefined at runtime.
ap.add_argument("-r", "--picamera", type=int, default=-1,
                help="whether or not the Raspberry Pi camera should be used")
args = vars(ap.parse_args())

print("[INFO] loading facial landmark predictor...")
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(args["shape_predictor"])

# Grab the landmark index ranges for the regions that outline the face.
(lebStart, lebEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eyebrow"]
(rebStart, rebEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eyebrow"]
(jawStart, jawEnd) = face_utils.FACIAL_LANDMARKS_IDXS["jaw"]

# Initialize the video stream and allow the camera sensor to warm up.
print("[INFO] camera sensor warming up...")
vs = VideoStream(usePiCamera=args["picamera"] > 0).start()
time.sleep(2.0)

# Loop over the frames from the video stream.
while True:
    # Grab the frame from the threaded video stream, resize it to have a
    # maximum width of 400 pixels, and convert it to grayscale.
    frame = vs.read()
    frame = imutils.resize(frame, width=400)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Detect faces in the grayscale frame.
    rects = detector(gray, 0)

    # Loop over the face detections.
    for rect in rects:
        shape = predictor(gray, rect)
        shape = face_utils.shape_to_np(shape)

        # Landmarks from the jaw through the left eyebrow outline the face.
        faceline = shape[jawStart:lebEnd]

        # Compute the convex hull for the face, then visualize it:
        # black-fill the face region and draw its green outline.
        facelineHull = cv2.convexHull(faceline)
        mask = np.zeros(frame.shape, dtype='uint8')
        cv2.drawContours(frame, [facelineHull], -1, (0, 0, 0),
                         thickness=cv2.FILLED)
        cv2.drawContours(frame, [facelineHull], -1, (0, 255, 0))

    # Show the frame.
    cv2.imshow("Frame", frame)
    # cv2.imshow("Frame", mask)
    key = cv2.waitKey(1) & 0xFF

    # If the `q` key was pressed, break from the loop.
    if key == ord("q"):
        break

# Do a bit of cleanup.
cv2.destroyAllWindows()
vs.stop()
假设您的掩码是二进制掩码,您可以执行以下操作:
def cmb(foreground, background, mask):
    """Composite two images: take pixels from *foreground* wherever *mask*
    selects them, and keep *background* everywhere else.

    The inputs are never mutated; a fresh copy of *background* is returned.
    """
    combined = background.copy()
    combined[mask] = foreground[mask]
    return combined
我没有测试这段代码,但我希望你能理解这个想法。您反转背景的蒙版,而单独保留前景的蒙版。你将它应用到每一帧,瞧,你有你的蒙版图像。
编辑: 根据评论调整代码。当然,这个解决方案比我最初写的要清晰得多。不过,功能保持不变。
这是我从框架中删除人脸的解决方案(速度更快,但再次感谢@meetaig 的帮助)
# Build a binary mask of the face region: start from an all-black image
# the same size as the frame.
mask = np.zeros(frame.shape,dtype='uint8')
# Fill the convex hull of the face with white.
mask = cv2.drawContours(mask, [facelineHull], -1, (255 , 255 , 255),thickness=cv2.FILLED)
# Invert: the face becomes black, everything else white.
mask = cv2.bitwise_not(mask)
# Collapse the 3-channel mask to a single channel, then threshold to a
# strict 0/255 binary image as required by cv2.bitwise_and's `mask` arg.
img2gray = cv2.cvtColor(mask,cv2.COLOR_BGR2GRAY)
ret, mask = cv2.threshold(img2gray, 10, 255, cv2.THRESH_BINARY)
# Keep the frame wherever the mask is white, i.e. erase the face pixels.
result= cv2.bitwise_and(frame,frame,mask=mask)
如果我现在显示结果,它将起作用。
# Display the face-removed frame in the preview window.
cv2.imshow("Frame", result)