Why can't I place my image on eye detection?
I want to place a transparent image at the position where cv2 detects my eyes. I have the two main pieces working; now I need to combine them.
For example, here is the image transparency working, and here is the output with the eye detection working. The script and images are below; I don't know how to proceed.
Images
app.py
import os
import numpy
import cv2
from PIL import Image
from os.path import join, dirname, realpath

def upload_files():
    #https://github.com/Itseez/opencv/blob/master/data/haarcascades/haarcascade_frontalface_default.xml
    face_cascade = cv2.CascadeClassifier('/Users/matt/Python/LazerEyes/haarcascade_eye.xml')
    #https://github.com/Itseez/opencv/blob/master/data/haarcascades/haarcascade_eye.xml
    eye_cascade = cv2.CascadeClassifier('/Users/matt/Python/LazerEyes/haarcascade_eye.xml')

    img = cv2.imread('new.png')
    dot = cv2.imread('dot_transparent.png', cv2.IMREAD_UNCHANGED)

    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray_to_place = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    img_h, img_w = gray.shape
    img_to_place_h, img_to_place_w = gray_to_place.shape

    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = img[y:y+h, x:x+w]
        eyes = eye_cascade.detectMultiScale(roi_gray)
        for (ex, ey, ew, eh) in eyes:
            dot = cv2.resize(dot, (eh, ew))

            # Prepare pixel-wise alpha blending
            dot_alpha = dot[..., :3] / 255.0
            dot_alpha = numpy.repeat(dot_alpha[..., numpy.newaxis], 3, axis=2)
            dot = dot[..., :3]

            resized_img = cv2.resize(dot, (eh, ew), interpolation=cv2.INTER_AREA)
            resized_img_h, resized_img_w, _ = resized_img.shape

            #pointsOnFace = []
            #integersToAppend = eh
            #pointsOnFace.append(integersToAppend)
            #print(pointsOnFace)

            roi_color[ey:ey+resized_img_h, ex:ex+resized_img_w, :] = resized_img

    cv2.imwrite('out.png', img)
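For reference, the transparency part on its own is just a per-pixel alpha blend of the dot over a region of the photo. Here is a minimal standalone sketch of that step (the paste position x0, y0 and the output file name are hard-coded purely for illustration):

import cv2
import numpy as np

img = cv2.imread('new.png')
dot = cv2.imread('dot_transparent.png', cv2.IMREAD_UNCHANGED)  # IMREAD_UNCHANGED keeps the alpha channel

# Paste the dot at a fixed position just to demonstrate the blend
x0, y0 = 100, 100
dh, dw = dot.shape[:2]
alpha = dot[..., 3:4] / 255.0                     # shape (dh, dw, 1), broadcasts over the BGR channels
roi = img[y0:y0+dh, x0:x0+dw, :]
img[y0:y0+dh, x0:x0+dw, :] = roi * (1 - alpha) + dot[..., :3] * alpha
cv2.imwrite('transparency_only.png', img)

What I can't work out is how to drive the position and size of that blend from the eye detections above.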
Merging my earlier answer into the given code (and minimizing the resulting code), the solution might look like this:
import cv2
import numpy as np

face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')

img = cv2.imread('new.jpg')
dot = cv2.imread('dot_transparent.png', cv2.IMREAD_UNCHANGED)

gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for (x, y, w, h) in faces:
    eyes = eye_cascade.detectMultiScale(gray[y:y+h, x:x+w])
    for (ex, ey, ew, eh) in eyes:

        # Filter out small detections, if you only want the actual eyes
        if ew < 100 or eh < 100:
            continue

        d = cv2.resize(dot.copy(), (ew, eh))   # cv2.resize expects (width, height)
        d_alpha = d[..., 3] / 255.0
        d_alpha = np.repeat(d_alpha[..., np.newaxis], 3, axis=2)
        d = d[..., :3]

        img[y+ey:y+ey+eh, x+ex:x+ex+ew, :] = \
            img[y+ey:y+ey+eh, x+ex:x+ex+ew, :] * (1 - d_alpha) + d * d_alpha

cv2.imwrite('out.png', img)
That'd be the output (I filtered out the small detections, so that only the actual eyes get covered):
The exact locations might still need some fine-tuning, but I think that's an issue coming from the Haar cascade classifier itself.
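If the placement still looks off, one option is to shrink the dot relative to the detected eye box and center it inside that box instead of anchoring it at the box's top-left corner. A rough sketch of that adjustment for the inner loop (the 0.5 scale factor is just an assumption to tune):

for (ex, ey, ew, eh) in eyes:
    if ew < 100 or eh < 100:
        continue
    # Overlay at half the size of the detected eye box, centered on it
    dw, dh = int(ew * 0.5), int(eh * 0.5)
    ox = x + ex + (ew - dw) // 2                  # top-left corner that centers the dot horizontally
    oy = y + ey + (eh - dh) // 2                  # ... and vertically
    d = cv2.resize(dot.copy(), (dw, dh))          # cv2.resize expects (width, height)
    d_alpha = np.repeat((d[..., 3] / 255.0)[..., np.newaxis], 3, axis=2)
    roi = img[oy:oy+dh, ox:ox+dw, :]
    img[oy:oy+dh, ox:ox+dw, :] = roi * (1 - d_alpha) + d[..., :3] * d_alpha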
----------------------------------------
System information
----------------------------------------
Platform: Windows-10-10.0.16299-SP0
Python: 3.9.1
NumPy: 1.20.1
OpenCV: 4.5.1
----------------------------------------