ValueError: Error when checking input: expected input_1 to have shape (224, 224, 3) but got array with shape (3, 224, 224)
I am using a pre-trained model on a video dataset. Below is my code.
from keras.applications.vgg19 import decode_predictions
from keras.applications.vgg19 import VGG19, preprocess_input
import threading,cv2
import numpy as np

label = ''
frame = None

class MyThread(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)

    def run(self):
        global label
        self.model = VGG19(weights="imagenet")
        while (~(frame is None)):
            (inID, label) = self.predict(frame)

    def predict(self, frame):
        image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB).astype(np.float32)
        image = image.transpose((2, 0, 1))
        image = image.reshape((1,) + image.shape)
        image = preprocess_input(image)
        preds = self.model.predict(image)
        return decode_predictions(preds)[0]

videoFile = "D:/lostpanda.mp4"
cap = cv2.VideoCapture(videoFile)

while(cap.isOpened()):
    keras_thread = MyThread()
    keras_thread.start()
    while (True):
        ret, original = cap.read()
        frame = cv2.resize(original, (224, 224))
        cv2.putText(original, "Label: {}".format(label), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
        cv2.imshow("Classification", original)

cap.release()
frame = None
I am getting the following error:
ValueError: Error when checking input: expected input_1 to have a shape (224, 224, 3) but got array
with shape (3, 224, 224)
Traceback (most recent call last):
File "C:/Usersvideo-classification.py", line 35, in <module>
frame = cv2.resize(original, (224, 224))
cv2.error: OpenCV(4.2.0) C:\projects\opencv-python\opencv\modules\imgproc\src\resize.cpp:4045: error:
(-215:Assertion failed) !ssize.empty() in function 'cv::resize'
Thank you, any help is greatly appreciated.
Honestly, to be able to show a label on the video you need to separate two problems:
- preparing the input shape and type to match what the model expects,
- using threading to improve performance.
In the code below the prediction works, so you can use it as a starting point. After that you will need to introduce threading to improve performance and speed up the predictions (a rough sketch of one possible threaded version follows the working code).
from keras.applications.vgg19 import VGG19, preprocess_input
from keras.applications.vgg19 import decode_predictions
from keras.preprocessing.image import img_to_array
import threading
import cv2
import numpy as np

def predict_custom(image):
    # convert the image pixels to a numpy array
    image = img_to_array(image)
    # reshape data for the model (add the batch dimension)
    image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
    # prepare the image for the VGG model
    image = preprocess_input(image)
    # predict the probability across all output classes
    preds = model.predict(image)
    return decode_predictions(preds)[0]

# define the model
model = VGG19(weights="imagenet")

# load the video file
videoFile = "file_example_MP4_480_1_5MG.mp4"
cap = cv2.VideoCapture(videoFile)
(W, H) = (None, None)
writer = None

# loop over frames from the video file stream
while True:
    # read the next frame from the file
    (grabbed, frame) = cap.read()

    # the stream ends when the frame was not grabbed
    if not grabbed:
        break

    # if the frame dimensions are empty, grab them
    if W is None or H is None:
        (H, W) = frame.shape[:2]

    # clone the frame to use it for output later
    output = frame.copy()

    # convert the frame from BGR to RGB
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

    # resize the frame to a fixed 224x224
    frame = cv2.resize(frame, (224, 224)).astype("float32")

    # perform the prediction
    label = predict_custom(frame)

    # show the output image
    cv2.putText(output, "Label: {}".format(label), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
    cv2.imshow("Classification", output)
    key = cv2.waitKey(1) & 0xFF

cap.release()
frame = None
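For the second point, here is a minimal sketch of one possible way to move the prediction onto a background thread so the display loop is not blocked by model.predict. The names frame_queue and prediction_worker, the single-slot queue, and the q-to-quit key handling are my own assumptions for illustration, not part of the working code above.

from queue import Queue
import threading

import cv2
import numpy as np
from keras.applications.vgg19 import VGG19, preprocess_input, decode_predictions

# hypothetical helper names for this sketch
frame_queue = Queue(maxsize=1)   # hold only the most recent frame
label = ""

def prediction_worker():
    # run VGG19 predictions in the background so the display loop stays responsive
    global label
    model = VGG19(weights="imagenet")      # load the model inside the worker thread
    while True:
        frame = frame_queue.get()          # blocks until a frame is available
        if frame is None:                  # sentinel value: stop the worker
            break
        image = np.expand_dims(frame, axis=0)   # add the batch dimension
        image = preprocess_input(image)
        preds = model.predict(image)
        label = decode_predictions(preds)[0][0][1]   # keep the top-1 class name

worker = threading.Thread(target=prediction_worker, daemon=True)
worker.start()

cap = cv2.VideoCapture("file_example_MP4_480_1_5MG.mp4")
while True:
    grabbed, frame = cap.read()
    if not grabbed:
        break
    output = frame.copy()

    # hand the latest resized RGB frame to the worker only when it is idle
    if frame_queue.empty():
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        frame_queue.put(cv2.resize(rgb, (224, 224)).astype("float32"))

    # always draw the last label the worker produced
    cv2.putText(output, "Label: {}".format(label), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
    cv2.imshow("Classification", output)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

frame_queue.put(None)   # stop the worker
cap.release()
cv2.destroyAllWindows()

The key idea is that the main loop never waits for the model; it only hands off the most recent frame when the worker is idle and keeps drawing the last label it received.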