cannot identify image file <_io.BytesIO object at 0x000001E6CF13A108>

So my validation_dataset is apparently hitting the error "cannot identify image file <_io.BytesIO object at 0x7fd2278943b0>".

I'm new to this and a bit lost. Is there anything obvious that I'm doing wrong?

Any help is appreciated, thanks everyone. model.fit() gives the same error.

    from tensorflow.keras.preprocessing import image

    train_datagen = image.ImageDataGenerator(rescale=1./255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True)

    test_datagen = image.ImageDataGenerator(rescale=1./255)

    training_set = train_datagen.flow_from_directory('dataset/train', target_size=(224, 224), batch_size=32, class_mode='binary')

    test_set = test_datagen.flow_from_directory('dataset/test', target_size=(224, 224), batch_size=32, class_mode='binary')

    hist = model.fit_generator(
        training_set,
        validation_data=test_set,
        epochs=5,
        steps_per_epoch=10,
        validation_steps=8
    )

  ---------------------------------------------------------------------------
  UnidentifiedImageError                    Traceback (most recent call last)
  ~\AppData\Local\Temp\ipykernel_1107649645995.py in <module>
        4     epochs=5,
        5     steps_per_epoch=10,
  ----> 6     validation_steps=8
        7 )

 ~\anaconda3\envs\tf\lib\site-packages\tensorflow\python\util\deprecation.py in new_func(*args, **kwargs)
  322               'in a future version' if date is None else ('after %s' % date),
  323               instructions)
  --> 324       return func(*args, **kwargs)
  325     return tf_decorator.make_decorator(
  326         func, new_func, 'deprecated',

  ~\anaconda3\envs\tf\lib\site-packages\tensorflow\python\keras\engine\training.py in 
  fit_generator(self, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, validation_freq, class_weight, max_queue_size, workers, use_multiprocessing, shuffle, initial_epoch)
   1827         use_multiprocessing=use_multiprocessing,
   1828         shuffle=shuffle,
  -> 1829         initial_epoch=initial_epoch)
  1830 
   1831   @deprecation.deprecated(

   ~\anaconda3\envs\tf\lib\site-packages\tensorflow\python\keras\engine\training.py in _method_wrapper(self, *args, **kwargs)
   106   def _method_wrapper(self, *args, **kwargs):
   107     if not self._in_multi_worker_mode():  # pylint: disable=protected-access
  --> 108       return method(self, *args, **kwargs)
   109 
   110     # Running inside `run_distribute_coordinator` already.

 ~\anaconda3\envs\tf\lib\site-packages\tensorflow\python\keras\engine\training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)
  1061           use_multiprocessing=use_multiprocessing,
  1062           model=self,
 -> 1063           steps_per_execution=self._steps_per_execution)
  1064 
  1065       # Container that configures and calls `tf.keras.Callback`s.

  ~\anaconda3\envs\tf\lib\site-packages\tensorflow\python\keras\engine\data_adapter.py in __init__(self, x, y, sample_weight, batch_size, steps_per_epoch, initial_epoch, epochs, shuffle, class_weight, max_queue_size, workers, use_multiprocessing, model, steps_per_execution)
  1115         use_multiprocessing=use_multiprocessing,
  1116         distribution_strategy=ds_context.get_strategy(),
 -> 1117         model=model)
   1118 
  1119     strategy = ds_context.get_strategy()

  ~\anaconda3\envs\tf\lib\site-packages\tensorflow\python\keras\engine\data_adapter.py in __init__(self, x, y, sample_weights, shuffle, workers, use_multiprocessing, max_queue_size, model, **kwargs)
    914         max_queue_size=max_queue_size,
    915         model=model,
   --> 916         **kwargs)
   917 
    918   @staticmethod

   ~\anaconda3\envs\tf\lib\site-packages\tensorflow\python\keras\engine\data_adapter.py in __init__(self, x, y, sample_weights, workers, use_multiprocessing, max_queue_size, model, **kwargs)
   784     # Since we have to know the dtype of the python generator when we build the
   785     # dataset, we have to look at a batch to infer the structure.
   --> 786     peek, x = self._peek_and_restore(x)
   787     peek = self._standardize_batch(peek)
   788     peek = _process_tensorlike(peek)

 ~\anaconda3\envs\tf\lib\site-packages\tensorflow\python\keras\engine\data_adapter.py in _peek_and_restore(x)
   918   @staticmethod
   919   def _peek_and_restore(x):
   --> 920     return x[0], x
  921 
  922   def _handle_multiprocessing(self, x, workers, use_multiprocessing,

  ~\anaconda3\envs\tf\lib\site-packages\keras_preprocessing\image\iterator.py in __getitem__(self, idx)
   63         index_array = self.index_array[self.batch_size * idx:
   64                                        self.batch_size * (idx + 1)]
   ---> 65         return self._get_batches_of_transformed_samples(index_array)
   66 
   67     def __len__(self):

   ~\anaconda3\envs\tf\lib\site-packages\keras_preprocessing\image\iterator.py in _get_batches_of_transformed_samples(self, index_array)
    228                            color_mode=self.color_mode,
    229                            target_size=self.target_size,
   --> 230                            interpolation=self.interpolation)
    231             x = img_to_array(img, data_format=self.data_format)
    232             # Pillow images should be closed after `load_img`,

    ~\anaconda3\envs\tf\lib\site-packages\keras_preprocessing\image\utils.py in load_img(path, grayscale, color_mode, target_size, interpolation)
    112                           'The use of `load_img` requires PIL.')
    113     with open(path, 'rb') as f:
    --> 114         img = pil_image.open(io.BytesIO(f.read()))
    115         if color_mode == 'grayscale':
    116             # if image is not already an 8-bit, 16-bit or 32-bit grayscale image

   ~\anaconda3\envs\tf\lib\site-packages\PIL\Image.py in open(fp, mode, formats)
     3007         warnings.warn(message)
    3008     raise UnidentifiedImageError(
   -> 3009         "cannot identify image file %r" % (filename if filename else fp)
    3010     )
    3011 

  UnidentifiedImageError: cannot identify image file <_io.BytesIO object at 0x000001E6CF13A108>
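
The last frames of the traceback show that keras_preprocessing's load_img simply reads each file's raw bytes and hands them to PIL.Image.open, so the error means at least one file under dataset/train or dataset/test is not a decodable image (corrupt, truncated, or not an image at all). You can reproduce the failure on a single suspect file with the same two calls; the path below is only a placeholder:

    import io
    from PIL import Image

    # Placeholder path -- substitute any file from your dataset directories.
    suspect = "dataset/train/class_a/example.jpeg"

    with open(suspect, "rb") as f:
        # Mirrors what load_img does internally; raises UnidentifiedImageError
        # if the bytes cannot be decoded as an image.
        Image.open(io.BytesIO(f.read()))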

Use the following code to find the problematic images after preprocessing, and remove those images from your dataset:

    from pathlib import Path

    from PIL import Image, UnidentifiedImageError

    # Walk every .jpeg under the dataset directory and try to open it with Pillow.
    path = Path("INSERT PATH HERE").rglob("*.jpeg")
    for img_p in path:
        try:
            img = Image.open(img_p)
        except UnidentifiedImageError:
            # Pillow cannot decode this file -- it is the one breaking the generator.
            print(img_p)

The code above will list the bad images. Let us know if the problem persists. Thanks!
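
If you also want to pull those files out of the way automatically (otherwise the generator will hit them again on the next epoch), a minimal sketch along the same lines could move them into a quarantine folder. The directory names below are assumptions; adjust them to your own layout, and note that it checks .jpg and .png as well as .jpeg:

    import shutil
    from pathlib import Path

    from PIL import Image, UnidentifiedImageError

    # Hypothetical locations -- adjust to your own dataset layout.
    dataset_dir = Path("dataset/train")
    quarantine_dir = Path("dataset/_corrupt")
    quarantine_dir.mkdir(parents=True, exist_ok=True)

    for img_path in dataset_dir.rglob("*"):
        if img_path.suffix.lower() not in {".jpeg", ".jpg", ".png"}:
            continue
        try:
            with Image.open(img_path) as img:
                img.verify()  # lightweight integrity check, no full decode
        except (UnidentifiedImageError, OSError):
            # Move unreadable files aside instead of deleting them outright.
            print(f"moving unreadable file: {img_path}")
            shutil.move(str(img_path), str(quarantine_dir / img_path.name))

Moving the files rather than deleting them lets you inspect or re-download them later; once the quarantine folder is outside the directories you pass to flow_from_directory, the generator will no longer see them.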