
TensorFlow Model is throwing an Invalid Argument Error at the 1st Epoch

I have a simple 2-layer TensorFlow model that I'm trying to train on a dataset of equally sized stereo audio files, to tell me whether the sound comes more from the left or from the right. That means the input is a 3072 x 2 array and the output is an array of 1s and 0s representing left and right.

The problem is that when I run the program, it fails at model.fit() with an Invalid Argument Error.

Code:

# -*- coding: utf-8 -*-
"""
Created on Tue Jan 18 15:51:56 2022

@author: andre
"""

import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras import layers
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint
from sklearn.model_selection import train_test_split
from datetime import datetime
from sklearn import metrics
from scipy.io import wavfile
import os
import glob


# Load in Right Side .WAV Data.
X1 = []
count1 = 0
database_path = r"C:\Users\andre\OneDrive\Documents\ESI2022\MLDatabases\Right"
for filename in glob.glob(os.path.join(database_path, '*.wav')):
    X1.append(wavfile.read(filename)[1])
    count1 = count1 + 1

# Load in Left side .WAV Data.
X2 = [] 
count2 = 0
database_path2 = r"C:\Users\andre\OneDrive\Documents\ESI2022\MLDatabases\Left"
for filename2 in glob.glob(os.path.join(database_path2, '*.wav')):
    X2.append(wavfile.read(filename2)[1])
    count2 = count2 + 1    

# Get the smallest audio file length across both sets (this will be the sample size input to the model).
sample_size = len(X1[0])
for data in X1 + X2:
    if len(data) < sample_size:
        sample_size = len(data)

# Split the audio data into equal-size chunks.
X1e = []
for i in X1:
    num_chunks = len(i) // sample_size
    for j in range(num_chunks):
        X1e.append(i[j * sample_size:(j + 1) * sample_size])
X1 = X1e

X2e = []
for i in X2:
    num_chunks = len(i) // sample_size
    for j in range(num_chunks):
        X2e.append(i[j * sample_size:(j + 1) * sample_size])
X2 = X2e

del X1e
del X2e   

# Create output labels of the same length as the input data.
Y1 = np.ones(len(X1), dtype='float32').tolist()
Y2 = np.zeros(len(X2), dtype='float32').tolist()


# Concatenate Left and Right .WAV data and output data as numpy arrays.
X1.extend(X2)
X = np.asarray(X1)
Y = np.asarray(Y1+Y2).astype(np.int16)

#X=list(X)
#Y=list(Y)

# Split data into test training data.
X_train,X_test,Y_train,Y_test=train_test_split(X,Y,test_size=0.2,random_state=0,shuffle=True)

'''  
print(X[1])    
time = np.linspace(0.,33792, 33792)
plt.plot(time, X[1][:,1], label="Left channel")
plt.plot(time, X[1][:,0], label="Right channel")
plt.legend()
plt.xlabel("Time [s]")
plt.ylabel("Amplitude")
plt.show()
'''

# Create the Model
model = Sequential()

# Add a LSTM layer with 1 output, and ambiguous input data length.
model.add(layers.LSTM(1,batch_input_shape=(1,sample_size,2),return_sequences=True))
model.add(layers.LSTM(1,return_sequences=False))

# Compile Model
#history = model.compile(loss='mean_absolute_error', metrics=['accuracy'],optimizer='adam',output='sparse_categorical_crossentropy')
optimizer = Adam(learning_rate=2e-4)

'''
history = model.compile(optimizer=optimizer, loss={
                  'output': 'sparse_categorical_crossentropy', },
              metrics={
                  'output': 'sparse_categorical_accuracy', },
              sample_weight_mode='temporal')
'''

# Note: compile() returns None; the training history comes from model.fit().
model.compile(
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    optimizer="adam",
    metrics=["accuracy"],
)
model.summary()

# Define Training Parameters
num_epochs = 200
num_batch_size = 1

# Save the most accurate model to file. (Verbosity Gives more information)
checkpointer = ModelCheckpoint(filepath="SavedModels/checkpointModel.hdf5", verbose=1,save_best_only=True)

# Start the timer
start = datetime.now()

# Train the model
model.fit(X_train,Y_train,batch_size=num_batch_size, epochs=num_epochs, validation_data=(X_test,Y_test), callbacks=[checkpointer],verbose=1)

# Get and Print Model Validation Accuracy
test_accuracy=model.evaluate(X_test,Y_test,verbose=0)
print(test_accuracy[1])

Output and error:

Model: "sequential_1"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 lstm_2 (LSTM)               (1, 3072, 1)              16        
                                                                 
 lstm_3 (LSTM)               (1, 1)                    12        
                                                                 
=================================================================
Total params: 28
Trainable params: 28
Non-trainable params: 0
_________________________________________________________________
Epoch 1/200

2022-02-07 09:40:36.348127: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'cudart64_110.dll'; dlerror: cudart64_110.dll not found
2022-02-07 09:40:36.348459: I tensorflow/stream_executor/cuda/cudart_stub.cc:29] Ignore above cudart dlerror if you do not have a GPU set up on your machine.
2022-02-07 09:40:43.978976: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'nvcuda.dll'; dlerror: nvcuda.dll not found
2022-02-07 09:40:43.979029: W tensorflow/stream_executor/cuda/cuda_driver.cc:269] failed call to cuInit: UNKNOWN ERROR (303)
2022-02-07 09:40:43.985710: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:169] retrieving CUDA diagnostic information for host: DESKTOP-0FFTIDB
2022-02-07 09:40:43.986092: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:176] hostname: DESKTOP-0FFTIDB
2022-02-07 09:40:43.990164: I tensorflow/core/platform/cpu_feature_guard.cc:151] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations:  AVX AVX2
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
2022-02-07 09:40:48.470415: W tensorflow/core/framework/op_kernel.cc:1745] OP_REQUIRES failed at sparse_xent_op.cc:103 : INVALID_ARGUMENT: Received a label value of 1 which is outside the valid range of [0, 1).  Label values: 1
2022-02-07 09:58:29.070767: W tensorflow/core/framework/op_kernel.cc:1745] OP_REQUIRES failed at sparse_xent_op.cc:103 : INVALID_ARGUMENT: Received a label value of 1 which is outside the valid range of [0, 1).  Label values: 1
Traceback (most recent call last):

  File "C:\Users\andre\OneDrive\Documents\ESI2022\PythonScripts\BeltML\testML.py", line 127, in <module>
    model.fit(X_train,Y_train,batch_size=num_batch_size, epochs=num_epochs, validation_data=(X_test,Y_test), callbacks=[checkpointer],verbose=1)

  File "C:\ProgramData\Anaconda3\lib\site-packages\keras\utils\traceback_utils.py", line 67, in error_handler
    raise e.with_traceback(filtered_tb) from None

  File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\eager\execute.py", line 58, in quick_execute
    tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,

InvalidArgumentError:  Received a label value of 1 which is outside the valid range of [0, 1).  Label values: 1
     [[node sparse_categorical_crossentropy/SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits
 (defined at C:\ProgramData\Anaconda3\lib\site-packages\keras\backend.py:5113)
]] [Op:__inference_train_function_9025]

Errors may have originated from an input operation.
Input Source operations connected to node sparse_categorical_crossentropy/SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits:
In[0] sparse_categorical_crossentropy/Reshape_1 (defined at C:\ProgramData\Anaconda3\lib\site-packages\keras\backend.py:5109)   
In[1] sparse_categorical_crossentropy/Reshape (defined at C:\ProgramData\Anaconda3\lib\site-packages\keras\backend.py:3561)

Operation defined at: (most recent call last)
  File "C:\ProgramData\Anaconda3\lib\runpy.py", line 194, in _run_module_as_main
    return _run_code(code, main_globals, None,

  File "C:\ProgramData\Anaconda3\lib\runpy.py", line 87, in _run_code
    exec(code, run_globals)

  File "C:\ProgramData\Anaconda3\lib\site-packages\spyder_kernels\console\__main__.py", line 23, in <module>
    start.main()

  File "C:\ProgramData\Anaconda3\lib\site-packages\spyder_kernels\console\start.py", line 328, in main
    kernel.start()

  File "C:\ProgramData\Anaconda3\lib\site-packages\ipykernel\kernelapp.py", line 677, in start
    self.io_loop.start()

  File "C:\ProgramData\Anaconda3\lib\site-packages\tornado\platform\asyncio.py", line 199, in start
    self.asyncio_loop.run_forever()

  File "C:\ProgramData\Anaconda3\lib\asyncio\base_events.py", line 570, in run_forever
    self._run_once()

  File "C:\ProgramData\Anaconda3\lib\asyncio\base_events.py", line 1859, in _run_once
    handle._run()

  File "C:\ProgramData\Anaconda3\lib\asyncio\events.py", line 81, in _run
    self._context.run(self._callback, *self._args)

  File "C:\ProgramData\Anaconda3\lib\site-packages\ipykernel\kernelbase.py", line 457, in dispatch_queue
    await self.process_one()

  File "C:\ProgramData\Anaconda3\lib\site-packages\ipykernel\kernelbase.py", line 446, in process_one
    await dispatch(*args)

  File "C:\ProgramData\Anaconda3\lib\site-packages\ipykernel\kernelbase.py", line 353, in dispatch_shell
    await result

  File "C:\ProgramData\Anaconda3\lib\site-packages\ipykernel\kernelbase.py", line 648, in execute_request
    reply_content = await reply_content

  File "C:\ProgramData\Anaconda3\lib\site-packages\ipykernel\ipkernel.py", line 353, in do_execute
    res = shell.run_cell(code, store_history=store_history, silent=silent)

  File "C:\ProgramData\Anaconda3\lib\site-packages\ipykernel\zmqshell.py", line 533, in run_cell
    return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)

  File "C:\ProgramData\Anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 2901, in run_cell
    result = self._run_cell(

  File "C:\ProgramData\Anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 2947, in _run_cell
    return runner(coro)

  File "C:\ProgramData\Anaconda3\lib\site-packages\IPython\core\async_helpers.py", line 68, in _pseudo_sync_runner
    coro.send(None)

  File "C:\ProgramData\Anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 3172, in run_cell_async
    has_raised = await self.run_ast_nodes(code_ast.body, cell_name,

  File "C:\ProgramData\Anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 3364, in run_ast_nodes
    if (await self.run_code(code, result,  async_=asy)):

  File "C:\ProgramData\Anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 3444, in run_code
    exec(code_obj, self.user_global_ns, self.user_ns)

  File "C:\Users\andre\AppData\Local\Temp/ipykernel_3604/1229251547.py", line 1, in <module>
    runfile('C:/Users/andre/OneDrive/Documents/ESI2022/PythonScripts/BeltML/testML.py', wdir='C:/Users/andre/OneDrive/Documents/ESI2022/PythonScripts/BeltML')

  File "C:\ProgramData\Anaconda3\lib\site-packages\spyder_kernels\customize\spydercustomize.py", line 577, in runfile
    exec_code(file_code, filename, ns_globals, ns_locals,

  File "C:\ProgramData\Anaconda3\lib\site-packages\spyder_kernels\customize\spydercustomize.py", line 465, in exec_code
    exec(compiled, ns_globals, ns_locals)

  File "C:\Users\andre\OneDrive\Documents\ESI2022\PythonScripts\BeltML\testML.py", line 127, in <module>
    model.fit(X_train,Y_train,batch_size=num_batch_size, epochs=num_epochs, validation_data=(X_test,Y_test), callbacks=[checkpointer],verbose=1)

  File "C:\ProgramData\Anaconda3\lib\site-packages\keras\utils\traceback_utils.py", line 64, in error_handler
    return fn(*args, **kwargs)

  File "C:\ProgramData\Anaconda3\lib\site-packages\keras\engine\training.py", line 1216, in fit
    tmp_logs = self.train_function(iterator)

  File "C:\ProgramData\Anaconda3\lib\site-packages\keras\engine\training.py", line 878, in train_function
    return step_function(self, iterator)

  File "C:\ProgramData\Anaconda3\lib\site-packages\keras\engine\training.py", line 867, in step_function
    outputs = model.distribute_strategy.run(run_step, args=(data,))

  File "C:\ProgramData\Anaconda3\lib\site-packages\keras\engine\training.py", line 860, in run_step
    outputs = model.train_step(data)

  File "C:\ProgramData\Anaconda3\lib\site-packages\keras\engine\training.py", line 809, in train_step
    loss = self.compiled_loss(

  File "C:\ProgramData\Anaconda3\lib\site-packages\keras\engine\compile_utils.py", line 201, in __call__
    loss_value = loss_obj(y_t, y_p, sample_weight=sw)

  File "C:\ProgramData\Anaconda3\lib\site-packages\keras\losses.py", line 141, in __call__
    losses = call_fn(y_true, y_pred)

  File "C:\ProgramData\Anaconda3\lib\site-packages\keras\losses.py", line 245, in call
    return ag_fn(y_true, y_pred, **self._fn_kwargs)

  File "C:\ProgramData\Anaconda3\lib\site-packages\keras\losses.py", line 1737, in sparse_categorical_crossentropy
    return backend.sparse_categorical_crossentropy(

  File "C:\ProgramData\Anaconda3\lib\site-packages\keras\backend.py", line 5113, in sparse_categorical_crossentropy
    res = tf.nn.sparse_softmax_cross_entropy_with_logits(

According to the documentation, the labels argument must be a vector of length batch_size whose values lie in [0, num_classes). From your log:

received label value of 1 which is outside the valid range of [0, 1)

The framework presumably thinks you have only one class, because your network also has only 1 output.
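
A quick way to confirm the mismatch, reusing the names from the script above (just a diagnostic sketch):

print(model.output_shape)   # (1, 1): the last LSTM emits a single value per sample
print(np.unique(Y_train))   # [0 1]: but the labels also contain the value 1, which is out of range [0, 1)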

To apply the SparseSoftmaxCrossEntropyWithLogits loss you probably need 2 outputs, and your labels must be 0 or 1.
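
A minimal sketch of that fix, keeping the rest of the question's pipeline unchanged (the added Dense layer is my suggestion, not part of the original script):

# Replace the model definition / compile step with something like this:
model = Sequential()
model.add(layers.LSTM(1, batch_input_shape=(1, sample_size, 2), return_sequences=True))
model.add(layers.LSTM(1, return_sequences=False))
model.add(layers.Dense(2))  # one logit per class, so labels 0 and 1 are both in range
model.compile(
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    optimizer="adam",
    metrics=["accuracy"],
)

Alternatively, since this is a binary left/right task, you could keep a single output, give it a sigmoid activation (layers.Dense(1, activation="sigmoid")), and compile with loss="binary_crossentropy"; 0/1 labels then work directly.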