TypeError: ('Keyword argument not understood:', 'training')
TypeError: ('Keyword argument not understood:', 'training')
IMAGE_RES = 224
def format_image(image, label):
image = tf.image.resize(image, (IMAGE_RES, IMAGE_RES))/255.0
return image, label
BATCH_SIZE = 32
train_batches = train_dataset.map(format_image).batch(BATCH_SIZE).prefetch(1)
train_gray_batches = train_grey_dataset.map(format_image).batch(BATCH_SIZE).prefetch(1)
test_batches = test_dataset.map(format_image).batch(BATCH_SIZE).prefetch(1)
test_grey_batches = test_grey_dataset.map(format_image).batch(BATCH_SIZE).prefetch(1)
----------
threshold = 100.0
dropoutrate = 0.5
n_outchannels = 3
height, width = IMAGE_RES, IMAGE_RES
def max_norm_regularizer(threshold, axes=None, name="max_norm",
collection="max_norm"):
def max_norm(weights):
clipped = tf.clip_by_norm(weights, clip_norm=threshold, axes=axes)
clip_weights = tf.assign(weights, clipped, name=name)
tf.add_to_collection(collection, clip_weights)
return None # there is no regularization loss term
return max_norm
max_norm_reg = max_norm_regularizer(threshold=threshold)
clip_all_weights = tf.compat.v1.get_collection("max_norm")
----------
def leaky_relu(z,name=None):
return tf.maximum(0.5*z,z,name=name)
from functools import partial
he_init = tf.keras.initializers.VarianceScaling()
----------
X = tf.compat.v1.placeholder(shape=(None,width,height,2),dtype=tf.float32)
print(X)
training = tf.compat.v1.placeholder_with_default(False,shape=(),name='training')
X_drop = tf.keras.layers.Dropout(X,dropoutrate)
my_batch_norm_layer = partial(tf.keras.layers.BatchNormalization,training=training,momentum=0.9)
bn0 = my_batch_norm_layer(X_drop)
bn0_act = leaky_relu(bn0)
print(bn0_act)
This error creates in my program. what is the problem I don't
understand to solve this I search many times but not solve this
problem?
Tensor("Placeholder_26:0", shape=(?, 224, 224, 2), dtype=float32)
TypeError Traceback (most recent call last)
<ipython-input-64-adf525e2e2de> in <module>()
5 X_drop = tf.keras.layers.Dropout(X,dropoutrate)
6 my_batch_norm_layer = partial(tf.keras.layers.BatchNormalization,training=training,momentum=0.9)
----> 7 bn0 = my_batch_norm_layer(X_drop)
8 bn0_act = leaky_relu(bn0)
9 print(bn0_act)
4 frames
/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/utils/generic_utils.py
in validate_kwargs(kwargs, allowed_kwargs, error_message)
1135 for kwarg in kwargs:
1136 if kwarg not in allowed_kwargs:
-> 1137 raise TypeError(error_message, kwarg)
1138
1139
TypeError: ('Keyword argument not understood:', 'training')
您需要将参数放在方括号内,因为 training
关键字当前正应用于 partial()
。您还想使用 trainable
而不是 training
(我假设您想冻结 batchnorm 层)。
X = tf.compat.v1.placeholder(shape=(None,width,height,2),dtype=tf.float32)
print(X)
training = tf.compat.v1.placeholder_with_default(False,shape=(),name='training')
X_drop = tf.keras.layers.Dropout(dropoutrate)(X)
my_batch_norm_layer = partial(tf.keras.layers.BatchNormalization(trainable=training,momentum=0.9))
bn0 = my_batch_norm_layer(X_drop)
bn0_act = leaky_relu()(bn0)
print(bn0_act)
IMAGE_RES = 224
def format_image(image, label):
image = tf.image.resize(image, (IMAGE_RES, IMAGE_RES))/255.0
return image, label
BATCH_SIZE = 32
train_batches = train_dataset.map(format_image).batch(BATCH_SIZE).prefetch(1)
train_gray_batches = train_grey_dataset.map(format_image).batch(BATCH_SIZE).prefetch(1)
test_batches = test_dataset.map(format_image).batch(BATCH_SIZE).prefetch(1)
test_grey_batches = test_grey_dataset.map(format_image).batch(BATCH_SIZE).prefetch(1)
----------
threshold = 100.0
dropoutrate = 0.5
n_outchannels = 3
height, width = IMAGE_RES, IMAGE_RES
def max_norm_regularizer(threshold, axes=None, name="max_norm",
collection="max_norm"):
def max_norm(weights):
clipped = tf.clip_by_norm(weights, clip_norm=threshold, axes=axes)
clip_weights = tf.assign(weights, clipped, name=name)
tf.add_to_collection(collection, clip_weights)
return None # there is no regularization loss term
return max_norm
max_norm_reg = max_norm_regularizer(threshold=threshold)
clip_all_weights = tf.compat.v1.get_collection("max_norm")
----------
def leaky_relu(z,name=None):
return tf.maximum(0.5*z,z,name=name)
from functools import partial
he_init = tf.keras.initializers.VarianceScaling()
----------
X = tf.compat.v1.placeholder(shape=(None,width,height,2),dtype=tf.float32)
print(X)
training = tf.compat.v1.placeholder_with_default(False,shape=(),name='training')
X_drop = tf.keras.layers.Dropout(X,dropoutrate)
my_batch_norm_layer = partial(tf.keras.layers.BatchNormalization,training=training,momentum=0.9)
bn0 = my_batch_norm_layer(X_drop)
bn0_act = leaky_relu(bn0)
print(bn0_act)
This error creates in my program. what is the problem I don't understand to solve this I search many times but not solve this problem?
Tensor("Placeholder_26:0", shape=(?, 224, 224, 2), dtype=float32)
TypeError Traceback (most recent call last) <ipython-input-64-adf525e2e2de> in <module>() 5 X_drop = tf.keras.layers.Dropout(X,dropoutrate) 6 my_batch_norm_layer = partial(tf.keras.layers.BatchNormalization,training=training,momentum=0.9) ----> 7 bn0 = my_batch_norm_layer(X_drop) 8 bn0_act = leaky_relu(bn0) 9 print(bn0_act) 4 frames /usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/utils/generic_utils.py
in validate_kwargs(kwargs, allowed_kwargs, error_message) 1135 for kwarg in kwargs: 1136 if kwarg not in allowed_kwargs: -> 1137 raise TypeError(error_message, kwarg) 1138 1139
TypeError: ('Keyword argument not understood:', 'training')
您需要将参数放在方括号内,因为 training
关键字当前正应用于 partial()
。您还想使用 trainable
而不是 training
(我假设您想冻结 batchnorm 层)。
X = tf.compat.v1.placeholder(shape=(None,width,height,2),dtype=tf.float32)
print(X)
training = tf.compat.v1.placeholder_with_default(False,shape=(),name='training')
X_drop = tf.keras.layers.Dropout(dropoutrate)(X)
my_batch_norm_layer = partial(tf.keras.layers.BatchNormalization(trainable=training,momentum=0.9))
bn0 = my_batch_norm_layer(X_drop)
bn0_act = leaky_relu()(bn0)
print(bn0_act)