Keras LSTM Dtype in MatMul not the same
So I'm writing an LSTM model to find the next number in a sequence, but when I fit the model I get this error:
TypeError: Input 'b' of 'MatMul' Op has type float32 that does not match type int64 of argument 'a'.
I don't understand the problem, because I've checked and all of my data and labels are dtype int64.
Here is my code:
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, LSTM
import random
import numpy as np
def makeData(total, types=5):
    #PATTERNS:
    #1)going up by 1
    #2)going up by 2
    #3)multiply by 3
    #4)multiply by 2
    #5)square number
    NumPer = int(total/types)
    Labels = []
    Data = []
    print(NumPer)
    for type in range(types):
        for i in range(NumPer):
            preData = []
            if type == 0:
                x = random.random()*100
                x = round(x)
                preData.append(x)
                preData.append(x+1)
                preData.append(x+2)
                Labels.append([x+3])
            if type == 1:
                x = random.random()*100
                x = round(x)
                preData.append(x)
                preData.append(x+2)
                preData.append(x+4)
                Labels.append([x+6])
            if type == 2:
                x = random.random()*100
                x = round(x)
                preData.append(x)
                preData.append(x*3)
                preData.append(x*9)
                Labels.append([x*27])
            if type == 3:
                x = random.random()*100
                x = round(x)
                preData.append(x)
                preData.append(x*2)
                preData.append(x*4)
                Labels.append([x*8])
            if type == 4:
                x = random.random()*100
                x = round(x)
                preData.append(x)
                preData.append(x*x)
                preData.append((x*x)*(x*x))
                Labels.append([(x*x)*(x*x)*((x*x)*(x*x))])
            Data.append(preData)
    return np.array(Data), np.array(Labels)
x, y = makeData(5)
def MakeRNN():
    model = Sequential()
    model.add(LSTM(3, activation='tanh'))
    model.add(Dense(9, activation='relu'))
    model.add(LSTM(9, activation='tanh'))
    model.add(Dense(36, activation='relu'))
    model.add(Dense(1, activation='relu'))
    model.compile(optimizer='Adam', loss="mse", metrics=['accuracy'])
    return model
Model = MakeRNN()
print(x.shape)
x = x.reshape(-1,1,3)
print(y[0].dtype)
print(x.shape)
Model.fit(x,y, epochs=20)
print(Model.predict([[1,2,3]]))
I don't really understand what's going on here, so any help would be much appreciated. I don't know much about Keras yet.
The code below works for me. The only changes are to x and y from makeData, which are cast to floats: Keras layer weights default to float32, so feeding int64 inputs is what produces the MatMul type mismatch in your error. In addition, the argument passed to predict needs to be 3-dimensional (you currently pass a 2-d one), and it also needs to be float. You also need return_sequences=True on the first LSTM, because it is followed by another LSTM.
Apart from that, your model is a bit odd. Keras expects input to a Sequential model in the form (batch_size, timesteps, input_dim). You have timesteps = 1, so there is effectively no sequence (see the sketch after the code for one way to feed the numbers as real timesteps). But that is a separate question of what you are trying to do; as far as getting the code to run, the adjustments above are enough.
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, LSTM
import random
import numpy as np
def makeData(total, types=5):
    #PATTERNS:
    #1)going up by 1
    #2)going up by 2
    #3)multiply by 3
    #4)multiply by 2
    #5)square number
    NumPer = int(total/types)
    Labels = []
    Data = []
    print(NumPer)
    for type in range(types):
        for i in range(NumPer):
            preData = []
            if type == 0:
                x = random.random()*100
                x = round(x)
                preData.append(x)
                preData.append(x+1)
                preData.append(x+2)
                Labels.append([x+3])
            if type == 1:
                x = random.random()*100
                x = round(x)
                preData.append(x)
                preData.append(x+2)
                preData.append(x+4)
                Labels.append([x+6])
            if type == 2:
                x = random.random()*100
                x = round(x)
                preData.append(x)
                preData.append(x*3)
                preData.append(x*9)
                Labels.append([x*27])
            if type == 3:
                x = random.random()*100
                x = round(x)
                preData.append(x)
                preData.append(x*2)
                preData.append(x*4)
                Labels.append([x*8])
            if type == 4:
                x = random.random()*100
                x = round(x)
                preData.append(x)
                preData.append(x*x)
                preData.append((x*x)*(x*x))
                Labels.append([(x*x)*(x*x)*((x*x)*(x*x))])
            Data.append(preData)
    return np.array(Data), np.array(Labels)
x, y = makeData(5)
x = x.astype(float)
y = y.astype(float)
def MakeRNN():
    model = Sequential()
    model.add(LSTM(3, activation='tanh', return_sequences=True))
    model.add(Dense(9, activation='relu'))
    model.add(LSTM(9, activation='tanh'))
    model.add(Dense(36, activation='relu'))
    model.add(Dense(1, activation='relu'))
    model.compile(optimizer='Adam', loss="mse", metrics=['accuracy'])
    return model
Model = MakeRNN()
print(x.shape)
x = x.reshape(-1,1,3)
print(y[0].dtype)
print(x.shape)
Model.fit(x,y, epochs=20)
print(Model.predict(np.array([[[1.0,2.0,3.0]]])))
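As mentioned above, with timesteps = 1 the LSTM never actually steps through the sequence. Here is a minimal sketch of one way the same data could instead be shaped as real sequences of three timesteps with one feature each. This is an assumption about the intent, not part of the original answer; it reuses the makeData function from above, and the layer sizes are arbitrary.

import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM

# Assumes makeData from above is in scope; use a larger total so NumPer > 1.
x, y = makeData(500)
x = x.astype(float)
y = y.astype(float)

# Shape the input as (batch_size, timesteps=3, input_dim=1) so each of the
# three numbers in a sample is its own timestep.
x = x.reshape(-1, 3, 1)

model = Sequential()
model.add(LSTM(16, activation='tanh', input_shape=(3, 1)))
model.add(Dense(36, activation='relu'))
model.add(Dense(1))
model.compile(optimizer='Adam', loss='mse')

model.fit(x, y, epochs=20)

# predict also takes a float array of shape (batch, 3, 1)
print(model.predict(np.array([[[1.0], [2.0], [3.0]]])))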