Generating Shakespearean Text Using a Character RNN
I'm reading the ML book Hands-On Machine Learning (2nd edition); page 526 has a section called Generating Shakespearean Text Using a Character RNN.
I'm following along with what they do there, but a TypeError shows up during training. I've tried to solve it as best I can at my level.
TypeError: unsupported operand type(s) for *: 'int' and 'NoneType'
Here is the code:
import tensorflow as tf
from tensorflow import keras
from nltk import tokenize
import numpy as np
shakespeare_url = "https://homl.info/shakespeare" # shortcut URL
filepath = keras.utils.get_file("shakespeare.txt", shakespeare_url)
with open(filepath) as f:
    shakespeare_text = f.read()
tokenizer = keras.preprocessing.text.Tokenizer(char_level=True)
tokenizer.fit_on_texts([shakespeare_text])
max_id = len(tokenizer.word_index)
dataset_size = tokenizer.document_count
[encoded] = np.array(tokenizer.texts_to_sequences([shakespeare_text])) - 1
print(dataset_size)
train_size = dataset_size * 90 // 100
dataset = tf.data.Dataset.from_tensor_slices(encoded[:train_size])
print(train_size)
n_steps = 100
window_length = n_steps + 1 # target = input shifted 1 character ahead
dataset = dataset.window(window_length, shift=1, drop_remainder=True)
dataset = dataset.flat_map(lambda window: window.batch(window_length))
batch_size = 32
dataset = dataset.shuffle(10000).batch(batch_size)
dataset = dataset.map(lambda windows: (windows[:, :-1], windows[:, 1:]))
dataset = dataset.map(lambda X_batch, Y_batch: (tf.one_hot(X_batch, depth=max_id), Y_batch))
dataset = dataset.prefetch(1)
print(dataset)
model = keras.models.Sequential([
    keras.layers.GRU(128, return_sequences=True, input_shape=[None, max_id],
                     dropout=0.2, recurrent_dropout=0.2),
    keras.layers.GRU(128, return_sequences=True,
                     dropout=0.2, recurrent_dropout=0.2),
    keras.layers.TimeDistributed(keras.layers.Dense(max_id, activation="softmax"))
])
model.compile(loss="sparse_categorical_crossentropy", optimizer="adam")
history = model.fit(dataset, epochs=20)
dataset_size = tokenizer.document_count
returns 1 for some reason, so train_size = dataset_size * 90 // 100 evaluates to 0 and
dataset = tf.data.Dataset.from_tensor_slices(encoded[:train_size])
fails, because encoded[:0] is an empty slice and no training data is produced.
I replaced it with this, and it seems to work fine:
train_size = encoded.shape[0] * 90 // 100
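
To see why this works: encoded is a flat 1-D NumPy array with one integer ID per character, so its length is the dataset size in characters that the book's recipe expects, while document_count only counts how many texts were passed to fit_on_texts. A minimal sanity check, assuming the question's code has already run up to the encoding step (the printed shape is what I'd expect for this corpus, not something re-verified here):

print(encoded.shape)             # e.g. (1115394,): one ID per character
print(tokenizer.document_count)  # 1, because fit_on_texts saw a single document
train_size = encoded.shape[0] * 90 // 100  # 90% of the characters
print(train_size)                # a large positive int, not 0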
The cause of this error is in this line:
tokenizer.fit_on_texts([shakespeare_text])
You put the entire text inside a single list, so the tokenizer sees just one document; that is why dataset_size is 1.
You should use this instead:
tokenizer.fit_on_texts(shakespeare_text)
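
For context, Tokenizer.fit_on_texts iterates over its argument and counts each element as one document. A list containing one string is one document; a raw string yields one character per iteration, so every character is counted as a separate document and document_count matches the character count. A quick standalone illustration (my own sketch, not from the book):

from tensorflow import keras

text = "To be, or not to be"

tok1 = keras.preprocessing.text.Tokenizer(char_level=True)
tok1.fit_on_texts([text])        # one-element list -> one document
print(tok1.document_count)       # 1

tok2 = keras.preprocessing.text.Tokenizer(char_level=True)
tok2.fit_on_texts(text)          # raw string -> one "document" per character
print(tok2.document_count)       # 19 == len(text)

Either way the character vocabulary (word_index) should come out the same, since char_level=True splits each text into characters anyway.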