Activate PyTorch network nodes without variable assignments?
In tutorials for PyTorch networks, we usually see implementations like this:
from torch.nn.functional import hardtanh, sigmoid
import torch.nn as nn

class great_network(nn.Module):
    def __init__(self):
        super(great_network, self).__init__()
        self.layer1 = nn.Conv2d(2, 2, 3)
        self.pool_1 = nn.MaxPool2d(1, 1)
        self.layer3 = nn.ConvTranspose2d(2, 2, 3)
        self.out_layer = nn.Conv2d(1, 1, 3)

    def forward(self, x):
        x = hardtanh(self.layer1(x))
        x = self.pool_1(x)
        x = hardtanh(self.layer3(x))
        x = sigmoid(self.out_layer(x))
        return x

net = great_network()
print(net)
great_network(
  (layer1): Conv2d(2, 2, kernel_size=(3, 3), stride=(1, 1))
  (pool_1): MaxPool2d(kernel_size=1, stride=1, padding=0, dilation=1, ceil_mode=False)
  (layer3): ConvTranspose2d(2, 2, kernel_size=(3, 3), stride=(1, 1))
  (out_layer): Conv2d(1, 1, kernel_size=(3, 3), stride=(1, 1))
)
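For reference, every layer here is assigned as an attribute of the nn.Module, so all of them are registered and their parameters are tracked. A quick check along these lines (hypothetical REPL session on the class above) would show:

>>> len(list(net.parameters()))  # weight and bias for layer1, layer3, out_layer; the pool has none
6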
If I wanted to change the size of this network dynamically to run multiple experiments, I would have to reproduce the code above without the many assignments (akin to data-block code bloat).
It might look something like this:
from torch.nn.functional import hardtanh, sigmoid
import torch.nn as nn
import numpy as np

class not_so_great_network(nn.Module):
    def __init__(self, n):
        super(not_so_great_network, self).__init__()
        self.pre_layers = self.generate_pre_layers(n)
        self.post_layers = self.generate_post_layers(n)
        self.pool = nn.MaxPool2d(1, 1)
        self.out = nn.Conv2d(1, 1, 3)

    def generate_pre_layers(self, layer_num):
        layers = np.empty(layer_num, dtype=object)
        for lay in range(len(layers)):
            layers[lay] = nn.Conv2d(2, 2, 3)
        return layers

    def generate_post_layers(self, layer_num):
        layers = np.empty(layer_num, dtype=object)
        for lay in range(len(layers)):
            layers[lay] = nn.Conv2d(2, 2, 3)
        return layers

    def forward(self, x):
        for pre in self.pre_layers:
            x = hardtanh(pre(x))
        x = self.pool(x)
        for post in self.post_layers:
            x = hardtanh(post(x))
        x = sigmoid(self.out(x))
        return x
However, not all of the layers are there:
if __name__ == '__main__':
    layer_num = 5
    net = not_so_great_network(layer_num)
    print(net)
not_so_great_network(
  (pool): MaxPool2d(kernel_size=1, stride=1, padding=0, dilation=1, ceil_mode=False)
  (out): Conv2d(1, 1, kernel_size=(3, 3), stride=(1, 1))
)
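The missing layers are not just a printing quirk; the parameters of the convs stored in the numpy arrays are never registered, so an optimizer would not see them either. A quick check along these lines (hypothetical REPL session) confirms it:

>>> net = not_so_great_network(5)
>>> len(list(net.parameters()))  # only the weight and bias of `out`
2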
I am not assigning the layers one by one, because being able to generate networks of different sizes without copying and pasting would be more powerful. How can I reproduce the output above, so that I can later apply activation functions to the nodes?
Another approach is to use nn.ModuleList. The layers in your version disappear because PyTorch only registers submodules that are assigned as attributes of an nn.Module or stored in one of its container types (nn.ModuleList, nn.ModuleDict, nn.Sequential); modules hidden inside a plain numpy array are invisible to it, which is why they neither print nor contribute parameters:
from torch import nn
from torch.nn.functional import hardtanh, sigmoid

class maybe_great_network(nn.Module):
    def __init__(self, n):
        super().__init__()
        self.pre_layers = self.generate_pre_layers(n)
        self.post_layers = self.generate_post_layers(n)
        self.pool = nn.MaxPool2d(1, 1)
        self.out = nn.Conv2d(1, 1, 3)

    def generate_pre_layers(self, layer_num):
        return nn.ModuleList([
            nn.Conv2d(2, 2, 3)
            for l in range(layer_num)
        ])

    def generate_post_layers(self, layer_num):
        return nn.ModuleList([
            nn.Conv2d(2, 2, 3)
            for l in range(layer_num)
        ])

    def forward(self, x):
        for pre in self.pre_layers:
            x = hardtanh(pre(x))
        x = self.pool(x)
        for post in self.post_layers:
            x = hardtanh(post(x))
        x = sigmoid(self.out(x))
        return x
Then:
>>> m = maybe_great_network(3)
>>> m
maybe_great_network(
  (pre_layers): ModuleList(
    (0): Conv2d(2, 2, kernel_size=(3, 3), stride=(1, 1))
    (1): Conv2d(2, 2, kernel_size=(3, 3), stride=(1, 1))
    (2): Conv2d(2, 2, kernel_size=(3, 3), stride=(1, 1))
  )
  (post_layers): ModuleList(
    (0): Conv2d(2, 2, kernel_size=(3, 3), stride=(1, 1))
    (1): Conv2d(2, 2, kernel_size=(3, 3), stride=(1, 1))
    (2): Conv2d(2, 2, kernel_size=(3, 3), stride=(1, 1))
  )
  (pool): MaxPool2d(kernel_size=1, stride=1, padding=0, dilation=1, ceil_mode=False)
  (out): Conv2d(1, 1, kernel_size=(3, 3), stride=(1, 1))
)
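Because nn.ModuleList registers its contents as submodules, the conv weights and biases are now tracked and will be handed to an optimizer via m.parameters(). A quick sanity check (hypothetical REPL session, assuming the class above):

>>> # 3 pre convs + 3 post convs + out, each with a weight and a bias
>>> len(list(m.parameters()))
14

Note that the architecture itself still has a channel mismatch: the post layers emit 2 channels while self.out = nn.Conv2d(1, 1, 3) expects 1, so a forward pass would raise a shape error until that is adjusted; the layer-registration problem, however, is solved.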