如何检查 Pytorch 网络层的参数?
How can I check parameters of Pytorch networks' layers?
import torch
import torch.nn as nn
from torch.optim import Adam
class NN_Network(nn.Module):
    """Two-layer fully connected network: in_dim -> hid -> out_dim.

    Note: each nn.Linear internally registers its weight and bias as
    nn.Parameter, so the module's .parameters() is populated even though
    this class never creates an nn.Parameter explicitly.
    """

    def __init__(self, in_dim, hid, out_dim):
        super(NN_Network, self).__init__()
        self.linear1 = nn.Linear(in_dim, hid)
        self.linear2 = nn.Linear(hid, out_dim)

    def forward(self, input_array):
        # Two affine maps applied in sequence (no nonlinearity between them).
        hidden = self.linear1(input_array)
        return self.linear2(hidden)
# Network dimensions: 5 inputs -> 2 hidden units -> 3 outputs.
in_d = 5
hidn = 2
out_d = 3
net = NN_Network(in_d, hidn, out_d)
# Materialize all registered parameters (weight and bias of each nn.Linear).
list(net.parameters())
结果是:
[Parameter containing:
tensor([[-0.2948, -0.1261, 0.2525, -0.4162, 0.3067],
[-0.2483, -0.3600, -0.4090, 0.0844, -0.2772]], requires_grad=True),
Parameter containing:
tensor([-0.2570, -0.3754], requires_grad=True),
Parameter containing:
tensor([[ 0.4550, -0.4577],
[ 0.1782, 0.2454],
[ 0.6931, -0.6003]], requires_grad=True),
Parameter containing:
tensor([ 0.4181, -0.2229, -0.5921], requires_grad=True)]
虽然没有显式使用 nn.Parameter,但 list(net.parameters()) 的结果就是模型的参数。
我很好奇的是:
我没有使用nn.Parameter命令,为什么会有结果?并且要检查任何网络层的参数,那么 .parameters() 是检查它的唯一方法吗?
可能结果分别是self.linear1(in_dim,hid)的weight,bias等。
但是有什么方法可以检查它是什么吗?
您可以使用 .named_parameters()
而不是 .parameters()
来获取有关模型的更多信息:
# named_parameters() yields (name, parameter) pairs, so each tensor can be
# identified as e.g. "linear1.weight" or "linear2.bias".
trainable = ((n, p) for n, p in net.named_parameters() if p.requires_grad)
for n, p in trainable:
    print(n, p.data)
结果:
linear1.weight tensor([[ 0.3727, 0.2522, 0.2381, 0.3115, 0.0656],
[-0.3322, 0.2024, 0.1089, -0.3370, 0.3917]])
linear1.bias tensor([-0.2089, 0.1105])
linear2.weight tensor([[-0.1090, 0.2564],
[-0.3957, 0.6632],
[-0.4036, 0.7066]])
linear2.bias tensor([ 0.1398, -0.0585, 0.4297])
import torch
import torch.nn as nn
from torch.optim import Adam
class NN_Network(nn.Module):
    """A minimal two-layer MLP (no activation between the layers)."""

    def __init__(self, in_dim, hid, out_dim):
        super().__init__()
        # nn.Linear registers its weight/bias as nn.Parameter automatically,
        # which is why net.parameters() is non-empty without any explicit
        # nn.Parameter usage here.
        self.linear1 = nn.Linear(in_dim, hid)
        self.linear2 = nn.Linear(hid, out_dim)

    def forward(self, input_array):
        return self.linear2(self.linear1(input_array))
# Network dimensions: 5 inputs -> 2 hidden units -> 3 outputs.
in_d = 5
hidn = 2
out_d = 3
net = NN_Network(in_d, hidn, out_d)
# Materialize all registered parameters (weight and bias of each nn.Linear).
list(net.parameters())
结果是:
[Parameter containing:
tensor([[-0.2948, -0.1261, 0.2525, -0.4162, 0.3067],
[-0.2483, -0.3600, -0.4090, 0.0844, -0.2772]], requires_grad=True),
Parameter containing:
tensor([-0.2570, -0.3754], requires_grad=True),
Parameter containing:
tensor([[ 0.4550, -0.4577],
[ 0.1782, 0.2454],
[ 0.6931, -0.6003]], requires_grad=True),
Parameter containing:
tensor([ 0.4181, -0.2229, -0.5921], requires_grad=True)]
虽然没有显式使用 nn.Parameter,但 list(net.parameters()) 的结果就是模型的参数。
我很好奇的是:
我没有使用nn.Parameter命令,为什么会有结果?并且要检查任何网络层的参数,那么 .parameters() 是检查它的唯一方法吗?
可能结果分别是self.linear1(in_dim,hid)的weight,bias等。
但是有什么方法可以检查它是什么吗?
您可以使用 .named_parameters()
而不是 .parameters()
来获取有关模型的更多信息:
# Print every trainable parameter together with its qualified name
# (e.g. "linear1.weight"), which plain .parameters() does not provide.
for pname, tensor in net.named_parameters():
    if not tensor.requires_grad:
        continue
    print(pname, tensor.data)
结果:
linear1.weight tensor([[ 0.3727, 0.2522, 0.2381, 0.3115, 0.0656],
[-0.3322, 0.2024, 0.1089, -0.3370, 0.3917]])
linear1.bias tensor([-0.2089, 0.1105])
linear2.weight tensor([[-0.1090, 0.2564],
[-0.3957, 0.6632],
[-0.4036, 0.7066]])
linear2.bias tensor([ 0.1398, -0.0585, 0.4297])