7.神经网络基础

发布于:2025-07-11 ⋅ 阅读:(14) ⋅ 点赞:(0)

7.1 构造网络模型

#自定义自己的网络模块
import torch
from torch import nn
from torch.nn import functional as F
class MLP(nn.Module):
    """A two-layer perceptron: 20 inputs -> 256 hidden units (ReLU) -> 10 outputs."""

    def __init__(self):
        super().__init__()
        # Fully connected layers; sizes follow the tutorial example.
        self.hidden = nn.Linear(20, 256)
        self.out = nn.Linear(256, 10)

    def forward(self, x):
        # Forward pass: hidden layer -> ReLU non-linearity -> output layer.
        activated = F.relu(self.hidden(x))
        return self.out(activated)
# Draw a 2x20 batch from N(0.5, 2.0^2) and push it through a fresh MLP.
net = MLP()
X = torch.normal(mean=0.5, std=2.0, size=(2, 20))
net(X)

7.2 参数管理与初始化

#访问参数
import torch
from torch import nn
# Inspect the parameters of a small sequential net: 4 -> 8 -> ReLU -> 1.
net = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 1))
X = torch.rand(size=(2, 4))

# Parameters of the last linear layer, accessed several ways.
print(net[2].state_dict())
print(type(net[2].bias))
print(net[2].bias)
print(net[2].bias.data)
print(net[2].bias.grad)  # None here: no backward pass has been run

# Names and shapes — for a single layer, then for the whole network.
print(*[(n, p.shape) for n, p in net[0].named_parameters()])
print(*[(n, p.shape) for n, p in net.named_parameters()])
print(net.state_dict()['2.bias'].data)
# Fresh setup for the initialization demo: rebuild the 4 -> 8 -> 1 network.
import torch
from torch import nn

net = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 1))
def init_weight(m):
    """Initializer for net.apply(): weights ~ N(0, 0.01^2), biases zero.

    Only affects nn.Linear modules; everything else is left untouched.
    """
    # isinstance (not `type(m) == nn.Linear`) so Linear subclasses are
    # also initialized — the idiomatic type check in Python.
    if isinstance(m, nn.Linear):
        nn.init.normal_(m.weight, mean=0, std=0.01)
        nn.init.zeros_(m.bias)
def init_constant(m, val=1):
    """Initializer for net.apply(): constant weights and zero biases.

    Args:
        m: module visited by nn.Module.apply; only nn.Linear is modified.
        val: the constant to fill weights with (default 1, matching the
            original behavior; pass another value via functools.partial
            or a lambda when using net.apply).
    """
    # isinstance instead of `type(m) == nn.Linear` — idiomatic, and also
    # covers Linear subclasses.
    if isinstance(m, nn.Linear):
        nn.init.constant_(m.weight, val)
        nn.init.zeros_(m.bias)
# Run each initializer over the whole net and show the first layer's values.
net.apply(init_weight)
print(net[0].weight.data[0], net[0].bias.data[0])
net.apply(init_constant)
print(net[0].weight.data[0], net[0].bias.data[0])

# Parameter tying: the SAME Linear module instance appears twice in the
# Sequential, so positions 2 and 4 share one weight tensor.
shared = nn.Linear(8, 8)
net = nn.Sequential(
    nn.Linear(4, 8), nn.ReLU(),
    shared, nn.ReLU(),
    shared, nn.ReLU(),
    nn.Linear(8, 1),
)
net(X)
print(net[2].weight.data[0] == net[4].weight.data[0])
# Mutating one "copy" mutates the other — they are the same tensor.
net[2].weight.data[0, 0] = 100
print(net[2].weight.data[0] == net[4].weight.data[0])

7.3 自定义层

#如何自定义一个层
import torch
from torch import nn
class centeredLayer(nn.Module):
    """A parameter-free custom layer that subtracts the input's mean."""

    def __init__(self):
        super().__init__()

    def forward(self, X):
        # Shift the input so the result has (scalar) mean zero.
        return X - X.mean()
# Sanity check: the centered layer maps any input to zero mean.
layer = centeredLayer()
layer(torch.FloatTensor([1, 2, 3, 4, 5]))

# Compose it with a Linear layer; the composite output's mean is ~0.
net = nn.Sequential(nn.Linear(8, 128), centeredLayer())
Y = net(torch.rand(4, 8))
Y.mean()

7.4 文件读写与模型保存

#文件读写
import torch
from torch import nn
from torch.nn import functional as F
# Persist a single tensor to disk, then a list of tensors, and read both back.
x = torch.arange(4)
y = torch.zeros(4)

torch.save(x, 'x-file')
x2 = torch.load('x-file')
print(x2)

torch.save([x, y], 'x-files')
x2, y2 = torch.load('x-files')
print(x2, y2)
#模型保存
import torch
from torch import nn
from torch.nn import functional as F
class MLP(nn.Module):
    """Two-layer MLP (20 -> 256 -> 10) used by the save/load demo below."""

    def __init__(self):
        super().__init__()
        # Attribute names determine the state_dict keys ('hidden.*', 'out.*'),
        # so they must stay stable for load_state_dict to work.
        self.hidden = nn.Linear(20, 256)
        self.out = nn.Linear(256, 10)

    def forward(self, x):
        # hidden layer -> ReLU -> output logits
        return self.out(F.relu(self.hidden(x)))
# Save a model's parameters, reload them into a fresh clone, and verify
# the clone produces identical outputs on the same input.
net = MLP()
X = torch.normal(mean=0.5, std=2.0, size=(2, 20))
Y = net(X)
torch.save(net.state_dict(), 'mlp.params')

# Rebuild the architecture, then restore the saved weights into it.
clone = MLP()
clone.load_state_dict(torch.load('mlp.params'))
clone.eval()
Y_clone = clone(X)
Y_clone == Y

网站公告

今日签到

点亮在社区的每一天
去签到