Pytorch学习13_神经网络-非线性激活

发布于:2025-02-10 ⋅ 阅读:(63) ⋅ 点赞:(0)

ReLU

import torch
from torch import nn
from torch.nn import ReLU

input=torch.tensor([[1,-0.5],
                    [-1,3]])
output=torch.reshape(input,(-1,1,2,2))
print(output.shape)

(-1,1,2,2)表示第一个维度将由系统自动推断,第二个维度为1,而第三和第四个维度为2。

import torch
from torch import nn
from torch.nn import ReLU

# 2x2 example tensor with negatives; float32 because of the -0.5 entry.
# (Renamed from `input` to avoid shadowing the builtin.)
x = torch.tensor([[1, -0.5],
                  [-1, 3]])
# (-1, 1, 2, 2): batch dimension inferred, 1 channel, 2x2 spatial (NCHW).
output = torch.reshape(x, (-1, 1, 2, 2))
print(output.shape)


class my_nn(nn.Module):
    """Minimal module that applies ReLU elementwise to its input."""

    def __init__(self) -> None:
        super().__init__()
        self.relu1 = ReLU()

    def forward(self, input):
        """Return ReLU(input): negative values are clamped to 0."""
        output = self.relu1(input)
        return output


my_nn01 = my_nn()
# Apply ReLU to the original 2x2 tensor (not the reshaped one).
output = my_nn01(x)
print(output)

Sigmoid

import torch
import torchvision
from torch import nn
from torch.nn import ReLU, Sigmoid
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

# Small demo tensor, reshaped to NCHW (batch inferred, 1 channel, 2x2).
input = torch.tensor([[1, -0.5],
                      [-1, 3]])
output = torch.reshape(input, (-1, 1, 2, 2))
print(output.shape)

# CIFAR-10 test split; ToTensor converts images to float tensors in [0, 1].
dataset = torchvision.datasets.CIFAR10("./dataset", train=False, download=True,
                                       transform=torchvision.transforms.ToTensor())
dataloader = DataLoader(dataset, batch_size=64)


class my_nn(nn.Module):
    """Module applying Sigmoid (a ReLU layer is kept as an unused alternative)."""

    def __init__(self) -> None:
        super().__init__()
        self.relu1 = ReLU()
        self.sigmoid1 = Sigmoid()

    def forward(self, input):
        return self.sigmoid1(input)


my_nn01 = my_nn()

# Log each batch before and after the activation so TensorBoard can compare.
writer = SummaryWriter("./logs_relu")
for step, (imgs, targets) in enumerate(dataloader):
    writer.add_images("input", imgs, global_step=step)
    writer.add_images("output", my_nn01(imgs), step)

writer.close()

终端运行

tensorboard --logdir=logs_relu

参考

【PyTorch深度学习快速入门教程(绝对通俗易懂!)【小土堆】】 https://www.bilibili.com/video/BV1hE411t7RN/?p=20&share_source=copy_web&vd_source=be33b1553b08cc7b94afdd6c8a50dc5a


网站公告

今日签到

点亮在社区的每一天
去签到