2 Weight sharing in PyTorch

# -*- coding: utf-8 -*-
import random

import torch


class DynamicNet(torch.nn.Module):
    def __init__(self, D_in, H, D_out):
        """
        In the constructor we construct three nn.Linear instances that we will use
        in the forward pass.
        """
        super(DynamicNet, self).__init__()
        self.input_linear = torch.nn.Linear(D_in, H)
        self.middle_linear = torch.nn.Linear(H, H)
        self.output_linear = torch.nn.Linear(H, D_out)

    def forward(self, x):
        """
        For the forward pass of the model, we randomly choose either 0, 1, 2, or 3
        and reuse the middle_linear Module that many times to compute hidden layer
        representations.

        Since each forward pass builds a dynamic computation graph, we can use normal
        Python control-flow operators like loops or conditional statements when
        defining the forward pass of the model.

        Here we also see that it is perfectly safe to reuse the same Module many
        times when defining a computational graph. This is a big improvement from Lua
        Torch, where each Module could be used only once.
        """
        h_relu = self.input_linear(x).clamp(min=0)
        for _ in range(random.randint(0, 3)):
            # middle_linear is reused here: every application goes through the same
            # shared weights, which is more convenient than in TensorFlow.
            h_relu = self.middle_linear(h_relu).clamp(min=0)
        y_pred = self.output_linear(h_relu)
        return y_pred


# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 64, 1000, 100, 10

# Create random Tensors to hold inputs and outputs
x = torch.randn(N, D_in)
y = torch.randn(N, D_out)

# Construct our model by instantiating the class defined above
model = DynamicNet(D_in, H, D_out)

# Construct our loss function and an Optimizer. Training this strange model with
# vanilla stochastic gradient descent is tough, so we use momentum.
criterion = torch.nn.MSELoss(reduction='sum')
optimizer = torch.optim.SGD(model.parameters(), lr=1e-4, momentum=0.9)

for t in range(500):
    # Forward pass: compute predicted y by passing x to the model
    y_pred = model(x)

    # Compute and print loss
    loss = criterion(y_pred, y)
    print(t, loss.item())

    # Zero gradients, perform a backward pass, and update the weights.
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
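
The point of reusing middle_linear in the loop is that every call goes through one and the same set of parameters. Below is a minimal sketch of how one might verify this, assuming the DynamicNet class and the N, D_in, H, D_out values defined above; the parameter-count check and the prints are illustrative additions, not part of the original example.

# Weight sharing check: reusing a Module does not create extra parameters.
model = DynamicNet(D_in, H, D_out)

# Exactly three Linear layers' worth of parameters, no matter how many times
# middle_linear is applied inside forward().
expected = (D_in * H + H) + (H * H + H) + (H * D_out + D_out)
actual = sum(p.numel() for p in model.parameters())
print(actual == expected)  # True

# Gradients from every repeated application accumulate into the same tensors.
y_pred = model(torch.randn(N, D_in))
y_pred.sum().backward()
grad = model.middle_linear.weight.grad
# grad is None only if the random loop happened to run zero times on this pass;
# otherwise it holds the summed contribution of all reuses, with shape (H, H).
print(grad.shape if grad is not None else None)

Because there is only a single copy of the shared weights, nothing special is needed to keep the repeated applications in sync: the optimizer simply updates that one copy using the accumulated gradient.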