Question

PyTorch: mat1 and mat2 shapes cannot be multiplied


I set up a toy example for my first PyTorch model:

x = torch.from_numpy(np.linspace(1,100,num=100))
y = torch.from_numpy(np.dot(2,x))

I built the model as follows:

class Net(nn.Module):
    def __init__(self):
        super(Net,self).__init__()
        self.fc1 = nn.Linear(1,10)
        self.fc2 = nn.Linear(10,20)
        self.fc3 = nn.Linear(16,1)
        
    def forward(self,inputs):
        x = F.relu(self.fc1(inputs))
        x = F.relu(self.fc2(x))
        x = F.linear(self.fc3(x))
        return x

However, when I try to train it, I run into this error:

RuntimeError: mat1 and mat2 shapes cannot be multiplied (1x20 and 1x10)

Here is the full code for reference:

import numpy as np # linear algebra
import torch
from torch.utils.data import Dataset
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

x = torch.from_numpy(np.linspace(1,100,num=100))
y = torch.from_numpy(np.dot(2,x))

class MyDataset(Dataset):
    def __init__(self):
        self.sequences = x
        self.target = y
        
    def __getitem__(self,i):
        return self.sequences[i], self.target[i]
    
    def __len__(self):
        return len(self.sequences)

class Net(nn.Module):
    def __init__(self):
        super(Net,self).__init__()
        self.fc1 = nn.Linear(1,10)
        self.fc2 = nn.Linear(10,20)
        self.fc3 = nn.Linear(16,1)
        
    def forward(self,inputs):
        x = F.relu(self.fc1(inputs))
        x = F.relu(self.fc2(x))
        x = F.linear(self.fc3(x))
        return x

model = Net().to('cpu')

# Generators
training_set = MyDataset()
loader = torch.utils.data.DataLoader(training_set, batch_size=20)

#criterion and optimizer
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=0.0001)

### Training
n_epochs = 12

for epoch in range(n_epochs):
    for inputs,target in loader:
        print(target)
        
        optimizer.zero_grad()
        
        output = model(inputs)
        
        loss = criterion(output,target)
        
        loss.backward()
        
        optimizer.step()

And here is the full error message:

---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
<ipython-input-107-d32fd01d3b41> in <module>
      9         optimizer.zero_grad()
     10 
---> 11         output = model(inputs)
     12 
     13         loss = criterion(output,target)

/opt/conda/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
    725             result = self._slow_forward(*input, **kwargs)
    726         else:
--> 727             result = self.forward(*input, **kwargs)
    728         for hook in itertools.chain(
    729                 _global_forward_hooks.values(),

<ipython-input-103-aefe4823d2e8> in forward(self, inputs)
      7 
      8     def forward(self,inputs):
----> 9         x = F.relu(self.fc1(inputs))
     10         x = F.relu(self.fc2(x))
     11         x = F.linear(self.fc3(x))

/opt/conda/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
    725             result = self._slow_forward(*input, **kwargs)
    726         else:
--> 727             result = self.forward(*input, **kwargs)
    728         for hook in itertools.chain(
    729                 _global_forward_hooks.values(),

/opt/conda/lib/python3.7/site-packages/torch/nn/modules/linear.py in forward(self, input)
     91 
     92     def forward(self, input: Tensor) -> Tensor:
---> 93         return F.linear(input, self.weight, self.bias)
     94 
     95     def extra_repr(self) -> str:

/opt/conda/lib/python3.7/site-packages/torch/nn/functional.py in linear(input, weight, bias)
   1690         ret = torch.addmm(bias, input, weight.t())
   1691     else:
-> 1692         output = input.matmul(weight.t())
   1693         if bias is not None:
   1694             output += bias

RuntimeError: mat1 and mat2 shapes cannot be multiplied (1x20 and 1x10)

Any suggestions would be greatly appreciated.

Recommended Answer

There are four issues here:

  1. Looking at the first layer of the model, nn.Linear(1, 10) expects input of shape (batch_size, 1). Your DataLoader uses batch_size=20, so each batch currently arrives with shape (20,) rather than (20, 1); that is the 1x20 in the error message. To fix this, add a trailing dimension with unsqueeze(-1) (a training sketch applying this is shown at the end of the answer).

  2. The inputs should be of dtype float: x.float(). torch.from_numpy on the np.linspace output gives a float64 tensor, while the layer weights are float32.

  3. The layer self.fc3 is sized incorrectly. Since self.fc2 outputs 20 features, it should be nn.Linear(20, 1).

  4. Finally, F.linear is not a linear activation (i.e. not the identity function). It is a linear transformation (i.e. x @ A.T + b) and takes a required weight argument, so F.linear(self.fc3(x)) is not what you want here. Check the documentation for more details; see also the short F.linear demo right after this list.
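
To make point 4 concrete, here is a minimal sketch (the tensor shapes are arbitrary, chosen only for illustration) showing that F.linear computes x @ A.T + b and needs an explicit weight, so it cannot be used as an activation the way F.relu can:

import torch
import torch.nn.functional as F

x = torch.randn(4, 20)   # a batch of 4 samples with 20 features
w = torch.randn(1, 20)   # weight of shape (out_features, in_features)
b = torch.randn(1)       # bias of shape (out_features,)

out = F.linear(x, w, b)  # computes x @ w.T + b -> shape (4, 1)
print(out.shape)         # torch.Size([4, 1])

# weight is a required positional argument, so F.linear(x) alone raises a
# TypeError instead of acting as an identity/activation function.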


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(1, 10)
        self.fc2 = nn.Linear(10, 20)
        self.fc3 = nn.Linear(20, 1)
        
    def forward(self,inputs):
        x = F.relu(self.fc1(inputs))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

Inference:

>>> x = torch.linspace(1, 100, 100).float().unsqueeze(-1)
>>> y_hat = Net()(x)
>>> y_hat.shape
torch.Size([100, 1])
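
Putting points 1 and 2 together with the corrected model, a minimal sketch of the fixed data preparation and training loop could look like the following. It reuses the dataset, loss, and optimizer settings from the question and assumes the corrected Net defined above; the unsqueeze(-1) and float() calls are the suggested changes:

import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
import torch.nn as nn
import torch.optim as optim

# Point 1: add a feature dimension -> shape (100, 1)
# Point 2: cast to float32, which nn.Linear expects
x = torch.from_numpy(np.linspace(1, 100, num=100)).float().unsqueeze(-1)
y = 2 * x

class MyDataset(Dataset):
    def __init__(self):
        self.sequences = x
        self.target = y

    def __getitem__(self, i):
        return self.sequences[i], self.target[i]

    def __len__(self):
        return len(self.sequences)

model = Net()  # the corrected Net defined above
loader = DataLoader(MyDataset(), batch_size=20)
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=0.0001)

for epoch in range(12):
    for inputs, target in loader:
        optimizer.zero_grad()
        output = model(inputs)            # inputs: (20, 1) -> output: (20, 1)
        loss = criterion(output, target)  # target is also (20, 1), so shapes match
        loss.backward()
        optimizer.step()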