PyTorch Tutorial 2

Simple linear regression using PyTorch's automatic differentiation (autograd). A few notes on the operations used:

  1. @ performs matrix multiplication
  2. .t() returns the transpose of a matrix
  3. .numel() returns the number of elements in a tensor
  4. code inside a with torch.no_grad(): block is not tracked by autograd, which saves memory and computation time; a short demo of these operations follows this list
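
To make these concrete, here is a minimal sketch (the tensors a, v, and x are made up for illustration):

import torch

a = torch.tensor([[1., 2.],
                  [3., 4.]])
v = torch.tensor([10., 20.])

print(a @ a.t())   # @ is matrix multiplication; .t() is the transpose
print(a.numel())   # 4: total number of elements in the tensor
print(a + v)       # v is broadcast (copied) across the rows of a

x = torch.ones(2, requires_grad=True)
with torch.no_grad():
    y = x * 2      # operations here are not tracked by autograd
print(y.requires_grad)  # False

The full training script follows.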
import torch
import numpy as np

inputs = np.array([[0, 0, 3],
                   [0, 1, 9],
                   [1, 0, 8],
                   [1, 1, 28]], dtype='float32')

outputs = np.array([[0, 1],
                    [9, 4],
                    [7, 3],
                    [6, 7]], dtype='float32')

inputs = torch.from_numpy(inputs)
outputs = torch.from_numpy(outputs)

# randomly initialized weights and biases; requires_grad=True tells
# autograd to track operations on them
w = torch.randn(2, 3, requires_grad=True)
b = torch.randn(2, requires_grad=True)

# print(b)

def model(x):
    # b is a vector; when added to the matrix x @ w.t(), it is
    # broadcast, i.e. effectively copied across the rows so that
    # the shapes match
    return x @ w.t() + b

def mse(t1, t2):
    # mean squared error: sum of squared differences divided by
    # the number of elements
    return torch.sum((t1 - t2) ** 2) / t1.numel()

learning_rate = 1e-5
for t in range(500):
    y_pred = model(inputs)
    loss = mse(y_pred, outputs)
    loss.backward()
    # update parameters without tracking gradients
    with torch.no_grad():
        w -= learning_rate * w.grad
        b -= learning_rate * b.grad
        # reset gradients to zero; otherwise .backward() accumulates them
        w.grad.zero_()
        b.grad.zero_()

print(loss.item())
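
As a quick sanity check (a hypothetical addition, not part of the script above), one can compare the trained model's predictions against the targets:

with torch.no_grad():
    print(model(inputs))  # predictions after 500 gradient steps
print(outputs)            # targets, for comparison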