Problem: Do an end-to-end walkthrough of the PyTorch machine learning workflow using the most basic univariate linear regression example. In particular, generate some linear data over a normalized feature space (whose slope \(w\) and intercept \(b\) would in practice be a priori unknown), split that linear data into training and testing subsets (no cross-validation dataset needed for this simple example), define the linear layer class, instantiate a model object of the class, and starting from random values of \(w\) and \(b\), use stochastic gradient descent with learning rate \(\alpha=0.01\) to minimize the training cost function \(C_{\text{train}}(w,b)\) based on an \(L^1\) loss. Iterate SGD for \(300\) epochs, and for every \(20\) epochs, record the current value of \(C_{\text{train}}(w,b)\) and the current value of \(C_{\text{test}}(w,b)\). Plot these cost function curves as a function of the epoch number \(0, 20, 40,…\). Save the final state dictionary of the model’s learned parameters \(w,b\) post-training, and load it back onto a new instance of the model class.
Solution:
Typical PyTorch Workflow
- Get data ready (turn into tensors).
- Build or pick a pretrained model (to suit one's problem).
- Fit the model to the data and make a prediction.
- Evaluate the model.
- Improve through experimentation.
- Save and reload your trained model.
import torch
from torch import nn # nn module contains all of PyTorch's building blocks for designing architectures
import matplotlib.pyplot as plt
torch.__version__
'2.9.1+cu128'
# Create known parameters
weight = 0.7
bias = 0.3
start = 0
end = 1
step = 0.02
x = torch.arange(start, end, step).unsqueeze(dim=1)
#print(x)
y = weight * x + bias
print(f"First 10 elements of x: {x[:10]}")
print(f"First 10 elements of y: {y[:10]}")
plt.plot(x, y)
First 10 elements of x: tensor([[0.0000],
[0.0200],
[0.0400],
[0.0600],
[0.0800],
[0.1000],
[0.1200],
[0.1400],
[0.1600],
[0.1800]])
First 10 elements of y: tensor([[0.3000],
[0.3140],
[0.3280],
[0.3420],
[0.3560],
[0.3700],
[0.3840],
[0.3980],
[0.4120],
[0.4260]])
[<matplotlib.lines.Line2D at 0x7f60957a38b0>]
train_split = int(0.8 * len(x)) # or int(0.8 * len(y))
x_train, y_train = x[:train_split], y[:train_split]
x_test, y_test = x[train_split:], y[train_split:]
# can also use scikit learn's splitting method which
# adds some randomness to the training data.
print(len(x_train), len(y_train), len(x_test), len(y_test))
40 40 10 10
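As noted in the comment above, a shuffled split can also be done with scikit-learn. A minimal sketch, assuming scikit-learn is installed and accepts the torch tensors directly (it indexes them like arrays); the random_state is arbitrary:
# Hypothetical alternative: a randomized 80/20 split via scikit-learn
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42)
print(len(x_train), len(y_train), len(x_test), len(y_test))  # still 40 40 10 10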
# Create linear regression model class
class LinearRegressionModel(nn.Module):
    # almost all model classes inherit from nn.Module
    def __init__(self):
        super().__init__()
        self.weights = nn.Parameter(torch.randn(1,
                                                requires_grad=True,  # requires_grad=True is the default
                                                dtype=torch.float))
        self.bias = nn.Parameter(torch.randn(1,
                                             requires_grad=True,
                                             dtype=torch.float))
        # or, instead of hard coding the weights and biases, PyTorch's nn.Module class has
        # a built-in nn.Linear layer, so the above can be replaced by something like
        # self.linear_layer = nn.Linear(in_features=1, out_features=1)

    # Forward method defines the computation in the model; it should always be defined to override
    # the default method in nn.Module
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.weights * x + self.bias
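For reference, the nn.Linear variant mentioned in the comments above might look like the following sketch (not the model used in the rest of this walkthrough):
# Sketch: the same univariate linear model built from the nn.Linear layer
class LinearRegressionModelV2(nn.Module):
    def __init__(self):
        super().__init__()
        # one input feature -> one output feature; weight and bias are created internally
        self.linear_layer = nn.Linear(in_features=1, out_features=1)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.linear_layer(x)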
PyTorch model building essentials
torch.nn – contains all the building blocks for computational graphs (e.g. neural networks)
torch.nn.Parameter – stores tensors that the model should learn as parameters; often a PyTorch layer from torch.nn will set these for us
torch.nn.Module – the base class for all neural network modules; if you subclass/inherit from it, you should override forward()
torch.optim – where the optimizers in PyTorch live; they help with gradient descent
def forward() – all nn.Module subclasses require this method to be overridden; it defines what happens in the forward pass/propagation/computation
# Checking contents of PyTorch model
torch.manual_seed(42)
# Create object instance of the class:
model_0 = LinearRegressionModel()
print(list(model_0.parameters()))
print(model_0.state_dict())
with torch.inference_mode():  # inference mode turns off gradient tracking; these are test-set predictions, not used to update parameters
    y_hat = model_0(x_test)
plt.scatter(x, y)
plt.scatter(x_test, y_hat)
[Parameter containing:
tensor([0.3367], requires_grad=True), Parameter containing:
tensor([0.1288], requires_grad=True)]
OrderedDict([('weights', tensor([0.3367])), ('bias', tensor([0.1288]))])
<matplotlib.collections.PathCollection at 0x7f60936cb610>
# Setup loss function
loss_fn = nn.L1Loss()  # mean absolute error (MAE)
print(loss_fn)
# Setup optimizer
optimizer = torch.optim.SGD(params=model_0.parameters(),
lr=0.01)
L1Loss()
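As a sanity check on what nn.L1Loss computes: with its default reduction="mean" it is the mean absolute error, i.e. torch.mean(torch.abs(pred - target)). A tiny sketch:
# nn.L1Loss (default reduction="mean") equals the mean absolute error
pred = torch.tensor([0.0, 1.0, 2.0])
target = torch.tensor([0.5, 1.0, 3.0])
print(loss_fn(pred, target))                 # tensor(0.5000)
print(torch.mean(torch.abs(pred - target)))  # tensor(0.5000)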
N_epochs = 200 #hyperparameter
epoch_count = []
train_loss_values = []
test_loss_values = []
# Training loop
for epoch in range(N_epochs):
    model_0.train()  # put model in training mode (the default state)

    # Forward pass
    y_hat = model_0(x_train)

    # Calculate loss
    loss = loss_fn(y_hat, y_train)
    print(loss)

    # Zero the gradients: they accumulate across iterations by default, so reset them each epoch
    optimizer.zero_grad()

    # Backpropagation
    loss.backward()

    # Step the optimizer (gradient descent)
    optimizer.step()

    # Testing loop
    model_0.eval()
    with torch.inference_mode():
        test_pred = model_0(x_test)
        test_loss = loss_fn(test_pred, y_test)

    if epoch % 10 == 0:
        epoch_count.append(epoch)
        train_loss_values.append(loss)
        test_loss_values.append(test_loss)
        print(f"Epoch: {epoch} | Loss: {loss} | Test Loss: {test_loss}")

    print(model_0.state_dict())
(per-epoch loss and state_dict prints truncated; only every 10th epoch and the final state_dict are shown)
Epoch: 0 | Loss: 0.31288138031959534 | Test Loss: 0.48106518387794495
Epoch: 10 | Loss: 0.1976713240146637 | Test Loss: 0.3463551998138428
Epoch: 20 | Loss: 0.08908725529909134 | Test Loss: 0.21729660034179688
Epoch: 30 | Loss: 0.053148526698350906 | Test Loss: 0.14464017748832703
Epoch: 40 | Loss: 0.04543796554207802 | Test Loss: 0.11360953003168106
Epoch: 50 | Loss: 0.04167863354086876 | Test Loss: 0.09919948130846024
Epoch: 60 | Loss: 0.03818932920694351 | Test Loss: 0.08886633068323135
Epoch: 70 | Loss: 0.03476089984178543 | Test Loss: 0.0805937647819519
Epoch: 80 | Loss: 0.03132382780313492 | Test Loss: 0.07232122868299484
Epoch: 90 | Loss: 0.02788739837706089 | Test Loss: 0.06473556160926819
Epoch: 100 | Loss: 0.024458957836031914 | Test Loss: 0.05646304413676262
Epoch: 110 | Loss: 0.021020207554101944 | Test Loss: 0.04819049686193466
Epoch: 120 | Loss: 0.01758546568453312 | Test Loss: 0.04060482233762741
Epoch: 130 | Loss: 0.014155393466353416 | Test Loss: 0.03233227878808975
Epoch: 140 | Loss: 0.010716589167714119 | Test Loss: 0.024059748277068138
Epoch: 150 | Loss: 0.0072835334576666355 | Test Loss: 0.016474086791276932
Epoch: 160 | Loss: 0.0038517764769494534 | Test Loss: 0.008201557211577892
Epoch: 170 | Loss: 0.008932482451200485 | Test Loss: 0.005023092031478882
Epoch: 180 | Loss: 0.008932482451200485 | Test Loss: 0.005023092031478882
Epoch: 190 | Loss: 0.008932482451200485 | Test Loss: 0.005023092031478882
OrderedDict([('weights', tensor([0.6990])), ('bias', tensor([0.3093]))])
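Note that the recorded training loss stops decreasing after roughly epoch 165 (the values at epochs 170–190 are identical): with an L1 loss the gradient magnitude does not shrink as the fit gets better, so fixed-step SGD ends up hopping back and forth across the minimum rather than settling into it. One remedy is to decay the learning rate. A sketch (not part of the run above) using torch.optim.lr_scheduler.StepLR:
# Hypothetical refinement: halve the learning rate every 50 epochs so the
# fixed-step oscillation near the minimum dies out
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=50, gamma=0.5)
for epoch in range(N_epochs):
    model_0.train()
    loss = loss_fn(model_0(x_train), y_train)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    scheduler.step()  # lr: 0.01 -> 0.005 -> 0.0025 -> ...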
with torch.inference_mode():  # inference mode turns off gradient tracking; these are test-set predictions, not used to update parameters
    y_hat = model_0(x_test)
plt.scatter(x, y)
plt.scatter(x_test, y_hat)
<matplotlib.collections.PathCollection at 0x7f608837beb0>
plt.scatter(epoch_count, torch.tensor(train_loss_values).numpy(), label="Training Cost")
plt.scatter(epoch_count, torch.tensor(test_loss_values).numpy(), label="Testing Cost")
plt.legend()
/tmp/ipykernel_349241/1254886168.py:1: UserWarning: Converting a tensor with requires_grad=True to a scalar may lead to unexpected behavior. Consider using tensor.detach() first. (Triggered internally at /pytorch/torch/csrc/autograd/generated/python_variable_methods.cpp:836.) plt.scatter(epoch_count, torch.tensor(train_loss_values).numpy(), label="Training Cost")
<matplotlib.legend.Legend at 0x7f60883d6d40>
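The UserWarning above appears because the recorded losses still carry autograd history. Recording detached Python floats inside the "if epoch % 10 == 0:" block would avoid both the warning and the torch.tensor(...).numpy() conversion; a sketch of the alternative bookkeeping:
# Inside the periodic-logging block of the training loop:
train_loss_values.append(loss.detach().item())  # plain float, no grad history
test_loss_values.append(test_loss.item())       # computed under inference_mode, so no grad to strip
# The lists of floats can then be passed to plt.scatter directly.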
# Want to be able to save the model parameters!
# There are 3 main methods for saving + loading models in PyTorch:
# torch.save, torch.load, and torch.nn.Module.load_state_dict()
print(model_0.state_dict())
from pathlib import Path
# Create a directory to save models into
model_path = Path("models")
model_path.mkdir(parents=True, exist_ok=True)
# Create model save path
model_name = "pytorch_workflow_model_0.pth"
model_save_path = model_path / model_name
# Save the model state_dict
torch.save(model_0.state_dict(), model_save_path)
OrderedDict([('weights', tensor([0.6990])), ('bias', tensor([0.3093]))])
!ls -l models
total 4
-rw-r--r-- 1 william william 2093 Jan 27 13:54 pytorch_workflow_model_0.pth
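Besides saving only the state_dict (the recommended approach), torch.save can also serialize the whole model object. A sketch with a hypothetical file name; note that loading a fully pickled model on recent PyTorch versions requires weights_only=False:
# Alternative: save/load the whole model object rather than just its state_dict
full_model_path = model_path / "pytorch_workflow_model_0_full.pth"
torch.save(model_0, full_model_path)
reloaded_full_model = torch.load(full_model_path, weights_only=False)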
# Loading a PyTorch model
# Since we only saved the model state_dict, we create a new
# object instance of the LinearRegressionModel class and
# load the saved state_dict into it.
loaded_model_0 = LinearRegressionModel()
# Load the saved state_dict of model_0 (this will update the new instance with the updated parameters)
loaded_model_0.load_state_dict(torch.load(f=model_save_path))
<All keys matched successfully>
next(model_0.parameters()).device
device(type='cpu')
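The model currently lives on the CPU. For device-agnostic code one would typically pick a device up front and move both the model and the data onto it; a minimal sketch:
# Device-agnostic setup: use a GPU if available, otherwise fall back to the CPU
device = "cuda" if torch.cuda.is_available() else "cpu"
model_0.to(device)
x_train, y_train = x_train.to(device), y_train.to(device)
x_test, y_test = x_test.to(device), y_test.to(device)
print(next(model_0.parameters()).device)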
Exercises!
#1
import torch
import matplotlib.pyplot as plt
weight = 0.3
bias = 0.9
N = 100
# something I was reminded of in a nasty way: all features should be normalized! When blindly using torch.linspace(0, 100, N), I got exploding gradients...
x = torch.linspace(0, 1, N).unsqueeze(dim=1) #the need for unsqueeze is subtle...need for computing cost function C below as y_hat_train and y_hat_test will both be of shape (n, 1) for some n
y = weight * x + bias # in practice don't know how y relates to x
x_train = x[:int(0.8 * N)]
y_train = y[:int(0.8 * N)]
x_test = x[int(0.8 * N):]
y_test = y[int(0.8 * N):]
plt.plot(x_train, y_train, label="Training Data") #hmm...didn't need to convert PyTorch tensors to NumPy arrays for plotting
plt.plot(x_test, y_test, label="Testing Data")
plt.legend()
<matplotlib.legend.Legend at 0x7f60a1f5b640>
#2
from torch import nn
torch.manual_seed(42)
class PyTorchModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.w = nn.Parameter(torch.randn(1,
                                          requires_grad=True,
                                          dtype=torch.float))
        self.b = nn.Parameter(torch.randn(1,
                                          requires_grad=True,
                                          dtype=torch.float))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.w * x + self.b
model_instance = PyTorchModel()
print(model_instance.state_dict())
#3
C = nn.L1Loss()
optimizer = torch.optim.SGD(params=model_instance.parameters(), lr=0.01)
N_epochs = 300
C_train_curve = []
C_test_curve = []
for i in range(N_epochs):
    # training loop
    model_instance.train()
    y_hat_train = model_instance(x_train)  # or model_instance.forward(x_train)
    C_train = C(y_hat_train, y_train)
    optimizer.zero_grad()
    C_train.backward()
    optimizer.step()

    # testing/evaluation loop
    if i % 20 == 0:
        C_train_curve.append(C_train)
        model_instance.eval()
        with torch.inference_mode():  # stop gradient tape recording during testing
            y_hat_test = model_instance(x_test)  # or model_instance.forward(x_test)
            C_test = C(y_hat_test, y_test)
        C_test_curve.append(C_test)
        print(f"Epoch: {i}, Training Cost: {C_train}, Test Cost: {C_test}")
print(f"Final Trained Model Parameters: {model_instance.state_dict()}")
OrderedDict([('w', tensor([0.3367])), ('b', tensor([0.1288]))])
Epoch: 0, Training Cost: 0.7565514445304871, Test Cost: 0.7244139909744263
Epoch: 20, Training Cost: 0.524712860584259, Test Cost: 0.45227327942848206
Epoch: 40, Training Cost: 0.29287439584732056, Test Cost: 0.18013274669647217
Epoch: 60, Training Cost: 0.07645779848098755, Test Cost: 0.07569172978401184
Epoch: 80, Training Cost: 0.0533239021897316, Test Cost: 0.11738457530736923
Epoch: 100, Training Cost: 0.046195853501558304, Test Cost: 0.10600712150335312
Epoch: 120, Training Cost: 0.03922543674707413, Test Cost: 0.09009645879268646
Epoch: 140, Training Cost: 0.03225494548678398, Test Cost: 0.07418543100357056
Epoch: 160, Training Cost: 0.02528444491326809, Test Cost: 0.05827441066503525
Epoch: 180, Training Cost: 0.018313953652977943, Test Cost: 0.04236338287591934
Epoch: 200, Training Cost: 0.01134470570832491, Test Cost: 0.025760680437088013
Epoch: 220, Training Cost: 0.004374831914901733, Test Cost: 0.009503781795501709
Epoch: 240, Training Cost: 0.004876463208347559, Test Cost: 0.0061147273518145084
Epoch: 260, Training Cost: 0.004876463208347559, Test Cost: 0.0061147273518145084
Epoch: 280, Training Cost: 0.004876463208347559, Test Cost: 0.0061147273518145084
Final Trained Model Parameters: OrderedDict([('w', tensor([0.3052])), ('b', tensor([0.9028]))])
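The learned parameters land close to the true generating values (w ≈ 0.3052 vs weight = 0.3, b ≈ 0.9028 vs bias = 0.9); a quick check:
# Compare learned parameters with the ground-truth values used to generate the data
print(model_instance.w.item(), weight)  # ~0.3052 vs 0.3
print(model_instance.b.item(), bias)    # ~0.9028 vs 0.9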
# 4
import numpy as np
every_20_epochs = np.arange(0, N_epochs, 20)
plt.scatter(every_20_epochs, torch.tensor(C_train_curve).numpy(), label="Training Cost Curve")
plt.scatter(every_20_epochs, torch.tensor(C_test_curve).numpy(), label="Testing Cost Curve")
plt.legend()
#5
from pathlib import Path
model_path = Path("models")
model_path.mkdir(parents=True, exist_ok=True)
model_name = "pytorch_lin_regress_model.pth"
model_save_path = model_path / model_name
torch.save(model_instance.state_dict(), model_save_path)
#6
another_model_instance = PyTorchModel()
another_model_instance.load_state_dict(torch.load(model_save_path))
another_y_hat = another_model_instance(x)
plt.plot(x, y, label="Original Data (Both Training + Testing)")
plt.plot(x, torch.tensor(another_y_hat).numpy(), label="Loaded model prediction")
plt.legend()
/tmp/ipykernel_349241/188330038.py:7: UserWarning: To copy construct from a tensor, it is recommended to use sourceTensor.detach().clone() or sourceTensor.detach().clone().requires_grad_(True), rather than torch.tensor(sourceTensor). plt.plot(x, torch.tensor(another_y_hat).numpy(), label="Loaded model prediction")
<matplotlib.legend.Legend at 0x7f609fe06e90>
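As a final check (not in the original run), the reloaded model should reproduce the trained parameters exactly, and wrapping the prediction in torch.inference_mode() avoids the copy-construct warning above:
# Confirm the reloaded parameters match the trained ones
print(another_model_instance.state_dict())  # should equal model_instance.state_dict()
# Predicting under inference_mode returns a plain tensor that can be passed to .numpy() directly
with torch.inference_mode():
    another_y_hat = another_model_instance(x)
plt.plot(x, another_y_hat.numpy())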