implemented wandb in the classifier

Clemens Dautermann 2020-01-26 15:42:01 +01:00
parent 8aec5e4d07
commit 1fe643d464
11 changed files with 630 additions and 7 deletions


@@ -3,7 +3,10 @@ import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torchvision import transforms, datasets
from tqdm import tqdm
import wandb
wandb.init(project='pytorch_ai')
train = datasets.MNIST('./datasets', train=True, download=True,
                       transform=transforms.Compose([
                           transforms.ToTensor()
@@ -37,31 +40,32 @@ class Net(nn.Module):
net = Net()
wandb.watch(net)
loss_function = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=0.001)
for epoch in range(10): # 3 full passes over the data
    for data in trainset: # `data` is a batch of data
for epoch in range(10): # 10 full passes over the data
    for data in tqdm(trainset): # `data` is a batch of data
        X, y = data # X is the batch of features, y is the batch of targets.
        net.zero_grad() # sets gradients to 0 before loss calc. You will likely do this every step.
        output = net(X.view(-1, 784)) # pass in the reshaped batch (recall they are 28x28 atm)
        loss = F.nll_loss(output, y) # calc and grab the loss value
        loss = loss_function(output, y) # calc and grab the loss value
        loss.backward() # apply this loss backwards thru the network's parameters
        optimizer.step() # attempt to optimize weights to account for loss/gradients
        wandb.log({'loss': loss})
    print(loss) # print loss. We hope loss (a measure of wrong-ness) declines!
    torch.save(net, './nets/net_' + str(epoch) + ".pt")
    # torch.save(net, './nets/net_' + str(epoch) + ".pt")
correct = 0
total = 0
with torch.no_grad():
    for data in testset:
        X, y = data
        output = net(X.view(-1, 784))
        # print(output)
        for idx, i in enumerate(output):
            # print(torch.argmax(i), y[idx])
            if torch.argmax(i) == y[idx]:
                correct += 1
            total += 1
wandb.log({'test_accuracy': correct / total})
print("Accuracy: ", round(correct / total, 3))
wandb.log({'epoch': epoch})
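
The first hunk is cut off inside the datasets.MNIST(...) call, and the loops above iterate over trainset and testset, which are defined outside this diff. A minimal sketch of the setup those loops assume is below; the test dataset variable, batch size, and shuffle flags are assumptions, not part of this commit.

# Sketch of the dataset/DataLoader setup assumed by the loops above.
# Only `train`, `trainset`, and `testset` appear in the diff; `test`,
# batch_size, and shuffle are assumptions.
import torch
from torchvision import transforms, datasets

train = datasets.MNIST('./datasets', train=True, download=True,
                       transform=transforms.Compose([transforms.ToTensor()]))
test = datasets.MNIST('./datasets', train=False, download=True,
                      transform=transforms.Compose([transforms.ToTensor()]))

trainset = torch.utils.data.DataLoader(train, batch_size=10, shuffle=True)
testset = torch.utils.data.DataLoader(test, batch_size=10, shuffle=False)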
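
The second hunk references a Net class whose body is not part of this diff; net(X.view(-1, 784)) implies a fully connected network over flattened 28x28 MNIST images with 10 output classes. A sketch of a compatible definition follows; the hidden layer sizes are assumptions. Note that nn.CrossEntropyLoss, which this commit switches to, applies log-softmax internally, so the forward pass should return raw logits; if the original forward ended in F.log_softmax (as the earlier F.nll_loss usage suggests), log-softmax would now be applied twice.

# Sketch of a Net definition compatible with net(X.view(-1, 784)).
# The hidden sizes (64) are assumptions; only the 784-dimensional input
# and the 10-class output are implied by the diff.
import torch.nn as nn
import torch.nn.functional as F

class Net(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(784, 64)
        self.fc2 = nn.Linear(64, 64)
        self.fc3 = nn.Linear(64, 64)
        self.fc4 = nn.Linear(64, 10)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = F.relu(self.fc3(x))
        return self.fc4(x)  # raw logits; nn.CrossEntropyLoss applies log-softmax itself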