How to use PyTorch to solve classification tasks

This article explains in detail how to use PyTorch to solve a classification task, using the MNIST handwritten-digit dataset as a worked example. It is meant as a practical reference; I hope you get something out of it after reading.

Import packages

import torchvision
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import matplotlib.pyplot as plt

Set the hyperparameters

# set hyperparameters
n_epochs = 3
batch_size_train = 64
batch_size_test = 1000
learning_rate = 0.01
momentum = 0.5
log_interval = 100
random_seed = 1
torch.manual_seed(random_seed)

Read the data

# data loading
train_loader = torch.utils.data.DataLoader(
    torchvision.datasets.MNIST('./data/', train=True, download=True,
                               transform=torchvision.transforms.Compose([
                                   torchvision.transforms.ToTensor(),
                                   torchvision.transforms.Normalize((0.1307,), (0.3081,))])),
    batch_size=batch_size_train, shuffle=True)
test_loader = torch.utils.data.DataLoader(
    torchvision.datasets.MNIST('./data/', train=False, download=True,
                               transform=torchvision.transforms.Compose([
                                   torchvision.transforms.ToTensor(),
                                   torchvision.transforms.Normalize((0.1307,), (0.3081,))])),
    batch_size=batch_size_test, shuffle=True)
examples = enumerate(test_loader)
batch_idx, (example_data, example_targets) = next(examples)

# debug output
print(example_targets)
print(example_data.shape)
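The values 0.1307 and 0.3081 passed to Normalize are the global mean and standard deviation of the MNIST training images. If you want to verify them yourself, the following minimal sketch (not part of the original article; the variable names here are only illustrative) recomputes them from the raw training set:

# Sketch: recompute the mean/std used by Normalize from the un-normalized training images
raw_train = torchvision.datasets.MNIST('./data/', train=True, download=True,
                                       transform=torchvision.transforms.ToTensor())
raw_loader = torch.utils.data.DataLoader(raw_train, batch_size=len(raw_train))
raw_images, _ = next(iter(raw_loader))                     # shape: [60000, 1, 28, 28]
print(raw_images.mean().item(), raw_images.std().item())  # approximately 0.1307 and 0.3081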

Output result:

tensor([7, 6, 7, 5, 6, 7, 8, 1, 1, 2, 4, 1, 0, 8, 4, 4, 4, 9, 8, 1, 3, 3, 8, 6,
        2, 7, 5, 1, 6, 5, 6, 2, 9, 2, 8, 4, 9, 4, 8, 6, 7, 7, 9, 8, 4, 9, 5, 3,
        1, 0, 9, 1, 7, 3, 7, 0, 9, 2, 5, 1, 8, 9, 3, 7, 8, 4, 1, 9, 0, 3, 1, 2,
        3, 6, 2, 9, 9, 0, 3, 8, 3, 0, 8, 8, 5, 3, 8, 2, 8, 5, 5, 7, 1, 5, 5, 1,
        0, 9, 7, 5, 2, 0, 7, 6, 1, 2, 2, 7, 5, 4, 7, 3, 0, 6, 7, 5, 1, 7, 6, 7,
        2, 1, 9, 1, 9, 2, 7, 6, 8, 8, 8, 4, 6, 0, 0, 2, 3, 0, 1, 7, 8, 7, 4, 1,
        3, 8, 3, 5, 5, 9, 6, 0, 5, 3, 3, 9, 4, 0, 1, 9, 9, 1, 5, 6, 2, 0, 4, 7,
        3, 5, 8, 8, 2, 5, 9, 5, 0, 7, 8, 9, 3, 8, 5, 3, 2, 4, 4, 6, 3, 0, 8, 2,
        7, 0, 5, 2, 0, 6, 2, 6, 3, 6, 6, 7, 9, 3, 4, 1, 6, 2, 8, 4, 7, 7, 2, 7,
        4, 2, 4, 9, 7, 7, 5, 9, 1, 3, 0, 4, 4, 8, 9, 6, 6, 5, 3, 3, 2, 3, 9, 1,
        1, 4, 4, 8, 1, 5, 1, 8, 8, 0, 7, 5, 8, 4, 0, 0, 0, 6, 3, 0, 9, 0, 6, 6,
        9, 8, 1, 2, 3, 7, 6, 1, 5, 9, 3, 9, 3, 2, 5, 9, 9, 5, 4, 9, 3, 9, 6, 0,
        3, 3, 8, 3, 1, 4, 1, 4, 7, 3, 1, 6, 8, 4, 7, 7, 3, 3, 6, 1, 3, 2, 3, 5,
        9, 9, 9, 2, 9, 0, 2, 7, 0, 7, 5, 0, 2, 6, 7, 3, 7, 1, 4, 6, 4, 0, 0, 3,
        2, 1, 9, 3, 5, 5, 1, 6, 4, 7, 4, 6, 4, 4, 9, 7, 4, 1, 5, 4, 8, 7, 5, 9,
        2, 9, 4, 0, 8, 7, 3, 4, 2, 7, 9, 4, 4, 0, 1, 4, 1, 2, 5, 2, 8, 5, 3, 9,
        1, 3, 5, 1, 9, 5, 3, 6, 8, 1, 7, 9, 9, 9, 9, 9, 2, 3, 5, 1, 4, 2, 3, 1,
        1, 3, 8, 2, 8, 1, 9, 2, 9, 0, 7, 3, 5, 8, 3, 7, 8, 5, 6, 4, 1, 9, 7, 1,
        7, 1, 1, 8, 6, 7, 5, 6, 7, 4, 9, 5, 8, 6, 5, 6, 8, 4, 1, 0, 9, 1, 4, 3,
        5, 1, 8, 7, 5, 4, 6, 6, 0, 2, 4, 2, 9, 5, 9, 8, 1, 4, 8, 1, 1, 6, 7, 5,
        9, 1, 1, 7, 8, 7, 5, 5, 2, 6, 5, 8, 1, 0, 7, 2, 2, 4, 3, 9, 7, 3, 5, 7,
        6, 9, 5, 9, 6, 5, 7, 2, 3, 7, 2, 9, 7, 4, 8, 4, 9, 3, 8, 7, 5, 0, 0, 3,
        4, 3, 3, 6, 0, 1, 7, 7, 4, 6, 3, 0, 8, 0, 9, 8, 2, 4, 2, 9, 4, 9, 9, 9,
        7, 7, 6, 8, 2, 4, 9, 3, 0, 4, 4, 1, 5, 7, 7, 6, 9, 7, 0, 2, 4, 2, 1, 4,
        7, 4, 5, 1, 4, 7, 3, 1, 7, 6, 9, 0, 0, 7, 3, 6, 3, 3, 6, 5, 8, 1, 7, 1,
        6, 1, 2, 3, 1, 6, 8, 8, 7, 4, 3, 7, 7, 1, 8, 9, 2, 6, 6, 6, 2, 8, 8, 1,
        6, 0, 3, 0, 5, 1, 3, 2, 4, 1, 5, 5, 7, 3, 5, 6, 2, 1, 8, 0, 2, 0, 8, 4,
        4, 5, 0, 0, 1, 5, 0, 7, 4, 0, 9, 2, 5, 7, 4, 0, 3, 7, 0, 3, 5, 1, 0, 6,
        4, 7, 6, 4, 7, 0, 0, 5, 8, 2, 0, 6, 2, 4, 2, 3, 2, 7, 7, 6, 9, 8, 5, 9,
        7, 1, 3, 4, 3, 1, 8, 0, 3, 0, 7, 4, 9, 0, 8, 1, 5, 7, 3, 2, 2, 0, 7, 3,
        1, 8, 8, 2, 2, 6, 2, 7, 6, 6, 9, 4, 9, 3, 7, 0, 4, 6, 1, 9, 7, 4, 4, 5,
        8, 2, 3, 2, 4, 9, 1, 9, 6, 7, 1, 2, 1, 1, 2, 6, 9, 7, 1, 0, 1, 4, 2, 7,
        7, 8, 3, 2, 8, 2, 7, 6, 1, 1, 9, 1, 0, 9, 1, 3, 9, 3, 7, 6, 5, 6, 2, 0,
        0, 3, 9, 4, 7, 3, 2, 9, 0, 9, 5, 2, 2, 4, 1, 6, 3, 4, 0, 1, 6, 9, 1, 7,
        0, 8, 0, 0, 9, 8, 5, 9, 4, 4, 7, 1, 9, 0, 0, 2, 4, 3, 5, 0, 4, 0, 1, 0,
        5, 8, 1, 8, 3, 3, 2, 1, 2, 6, 8, 2, 5, 3, 7, 9, 3, 6, 2, 2, 6, 2, 7, 7,
        6, 1, 8, 0, 3, 5, 7, 5, 0, 8, 6, 7, 2, 4, 1, 4, 3, 7, 7, 2, 9, 3, 5, 5,
        9, 4, 8, 7, 6, 7, 4, 9, 2, 7, 7, 1, 0, 7, 2, 8, 0, 3, 5, 4, 5, 1, 5, 7,
        6, 7, 3, 5, 3, 4, 5, 3, 4, 3, 2, 3, 1, 7, 4, 4, 8, 5, 5, 3, 2, 2, 9, 5,
        8, 2, 0, 6, 0, 7, 9, 9, 6, 1, 6, 6, 2, 3, 7, 4, 7, 5, 2, 9, 4, 2, 9, 0,
        8, 1, 7, 5, 5, 7, 0, 5, 2, 9, 5, 2, 3, 4, 6, 0, 0, 2, 9, 2, 0, 5, 4, 8,
        9, 0, 9, 1, 3, 4, 1, 8, 0, 0, 4, 0, 8, 5, 9, 8])

torch.Size([1000, 1, 28, 28])

Visual display

# plot the first six test images
fig = plt.figure()
for i in range(6):
    plt.subplot(2, 3, i + 1)
    plt.tight_layout()
    plt.imshow(example_data[i][0], cmap='gray', interpolation='none')
    plt.title("Ground Truth: {}".format(example_targets[i]))
    plt.xticks([])
    plt.yticks([])
plt.show()

Output: a 2 x 3 grid of the first six test images, each titled "Ground Truth: <label>".

Build the model

# create the model
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)

network = Net()
optimizer = optim.SGD(network.parameters(), lr=learning_rate, momentum=momentum)

Train the model

# training
train_losses = []
train_counter = []
test_losses = []
test_counter = [i * len(train_loader.dataset) for i in range(n_epochs + 1)]

def train(epoch):
    network.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()
        output = network(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
            train_losses.append(loss.item())
            train_counter.append((batch_idx * 64) + ((epoch - 1) * len(train_loader.dataset)))
            torch.save(network.state_dict(), './model.pth')
            torch.save(optimizer.state_dict(), './optimizer.pth')

def test():
    network.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            output = network(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item()
            pred = output.data.max(1, keepdim=True)[1]
            correct += pred.eq(target.data.view_as(pred)).sum()
    test_loss /= len(test_loader.dataset)
    test_losses.append(test_loss)
    print('\nTest set: Avg. loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))

for epoch in range(1, n_epochs + 1):
    train(epoch)
    test()

Output result:

Train Epoch: 1 [0/60000 (0%)] Loss: 2.297471
Train Epoch: 1 [6400/60000 (11%)] Loss: 1.934886
Train Epoch: 1 [12800/60000 (21%)] Loss: 1.242982
Train Epoch: 1 [19200/60000 (32%)] Loss: 0.979296
Train Epoch: 1 [25600/60000 (43%)] Loss: 1.277279
Train Epoch: 1 [32000/60000 (53%)] Loss: 0.721533
Train Epoch: 1 [38400/60000 (64%)] Loss: 0.759595
Train Epoch: 1 [44800/60000 (75%)] Loss: 0.469635
Train Epoch: 1 [51200/60000 (85%)] Loss: 0.422614
Train Epoch: 1 [57600/60000 (96%)] Loss: 0.417603

Test set: Avg. loss: 0.1988, Accuracy: 9431/10000 (94%)

Train Epoch: 2 [0/60000 (0%)] Loss: 0.277207
Train Epoch: 2 [6400/60000 (11%)] Loss: 0.328862
Train Epoch: 2 [12800/60000 (21%)] Loss: 0.396312
Train Epoch: 2 [19200/60000 (32%)] Loss: 0.301772
Train Epoch: 2 [25600/60000 (43%)] Loss: 0.253600
Train Epoch: 2 [32000/60000 (53%)] Loss: 0.217821
Train Epoch: 2 [38400/60000 (64%)] Loss: 0.395815
Train Epoch: 2 [44800/60000 (75%)] Loss: 0.265737
Train Epoch: 2 [51200/60000 (85%)] Loss: 0.323627
Train Epoch: 2 [57600/60000 (96%)] Loss: 0.236692

Test set: Avg. loss: 0.1233, Accuracy: 9622/10000 (96%)

Train Epoch: 3 [0/60000 (0%)] Loss: 0.500148
Train Epoch: 3 [6400/60000 (11%)] Loss: 0.338118
Train Epoch: 3 [12800/60000 (21%)] Loss: 0.452308
Train Epoch: 3 [19200/60000 (32%)] Loss: 0.374940
Train Epoch: 3 [25600/60000 (43%)] Loss: 0.323300
Train Epoch: 3 [32000/60000 (53%)] Loss: 0.203830
Train Epoch: 3 [38400/60000 (64%)] Loss: 0.379557
Train Epoch: 3 [44800/60000 (75%)] Loss: 0.334822
Train Epoch: 3 [51200/60000 (85%)] Loss: 0.361676
Train Epoch: 3 [57600/60000 (96%)] Loss: 0.218833

Test set: Avg. loss: 0.0911, Accuracy: 9723/10000 (97%)
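The lists train_counter, train_losses, test_counter and test_losses are filled in during training but never plotted in the code above. As a minimal optional sketch (not part of the original article), the loss curves could be visualized like this; note that test() runs only once per epoch here, while test_counter has n_epochs + 1 entries, so its first entry is skipped:

# Sketch: plot the recorded training and test losses
fig = plt.figure()
plt.plot(train_counter, train_losses, color='blue')
plt.scatter(test_counter[1:], test_losses, color='red')  # drop the first counter entry (no initial test run)
plt.legend(['Train loss', 'Test loss'], loc='upper right')
plt.xlabel('number of training examples seen')
plt.ylabel('negative log-likelihood loss')
plt.show()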

The complete code

import torchvision
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import matplotlib.pyplot as plt

# set hyperparameters
n_epochs = 3
batch_size_train = 64
batch_size_test = 1000
learning_rate = 0.01
momentum = 0.5
log_interval = 100
random_seed = 1
torch.manual_seed(random_seed)

# data loading
train_loader = torch.utils.data.DataLoader(
    torchvision.datasets.MNIST('./data/', train=True, download=True,
                               transform=torchvision.transforms.Compose([
                                   torchvision.transforms.ToTensor(),
                                   torchvision.transforms.Normalize((0.1307,), (0.3081,))])),
    batch_size=batch_size_train, shuffle=True)
test_loader = torch.utils.data.DataLoader(
    torchvision.datasets.MNIST('./data/', train=False, download=True,
                               transform=torchvision.transforms.Compose([
                                   torchvision.transforms.ToTensor(),
                                   torchvision.transforms.Normalize((0.1307,), (0.3081,))])),
    batch_size=batch_size_test, shuffle=True)
examples = enumerate(test_loader)
batch_idx, (example_data, example_targets) = next(examples)

# debug output
print(example_targets)
print(example_data.shape)

# plot the first six test images
fig = plt.figure()
for i in range(6):
    plt.subplot(2, 3, i + 1)
    plt.tight_layout()
    plt.imshow(example_data[i][0], cmap='gray', interpolation='none')
    plt.title("Ground Truth: {}".format(example_targets[i]))
    plt.xticks([])
    plt.yticks([])
plt.show()

# create the model
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)

network = Net()
optimizer = optim.SGD(network.parameters(), lr=learning_rate, momentum=momentum)

# training
train_losses = []
train_counter = []
test_losses = []
test_counter = [i * len(train_loader.dataset) for i in range(n_epochs + 1)]

def train(epoch):
    network.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()
        output = network(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
            train_losses.append(loss.item())
            train_counter.append((batch_idx * 64) + ((epoch - 1) * len(train_loader.dataset)))
            torch.save(network.state_dict(), './model.pth')
            torch.save(optimizer.state_dict(), './optimizer.pth')

def test():
    network.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            output = network(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item()
            pred = output.data.max(1, keepdim=True)[1]
            correct += pred.eq(target.data.view_as(pred)).sum()
    test_loss /= len(test_loader.dataset)
    test_losses.append(test_loss)
    print('\nTest set: Avg. loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))

for epoch in range(1, n_epochs + 1):
    train(epoch)
    test()

That is all for "How to use PyTorch to solve classification tasks". I hope the above content is helpful to you and that you learn something from it. If you think the article is good, please share it so more people can see it.
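One detail the article leaves to the reader: the training loop writes ./model.pth and ./optimizer.pth but never loads them again. As a brief, purely illustrative sketch (assuming the complete code above has already been run in the same session; the continued_* names are not from the original), the saved checkpoints could be restored like this:

# Sketch: reload the saved model and optimizer state, then re-predict the first six test digits
continued_network = Net()
continued_optimizer = optim.SGD(continued_network.parameters(), lr=learning_rate, momentum=momentum)
continued_network.load_state_dict(torch.load('./model.pth'))
continued_optimizer.load_state_dict(torch.load('./optimizer.pth'))

continued_network.eval()
with torch.no_grad():
    restored_output = continued_network(example_data)
print(restored_output.data.max(1, keepdim=True)[1][:6].flatten())  # predicted labels for the six plotted images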
