# pyod/test/test_torch_utility.py
  1  # -*- coding: utf-8 -*-
  2  
  3  
  4  import os
  5  import sys
  6  import unittest
  7  
  8  import numpy as np
  9  
 10  # temporary solution for relative imports in case pyod is not installed
 11  # if pyod is installed, no need to use the following line
 12  sys.path.append(
 13      os.path.abspath(os.path.join(os.path.dirname("__file__"), '..')))
 14  sys.path.append(os.path.abspath(os.path.dirname("__file__")))
 15  
 16  from pyod.utils.torch_utility import *
 17  
 18  
class TestBaseDL(unittest.TestCase):
    """Unit tests for the helpers in ``pyod.utils.torch_utility``:
    ``TorchDataset``, ``LinearBlock``, ``get_activation_by_name``,
    ``get_optimizer_by_name``, ``get_criterion_by_name`` and
    ``init_weights``.
    """

    def setUp(self):
        """Build a tiny constant (all-ones) dataset shared by the tests."""
        # create a dummy dataset
        self.X_train = torch.ones(2, 2)
        self.y_train = torch.ones(2)
        self.X_test = torch.ones(1, 2)
        self.y_test = torch.ones(1)
        # per-feature statistics of X_train, used below to exercise the
        # dataset's standardization path
        self.mean = torch.mean(self.X_train, dim=0)
        self.std = torch.std(self.X_train, dim=0)

    def test_torch_dataset(self):
        """TorchDataset yields (data, target) pairs when y is supplied, and
        standardized samples alone when only mean/std are supplied."""
        # labelled dataset: a single batch containing both (ones, ones) rows
        train_dataset = TorchDataset(X=self.X_train, y=self.y_train)
        train_loader = torch.utils.data.DataLoader(train_dataset,
                                                   batch_size=2,
                                                   shuffle=True)
        self.assertEqual(len(train_dataset), 2)
        self.assertEqual(len(train_loader), 1)
        for data, target in train_loader:
            self.assertTrue(torch.equal(data, torch.ones(2, 2)))
            self.assertTrue(torch.equal(target, torch.ones(2)))

        # unlabelled dataset with standardization: the constant input is
        # expected to be mapped to all zeros
        train_dataset = TorchDataset(X=self.X_train, mean=self.mean,
                                     std=self.std)
        train_loader = torch.utils.data.DataLoader(train_dataset,
                                                   batch_size=2,
                                                   shuffle=True)
        self.assertEqual(len(train_dataset), 2)
        self.assertEqual(len(train_loader), 1)
        for data in train_loader:
            self.assertTrue(torch.equal(data, torch.zeros(2, 2)))

    def test_linear_block(self):
        """A LinearBlock with batch norm and dropout maps the standardized
        (all-zero) batch to an all-zero output."""
        train_dataset = TorchDataset(X=self.X_train, mean=self.mean,
                                     std=self.std)
        train_loader = torch.utils.data.DataLoader(train_dataset,
                                                   batch_size=2,
                                                   shuffle=True)
        dummy_block = LinearBlock(in_features=2, out_features=1,
                                  batch_norm=True, dropout_rate=0.2)

        # identical input rows give identical linear outputs, which batch
        # normalization centers to zero; dropout cannot change zeros
        for data in train_loader:
            output = dummy_block(data)
            self.assertTrue(torch.equal(output, torch.zeros(2, 1)))

    def test_get_activation_by_name(self):
        """get_activation_by_name returns a configured ``nn`` activation
        module for each supported name and raises ValueError otherwise.

        The numeric checks use inputs chosen so the expected outputs
        compare exactly equal under ``torch.equal``.
        """
        # test relu activation
        dummy_relu = get_activation_by_name('relu')
        self.assertIsInstance(dummy_relu, nn.ReLU)
        self.assertEqual(dummy_relu.inplace, False)
        self.assertTrue(
            torch.equal(
                dummy_relu(torch.tensor([-1.0, 0.0, 1.0])),
                torch.tensor([0.0, 0.0, 1.0])
            )
        )

        # test elu activation: elu(ln 0.5) = e**(ln 0.5) - 1 = -0.5
        dummy_elu = get_activation_by_name('elu', elu_alpha=1.0)
        self.assertIsInstance(dummy_elu, nn.ELU)
        self.assertEqual(dummy_elu.inplace, False)
        self.assertEqual(dummy_elu.alpha, 1.0)
        self.assertTrue(
            torch.equal(
                dummy_elu(
                    torch.tensor([torch.log(torch.tensor(0.5)), 0.0, 1.0])),
                torch.tensor([-0.5, 0.0, 1.0])
            )
        )

        # test identity activation
        dummy_identity = get_activation_by_name('identity')
        self.assertIsInstance(dummy_identity, nn.Identity)
        self.assertTrue(
            torch.equal(
                dummy_identity(torch.tensor([-1.0, 0.0, 1.0])),
                torch.tensor([-1.0, 0.0, 1.0])
            )
        )

        # test leaky relu activation
        dummy_leaky_relu = get_activation_by_name('leaky_relu',
                                                  leaky_relu_negative_slope=0.1)
        self.assertIsInstance(dummy_leaky_relu, nn.LeakyReLU)
        self.assertEqual(dummy_leaky_relu.inplace, False)
        self.assertEqual(dummy_leaky_relu.negative_slope, 0.1)
        self.assertTrue(
            torch.equal(
                dummy_leaky_relu(torch.tensor([-1.0, 0.0, 1.0])),
                torch.tensor([-0.1, 0.0, 1.0])
            )
        )

        # test sigmoid activation: sigmoid(ln x) = x / (1 + x)
        dummy_sigmoid = get_activation_by_name('sigmoid')
        self.assertIsInstance(dummy_sigmoid, nn.Sigmoid)
        self.assertTrue(
            torch.equal(
                dummy_sigmoid(torch.tensor([torch.log(torch.tensor(0.25)), 0.0,
                                            torch.log(torch.tensor(4.0))])),
                torch.tensor([0.2, 0.5, 0.8])
            )
        )

        # test softmax activation (normalizes along dim=1)
        dummy_softmax = get_activation_by_name('softmax', softmax_dim=1)
        self.assertIsInstance(dummy_softmax, nn.Softmax)
        self.assertEqual(dummy_softmax.dim, 1)
        self.assertTrue(
            torch.equal(
                dummy_softmax(torch.tensor(
                    [[0.0, 0.0, torch.log(torch.tensor(2.0))],
                     [0.0, torch.log(torch.tensor(2.0)), 0.0]])),
                torch.tensor([[0.25, 0.25, 0.5], [0.25, 0.5, 0.25]])
            )
        )

        # test softplus activation: softplus(ln(e**k - 1)) = k
        dummy_softplus = get_activation_by_name('softplus',
                                                softplus_beta=1.0,
                                                softplus_threshold=20.0)
        self.assertIsInstance(dummy_softplus, nn.Softplus)
        self.assertEqual(dummy_softplus.beta, 1.0)
        self.assertEqual(dummy_softplus.threshold, 20.0)
        self.assertTrue(
            torch.equal(
                dummy_softplus(torch.tensor([torch.log(torch.tensor(np.e - 1)),
                                             torch.log(
                                                 torch.tensor(np.e ** 2 - 1)),
                                             torch.log(torch.tensor(
                                                 np.e ** 3 - 1))])),
                torch.tensor([1.0, 2.0, 3.0])
            )
        )

        # test tanh activation: tanh(ln 2) = (2 - 0.5) / (2 + 0.5) = 0.6
        dummy_tanh = get_activation_by_name('tanh')
        self.assertIsInstance(dummy_tanh, nn.Tanh)
        self.assertTrue(
            torch.equal(
                dummy_tanh(torch.tensor([torch.log(torch.tensor(0.5)), 0.0,
                                         torch.log(torch.tensor(2.0))])),
                torch.tensor([-0.6, 0.0, 0.6])
            )
        )

        # test invalid activation
        self.assertRaises(ValueError, get_activation_by_name, name='random')

    def test_get_optimizer_by_name(self):
        """get_optimizer_by_name builds an optimizer over the model's
        parameters with the requested hyperparameters, raising ValueError
        for unknown names."""
        # define a dummy model
        dummy_model = nn.Linear(2, 1)

        # test adam optimizer
        dummy_optimizer = get_optimizer_by_name(model=dummy_model,
                                                name='adam',
                                                lr=0.1,
                                                weight_decay=0.01,
                                                adam_eps=1e-8)
        self.assertIsInstance(dummy_optimizer, torch.optim.Adam)
        self.assertEqual(dummy_optimizer.param_groups[0]['lr'], 0.1)
        self.assertEqual(dummy_optimizer.param_groups[0]['weight_decay'], 0.01)
        self.assertEqual(dummy_optimizer.param_groups[0]['eps'], 1e-8)

        # test sgd optimizer
        dummy_optimizer = get_optimizer_by_name(model=dummy_model,
                                                name='sgd',
                                                lr=0.1,
                                                weight_decay=0.01,
                                                sgd_momentum=0.9,
                                                sgd_nesterov=False)
        self.assertIsInstance(dummy_optimizer, torch.optim.SGD)
        self.assertEqual(dummy_optimizer.param_groups[0]['lr'], 0.1)
        self.assertEqual(dummy_optimizer.param_groups[0]['momentum'], 0.9)
        self.assertEqual(dummy_optimizer.param_groups[0]['weight_decay'], 0.01)

        # test invalid optimizer
        self.assertRaises(ValueError, get_optimizer_by_name, model=dummy_model,
                          name='random')

    def test_get_criterion_by_name(self):
        """get_criterion_by_name returns the named loss module honoring the
        ``reduction`` mode, raising ValueError for unknown names."""
        # test mse loss with reduction mean: mean of [9, 9] is 9
        dummy_criterion = get_criterion_by_name(name='mse', reduction='mean')
        self.assertIsInstance(dummy_criterion, nn.MSELoss)
        self.assertTrue(
            torch.equal(
                dummy_criterion(torch.tensor([3.0, 3.0]),
                                torch.tensor([0.0, 0.0])),
                torch.tensor(9.0)
            )
        )

        # test mse loss with reduction sum: sum of [9, 9] is 18
        dummy_criterion = get_criterion_by_name(name='mse', reduction='sum')
        self.assertIsInstance(dummy_criterion, nn.MSELoss)
        self.assertTrue(
            torch.equal(
                dummy_criterion(torch.tensor([3.0, 3.0]),
                                torch.tensor([0.0, 0.0])),
                torch.tensor(18.0)
            )
        )

        # test mse loss with reduction none: elementwise squared errors
        dummy_criterion = get_criterion_by_name(name='mse', reduction='none')
        self.assertIsInstance(dummy_criterion, nn.MSELoss)
        self.assertTrue(
            torch.equal(
                dummy_criterion(torch.tensor([3.0, 3.0]),
                                torch.tensor([0.0, 0.0])),
                torch.tensor([9.0, 9.0])
            )
        )

        # test mae(l1) loss with reduction none
        dummy_criterion = get_criterion_by_name(name='mae', reduction='none')
        self.assertIsInstance(dummy_criterion, nn.L1Loss)
        self.assertTrue(
            torch.equal(
                dummy_criterion(torch.tensor([3.0, 3.0]),
                                torch.tensor([0.0, 0.0])),
                torch.tensor([3.0, 3.0])
            )
        )

        # test bce loss (for binary classification) with reduction none:
        # -ln(1/e) = 1 and -ln(1 - (1 - 1/e)) = 1
        dummy_criterion = get_criterion_by_name(name='bce', reduction='none')
        self.assertIsInstance(dummy_criterion, nn.BCELoss)
        self.assertTrue(
            torch.equal(
                dummy_criterion(torch.tensor([1 / np.e, 1 - 1 / np.e]),
                                torch.tensor([1.0, 0.0])),
                torch.tensor([1.0, 1.0])
            )
        )

        # test invalid criterion
        self.assertRaises(ValueError, get_criterion_by_name, name='random')

    def test_init_weights(self):
        """init_weights applies the named initializer to the layer's
        weights; deterministic initializers are checked by value,
        stochastic ones only for running without error."""
        # define a dummy layer
        dummy_layer = nn.Linear(2, 1)

        # For the following initializers,
        # we only test if the function can be called without error
        # since the actual initialization is random and cannot be tested.
        for name in ['uniform', 'normal', 'xavier_uniform',
                     'xavier_normal', 'kaiming_uniform', 'kaiming_normal',
                     'trunc_normal', 'orthogonal']:
            init_weights(layer=dummy_layer, name=name)
        init_weights(layer=dummy_layer, name='sparse', sparse_sparsity=0.1)

        # test constant initializer
        init_weights(layer=dummy_layer, name='constant', constant_val=0.1)
        self.assertTrue(
            torch.equal(dummy_layer.weight, torch.tensor([[0.1, 0.1]])))

        # test ones initializer
        init_weights(layer=dummy_layer, name='ones')
        self.assertTrue(
            torch.equal(dummy_layer.weight, torch.tensor([[1.0, 1.0]])))

        # test zeros initializer
        init_weights(layer=dummy_layer, name='zeros')
        self.assertTrue(
            torch.equal(dummy_layer.weight, torch.tensor([[0.0, 0.0]])))

        # test eye initializer (1x2 slice of the identity matrix)
        init_weights(layer=dummy_layer, name='eye')
        self.assertTrue(
            torch.equal(dummy_layer.weight, torch.tensor([[1.0, 0.0]])))

        # test invalid initializer
        self.assertRaises(ValueError, init_weights, layer=dummy_layer,
                          name='random')

    def tearDown(self):
        """No per-test cleanup is required."""
        pass
296  
297  
# Allow running this test module directly (python test_torch_utility.py).
if __name__ == '__main__':
    unittest.main()