Coverage for tests/core/test_neural_net.py: 98%
94 statements
coverage.py v7.6.1, created at 2024-12-05 17:26 +0000

import sys

from os.path import dirname as up

import torch
from dantro._import_tools import import_module_from_path
from pkg_resources import resource_filename

from utopya.yaml import load_yml

# Make the repository root (three directories up) importable, then load the
# neural net module under test from the model's include package
sys.path.insert(0, up(up(up(__file__))))

nn = import_module_from_path(
    mod_path=up(up(up(__file__))), mod_str="include.neural_net"
)

# Load the test config
CFG_FILENAME = resource_filename("tests", "cfgs/neural_net.yml")
test_cfg = load_yml(CFG_FILENAME)
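
# For orientation: the YAML file itself is not part of this report. Judging
# from the keys the tests below access, a single entry of neural_net.yml
# plausibly has a shape like this (hypothetical sketch; names and values are
# illustrative, not taken from the actual file):
#
#   some_case:
#     num_layers: 3
#     nodes_per_layer:
#       default: 20
#       layer_specific:
#         -1: 10            # -1 addresses the last hidden layer
#     biases:
#       default: [-1, 1]    # an interval, or ~ for no bias, or 'default'
#       layer_specific:
#         0: [0, 1]
#     activation_funcs:
#       -1: sigmoid
#     prior:
#       distribution: uniform
#       parameters: {lower: 0, upper: 1}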

# Generate some training data
test_data, train_data = torch.rand((10, 10), dtype=torch.float), torch.rand(
    (3, 10), dtype=torch.float
)
input_size = output_size = test_data.shape[1]
num_epochs = 10
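
# Note: the three rows of train_data serve as network inputs throughout, while
# the corresponding rows of test_data act as regression targets in test_training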

# Test initialisation of the layers with activation functions and bias
def test_initialisation():
    for _, config in test_cfg.items():
        net = nn.NeuralNet(input_size=input_size, output_size=output_size, **config)

        assert net

        # Assert correct number of layers
        assert (
            len(net.layers)
            == config["num_layers"] + 1  # input layer + number of hidden layers
        )

        # Assert correct input size
        assert net.layers[0].in_features == input_size

        # Assert correct output size
        assert net.layers[-1].out_features == output_size

        # Assert correct dimensions of hidden layers; a key of -1 in the
        # layer-specific config addresses the last hidden layer
        layer_cfg: dict = config["nodes_per_layer"]
        layer_specific_cfg: dict = layer_cfg.get("layer_specific", {})
        if -1 in layer_specific_cfg.keys():
            layer_specific_cfg[len(net.layers) - 2] = layer_specific_cfg.pop(-1)
        hidden_layers = net.layers[1:]

        # Assert all settings have been checked
        checked = {key: False for key in layer_specific_cfg.keys()}

        # Check layers have correct number of nodes
        for idx, layer in enumerate(hidden_layers):
            if idx in layer_specific_cfg.keys():
                assert layer.in_features == layer_specific_cfg[idx]
                checked[idx] = True
            else:
                assert layer.in_features == layer_cfg["default"]

            # Each layer's output must match the next layer's input; the last
            # hidden layer feeds the network output
            if idx != len(net.layers) - 2:
                assert layer.out_features == net.layers[idx + 2].in_features
            else:
                assert layer.out_features == output_size

        if checked:
            assert all(checked.values())
        del checked

        # Assert correct bias on each layer; again, a key of -1 addresses the
        # last layer
        bias_default = config.get("biases").get("default")
        bias_layer_specific: dict = config.get("biases").get("layer_specific", {})
        if -1 in bias_layer_specific.keys():
            bias_layer_specific[len(net.layers) - 1] = bias_layer_specific.pop(-1)

        # Assert all settings have been checked
        checked = {key: False for key in bias_layer_specific.keys()}

        for idx, layer in enumerate(net.layers):
            if idx in bias_layer_specific.keys():
                if bias_layer_specific[idx] == "default":
                    assert layer.bias is not None
                else:
                    # Every bias entry must lie within the configured interval
                    assert all(
                        bias_layer_specific[idx][0] <= b <= bias_layer_specific[idx][1]
                        for b in layer.bias
                    )
                checked[idx] = True

            else:
                if bias_default is None:
                    assert layer.bias is None
                elif bias_default == "default":
                    assert layer.bias is not None
                else:
                    assert all(
                        bias_default[0] <= b <= bias_default[1] for b in layer.bias
                    )

        if checked:
            assert all(checked.values())

# Test the model forward pass
def test_forward_pass():
    for _, config in test_cfg.items():
        net = nn.NeuralNet(input_size=input_size, output_size=output_size, **config)

        activation_funcs: dict = config.get("activation_funcs")
        output_activation = list(activation_funcs.values())[-1]

        for x in train_data:
            y = net(x)

            assert len(y) == output_size

            # The final activation function bounds the network output
            if output_activation in ["sigmoid", "tanh"]:
                assert (torch.abs(y) <= 1).all()
            elif output_activation in ["abs", "sigmoid"]:
                assert (y >= 0).all()

# Test the model trains using the optimizer
def test_training():
    for _, config in test_cfg.items():
        net = nn.NeuralNet(input_size=input_size, output_size=output_size, **config)

        # Calculate the initial loss
        initial_loss = torch.stack(
            [
                torch.nn.functional.mse_loss(net(x), test_data[idx]).detach()
                for idx, x in enumerate(train_data)
            ]
        ).sum()

        # Train the model for n steps
        for it in range(num_epochs):
            for idx, x in enumerate(train_data):
                net.optimizer.zero_grad()
                loss = torch.nn.functional.mse_loss(net(x), test_data[idx])
                loss.backward()
                net.optimizer.step()

        # Assert that the loss has changed
        new_loss = torch.stack(
            [
                torch.nn.functional.mse_loss(net(x), test_data[idx]).detach()
                for idx, x in enumerate(train_data)
            ]
        ).sum()
        assert new_loss != initial_loss
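
# For readers without access to include/neural_net.py: the tests only rely on
# a small surface of the NeuralNet class. A minimal, HYPOTHETICAL stand-in
# satisfying the usage above (a layers list, an attached optimizer, a forward
# pass) could look like the commented sketch below; the real class is richer
# (per-layer node counts, biases, activation functions, priors):
#
#   class MinimalNet(torch.nn.Module):
#       def __init__(self, *, input_size, output_size, num_layers, nodes=20, **_):
#           super().__init__()
#           # num_layers + 1 Linear layers, from input_size down to output_size
#           widths = [input_size] + [nodes] * num_layers + [output_size]
#           self.layers = torch.nn.ModuleList(
#               torch.nn.Linear(widths[i], widths[i + 1])
#               for i in range(len(widths) - 1)
#           )
#           self.optimizer = torch.optim.Adam(self.parameters())
#
#       def forward(self, x):
#           for layer in self.layers[:-1]:
#               x = torch.relu(layer(x))
#           return self.layers[-1](x)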

# Test the model outputs values according to the prior
def test_prior():
    def _test_entry(cfg, tensor):
        if cfg["distribution"] == "uniform":
            assert cfg["parameters"]["lower"] <= tensor <= cfg["parameters"]["upper"]

    tested = False
    for _, config in test_cfg.items():
        net = nn.NeuralNet(input_size=input_size, output_size=output_size, **config)

        if net.prior_distribution is not None:
            tested = True

            t = net(torch.rand(input_size))

            # The prior is either a single distribution config applying to all
            # output entries or a sequence with one config per entry
            for i in range(len(t)):
                if isinstance(net.prior_distribution, dict):
                    _test_entry(net.prior_distribution, t[i])
                else:
                    _test_entry(net.prior_distribution[i], t[i])

    # Ensure at least one of the test configurations actually specified a prior
    assert tested
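
# Assuming the usual pytest setup for this repository, these tests can be run
# directly, e.g.:
#
#   python -m pytest tests/core/test_neural_net.py -v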