Coverage for models/HarrisWilson/ABM.py: 12%

41 statements  

coverage.py v7.6.1, created at 2024-12-05 17:26 +0000

import torch

""" The Harris and Wilson model numerical solver """


class HarrisWilsonABM:
    def __init__(
        self,
        *,
        origin_sizes,
        network,
        M,
        true_parameters: dict = None,
        epsilon: float = 1.0,
        dt: float = 0.001,
        device: str,
    ):
        """The Harris and Wilson model of economic activity.

        :param origin_sizes: the origin sizes of the network
        :param network: the network adjacency matrix
        :param M: the number of destination zones
        :param true_parameters: (optional) a dictionary of the true parameters
        :param epsilon: (optional) the epsilon value to use for the solver
        :param dt: (optional) the time differential to use for the solver
        :param device: the training device to use
        """

        # The origin zone sizes, number of origin zones, and number of destination zones
        self.or_sizes = origin_sizes
        self.N = len(origin_sizes)
        self.M = M

        # The network
        self.nw = network

        # Model parameters: any parameter not given in true_parameters is to be learned,
        # and is assigned an index into the input data in the order alpha, beta, kappa, sigma
        self.true_parameters = true_parameters
        params_to_learn = (
            {}
            if true_parameters is not None
            else {"alpha": 0, "beta": 1, "kappa": 2, "sigma": 3}
        )
        if true_parameters is not None:
            idx = 0
            for param in ["alpha", "beta", "kappa", "sigma"]:
                if param not in true_parameters.keys():
                    params_to_learn[param] = idx
                    idx += 1
        self.parameters_to_learn = params_to_learn
        self.epsilon = torch.tensor(epsilon).to(device)
        self.dt = torch.tensor(dt).to(device)
        self.device = device

    # ... Model run functions ..........................................................................................

    def run_single(
        self,
        *,
        curr_vals,
        input_data=None,
        epsilon: float = None,
        dt: float = None,
    ):
        """Runs the model for a single iteration.

        :param curr_vals: the current values to take as initial data.
        :param input_data: the input parameters (to learn). Defaults to the model defaults.
        :param epsilon: (optional) the epsilon value to use. Defaults to the model default.
        :param dt: (optional) the time differential to use. Defaults to the model default.
        :return: the updated values
        """

        # Parameters to learn: use the true value where one was given, otherwise read the
        # parameter from input_data at the index assigned in the constructor
        alpha = (
            self.true_parameters["alpha"]
            if "alpha" not in self.parameters_to_learn.keys()
            else input_data[self.parameters_to_learn["alpha"]]
        )
        beta = (
            self.true_parameters["beta"]
            if "beta" not in self.parameters_to_learn.keys()
            else input_data[self.parameters_to_learn["beta"]]
        )
        kappa = (
            self.true_parameters["kappa"]
            if "kappa" not in self.parameters_to_learn.keys()
            else input_data[self.parameters_to_learn["kappa"]]
        )
        sigma = (
            self.true_parameters["sigma"]
            if "sigma" not in self.parameters_to_learn.keys()
            else input_data[self.parameters_to_learn["sigma"]]
        )

        # Training parameters
        epsilon = self.epsilon if epsilon is None else epsilon
        dt = self.dt if dt is None else dt

        new_sizes = curr_vals.clone()

        # Calculate the weight matrix C^beta
        weights = torch.pow(self.nw, beta)

        # Calculate the exponentiated sizes W_j^alpha
        W_alpha = torch.pow(curr_vals, alpha)

        # Calculate the normalisations sum_k W_k^alpha exp(-beta * c_ik)
        # (double transposition of the weight matrix is necessary for this step)
        normalisations = torch.sum(
            torch.transpose(torch.mul(W_alpha, torch.transpose(weights, 0, 1)), 0, 1),
            dim=1,
            keepdim=True,
        )

        # Calculate the vector of demands
        demand = torch.mul(
            W_alpha,
            torch.reshape(
                torch.sum(
                    torch.div(torch.mul(self.or_sizes, weights), normalisations),
                    dim=0,
                    keepdim=True,
                ),
                (self.M, 1),
            ),
        )

        # Update the current values: deterministic growth towards demand net of running
        # costs, plus Gaussian noise scaled by sigma
        new_sizes = (
            new_sizes
            + torch.mul(
                curr_vals,
                epsilon * (demand - kappa * curr_vals)
                + sigma
                * 1
                / torch.sqrt(torch.tensor(2, dtype=torch.float) * torch.pi * dt).to(
                    self.device
                )
                * torch.normal(0, 1, size=(self.M, 1)).to(self.device),
            )
            * dt
        )

        return new_sizes
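    # A reading of run_single, inferred from the code above rather than stated in this file:
    # each call performs one noisy explicit Euler step of the Harris-Wilson dynamics
    #
    #   W_j <- W_j + W_j * [ epsilon * (D_j - kappa * W_j)
    #                        + sigma / sqrt(2 * pi * dt) * xi_j ] * dt,   xi_j ~ N(0, 1),
    #
    # where the demand on destination zone j is
    #
    #   D_j = W_j^alpha * sum_i O_i * w_ij / (sum_k W_k^alpha * w_ik),   w_ij = nw_ij^beta,
    #
    # with O_i the origin sizes (self.or_sizes) and nw the network passed to the constructor.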

    def run(
        self,
        *,
        init_data,
        input_data=None,
        n_iterations: int,
        epsilon: float = None,
        dt: float = None,
        generate_time_series: bool = False,
    ) -> torch.Tensor:
        """Runs the model for n_iterations.

        :param init_data: the initial destination zone size values
        :param input_data: (optional) the parameters to use during training. Defaults to the model defaults.
        :param n_iterations: the number of iteration steps.
        :param epsilon: (optional) the epsilon value to use. Defaults to the model default.
        :param dt: (optional) the time differential to use. Defaults to the model default.
        :param generate_time_series: whether to generate a complete time series or only return the final value
        :return: the time series data
        """
        sizes = [init_data.clone()]

        # Iterate the single-step solver, always feeding in the most recent sizes
        for _ in range(n_iterations):
            sizes.append(
                self.run_single(
                    curr_vals=sizes[-1],
                    input_data=input_data,
                    epsilon=epsilon,
                    dt=dt,
                )
            )
        sizes = torch.stack(sizes)
        if not generate_time_series:
            return sizes[-1]
        else:
            return sizes
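
A minimal usage sketch (not part of ABM.py): it instantiates the solver on a small synthetic network with all four parameters fixed, so nothing has to be learned, and generates a short time series. The import path assumes the repository root is on the Python path; all shapes and values below are illustrative assumptions, not taken from the covered module.

import torch

from models.HarrisWilson.ABM import HarrisWilsonABM

N, M = 4, 3                                # number of origin and destination zones
origin_sizes = torch.rand(N, 1)            # O_i, shape (N, 1)
network = torch.exp(-torch.rand(N, M))     # nw_ij, e.g. exp(-c_ij) for a random cost matrix
true_parameters = {"alpha": 1.2, "beta": 4.0, "kappa": 2.0, "sigma": 0.0}

model = HarrisWilsonABM(
    origin_sizes=origin_sizes,
    network=network,
    M=M,
    true_parameters=true_parameters,
    epsilon=1.0,
    dt=0.001,
    device="cpu",
)

# Run 100 iterations from uniform initial destination sizes and keep the whole series
time_series = model.run(
    init_data=torch.ones(M, 1),
    n_iterations=100,
    generate_time_series=True,
)
print(time_series.shape)  # torch.Size([101, 3, 1])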