@@ -18,6 +18,7 @@ def __init__(
         self.dx_list = self._generate_dx_list(config.max_dr_trans)
         # self.dx_list = self._generate_dx_list_continous(config.max_dr_trans)
         self.scale_vector = np.zeros(self.num_blocks) + config.max_dr_isometry
+        self.rng = np.random.default_rng()

     def __iter__(self):
         while True:
@@ -42,9 +43,9 @@ def _gen_data_kernel(self):
         batch_size = self.config.batch_size
         config = self.config

-        theta = np.random.random(size=int(batch_size * 1.5)) * 2 * np.pi
+        theta = self.rng.random(size=int(batch_size * 1.5)) * 2 * np.pi
         dr = (
-            np.abs(np.random.normal(size=int(batch_size * 1.5)) * config.sigma_data)
+            np.abs(self.rng.normal(size=int(batch_size * 1.5)) * config.sigma_data)
             * self.num_grid
         )
         dx = _dr_theta_to_dx(dr, theta)
@@ -57,7 +58,7 @@ def _gen_data_kernel(self):
         x_max, x_min, dx = x_max[select_idx], x_min[select_idx], dx[select_idx]
         assert len(dx) == batch_size

-        x = np.random.random(size=(batch_size, 2)) * (x_max - x_min) + x_min
+        x = self.rng.random(size=(batch_size, 2)) * (x_max - x_min) + x_min
         x_prime = x + dx

         return {"x": x, "x_prime": x_prime}
@@ -69,7 +70,7 @@ def _gen_data_trans_rnn(self):
         n_steps = self.rnn_step
         dx_list = self.dx_list

-        dx_idx = np.random.choice(len(dx_list), size=[n_traj * 10, n_steps])
+        dx_idx = self.rng.choice(len(dx_list), size=[n_traj * 10, n_steps])
         dx = dx_list[dx_idx]  # [N, T, 2]
         dx_cumsum = np.cumsum(dx, axis=1)  # [N, T, 2]

@@ -86,7 +87,7 @@ def _gen_data_trans_rnn(self):
         x_start_max, x_start_min = x_start_max[select_idx], x_start_min[select_idx]
         dx_cumsum = dx_cumsum[select_idx]
         x_start = (
-            np.random.random((n_traj, 2)) * (x_start_max - x_start_min) + x_start_min
+            self.rng.random((n_traj, 2)) * (x_start_max - x_start_min) + x_start_min
         )
         x_start = x_start[:, None]  # [N, 1, 2]
         x_start = np.round(x_start - 0.5)
@@ -99,13 +100,13 @@ def _gen_data_iso_numerical(self):
         batch_size = self.config.batch_size
         config = self.config

-        theta = np.random.random(size=(batch_size, 2)) * 2 * np.pi
-        dr = np.sqrt(np.random.random(size=(batch_size, 1))) * config.max_dr_isometry
+        theta = self.rng.random(size=(batch_size, 2)) * 2 * np.pi
+        dr = np.sqrt(self.rng.random(size=(batch_size, 1))) * config.max_dr_isometry
         dx = _dr_theta_to_dx(dr, theta)  # [N, 2, 2]

         x_max = np.fmin(self.num_grid - 0.5, np.min(self.num_grid - 0.5 - dx, axis=1))
         x_min = np.fmax(-0.5, np.max(-0.5 - dx, axis=1))
-        x = np.random.random(size=(batch_size, 2)) * (x_max - x_min) + x_min
+        x = self.rng.random(size=(batch_size, 2)) * (x_max - x_min) + x_min
         x_plus_dx1 = x + dx[:, 0]
         x_plus_dx2 = x + dx[:, 1]

@@ -117,18 +118,18 @@ def _gen_data_iso_numerical_adaptive(self):
         config = self.config

         theta = (
-            np.random.random(size=(batch_size, num_blocks, 2)) * 2 * np.pi
+            self.rng.random(size=(batch_size, num_blocks, 2)) * 2 * np.pi
         )  # (batch_size, num_blocks, 2)
         dr = (
-            np.sqrt(np.random.random(size=(batch_size, num_blocks, 1)))
+            np.sqrt(self.rng.random(size=(batch_size, num_blocks, 1)))
             * np.tile(self.scale_vector, (batch_size, 1))[:, :, None]
         )  # (batch_size, num_blocks, 1)
         dx = _dr_theta_to_dx(dr, theta)  # [N, num_blocks, 2, 2]

         x_max = np.fmin(self.num_grid - 0.5, np.min(self.num_grid - 0.5 - dx, axis=2))
         x_min = np.fmax(-0.5, np.max(-0.5 - dx, axis=2))
         x = (
-            np.random.random(size=(batch_size, num_blocks, 2)) * (x_max - x_min) + x_min
+            self.rng.random(size=(batch_size, num_blocks, 2)) * (x_max - x_min) + x_min
         )  # (batch_size, num_blocks, 2)
         x_plus_dx1 = x + dx[:, :, 0]
         x_plus_dx2 = x + dx[:, :, 1]
@@ -157,9 +158,9 @@ def _generate_dx_list_continous(self, max_dr):
         dx_list = []
         batch_size = self.config.batch_size

-        dr = np.sqrt(np.random.random(size=(batch_size,))) * max_dr
-        np.random.shuffle(dr)
-        theta = np.random.random(size=(batch_size,)) * 2 * np.pi
+        dr = np.sqrt(self.rng.random(size=(batch_size,))) * max_dr
+        self.rng.shuffle(dr)
+        theta = self.rng.random(size=(batch_size,)) * 2 * np.pi

         dx = _dr_theta_to_dx(dr, theta)

@@ -202,7 +203,7 @@ def _gen_trajectory_vis(self, n_traj, n_steps):
         x_start = np.reshape([5, 5], newshape=(1, 1, 2))  # [1, 1, 2]
         dx_idx_pool = np.where((dx_list[:, 0] >= -1) & (dx_list[:, 1] >= -1))[0]
         # dx_idx_pool = np.where((dx_list[:, 0] >= 0) & (dx_list[:, 1] >= -1))[0]
-        dx_idx = np.random.choice(dx_idx_pool, size=[n_traj * 50, n_steps])
+        dx_idx = self.rng.choice(dx_idx_pool, size=[n_traj * 50, n_steps])
         dx = dx_list[dx_idx]
         dx_cumsum = np.cumsum(dx, axis=1)  # [N, T, 2]

@@ -224,7 +225,7 @@ def _gen_trajectory(self, n_traj, n_steps):
         # uniformly wihtin the whole region.
         dx_list = self.dx_list

-        dx_idx = np.random.choice(len(dx_list), size=[n_traj * 10, n_steps])
+        dx_idx = self.rng.choice(len(dx_list), size=[n_traj * 10, n_steps])
         dx = dx_list[dx_idx]  # [N, T, 2]
         dx_cumsum = np.cumsum(dx, axis=1)  # [N, T, 2]

@@ -241,7 +242,7 @@ def _gen_trajectory(self, n_traj, n_steps):
         x_start_max, x_start_min = x_start_max[select_idx], x_start_min[select_idx]
         dx_cumsum = dx_cumsum[select_idx]
         x_start = (
-            np.random.random((n_traj, 2)) * (x_start_max - x_start_min) + x_start_min
+            self.rng.random((n_traj, 2)) * (x_start_max - x_start_min) + x_start_min
         )
         x_start = x_start[:, None]  # [N, 1, 2]
         x_start = np.round(x_start - 0.5)
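
Note on the pattern this commit applies: module-level `np.random.*` calls are replaced by a `numpy.random.Generator` created with `np.random.default_rng()` and stored on the instance as `self.rng`. Below is a minimal, hypothetical sketch of that migration — the `ExampleSampler` class and its `seed` parameter are illustrative only and are not part of this repository.

```python
import numpy as np


class ExampleSampler:
    """Illustrative sketch of moving from np.random.* to an instance Generator."""

    def __init__(self, seed=None):
        # One Generator per instance; passing a seed makes sampling reproducible.
        self.rng = np.random.default_rng(seed)

    def sample(self, batch_size, max_dr):
        # Legacy calls: np.random.random, np.random.choice, np.random.shuffle.
        # The same draws now come from the instance's Generator.
        theta = self.rng.random(size=batch_size) * 2 * np.pi       # uniform angles
        dr = np.sqrt(self.rng.random(size=batch_size)) * max_dr    # uniform over a disk
        idx = self.rng.choice(batch_size, size=batch_size)         # random indices
        return dr * np.cos(theta), dr * np.sin(theta), idx
```

Holding the generator on the instance avoids shared global RNG state and makes per-dataset seeding straightforward (e.g. `ExampleSampler(seed=0)`), which is a common reason for this kind of switch.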