@@ -220,19 +220,19 @@ def test_gradients(
 
     @abstractmethod
     def expected_grad_x(
-        self, x: ArrayLike, y: ArrayLike, kernel: _ScalarValuedKernel
+        self, x: Array, y: Array, kernel: _ScalarValuedKernel
     ) -> Union[Array, np.ndarray]:
         """Compute expected gradient of the kernel w.r.t ``x``."""
 
     @abstractmethod
     def expected_grad_y(
-        self, x: ArrayLike, y: ArrayLike, kernel: _ScalarValuedKernel
+        self, x: Array, y: Array, kernel: _ScalarValuedKernel
     ) -> Union[Array, np.ndarray]:
         """Compute expected gradient of the kernel w.r.t ``y``."""
 
     @abstractmethod
     def expected_divergence_x_grad_y(
-        self, x: ArrayLike, y: ArrayLike, kernel: _ScalarValuedKernel
+        self, x: Array, y: Array, kernel: _ScalarValuedKernel
    ) -> Union[Array, np.ndarray]:
         """Compute expected divergence of the kernel w.r.t ``x`` gradient ``y``."""
 
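The hunk above tightens the abstract test signatures from ArrayLike to Array. For reference (not part of the diff), a minimal sketch of what the two hints accept, assuming current jax.typing behaviour:

```python
# Sketch only, not part of the diff: jax.typing.ArrayLike also admits NumPy
# arrays and Python scalars, whereas jax.Array is the concrete JAX array type,
# so the tighter hint matches what the JAX-based test fixtures actually pass.
import numpy as np
import jax.numpy as jnp
from jax import Array
from jax.typing import ArrayLike

a: Array = jnp.array([1.0, 2.0])     # a JAX array satisfies both hints
b: ArrayLike = np.array([1.0, 2.0])  # a NumPy array satisfies ArrayLike only
c: ArrayLike = 3.0                   # plain Python scalars are also ArrayLike
```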
@@ -402,9 +402,7 @@ def problem(self, request: pytest.FixtureRequest, kernel: PowerKernel) -> _Probl
         )
         return _Problem(x, y, expected_distances, modified_kernel)
 
-    def expected_grad_x(
-        self, x: ArrayLike, y: ArrayLike, kernel: PowerKernel
-    ) -> np.ndarray:
+    def expected_grad_x(self, x: Array, y: Array, kernel: PowerKernel) -> np.ndarray:
         num_points, dimension = np.atleast_2d(x).shape
 
         expected_grad = (
@@ -419,9 +417,7 @@ def expected_grad_x(
 
         return np.array(expected_grad)
 
-    def expected_grad_y(
-        self, x: ArrayLike, y: ArrayLike, kernel: PowerKernel
-    ) -> np.ndarray:
+    def expected_grad_y(self, x: Array, y: Array, kernel: PowerKernel) -> np.ndarray:
         num_points, dimension = np.atleast_2d(x).shape
 
         expected_grad = (
@@ -437,7 +433,7 @@ def expected_grad_y(
         return np.array(expected_grad)
 
     def expected_divergence_x_grad_y(
-        self, x: ArrayLike, y: ArrayLike, kernel: PowerKernel
+        self, x: Array, y: Array, kernel: PowerKernel
     ) -> np.ndarray:
         divergence = self.power * (
             (
@@ -579,9 +575,7 @@ def problem(
         )
         return _Problem(x, y, expected_distances, modified_kernel)
 
-    def expected_grad_x(
-        self, x: ArrayLike, y: ArrayLike, kernel: AdditiveKernel
-    ) -> np.ndarray:
+    def expected_grad_x(self, x: Array, y: Array, kernel: AdditiveKernel) -> np.ndarray:
         num_points, dimension = np.atleast_2d(x).shape
 
         # Variable rename allows for nicer automatic formatting
@@ -595,9 +589,7 @@ def expected_grad_x(
 
         return np.array(expected_grad)
 
-    def expected_grad_y(
-        self, x: ArrayLike, y: ArrayLike, kernel: AdditiveKernel
-    ) -> np.ndarray:
+    def expected_grad_y(self, x: Array, y: Array, kernel: AdditiveKernel) -> np.ndarray:
         num_points, dimension = np.atleast_2d(x).shape
 
         # Variable rename allows for nicer automatic formatting
@@ -612,7 +604,7 @@ def expected_grad_y(
         return np.array(expected_grad)
 
     def expected_divergence_x_grad_y(
-        self, x: ArrayLike, y: ArrayLike, kernel: AdditiveKernel
+        self, x: Array, y: Array, kernel: AdditiveKernel
     ) -> np.ndarray:
         num_points, _ = np.atleast_2d(x).shape
 
@@ -691,9 +683,7 @@ def problem(
         )
         return _Problem(x, y, expected_distances, modified_kernel)
 
-    def expected_grad_x(
-        self, x: ArrayLike, y: ArrayLike, kernel: ProductKernel
-    ) -> np.ndarray:
+    def expected_grad_x(self, x: Array, y: Array, kernel: ProductKernel) -> np.ndarray:
         num_points, dimension = np.atleast_2d(x).shape
 
         # Variable rename allows for nicer automatic formatting
@@ -709,9 +699,7 @@ def expected_grad_x(
 
         return np.array(expected_grad)
 
-    def expected_grad_y(
-        self, x: ArrayLike, y: ArrayLike, kernel: ProductKernel
-    ) -> np.ndarray:
+    def expected_grad_y(self, x: Array, y: Array, kernel: ProductKernel) -> np.ndarray:
         num_points, dimension = np.atleast_2d(x).shape
 
         # Variable rename allows for nicer automatic formatting
@@ -728,7 +716,7 @@ def expected_grad_y(
         return np.array(expected_grad)
 
     def expected_divergence_x_grad_y(
-        self, x: ArrayLike, y: ArrayLike, kernel: ProductKernel
+        self, x: Array, y: Array, kernel: ProductKernel
     ) -> np.ndarray:
         # Variable rename allows for nicer automatic formatting
         k1, k2 = kernel.first_kernel, kernel.second_kernel
@@ -748,7 +736,7 @@ def test_symmetric_product_kernel(self):
         We consider a product kernel with equal input kernels and check that
         the second kernel is never called.
         """
-        x = np.array([1])
+        x = jnp.array([1])
 
         # Form two simple mocked kernels and force any == operation to return True
         first_kernel = MagicMock(spec=ScalarValuedKernel)
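The hunk above only swaps the NumPy test input for a JAX array; the surrounding test uses unittest.mock to show that the second factor of a symmetric product kernel is never evaluated. A self-contained sketch of the MagicMock equality trick it relies on (stdlib only; the real test builds the mocks with spec=ScalarValuedKernel and evaluates an actual ProductKernel):

```python
# Sketch of the mocking pattern only, not the repository's test itself.
from unittest.mock import MagicMock

first_kernel = MagicMock()
second_kernel = MagicMock()
# Force `==` to report the two mocks as equal, mimicking "equal input kernels".
first_kernel.__eq__.return_value = True
second_kernel.__eq__.return_value = True
assert first_kernel == second_kernel

# Nothing has touched second_kernel, so its recorded calls stay empty - the
# property the real test asserts after evaluating the product kernel.
assert not second_kernel.method_calls
```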
@@ -1362,8 +1350,10 @@ def problem( # noqa: C901
             expected_distances = np.zeros((num_points, num_points))
             for x_idx, x_ in enumerate(x):
                 for y_idx, y_ in enumerate(y):
-                    expected_distances[x_idx, y_idx] = scipy_norm(y_, length_scale).pdf(
-                        x_
+                    expected_distances[x_idx, y_idx] = (
+                        scipy_norm(y_, length_scale)
+                        # Ignore Pyright here - the .pdf() function definitely exists!
+                        .pdf(x_)  # pyright: ignore[reportAttributeAccessIssue]
                     )
             x, y = x.reshape(-1, 1), y.reshape(-1, 1)
         elif mode == "negative_length_scale":
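The reformatting above exists mainly to attach the Pyright suppression to the .pdf() call. A standalone sketch (independent of the test module, with illustrative variable values) of the SciPy call in question and the Gaussian density it evaluates:

```python
# scipy.stats.norm(loc, scale) returns a frozen distribution whose .pdf() is
# resolved dynamically, which is likely why Pyright cannot see the attribute
# even though it exists at runtime.
import numpy as np
from scipy.stats import norm as scipy_norm

length_scale = 0.5
x_, y_ = 0.2, 0.7

density = scipy_norm(y_, length_scale).pdf(x_)
by_hand = np.exp(-((x_ - y_) ** 2) / (2 * length_scale**2)) / (
    length_scale * np.sqrt(2.0 * np.pi)
)
assert np.isclose(density, by_hand)
```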