@@ -66,7 +66,7 @@ def test_forward_x1_neq_x2(self, use_keops=True, ard=False, **kwargs):
             # The patch makes sure that we're actually using KeOps
             k1 = kern1(x1, x2).to_dense()
             k2 = kern2(x1, x2).to_dense()
-            self.assertLess(torch.norm(k1 - k2), 1e-4)
+            self.assertLess(torch.norm(k1 - k2), 1e-3)
 
             if use_keops:
                 self.assertTrue(keops_mock.called)
@@ -86,7 +86,7 @@ def test_batch_matmul(self, use_keops=True, **kwargs):
             # The patch makes sure that we're actually using KeOps
             res1 = kern1(x1, x1).matmul(rhs)
             res2 = kern2(x1, x1).matmul(rhs)
-            self.assertLess(torch.norm(res1 - res2), 1e-4)
+            self.assertLess(torch.norm(res1 - res2), 1e-3)
 
             if use_keops:
                 self.assertTrue(keops_mock.called)
@@ -115,7 +115,7 @@ def test_gradient(self, use_keops=True, ard=False, **kwargs):
             # stack all gradients into a tensor
             grad_s1 = torch.vstack(torch.autograd.grad(s1, [*kern1.hyperparameters()]))
             grad_s2 = torch.vstack(torch.autograd.grad(s2, [*kern2.hyperparameters()]))
-            self.assertAllClose(grad_s1, grad_s2, rtol=1e-4, atol=1e-5)
+            self.assertAllClose(grad_s1, grad_s2, rtol=1e-3, atol=1e-3)
 
             if use_keops:
                 self.assertTrue(keops_mock.called)
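
For context, a minimal standalone sketch of the kind of comparison these hunks relax the tolerance for, assuming GPyTorch with the optional KeOps extension (pykeops) installed; the kernel classes below are illustrative RBF kernels, not the exact fixtures the test suite instantiates:

# Hedged sketch: compare a KeOps-backed kernel against the plain PyTorch
# implementation using the relaxed 1e-3 tolerance from this diff.
# Assumes gpytorch and pykeops are installed; kern1/kern2 are stand-ins
# for whichever kernel pair the tests construct.
import torch
from gpytorch.kernels import RBFKernel
from gpytorch.kernels.keops import RBFKernel as KeOpsRBFKernel

x1 = torch.randn(100, 3)
x2 = torch.randn(50, 3)

kern1 = KeOpsRBFKernel()  # KeOps path
kern2 = RBFKernel()       # reference PyTorch path

k1 = kern1(x1, x2).to_dense()
k2 = kern2(x1, x2).to_dense()

# KeOps and eager PyTorch accumulate floating-point error differently,
# so the comparison uses 1e-3 rather than the previous 1e-4.
assert torch.norm(k1 - k2) < 1e-3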