Skip to content

Commit f288fdb

Browse files
rincebrain authored and behlendorf committed
Avoid save/restoring AMX registers to avoid a SPR erratum
Intel SPR erratum SPR4 says that if you trip into a vmexit while doing FPU save/restore, your AMX register state might misbehave... and by misbehave, I mean save all zeroes incorrectly, leading to explosions if you restore it.

Since we're not using AMX for anything, the simple way to avoid this is to just not save/restore those registers when we do anything, since we're disabling preemption of any sort across our save/restores.

If we ever decide to use AMX, it's not clear that we have any way to mitigate this on Linux... but I am not an expert.

Reviewed-by: Brian Behlendorf <[email protected]>
Signed-off-by: Rich Ercolani <[email protected]>
Closes #14989
Closes #15168
Signed-off-by: Rich Ercolani <[email protected]>
1 parent fb6d532 commit f288fdb

File tree

1 file changed

+16
-7
lines changed

1 file changed

+16
-7
lines changed

include/os/linux/kernel/linux/simd_x86.h

Lines changed: 16 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -157,6 +157,15 @@
157157
#endif
158158
#endif
159159

160+
#ifndef XFEATURE_MASK_XTILE
161+
/*
162+
* For kernels where this doesn't exist yet, we still don't want to break
163+
* by save/restoring this broken nonsense.
164+
* See issue #14989 or Intel errata SPR4 for why
165+
*/
166+
#define XFEATURE_MASK_XTILE 0x60000
167+
#endif
168+
160169
#include <linux/mm.h>
161170
#include <linux/slab.h>
162171

@@ -290,7 +299,7 @@ kfpu_begin(void)
290299
*/
291300
union fpregs_state *state = zfs_kfpu_fpregs[smp_processor_id()];
292301
if (static_cpu_has(X86_FEATURE_XSAVE)) {
293-
kfpu_save_xsave(&state->xsave, ~0);
302+
kfpu_save_xsave(&state->xsave, ~XFEATURE_MASK_XTILE);
294303
} else if (static_cpu_has(X86_FEATURE_FXSR)) {
295304
kfpu_save_fxsr(&state->fxsave);
296305
} else {
@@ -319,18 +328,18 @@ kfpu_begin(void)
319328
union fpregs_state *state = zfs_kfpu_fpregs[smp_processor_id()];
320329
#if defined(HAVE_XSAVES)
321330
if (static_cpu_has(X86_FEATURE_XSAVES)) {
322-
kfpu_do_xsave("xsaves", &state->xsave, ~0);
331+
kfpu_do_xsave("xsaves", &state->xsave, ~XFEATURE_MASK_XTILE);
323332
return;
324333
}
325334
#endif
326335
#if defined(HAVE_XSAVEOPT)
327336
if (static_cpu_has(X86_FEATURE_XSAVEOPT)) {
328-
kfpu_do_xsave("xsaveopt", &state->xsave, ~0);
337+
kfpu_do_xsave("xsaveopt", &state->xsave, ~XFEATURE_MASK_XTILE);
329338
return;
330339
}
331340
#endif
332341
if (static_cpu_has(X86_FEATURE_XSAVE)) {
333-
kfpu_do_xsave("xsave", &state->xsave, ~0);
342+
kfpu_do_xsave("xsave", &state->xsave, ~XFEATURE_MASK_XTILE);
334343
} else if (static_cpu_has(X86_FEATURE_FXSR)) {
335344
kfpu_save_fxsr(&state->fxsave);
336345
} else {
@@ -396,7 +405,7 @@ kfpu_end(void)
396405
union fpregs_state *state = zfs_kfpu_fpregs[smp_processor_id()];
397406

398407
if (static_cpu_has(X86_FEATURE_XSAVE)) {
399-
kfpu_restore_xsave(&state->xsave, ~0);
408+
kfpu_restore_xsave(&state->xsave, ~XFEATURE_MASK_XTILE);
400409
} else if (static_cpu_has(X86_FEATURE_FXSR)) {
401410
kfpu_restore_fxsr(&state->fxsave);
402411
} else {
@@ -415,12 +424,12 @@ kfpu_end(void)
415424
union fpregs_state *state = zfs_kfpu_fpregs[smp_processor_id()];
416425
#if defined(HAVE_XSAVES)
417426
if (static_cpu_has(X86_FEATURE_XSAVES)) {
418-
kfpu_do_xrstor("xrstors", &state->xsave, ~0);
427+
kfpu_do_xrstor("xrstors", &state->xsave, ~XFEATURE_MASK_XTILE);
419428
goto out;
420429
}
421430
#endif
422431
if (static_cpu_has(X86_FEATURE_XSAVE)) {
423-
kfpu_do_xrstor("xrstor", &state->xsave, ~0);
432+
kfpu_do_xrstor("xrstor", &state->xsave, ~XFEATURE_MASK_XTILE);
424433
} else if (static_cpu_has(X86_FEATURE_FXSR)) {
425434
kfpu_restore_fxsr(&state->fxsave);
426435
} else {

0 commit comments

Comments (0)