Diffstat (limited to '0005-x86-spec-ctrl-Defer-CR4_PV32_RESTORE-on-the-cstar_en.patch')
-rw-r--r--  0005-x86-spec-ctrl-Defer-CR4_PV32_RESTORE-on-the-cstar_en.patch  56
1 file changed, 56 insertions, 0 deletions
diff --git a/0005-x86-spec-ctrl-Defer-CR4_PV32_RESTORE-on-the-cstar_en.patch b/0005-x86-spec-ctrl-Defer-CR4_PV32_RESTORE-on-the-cstar_en.patch
new file mode 100644
index 0000000..5a65bda
--- /dev/null
+++ b/0005-x86-spec-ctrl-Defer-CR4_PV32_RESTORE-on-the-cstar_en.patch
@@ -0,0 +1,56 @@
+From 11193e13e5359ba1896be46be3e9b468154c1295 Mon Sep 17 00:00:00 2001
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+Date: Fri, 10 Feb 2023 21:11:14 +0000
+Subject: [PATCH 05/13] x86/spec-ctrl: Defer CR4_PV32_RESTORE on the
+ cstar_enter path
+
+As stated (correctly) by the comment next to SPEC_CTRL_ENTRY_FROM_PV (between
+the two hunks visible in the patch), RETs are not safe prior to this point.
+
+CR4_PV32_RESTORE hides a CALL/RET pair in certain configurations (PV32
+compiled in, SMEP or SMAP active), and the RET can be attacked with one of
+several known speculative issues.
+
+Furthermore, CR4_PV32_RESTORE hides a reference to the cr4_pv32_mask
+global variable which, when XPTI is active, is not safe before Xen's full
+pagetables have been restored.
+
+This crash has gone unnoticed because only AMD CPUs permit the SYSCALL
+instruction in compatibility mode, and, not being vulnerable to Meltdown,
+they do not activate XPTI by default.
+
+This is XSA-429 / CVE-2022-42331
+
+Fixes: 5e7962901131 ("x86/entry: Organise the use of MSR_SPEC_CTRL at each entry/exit point")
+Fixes: 5784de3e2067 ("x86: Meltdown band-aid against malicious 64-bit PV guests")
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+(cherry picked from commit df5b055b12116d9e63ced59ae5389e69a2a3de48)
+---
+ xen/arch/x86/x86_64/compat/entry.S | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/xen/arch/x86/x86_64/compat/entry.S b/xen/arch/x86/x86_64/compat/entry.S
+index 5c999271e6..09a86768ac 100644
+--- a/xen/arch/x86/x86_64/compat/entry.S
++++ b/xen/arch/x86/x86_64/compat/entry.S
+@@ -206,7 +206,6 @@ ENTRY(cstar_enter)
+ ALTERNATIVE "", "setssbsy", X86_FEATURE_XEN_SHSTK
+ #endif
+ push %rax /* Guest %rsp */
+- CR4_PV32_RESTORE
+ movq 8(%rsp), %rax /* Restore guest %rax. */
+ movq $FLAT_USER_SS32, 8(%rsp) /* Assume a 64bit domain. Compat handled lower. */
+ pushq %r11
+@@ -230,6 +229,8 @@ ENTRY(cstar_enter)
+ .Lcstar_cr3_okay:
+ sti
+
++ CR4_PV32_RESTORE
++
+ movq STACK_CPUINFO_FIELD(current_vcpu)(%rbx), %rbx
+ movq VCPU_domain(%rbx),%rcx
+ cmpb $0,DOMAIN_is_32bit_pv(%rcx)
+--
+2.40.0
+
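
For context, here is a minimal sketch of the kind of expansion the commit
message describes. The sketch_* names are hypothetical stand-ins and the body
is only an approximation, not verbatim Xen source: with PV32 compiled in and
SMEP or SMAP active, CR4_PV32_RESTORE is patched into a call to a helper that
reads the cr4_pv32_mask global and rewrites %cr4, so running it before
.Lcstar_cr3_okay executes a RET that is open to speculative attack and touches
a global that requires Xen's full pagetables when XPTI is active.

        /* Hypothetical expansion sketch -- not verbatim Xen code. */
        .text
        .globl  sketch_cstar_fragment
sketch_cstar_fragment:
        call    sketch_cr4_pv32_restore    /* hidden CALL patched in by the alternative */
        ret

sketch_cr4_pv32_restore:
        mov     sketch_cr4_pv32_mask(%rip), %rax  /* global read: needs Xen's pagetables under XPTI */
        mov     %cr4, %rcx
        or      %rax, %rcx                 /* re-assert the SMEP/SMAP bits for Xen */
        mov     %rcx, %cr4
        ret                                /* hidden RET: the speculatively attackable instruction */

        .data
sketch_cr4_pv32_mask:
        .quad   0                          /* stand-in for Xen's cr4_pv32_mask */

Deferring the macro until after the cr3 switch and sti, as the patch does,
means both the hidden RET and the global read only happen once Xen's full
pagetables and speculative protections are in place.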