main/xen: add mitigations for XSA-469 (Intel CPUs)
Source: aports/main/xen/xsa469-4.20-02.patch (aports commit c0be0d7aa8)
Advisory: https://xenbits.xen.org/xsa/advisory-469.html

> Only Intel x86 CPUs are potentially affected.  CPUs from other
> manufacturers are not known to be affected.
>
> The affected Intel CPUs are believed to be those which have eIBRS
> (hardware Spectre-v2 mitigations, starting with Cascade Lake) up to but
> not including those which have BHI_CTRL (Alder Lake and Sapphire
> Rapids).  See the Intel whitepaper for full details.
2025-05-19 18:36:05 +00:00

(original patch size: 314 lines, 12 KiB)

From: Andrew Cooper <andrew.cooper3@citrix.com>
Subject: x86/guest: Remove use of the Xen hypercall_page
In order to protect against ITS, Xen needs to start using return thunks.
Therefore the advice in XSA-466 becomes relevant, and the hypercall_page needs
to be removed.
Implement early_hypercall(), with infrastructure to figure out the correct
instruction on first use. Use ALTERNATIVE()s to result in inline hypercalls,
including the ALT_NOT() form so we only need a single synthetic feature bit.
No overall change.
This is part of XSA-469 / CVE-2024-28956
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Roger Pau Monné <roger.pau@citrix.com>
diff --git a/xen/arch/x86/guest/xen/Makefile b/xen/arch/x86/guest/xen/Makefile
index 26fb4b1007c0..8b3250aa8886 100644
--- a/xen/arch/x86/guest/xen/Makefile
+++ b/xen/arch/x86/guest/xen/Makefile
@@ -1,4 +1,4 @@
-obj-y += hypercall_page.o
+obj-bin-y += hypercall.init.o
obj-y += xen.o
obj-bin-$(CONFIG_PVH_GUEST) += pvh-boot.init.o
diff --git a/xen/arch/x86/guest/xen/hypercall.S b/xen/arch/x86/guest/xen/hypercall.S
new file mode 100644
index 000000000000..05e429794cc4
--- /dev/null
+++ b/xen/arch/x86/guest/xen/hypercall.S
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#include <xen/linkage.h>
+
+ .section .init.text, "ax", @progbits
+
+ /*
+ * Used during early boot, before alternatives have run and inlined
+ * the appropriate instruction. Called using the hypercall ABI.
+ */
+FUNC(early_hypercall)
+ cmpb $0, early_hypercall_insn(%rip)
+ jl .L_setup
+ je 1f
+
+ vmmcall
+ ret
+
+1: vmcall
+ ret
+
+.L_setup:
+ /*
+ * When setting up the first time around, all registers need
+ * preserving. Save the non-callee-saved ones.
+ */
+ push %r11
+ push %r10
+ push %r9
+ push %r8
+ push %rdi
+ push %rsi
+ push %rdx
+ push %rcx
+ push %rax
+
+ call early_hypercall_setup
+
+ pop %rax
+ pop %rcx
+ pop %rdx
+ pop %rsi
+ pop %rdi
+ pop %r8
+ pop %r9
+ pop %r10
+ pop %r11
+
+ jmp early_hypercall
+END(early_hypercall)
diff --git a/xen/arch/x86/guest/xen/hypercall_page.S b/xen/arch/x86/guest/xen/hypercall_page.S
deleted file mode 100644
index 7ab55fc1f6e6..000000000000
--- a/xen/arch/x86/guest/xen/hypercall_page.S
+++ /dev/null
@@ -1,76 +0,0 @@
-#include <asm/page.h>
-#include <asm/asm_defns.h>
-#include <public/xen.h>
-
- .section ".text.page_aligned", "ax", @progbits
-
-DATA(hypercall_page, PAGE_SIZE)
- /* Poisoned with `ret` for safety before hypercalls are set up. */
- .fill PAGE_SIZE, 1, 0xc3
-END(hypercall_page)
-
-/*
- * Identify a specific hypercall in the hypercall page
- * @param name Hypercall name.
- */
-#define DECLARE_HYPERCALL(name) \
- .globl HYPERCALL_ ## name; \
- .type HYPERCALL_ ## name, STT_FUNC; \
- .size HYPERCALL_ ## name, 32; \
- .set HYPERCALL_ ## name, hypercall_page + __HYPERVISOR_ ## name * 32
-
-DECLARE_HYPERCALL(set_trap_table)
-DECLARE_HYPERCALL(mmu_update)
-DECLARE_HYPERCALL(set_gdt)
-DECLARE_HYPERCALL(stack_switch)
-DECLARE_HYPERCALL(set_callbacks)
-DECLARE_HYPERCALL(fpu_taskswitch)
-DECLARE_HYPERCALL(sched_op_compat)
-DECLARE_HYPERCALL(platform_op)
-DECLARE_HYPERCALL(set_debugreg)
-DECLARE_HYPERCALL(get_debugreg)
-DECLARE_HYPERCALL(update_descriptor)
-DECLARE_HYPERCALL(memory_op)
-DECLARE_HYPERCALL(multicall)
-DECLARE_HYPERCALL(update_va_mapping)
-DECLARE_HYPERCALL(set_timer_op)
-DECLARE_HYPERCALL(event_channel_op_compat)
-DECLARE_HYPERCALL(xen_version)
-DECLARE_HYPERCALL(console_io)
-DECLARE_HYPERCALL(physdev_op_compat)
-DECLARE_HYPERCALL(grant_table_op)
-DECLARE_HYPERCALL(vm_assist)
-DECLARE_HYPERCALL(update_va_mapping_otherdomain)
-DECLARE_HYPERCALL(iret)
-DECLARE_HYPERCALL(vcpu_op)
-DECLARE_HYPERCALL(set_segment_base)
-DECLARE_HYPERCALL(mmuext_op)
-DECLARE_HYPERCALL(xsm_op)
-DECLARE_HYPERCALL(nmi_op)
-DECLARE_HYPERCALL(sched_op)
-DECLARE_HYPERCALL(callback_op)
-DECLARE_HYPERCALL(xenoprof_op)
-DECLARE_HYPERCALL(event_channel_op)
-DECLARE_HYPERCALL(physdev_op)
-DECLARE_HYPERCALL(hvm_op)
-DECLARE_HYPERCALL(sysctl)
-DECLARE_HYPERCALL(domctl)
-DECLARE_HYPERCALL(kexec_op)
-DECLARE_HYPERCALL(argo_op)
-DECLARE_HYPERCALL(xenpmu_op)
-
-DECLARE_HYPERCALL(arch_0)
-DECLARE_HYPERCALL(arch_1)
-DECLARE_HYPERCALL(arch_2)
-DECLARE_HYPERCALL(arch_3)
-DECLARE_HYPERCALL(arch_4)
-DECLARE_HYPERCALL(arch_5)
-DECLARE_HYPERCALL(arch_6)
-DECLARE_HYPERCALL(arch_7)
-
-/*
- * Local variables:
- * tab-width: 8
- * indent-tabs-mode: nil
- * End:
- */
diff --git a/xen/arch/x86/guest/xen/xen.c b/xen/arch/x86/guest/xen/xen.c
index c17f5c447ab6..5c9f393c7514 100644
--- a/xen/arch/x86/guest/xen/xen.c
+++ b/xen/arch/x86/guest/xen/xen.c
@@ -26,7 +26,6 @@
bool __read_mostly xen_guest;
uint32_t __read_mostly xen_cpuid_base;
-extern char hypercall_page[];
static struct rangeset *mem;
DEFINE_PER_CPU(unsigned int, vcpu_id);
@@ -35,6 +34,50 @@ static struct vcpu_info *vcpu_info;
static unsigned long vcpu_info_mapped[BITS_TO_LONGS(NR_CPUS)];
DEFINE_PER_CPU(struct vcpu_info *, vcpu_info);
+/*
+ * Which instruction to use for early hypercalls:
+ * < 0 setup
+ * 0 vmcall
+ * > 0 vmmcall
+ */
+int8_t __initdata early_hypercall_insn = -1;
+
+/*
+ * Called once during the first hypercall to figure out which instruction to
+ * use. Error handling options are limited.
+ */
+void asmlinkage __init early_hypercall_setup(void)
+{
+ BUG_ON(early_hypercall_insn != -1);
+
+ if ( !boot_cpu_data.x86_vendor )
+ {
+ unsigned int eax, ebx, ecx, edx;
+
+ cpuid(0, &eax, &ebx, &ecx, &edx);
+
+ boot_cpu_data.x86_vendor = x86_cpuid_lookup_vendor(ebx, ecx, edx);
+ }
+
+ switch ( boot_cpu_data.x86_vendor )
+ {
+ case X86_VENDOR_INTEL:
+ case X86_VENDOR_CENTAUR:
+ case X86_VENDOR_SHANGHAI:
+ early_hypercall_insn = 0;
+ setup_force_cpu_cap(X86_FEATURE_USE_VMCALL);
+ break;
+
+ case X86_VENDOR_AMD:
+ case X86_VENDOR_HYGON:
+ early_hypercall_insn = 1;
+ break;
+
+ default:
+ BUG();
+ }
+}
+
static void __init find_xen_leaves(void)
{
uint32_t eax, ebx, ecx, edx, base;
@@ -337,9 +380,6 @@ const struct hypervisor_ops *__init xg_probe(void)
if ( !xen_cpuid_base )
return NULL;
- /* Fill the hypercall page. */
- wrmsrl(cpuid_ebx(xen_cpuid_base + 2), __pa(hypercall_page));
-
xen_guest = true;
return &ops;
diff --git a/xen/arch/x86/include/asm/cpufeatures.h b/xen/arch/x86/include/asm/cpufeatures.h
index ba3df174b76e..9e3ed21c026d 100644
--- a/xen/arch/x86/include/asm/cpufeatures.h
+++ b/xen/arch/x86/include/asm/cpufeatures.h
@@ -42,6 +42,7 @@ XEN_CPUFEATURE(XEN_SHSTK, X86_SYNTH(26)) /* Xen uses CET Shadow Stacks *
XEN_CPUFEATURE(XEN_IBT, X86_SYNTH(27)) /* Xen uses CET Indirect Branch Tracking */
XEN_CPUFEATURE(IBPB_ENTRY_PV, X86_SYNTH(28)) /* MSR_PRED_CMD used by Xen for PV */
XEN_CPUFEATURE(IBPB_ENTRY_HVM, X86_SYNTH(29)) /* MSR_PRED_CMD used by Xen for HVM */
+XEN_CPUFEATURE(USE_VMCALL, X86_SYNTH(30)) /* Use VMCALL instead of VMMCALL */
/* Bug words follow the synthetic words. */
#define X86_NR_BUG 1
diff --git a/xen/arch/x86/include/asm/guest/xen-hcall.h b/xen/arch/x86/include/asm/guest/xen-hcall.h
index 665b472d05ac..96004dec9909 100644
--- a/xen/arch/x86/include/asm/guest/xen-hcall.h
+++ b/xen/arch/x86/include/asm/guest/xen-hcall.h
@@ -30,9 +30,11 @@
({ \
long res, tmp__; \
asm volatile ( \
- "call hypercall_page + %c[offset]" \
+ ALTERNATIVE_2("call early_hypercall", \
+ "vmmcall", ALT_NOT(X86_FEATURE_USE_VMCALL), \
+ "vmcall", X86_FEATURE_USE_VMCALL) \
: "=a" (res), "=D" (tmp__) ASM_CALL_CONSTRAINT \
- : [offset] "i" (hcall * 32), \
+ : "0" (hcall), \
"1" ((long)(a1)) \
: "memory" ); \
(type)res; \
@@ -42,10 +44,12 @@
({ \
long res, tmp__; \
asm volatile ( \
- "call hypercall_page + %c[offset]" \
+ ALTERNATIVE_2("call early_hypercall", \
+ "vmmcall", ALT_NOT(X86_FEATURE_USE_VMCALL), \
+ "vmcall", X86_FEATURE_USE_VMCALL) \
: "=a" (res), "=D" (tmp__), "=S" (tmp__) \
ASM_CALL_CONSTRAINT \
- : [offset] "i" (hcall * 32), \
+ : "0" (hcall), \
"1" ((long)(a1)), "2" ((long)(a2)) \
: "memory" ); \
(type)res; \
@@ -55,10 +59,12 @@
({ \
long res, tmp__; \
asm volatile ( \
- "call hypercall_page + %c[offset]" \
+ ALTERNATIVE_2("call early_hypercall", \
+ "vmmcall", ALT_NOT(X86_FEATURE_USE_VMCALL), \
+ "vmcall", X86_FEATURE_USE_VMCALL) \
: "=a" (res), "=D" (tmp__), "=S" (tmp__), "=d" (tmp__) \
ASM_CALL_CONSTRAINT \
- : [offset] "i" (hcall * 32), \
+ : "0" (hcall), \
"1" ((long)(a1)), "2" ((long)(a2)), "3" ((long)(a3)) \
: "memory" ); \
(type)res; \
@@ -69,10 +75,12 @@
long res, tmp__; \
register long _a4 asm ("r10") = ((long)(a4)); \
asm volatile ( \
- "call hypercall_page + %c[offset]" \
+ ALTERNATIVE_2("call early_hypercall", \
+ "vmmcall", ALT_NOT(X86_FEATURE_USE_VMCALL), \
+ "vmcall", X86_FEATURE_USE_VMCALL) \
: "=a" (res), "=D" (tmp__), "=S" (tmp__), "=d" (tmp__), \
"=&r" (tmp__) ASM_CALL_CONSTRAINT \
- : [offset] "i" (hcall * 32), \
+ : "0" (hcall), \
"1" ((long)(a1)), "2" ((long)(a2)), "3" ((long)(a3)), \
"4" (_a4) \
: "memory" ); \