Date: Tue, 07 Apr 2020 13:02:39 +0200
From: Peter Zijlstra <>
Subject: [PATCH 3/4] x86,module: Detect VMX vs SLD conflicts
It turns out that with Split-Lock-Detect enabled (default) any VMX hypervisor needs at least a little modification so that it does not blindly inject the #AC into a guest that is not ready for it.
Since there is no telling which module implements a hypervisor, scan the text of every out-of-tree module for VMX instructions and refuse to load the module when SLD is enabled (default) and the module isn't marked 'sld_safe'.
Hypervisors that have been modified and are known to work correctly can add:
MODULE_INFO(sld_safe, "Y");
to explicitly tell the module loader they're good.
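For illustration only (not part of this patch): a hypothetical out-of-tree hypervisor module that has been audited for SLD would carry the tag next to its other module metadata, roughly like so (file name and description are made up):

  /* example_hv.c - hypothetical out-of-tree hypervisor module */
  #include <linux/module.h>

  MODULE_LICENSE("GPL");
  MODULE_DESCRIPTION("Example VMX hypervisor, audited for split-lock #AC handling");
  /* Tell the module loader our VMX usage copes with split lock detection. */
  MODULE_INFO(sld_safe, "Y");

With the tag present, decode_module() below still decodes the module's text (and still rejects malformed text), but it tolerates any VMX instructions it finds.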
Reported-by: "Kenneth R. Crudup" <kenny@panix.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20200402124205.242674296@linutronix.de
---
 arch/x86/include/asm/cpu.h   |    5 ++
 arch/x86/kernel/cpu/intel.c  |    5 ++
 arch/x86/kernel/module.c     |   87 ++++++++++++++++++++++++++++++++++++++++++-
 include/linux/moduleloader.h |    2 
 kernel/module.c              |    3 -
 5 files changed, 99 insertions(+), 3 deletions(-)
--- a/arch/x86/include/asm/cpu.h
+++ b/arch/x86/include/asm/cpu.h
@@ -44,6 +44,7 @@ unsigned int x86_stepping(unsigned int s
 extern void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c);
 extern void switch_to_sld(unsigned long tifn);
 extern bool handle_user_split_lock(struct pt_regs *regs, long error_code);
+extern bool split_lock_enabled(void);
 #else
 static inline void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c) {}
 static inline void switch_to_sld(unsigned long tifn) {}
@@ -51,5 +52,9 @@ static inline bool handle_user_split_loc
 {
         return false;
 }
+static inline bool split_lock_enabled(void)
+{
+        return false;
+}
 #endif
 #endif /* _ASM_X86_CPU_H */
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -1061,6 +1061,11 @@ static void sld_update_msr(bool on)
         wrmsrl(MSR_TEST_CTRL, test_ctrl_val);
 }
 
+bool split_lock_enabled(void)
+{
+        return sld_state != sld_off;
+}
+
 static void split_lock_init(void)
 {
         split_lock_verify_msr(sld_state != sld_off);
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -24,6 +24,8 @@
 #include <asm/pgtable.h>
 #include <asm/setup.h>
 #include <asm/unwind.h>
+#include <asm/cpu.h>
+#include <asm/insn.h>
 
 #if 0
 #define DEBUGP(fmt, ...)                                \
@@ -217,6 +219,78 @@ int apply_relocate_add(Elf64_Shdr *sechd
 }
 #endif
 
+static bool insn_is_vmx(struct insn *insn)
+{
+        u8 modrm = insn->modrm.bytes[0];
+        u8 modrm_mod = X86_MODRM_MOD(modrm);
+        u8 modrm_reg = X86_MODRM_REG(modrm);
+
+        u8 prefix = insn->prefixes.bytes[0];
+
+        if (insn->opcode.bytes[0] != 0x0f)
+                return false;
+
+        switch (insn->opcode.bytes[1]) {
+        case 0x01:
+                switch (insn->opcode.bytes[2]) {
+                case 0xc1: /* VMCALL */
+                case 0xc2: /* VMLAUNCH */
+                case 0xc3: /* VMRESUME */
+                case 0xc4: /* VMXOFF */
+                        return true;
+
+                default:
+                        break;
+                }
+                break;
+
+        case 0x78: /* VMREAD */
+        case 0x79: /* VMWRITE */
+                return true;
+
+        case 0xc7:
+                /* VMPTRLD, VMPTRST, VMCLEAR, VMXON */
+                if (modrm_mod == 0x03)
+                        break;
+
+                if ((modrm_reg == 6 && (!prefix || prefix == 0x66 || prefix == 0xf3)) ||
+                    (modrm_reg == 7 && (!prefix || prefix == 0xf3)))
+                        return true;
+
+                break;
+
+        default:
+                break;
+        }
+
+        return false;
+}
+
+static int decode_module(struct module *mod, void *text, void *text_end, bool sld_safe)
+{
+        bool allow_vmx = sld_safe || !split_lock_enabled();
+        struct insn insn;
+
+        while (text < text_end) {
+                kernel_insn_init(&insn, text, text_end - text);
+                insn_get_length(&insn);
+
+                if (WARN_ON_ONCE(!insn_complete(&insn))) {
+                        pr_err("Module text malformed: %s\n", mod->name);
+                        return -ENOEXEC;
+                }
+
+                if (!allow_vmx && insn_is_vmx(&insn)) {
+                        pr_err("Module has VMX instructions and is not marked 'sld_safe', boot with: 'split_lock_detect=off': %s\n", mod->name);
+                        return -ENOEXEC;
+                }
+
+                text += insn.length;
+        }
+
+        return 0;
+}
+
 int module_finalize(const struct load_info *info, struct module *me)
 {
         const Elf_Ehdr *hdr = info->hdr;
@@ -253,6 +327,16 @@ int module_finalize(const struct load_in
                            tseg, tseg + text->sh_size);
         }
 
+        if (text && !get_modinfo(info, "intree")) {
+                bool sld_safe = get_modinfo(info, "sld_safe");
+                void *tseg = (void *)text->sh_addr;
+                int ret;
+
+                ret = decode_module(me, tseg, tseg + text->sh_size, sld_safe);
+                if (ret)
+                        return ret;
+        }
+
         if (para) {
                 void *pseg = (void *)para->sh_addr;
                 apply_paravirt(pseg, pseg + para->sh_size);
@@ -261,9 +345,10 @@ int module_finalize(const struct load_in
         /* make jump label nops */
         jump_label_apply_nops(me);
 
-        if (orc && orc_ip)
+        if (orc && orc_ip) {
                 unwind_module_init(me, (void *)orc_ip->sh_addr, orc_ip->sh_size,
                                    (void *)orc->sh_addr, orc->sh_size);
+        }
 
         return 0;
 }
--- a/include/linux/moduleloader.h
+++ b/include/linux/moduleloader.h
@@ -26,6 +26,8 @@ struct load_info {
         } index;
 };
 
+extern char *get_modinfo(const struct load_info *info, const char *tag);
+
 /* These may be implemented by architectures that need to hook into the
  * module loader code. Architectures that don't need to do anything special
  * can just rely on the 'weak' default hooks defined in kernel/module.c.
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1393,7 +1393,6 @@ static inline int same_magic(const char
 }
 #endif /* CONFIG_MODVERSIONS */
 
-static char *get_modinfo(const struct load_info *info, const char *tag);
 static char *get_next_modinfo(const struct load_info *info, const char *tag,
                               char *prev);
 
@@ -2521,7 +2520,7 @@ static char *get_next_modinfo(const stru
         return NULL;
 }
 
-static char *get_modinfo(const struct load_info *info, const char *tag)
+char *get_modinfo(const struct load_info *info, const char *tag)
 {
         return get_next_modinfo(info, tag, NULL);
 }