Date:    Thu, 24 Feb 2022 17:09:32 -0800
From:    Kees Cook <>
Subject: Re: [PATCH v2 18/39] x86/ibt: Add IBT feature, MSR and #CP handling
On Thu, Feb 24, 2022 at 03:51:56PM +0100, Peter Zijlstra wrote:
> [...]
> @@ -438,7 +439,8 @@ static __always_inline void setup_umip(s
> 
>  /* These bits should not change their value after CPU init is finished. */
>  static const unsigned long cr4_pinned_mask =
> -	X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_UMIP | X86_CR4_FSGSBASE;
> +	X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_UMIP |
> +	X86_CR4_FSGSBASE | X86_CR4_CET;
Thanks!
>  static DEFINE_STATIC_KEY_FALSE_RO(cr_pinning);
>  static unsigned long cr4_pinned_bits __ro_after_init;
> 
> @@ -592,6 +594,29 @@ static __init int setup_disable_pku(char
>  __setup("nopku", setup_disable_pku);
>  #endif /* CONFIG_X86_64 */
> 
> +static __always_inline void setup_cet(struct cpuinfo_x86 *c)
> +{
> +	u64 msr = CET_ENDBR_EN;
> +
> +	if (!HAS_KERNEL_IBT ||
> +	    !cpu_feature_enabled(X86_FEATURE_IBT))
> +		return;
> +
> +	wrmsrl(MSR_IA32_S_CET, msr);
> +	cr4_set_bits(X86_CR4_CET);
> +
> +	if (!ibt_selftest()) {
> +		pr_err("IBT selftest: Failed!\n");
> +		setup_clear_cpu_cap(X86_FEATURE_IBT);
> +	}
For easy boot-output testing, I'd love to see something like:
	} else {
		pr_info("CET detected: Indirect Branch Tracking enabled.\n");
	}
or maybe:

		pr_info("CET detected: Indirect Branch Tracking is %s.\n",
			ibt_fatal ? "enforced" : "warning only");
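(Roughly, the latter would slot into the quoted setup_cet() as the else
branch, something like the sketch below; caveat: ibt_fatal lives on the
#CP handling side of the patch, so it would need to be visible from this
file.)

	if (!ibt_selftest()) {
		pr_err("IBT selftest: Failed!\n");
		setup_clear_cpu_cap(X86_FEATURE_IBT);
	} else {
		pr_info("CET detected: Indirect Branch Tracking is %s.\n",
			ibt_fatal ? "enforced" : "warning only");
	}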
> [...]
> +bool ibt_selftest(void)
> +{
> +	unsigned long ret;
> +
> +	asm ("1: lea 2f(%%rip), %%rax\n\t"
> +	     ANNOTATE_RETPOLINE_SAFE
> +	     "   jmp *%%rax\n\t"
> +	     ASM_REACHABLE
> +	     ANNOTATE_NOENDBR
> +	     "2: nop\n\t"
> +
> +	     /* unsigned ibt_selftest_ip = 2b */
> +	     ".pushsection .rodata,\"a\"\n\t"
> +	     ".align 8\n\t"
> +	     ".type ibt_selftest_ip, @object\n\t"
> +	     ".size ibt_selftest_ip, 8\n\t"
> +	     "ibt_selftest_ip:\n\t"
> +	     ".quad 2b\n\t"
> +	     ".popsection\n\t"
> +
> +	     : "=a" (ret) : : "memory");
> +
> +	return !ret;
> +}
I did something like this for LKDTM, but I realize it depends on having no frame pointer, and is likely x86-specific too, as I think arm64's function preamble is responsible for pushing the return address on the stack:
static volatile int lkdtm_just_count;

/* Function taking no arguments, returning a pointer into its own body. */
static noinline void *lkdtm_just_return(void)
{
	/* Do something after preamble but before the label. */
	lkdtm_just_count++;

yolo: {
		void *right_here = &&yolo;

		OPTIMIZER_HIDE_VAR(right_here);
		return right_here;
	}
}

/*
 * This tries to call an indirect function in the middle.
 */
void lkdtm_CFI_FORWARD_ENTRY(void)
{
	/* Matches lkdtm_just_return()'s prototype. */
	void * (*func)(void);

	func = lkdtm_just_return;
	pr_info("Calling actual function entry point %px ...\n", func);
	func = func();

	pr_info("Calling middle of function %px ...\n", func);
	func = func();

	pr_err("FAIL: survived non-entry point call!\n");
#ifdef CONFIG_X86
	pr_expected_config(CONFIG_X86_BTI);
#endif
}
-- 
Kees Cook