Date:	Tue, 19 Jan 2016 14:57:14 +0100
From:	Borislav Petkov <>
Subject: Re: [PATCH] x86: static_cpu_has_safe: discard dynamic check after init
On Tue, Jan 19, 2016 at 12:25:47AM +0100, Borislav Petkov wrote:
> Luckily, I have this disassembler tool which dumps the alternatives
> sections in a more readable format. I can dump all the static_cpu_has()
> call sites tomorrow and we can see what gcc generates.
Ok, below is the list of all 12(!) places where static_cpu_has() has generated a 2-byte JMP instead of the 5-byte one, i.e. 3 bytes saved per call site. Which has saved us a whopping 36 bytes! On an x86_64 allyesconfig!
Patch removing it below. I better look at a 32-bit allyesconfig too first, though.
old insn VA: 0xffffffff8146b5ac, CPU feat: X86_FEATURE_PCOMMIT, size: 2, padlen: 0
wmb_pmem:
ffffffff8146b5ac:	eb 02	jmp ffffffff8146b5b0
repl insn: 0xffffffff8ca7c651, size: 0

old insn VA: 0xffffffff8200dcd6, CPU feat: X86_FEATURE_PCOMMIT, size: 2, padlen: 0
arch_has_wmb_pmem:
ffffffff8200dcd6:	eb 02	jmp ffffffff8200dcda
repl insn: 0xffffffff8ca7ebd8, size: 0

old insn VA: 0xffffffff828afe51, CPU feat: X86_FEATURE_PCOMMIT, size: 2, padlen: 0
arch_has_wmb_pmem:
ffffffff828afe51:	eb 02	jmp ffffffff828afe55
repl insn: 0xffffffff8ca80f98, size: 0

old insn VA: 0xffffffff81072f77, CPU feat: X86_FEATURE_NRIPS, size: 2, padlen: 0
rdpmc_interception:
ffffffff81072f77:	eb 4d	jmp ffffffff81072fc6
repl insn: 0xffffffff8ca79f6b, size: 0

old insn VA: 0xffffffff8107437d, CPU feat: X86_FEATURE_NRIPS, size: 2, padlen: 0
svm_queue_exception:
ffffffff8107437d:	eb 6f	jmp ffffffff810743ee
repl insn: 0xffffffff8ca79f9f, size: 0

old insn VA: 0xffffffff8107741b, CPU feat: X86_FEATURE_NRIPS, size: 2, padlen: 0
svm_check_intercept:
ffffffff8107741b:	eb 67	jmp ffffffff81077484
repl insn: 0xffffffff8ca79fed, size: 0

old insn VA: 0xffffffff8107741b, CPU feat: X86_FEATURE_NRIPS, size: 2, padlen: 0
svm_check_intercept:
ffffffff8107741b:	eb 67	jmp ffffffff81077484
repl insn: 0xffffffff8ca79fed, size: 0

old insn VA: 0xffffffff81075c4f, CPU feat: X86_FEATURE_TSCRATEMSR, size: 2, padlen: 0
svm_hardware_enable:
ffffffff81075c4f:	eb 57	jmp ffffffff81075ca8
repl insn: 0xffffffff8ca79fb9, size: 0

old insn VA: 0xffffffff81072c00, CPU feat: X86_FEATURE_DECODEASSISTS, size: 2, padlen: 0
invlpg_interception:
ffffffff81072c00:	eb 55	jmp ffffffff81072c57
repl insn: 0xffffffff8ca79f51, size: 0

old insn VA: 0xffffffff8107097c, CPU feat: X86_FEATURE_FLUSHBYASID, size: 2, padlen: 0
svm_flush_tlb:
ffffffff8107097c:	eb 35	jmp ffffffff810709b3
repl insn: 0xffffffff8ca79ee9, size: 0

old insn VA: 0xffffffff8108c0d0, CPU feat: X86_BUG_SYSRET_SS_ATTRS, size: 2, padlen: 0
__switch_to:
ffffffff8108c0d0:	eb 70	jmp ffffffff8108c142
repl insn: 0xffffffff8ca7a1a7, size: 0

old insn VA: 0xffffffff81075d90, CPU feat: X86_BUG_AMD_TLB_MMATCH, size: 2, padlen: 0
svm_hardware_enable:
ffffffff81075d90:	eb 7c	jmp ffffffff81075e0e
repl insn: 0xffffffff8ca79fd3, size: 0
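For reference, here is a minimal user-space sketch of the asm goto trick behind those call sites. It is not the kernel's implementation (that would be the __static_cpu_has() being removed below): the names fake_static_cpu_has, .fake_altinstructions and HYPOTHETICAL_FEATURE_BIT are made up for the example, nothing ever patches the recorded site, and it wants gcc -O2 so the "i" constraint sees a compile-time constant, same as the kernel build does.

#include <stdbool.h>
#include <stdio.h>

#define HYPOTHETICAL_FEATURE_BIT 42	/* stand-in for an X86_FEATURE_* value */

static inline __attribute__((always_inline)) bool fake_static_cpu_has(unsigned short bit)
{
	/*
	 * The only instruction emitted at the call site is "jmp t_no";
	 * the section entry records where that JMP lives so a patcher
	 * (in the kernel: apply_alternatives()) could later NOP it out,
	 * making the code fall through to "return true".
	 */
	asm goto("1: jmp %l[t_no]\n"
		 "2:\n"
		 ".pushsection .fake_altinstructions,\"a\"\n"
		 " .long 1b - .\n"		/* offset of the patch site */
		 " .word %P0\n"			/* feature bit to test for */
		 " .byte 2b - 1b\n"		/* length of the JMP (2 or 5) */
		 ".popsection\n"
		 : : "i" (bit) : : t_no);
	return true;			/* reached only after patching */
t_no:
	return false;			/* the unpatched default */
}

int main(void)
{
	/* Nothing has patched the JMP out, so this always prints "no". */
	printf("feature %d: %s\n", HYPOTHETICAL_FEATURE_BIT,
	       fake_static_cpu_has(HYPOTHETICAL_FEATURE_BIT) ? "yes" : "no");
	return 0;
}

Since the replacement length is 0, "patching" simply NOPs out the JMP on CPUs that have the feature, and when the target is close enough the assembler picks the 2-byte "eb" form instead of the 5-byte one. That is where the 3 bytes per site above come from.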
---
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 9b18ed9..68a2d1f 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -350,16 +350,6 @@ config DEBUG_IMR_SELFTEST
 
 	  If unsure say N here.
 
-config X86_DEBUG_STATIC_CPU_HAS
-	bool "Debug alternatives"
-	depends on DEBUG_KERNEL
-	---help---
-	  This option causes additional code to be generated which
-	  fails if static_cpu_has() is used before alternatives have
-	  run.
-
-	  If unsure, say N.
-
 config X86_DEBUG_FPU
 	bool "Debug the x86 FPU code"
 	depends on DEBUG_KERNEL
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index 0366374..c2d7a97 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -477,7 +477,7 @@ __visible long do_fast_syscall_32(struct pt_regs *regs)
 	 * We don't allow syscalls at all from VM86 mode, but we still
 	 * need to check VM, because we might be returning from sys_vm86.
 	 */
-	return static_cpu_has(X86_FEATURE_SEP) &&
+	return static_cpu_has_safe(X86_FEATURE_SEP) &&
 		regs->cs == __USER_CS && regs->ss == __USER_DS &&
 		regs->ip == landing_pad &&
 		(regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF | X86_EFLAGS_VM)) == 0;
diff --git a/arch/x86/include/asm/archrandom.h b/arch/x86/include/asm/archrandom.h
index 69f1366..2fb511b 100644
--- a/arch/x86/include/asm/archrandom.h
+++ b/arch/x86/include/asm/archrandom.h
@@ -114,8 +114,8 @@ GET_SEED(arch_get_random_seed_int, unsigned int, RDSEED_INT, ASM_NOP4);
 
 #endif /* CONFIG_X86_64 */
 
-#define arch_has_random()	static_cpu_has(X86_FEATURE_RDRAND)
-#define arch_has_random_seed()	static_cpu_has(X86_FEATURE_RDSEED)
+#define arch_has_random()	static_cpu_has_safe(X86_FEATURE_RDRAND)
+#define arch_has_random_seed()	static_cpu_has_safe(X86_FEATURE_RDSEED)
 
 #else
 
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 7ad8c94..5fe399a 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -419,89 +419,6 @@ extern bool __static_cpu_has_safe(u16 bit);
  * These are only valid after alternatives have run, but will statically
  * patch the target code for additional performance.
  */
-static __always_inline __pure bool __static_cpu_has(u16 bit)
-{
-#ifdef CC_HAVE_ASM_GOTO
-
-#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
-
-	/*
-	 * Catch too early usage of this before alternatives
-	 * have run.
-	 */
-	asm_volatile_goto("1: jmp %l[t_warn]\n"
-		 "2:\n"
-		 ".section .altinstructions,\"a\"\n"
-		 " .long 1b - .\n"
-		 " .long 0\n"			/* no replacement */
-		 " .word %P0\n"			/* 1: do replace */
-		 " .byte 2b - 1b\n"		/* source len */
-		 " .byte 0\n"			/* replacement len */
-		 " .byte 0\n"			/* pad len */
-		 ".previous\n"
-		 /* skipping size check since replacement size = 0 */
-		 : : "i" (X86_FEATURE_ALWAYS) : : t_warn);
-
-#endif
-
-	asm_volatile_goto("1: jmp %l[t_no]\n"
-		 "2:\n"
-		 ".section .altinstructions,\"a\"\n"
-		 " .long 1b - .\n"
-		 " .long 0\n"			/* no replacement */
-		 " .word %P0\n"			/* feature bit */
-		 " .byte 2b - 1b\n"		/* source len */
-		 " .byte 0\n"			/* replacement len */
-		 " .byte 0\n"			/* pad len */
-		 ".previous\n"
-		 /* skipping size check since replacement size = 0 */
-		 : : "i" (bit) : : t_no);
-	return true;
-t_no:
-	return false;
-
-#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
-t_warn:
-	warn_pre_alternatives();
-	return false;
-#endif
-
-#else /* CC_HAVE_ASM_GOTO */
-
-	u8 flag;
-	/* Open-coded due to __stringify() in ALTERNATIVE() */
-	asm volatile("1: movb $0,%0\n"
-		     "2:\n"
-		     ".section .altinstructions,\"a\"\n"
-		     " .long 1b - .\n"
-		     " .long 3f - .\n"
-		     " .word %P1\n"		/* feature bit */
-		     " .byte 2b - 1b\n"		/* source len */
-		     " .byte 4f - 3f\n"		/* replacement len */
-		     " .byte 0\n"		/* pad len */
-		     ".previous\n"
-		     ".section .discard,\"aw\",@progbits\n"
-		     " .byte 0xff + (4f-3f) - (2b-1b)\n"	/* size check */
-		     ".previous\n"
-		     ".section .altinstr_replacement,\"ax\"\n"
-		     "3: movb $1,%0\n"
-		     "4:\n"
-		     ".previous\n"
-		     : "=qm" (flag) : "i" (bit));
-	return flag;
-
-#endif /* CC_HAVE_ASM_GOTO */
-}
-
-#define static_cpu_has(bit)					\
-(								\
-	__builtin_constant_p(boot_cpu_has(bit)) ?		\
-		boot_cpu_has(bit) :				\
-	__builtin_constant_p(bit) ?				\
-		__static_cpu_has(bit) :				\
-		boot_cpu_has(bit)				\
-)
-
 static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
 {
 #ifdef CC_HAVE_ASM_GOTO
@@ -588,7 +505,6 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
 /*
  * gcc 3.x is too stupid to do the static test; fall back to dynamic.
  */
-#define static_cpu_has(bit)		boot_cpu_has(bit)
 #define static_cpu_has_safe(bit)	boot_cpu_has(bit)
 
 #endif
@@ -596,7 +512,6 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
 #define set_cpu_bug(c, bit)		set_cpu_cap(c, (bit))
 #define clear_cpu_bug(c, bit)		clear_cpu_cap(c, (bit))
 
-#define static_cpu_has_bug(bit)	static_cpu_has((bit))
 #define static_cpu_has_bug_safe(bit)	static_cpu_has_safe((bit))
 
 #define boot_cpu_has_bug(bit)	cpu_has_bug(&boot_cpu_data, (bit))
diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
index c70689b..2bd6e47 100644
--- a/arch/x86/include/asm/mwait.h
+++ b/arch/x86/include/asm/mwait.h
@@ -96,7 +96,7 @@ static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
 static inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
 {
 	if (!current_set_polling_and_test()) {
-		if (static_cpu_has_bug(X86_BUG_CLFLUSH_MONITOR)) {
+		if (static_cpu_has_bug_safe(X86_BUG_CLFLUSH_MONITOR)) {
 			mb();
 			clflush((void *)&current_thread_info()->flags);
 			mb();
diff --git a/arch/x86/include/asm/pmem.h b/arch/x86/include/asm/pmem.h
index 1544fab..5d7abb4 100644
--- a/arch/x86/include/asm/pmem.h
+++ b/arch/x86/include/asm/pmem.h
@@ -142,7 +142,7 @@ static inline bool __arch_has_wmb_pmem(void)
 	 * We require that wmb() be an 'sfence', that is only guaranteed on
 	 * 64-bit builds
 	 */
-	return static_cpu_has(X86_FEATURE_PCOMMIT);
+	return static_cpu_has_safe(X86_FEATURE_PCOMMIT);
 }
 #endif /* CONFIG_ARCH_HAS_PMEM_API */
 #endif /* __ASM_X86_PMEM_H__ */
diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
index eaba080..f456616 100644
--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -43,7 +43,7 @@ static inline void queued_spin_unlock(struct qspinlock *lock)
 #define virt_spin_lock virt_spin_lock
 static inline bool virt_spin_lock(struct qspinlock *lock)
 {
-	if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
+	if (!static_cpu_has_safe(X86_FEATURE_HYPERVISOR))
 		return false;
 
 	/*
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index 18ca99f..f8a6cfb 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -32,11 +32,11 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
 		   "fpu_exception\t: %s\n"
 		   "cpuid level\t: %d\n"
 		   "wp\t\t: %s\n",
-		   static_cpu_has_bug(X86_BUG_FDIV) ? "yes" : "no",
-		   static_cpu_has_bug(X86_BUG_F00F) ? "yes" : "no",
-		   static_cpu_has_bug(X86_BUG_COMA) ? "yes" : "no",
-		   static_cpu_has(X86_FEATURE_FPU) ? "yes" : "no",
-		   static_cpu_has(X86_FEATURE_FPU) ? "yes" : "no",
+		   static_cpu_has_bug_safe(X86_BUG_FDIV) ? "yes" : "no",
+		   static_cpu_has_bug_safe(X86_BUG_F00F) ? "yes" : "no",
+		   static_cpu_has_bug_safe(X86_BUG_COMA) ? "yes" : "no",
+		   static_cpu_has_safe(X86_FEATURE_FPU) ? "yes" : "no",
+		   static_cpu_has_safe(X86_FEATURE_FPU) ? "yes" : "no",
 		   c->cpuid_level,
 		   c->wp_works_ok ? "yes" : "no");
 }
diff --git a/arch/x86/kernel/fpu/regset.c b/arch/x86/kernel/fpu/regset.c
index 0bc3490..9b0163c 100644
--- a/arch/x86/kernel/fpu/regset.c
+++ b/arch/x86/kernel/fpu/regset.c
@@ -275,7 +275,7 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset,
 
 	fpu__activate_fpstate_read(fpu);
 
-	if (!static_cpu_has(X86_FEATURE_FPU))
+	if (!static_cpu_has_safe(X86_FEATURE_FPU))
 		return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);
 
 	if (!cpu_has_fxsr)
@@ -306,7 +306,7 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
 	fpu__activate_fpstate_write(fpu);
 	fpstate_sanitize_xstate(fpu);
 
-	if (!static_cpu_has(X86_FEATURE_FPU))
+	if (!static_cpu_has_safe(X86_FEATURE_FPU))
 		return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);
 
 	if (!cpu_has_fxsr)
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
index 31c6a60..05b6ede 100644
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -162,7 +162,7 @@ int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
 	if (!access_ok(VERIFY_WRITE, buf, size))
 		return -EACCES;
 
-	if (!static_cpu_has(X86_FEATURE_FPU))
+	if (!static_cpu_has_safe(X86_FEATURE_FPU))
 		return fpregs_soft_get(current, NULL, 0,
 			sizeof(struct user_i387_ia32_struct), NULL,
 			(struct _fpstate_32 __user *) buf) ? -1 : 1;
@@ -267,7 +267,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
 
 	fpu__activate_curr(fpu);
 
-	if (!static_cpu_has(X86_FEATURE_FPU))
+	if (!static_cpu_has_safe(X86_FEATURE_FPU))
 		return fpregs_soft_set(current, NULL, 0,
 				       sizeof(struct user_i387_ia32_struct),
 				       NULL, buf) != 0;
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index b9d99e0..c49a284 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -411,7 +411,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 		     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
 		__switch_to_xtra(prev_p, next_p, tss);
 
-	if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) {
+	if (static_cpu_has_bug_safe(X86_BUG_SYSRET_SS_ATTRS)) {
 		/*
 		 * AMD CPUs have a misfeature: SYSRET sets the SS selector but
 		 * does not update the cached descriptor.  As a result, if we
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index c8eda14..89d5ad7 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -32,7 +32,7 @@ static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpuid_entry2 *best;
 
-	if (!static_cpu_has(X86_FEATURE_XSAVE))
+	if (!static_cpu_has_safe(X86_FEATURE_XSAVE))
 		return false;
 
 	best = kvm_find_cpuid_entry(vcpu, 1, 0);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index c13a64b..1892bdd 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -516,7 +516,7 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
 	struct vcpu_svm *svm = to_svm(vcpu);
 
 	if (svm->vmcb->control.next_rip != 0) {
-		WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS));
+		WARN_ON_ONCE(!static_cpu_has_safe(X86_FEATURE_NRIPS));
 		svm->next_rip = svm->vmcb->control.next_rip;
 	}
 
@@ -548,7 +548,7 @@ static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
 	    nested_svm_check_exception(svm, nr, has_error_code, error_code))
 		return;
 
-	if (nr == BP_VECTOR && !static_cpu_has(X86_FEATURE_NRIPS)) {
+	if (nr == BP_VECTOR && !static_cpu_has_safe(X86_FEATURE_NRIPS)) {
 		unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu);
 
 		/*
@@ -577,7 +577,7 @@ static void svm_init_erratum_383(void)
 	int err;
 	u64 val;
 
-	if (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH))
+	if (!static_cpu_has_bug_safe(X86_BUG_AMD_TLB_MMATCH))
 		return;
 
 	/* Use _safe variants to not break nested virtualization */
@@ -631,7 +631,7 @@ static int has_svm(void)
 static void svm_hardware_disable(void)
 {
 	/* Make sure we clean up behind us */
-	if (static_cpu_has(X86_FEATURE_TSCRATEMSR))
+	if (static_cpu_has_safe(X86_FEATURE_TSCRATEMSR))
 		wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
 
 	cpu_svm_disable();
@@ -674,7 +674,7 @@ static int svm_hardware_enable(void)
 	wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT);
 
-	if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
+	if (static_cpu_has_safe(X86_FEATURE_TSCRATEMSR)) {
 		wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
 		__this_cpu_write(current_tsc_ratio, TSC_RATIO_DEFAULT);
 	}
@@ -1233,7 +1233,7 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
 		rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
 
-	if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
+	if (static_cpu_has_safe(X86_FEATURE_TSCRATEMSR)) {
 		u64 tsc_ratio = vcpu->arch.tsc_scaling_ratio;
 		if (tsc_ratio != __this_cpu_read(current_tsc_ratio)) {
 			__this_cpu_write(current_tsc_ratio, tsc_ratio);
@@ -1241,7 +1241,7 @@
 		}
 	}
 	/* This assumes that the kernel never uses MSR_TSC_AUX */
-	if (static_cpu_has(X86_FEATURE_RDTSCP))
+	if (static_cpu_has_safe(X86_FEATURE_RDTSCP))
 		wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
 }
@@ -2806,7 +2806,7 @@ static int iret_interception(struct vcpu_svm *svm)
 
 static int invlpg_interception(struct vcpu_svm *svm)
 {
-	if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
+	if (!static_cpu_has_safe(X86_FEATURE_DECODEASSISTS))
 		return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
 
 	kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1);
@@ -2823,7 +2823,7 @@ static int rdpmc_interception(struct vcpu_svm *svm)
 {
 	int err;
 
-	if (!static_cpu_has(X86_FEATURE_NRIPS))
+	if (!static_cpu_has_safe(X86_FEATURE_NRIPS))
 		return emulate_on_interception(svm);
 
 	err = kvm_rdpmc(&svm->vcpu);
@@ -2864,7 +2864,7 @@ static int cr_interception(struct vcpu_svm *svm)
 	unsigned long val;
 	int err;
 
-	if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
+	if (!static_cpu_has_safe(X86_FEATURE_DECODEASSISTS))
 		return emulate_on_interception(svm);
 
 	if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0))
@@ -3710,7 +3710,7 @@ static void svm_flush_tlb(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
-	if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
+	if (static_cpu_has_safe(X86_FEATURE_FLUSHBYASID))
 		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
 	else
 		svm->asid_generation--;
@@ -4282,7 +4282,7 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu,
 	}
 
 	/* TODO: Advertise NRIPS to guest hypervisor unconditionally */
-	if (static_cpu_has(X86_FEATURE_NRIPS))
+	if (static_cpu_has_safe(X86_FEATURE_NRIPS))
 		vmcb->control.next_rip  = info->next_rip;
 
 	vmcb->control.exit_code = icpt_info.exit_code;
 	vmexit = nested_svm_exit_handled(svm);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index e2951b6..4bef603 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -8993,7 +8993,7 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
 	if (cpu_has_secondary_exec_ctrls())
 		vmcs_set_secondary_exec_control(secondary_exec_ctl);
 
-	if (static_cpu_has(X86_FEATURE_PCOMMIT) && nested) {
+	if (static_cpu_has_safe(X86_FEATURE_PCOMMIT) && nested) {
 		if (guest_cpuid_has_pcommit(vcpu))
 			vmx->nested.nested_vmx_secondary_ctls_high |=
 				SECONDARY_EXEC_PCOMMIT;
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index eef44d9..711e9bc 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1033,7 +1033,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
 	if (!IS_ENABLED(CONFIG_X86_SMAP))
 		return false;
 
-	if (!static_cpu_has(X86_FEATURE_SMAP))
+	if (!static_cpu_has_safe(X86_FEATURE_SMAP))
 		return false;
 
 	if (error_code & PF_USER)
diff --git a/arch/x86/ras/mce_amd_inj.c b/arch/x86/ras/mce_amd_inj.c
index 55d38cf..191650f 100644
--- a/arch/x86/ras/mce_amd_inj.c
+++ b/arch/x86/ras/mce_amd_inj.c
@@ -275,7 +275,7 @@ static void do_inject(void)
 	 * only on the node base core. Refer to D18F3x44[NbMcaToMstCpuEn] for
 	 * Fam10h and later BKDGs.
 	 */
-	if (static_cpu_has(X86_FEATURE_AMD_DCM) && b == 4) {
+	if (static_cpu_has_safe(X86_FEATURE_AMD_DCM) && b == 4) {
 		toggle_nb_mca_mst_cpu(amd_get_nb_id(cpu));
 		cpu = get_nbc_for_node(amd_get_nb_id(cpu));
 	}
diff --git a/drivers/cpufreq/amd_freq_sensitivity.c b/drivers/cpufreq/amd_freq_sensitivity.c
index f6b79ab..87b0cf4 100644
--- a/drivers/cpufreq/amd_freq_sensitivity.c
+++ b/drivers/cpufreq/amd_freq_sensitivity.c
@@ -115,7 +115,7 @@ static int __init amd_freq_sensitivity_init(void)
 	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
 		return -ENODEV;
 
-	if (!static_cpu_has(X86_FEATURE_PROC_FEEDBACK))
+	if (!static_cpu_has_safe(X86_FEATURE_PROC_FEEDBACK))
 		return -ENODEV;
 
 	if (rdmsrl_safe(MSR_AMD64_FREQ_SENSITIVITY_ACTUAL, &val))
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index 0b5bf13..aa71612 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -1193,7 +1193,7 @@ static int powernowk8_init(void)
 	unsigned int i, supported_cpus = 0;
 	int ret;
 
-	if (static_cpu_has(X86_FEATURE_HW_PSTATE)) {
+	if (static_cpu_has_safe(X86_FEATURE_HW_PSTATE)) {
 		__request_acpi_cpufreq();
 		return -ENODEV;
 	}
-- 
Regards/Gruss,
    Boris.

SUSE Linux GmbH, GF: Felix Imendörffer, Jane Smithard, Graham Norton, HRB 21284 (AG Nürnberg)