From: Vitaly Kuznetsov <vkuznets@redhat.com>
Subject: Re: [PATCH 45/61] KVM: x86: Fold CPUID 0x7 masking back into __do_cpuid_func()
Sean Christopherson <sean.j.christopherson@intel.com> writes:

> Move the CPUID 0x7 masking back into __do_cpuid_func() now that the
> size of the code has been trimmed down significantly.
>
> Tweak the WARN case, which is impossible to hit unless the CPU is
> completely broken, to break the loop before creating the bogus entry.
>
> Opportunistically reorder the cpuid_entry_set() calls and shorten the
> comment about emulation to further reduce the footprint of CPUID 0x7.
>
> Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
> ---
> arch/x86/kvm/cpuid.c | 62 ++++++++++++++++----------------------------
> 1 file changed, 22 insertions(+), 40 deletions(-)
>
> diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
> index 77a6c1db138d..7362e5238799 100644
> --- a/arch/x86/kvm/cpuid.c
> +++ b/arch/x86/kvm/cpuid.c
> @@ -456,44 +456,6 @@ static int __do_cpuid_func_emulated(struct kvm_cpuid_array *array, u32 func)
> return 0;
> }
>
> -static inline void do_cpuid_7_mask(struct kvm_cpuid_entry2 *entry)
> -{
> - switch (entry->index) {
> - case 0:
> - entry->eax = min(entry->eax, 1u);
> - cpuid_entry_mask(entry, CPUID_7_0_EBX);
> - /* TSC_ADJUST is emulated */
> - cpuid_entry_set(entry, X86_FEATURE_TSC_ADJUST);
> - cpuid_entry_mask(entry, CPUID_7_ECX);
> - cpuid_entry_mask(entry, CPUID_7_EDX);
> - if (boot_cpu_has(X86_FEATURE_IBPB) && boot_cpu_has(X86_FEATURE_IBRS))
> - cpuid_entry_set(entry, X86_FEATURE_SPEC_CTRL);
> - if (boot_cpu_has(X86_FEATURE_STIBP))
> - cpuid_entry_set(entry, X86_FEATURE_INTEL_STIBP);
> - if (boot_cpu_has(X86_FEATURE_AMD_SSBD))
> - cpuid_entry_set(entry, X86_FEATURE_SPEC_CTRL_SSBD);
> - /*
> - * We emulate ARCH_CAPABILITIES in software even
> - * if the host doesn't support it.
> - */
> - cpuid_entry_set(entry, X86_FEATURE_ARCH_CAPABILITIES);
> - break;
> - case 1:
> - cpuid_entry_mask(entry, CPUID_7_1_EAX);
> - entry->ebx = 0;
> - entry->ecx = 0;
> - entry->edx = 0;
> - break;
> - default:
> - WARN_ON_ONCE(1);
> - entry->eax = 0;
> - entry->ebx = 0;
> - entry->ecx = 0;
> - entry->edx = 0;
> - break;
> - }
> -}
> -
> static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
> {
> struct kvm_cpuid_entry2 *entry;
> @@ -555,14 +517,34 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
> break;
> /* function 7 has additional index. */
> case 7:
> - do_cpuid_7_mask(entry);
> + entry->eax = min(entry->eax, 1u);
> + cpuid_entry_mask(entry, CPUID_7_0_EBX);
> + cpuid_entry_mask(entry, CPUID_7_ECX);
> + cpuid_entry_mask(entry, CPUID_7_EDX);
> +
> + /* TSC_ADJUST and ARCH_CAPABILITIES are emulated in software. */
> + cpuid_entry_set(entry, X86_FEATURE_TSC_ADJUST);
> + cpuid_entry_set(entry, X86_FEATURE_ARCH_CAPABILITIES);
> +
> + if (boot_cpu_has(X86_FEATURE_IBPB) && boot_cpu_has(X86_FEATURE_IBRS))
> + cpuid_entry_set(entry, X86_FEATURE_SPEC_CTRL);
> + if (boot_cpu_has(X86_FEATURE_STIBP))
> + cpuid_entry_set(entry, X86_FEATURE_INTEL_STIBP);
> + if (boot_cpu_has(X86_FEATURE_AMD_SSBD))
> + cpuid_entry_set(entry, X86_FEATURE_SPEC_CTRL_SSBD);
>
> for (i = 1, max_idx = entry->eax; i <= max_idx; i++) {
> + if (WARN_ON_ONCE(i > 1))
> + break;
> +
> entry = do_host_cpuid(array, function, i);
> if (!entry)
> goto out;
>
> - do_cpuid_7_mask(entry);
> + cpuid_entry_mask(entry, CPUID_7_1_EAX);
> + entry->ebx = 0;
> + entry->ecx = 0;
> + entry->edx = 0;
> }
> break;
> case 9:
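
(Side note for anyone following along: if you want to see what a guest actually
observes after this masking, a rough userspace sketch like the one below, run
inside the guest, reads CPUID.(EAX=7, ECX=0) directly. It is not part of the
patch; cpuid_count() is just a local helper, and it assumes the VMM passes
through the CPUID that KVM reports via KVM_GET_SUPPORTED_CPUID. TSC_ADJUST is
EBX bit 1 and ARCH_CAPABILITIES is EDX bit 29, the two bits the hunk above sets
unconditionally because KVM emulates them in software.)

/*
 * Hypothetical guest-side check, not part of the patch: dump the
 * TSC_ADJUST and ARCH_CAPABILITIES bits from CPUID leaf 7, subleaf 0.
 */
#include <stdio.h>

static void cpuid_count(unsigned int leaf, unsigned int subleaf,
			unsigned int *eax, unsigned int *ebx,
			unsigned int *ecx, unsigned int *edx)
{
	asm volatile("cpuid"
		     : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
		     : "a" (leaf), "c" (subleaf));
}

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid_count(7, 0, &eax, &ebx, &ecx, &edx);
	printf("max subleaf:       %u\n", eax);
	printf("TSC_ADJUST:        %s\n", (ebx & (1u << 1))  ? "yes" : "no");
	printf("ARCH_CAPABILITIES: %s\n", (edx & (1u << 29)) ? "yes" : "no");
	return 0;
}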

Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
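
For readers less familiar with the helpers used in the hunk: conceptually,
cpuid_entry_mask() clears any bits in the named register that KVM cannot expose
for that (sub)leaf, and cpuid_entry_set() forces a single feature bit on, which
is why the software-emulated features are set after the masking. The snippet
below is only a toy model of that behaviour; the real helpers first resolve an
X86_FEATURE_* word/bit to the right leaf and output register, none of which is
reproduced here, and the "supported" mask is made up.

/* Toy model only, not KVM code. */
#include <stdio.h>

struct toy_cpuid_entry {
	unsigned int eax, ebx, ecx, edx;
};

/* cpuid_entry_mask(): keep only bits the host/KVM can expose. */
static void toy_mask(unsigned int *reg, unsigned int supported)
{
	*reg &= supported;
}

/* cpuid_entry_set(): force-advertise a software-emulated feature. */
static void toy_set(unsigned int *reg, unsigned int bit)
{
	*reg |= 1u << bit;
}

int main(void)
{
	struct toy_cpuid_entry e = { .ebx = 0xffffffff };
	unsigned int host_supported_ebx = 0x00a00009;	/* made-up mask */

	toy_mask(&e.ebx, host_supported_ebx);
	toy_set(&e.ebx, 1);	/* e.g. TSC_ADJUST, emulated regardless */
	printf("guest EBX: 0x%08x\n", e.ebx);
	return 0;
}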

--
Vitaly
