Subject: [PATCH 08/10] kvm: selftests: add test for nested state save/restore
    Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
    ---
 tools/testing/selftests/kvm/include/vmx.h | 32 +++++++++++++
 tools/testing/selftests/kvm/lib/x86.c     | 27 ++++++++++-
 tools/testing/selftests/kvm/state_test.c  | 75 ++++++++++++++++++++++++++++++-
 3 files changed, 131 insertions(+), 3 deletions(-)
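
For reference, the flow exercised below uses the new KVM_CAP_NESTED_STATE capability together with the KVM_GET_NESTED_STATE and KVM_SET_NESTED_STATE vCPU ioctls. The following is a minimal userspace sketch of that round trip; it is not part of this patch, it assumes headers that already define these ioctls, an open /dev/kvm fd and vCPU fd, and the helper name save_restore_nested_state is only illustrative:

/*
 * Minimal sketch (not from this patch) of the nested-state round trip,
 * assuming kvm_fd refers to /dev/kvm and vcpu_fd to a vCPU that has run
 * nested code.  The 16384-byte buffer mirrors the reservation below.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

#define NESTED_BUF_SIZE 16384

static int save_restore_nested_state(int kvm_fd, int vcpu_fd)
{
	struct kvm_nested_state *state;
	int max_size;

	/* KVM_CHECK_EXTENSION returns the maximum state size, 0 if unsupported. */
	max_size = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_NESTED_STATE);
	if (max_size <= 0)
		return 0;
	if (max_size > NESTED_BUF_SIZE)
		return -1;	/* buffer too small for this kernel's state */

	state = calloc(1, NESTED_BUF_SIZE);
	if (!state)
		return -1;

	/* Tell KVM how much room is available; it fills in the size it used. */
	state->size = NESTED_BUF_SIZE;
	if (ioctl(vcpu_fd, KVM_GET_NESTED_STATE, state) < 0) {
		perror("KVM_GET_NESTED_STATE");
		free(state);
		return -1;
	}

	/* ... a real test would tear down and recreate the vCPU here ... */

	if (ioctl(vcpu_fd, KVM_SET_NESTED_STATE, state) < 0) {
		perror("KVM_SET_NESTED_STATE");
		free(state);
		return -1;
	}

	free(state);
	return 0;
}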

    diff --git a/tools/testing/selftests/kvm/include/vmx.h b/tools/testing/selftests/kvm/include/vmx.h
    index 9caaf56696a4..2b37f9b6f7df 100644
    --- a/tools/testing/selftests/kvm/include/vmx.h
    +++ b/tools/testing/selftests/kvm/include/vmx.h
@@ -380,6 +380,30 @@ static inline int vmptrld(uint64_t vmcs_pa)
 	return ret;
 }
 
+static inline int vmptrst(uint64_t *value)
+{
+	uint64_t tmp;
+	uint8_t ret;
+
+	__asm__ __volatile__("vmptrst %[value]; setna %[ret]"
+		: [value]"=m"(tmp), [ret]"=rm"(ret)
+		: : "cc", "memory");
+
+	*value = tmp;
+	return ret;
+}
+
+/*
+ * A wrapper around vmptrst that ignores errors and returns zero if the
+ * vmptrst instruction fails.
+ */
+static inline uint64_t vmptrstz(void)
+{
+	uint64_t value = 0;
+	vmptrst(&value);
+	return value;
+}
+
 /*
  * No guest state (e.g. GPRs) is established by this vmlaunch.
  */
@@ -444,6 +468,15 @@ static inline int vmresume(void)
 	return ret;
 }
 
+static inline void vmcall(void)
+{
+	/* Currently, L1 destroys our GPRs during vmexits. */
+	__asm__ __volatile__("push %%rbp; vmcall; pop %%rbp" : : :
+			     "rax", "rbx", "rcx", "rdx",
+			     "rsi", "rdi", "r8", "r9", "r10", "r11", "r12",
+			     "r13", "r14", "r15");
+}
+
 static inline int vmread(uint64_t encoding, uint64_t *value)
 {
 	uint64_t tmp;
    diff --git a/tools/testing/selftests/kvm/lib/x86.c b/tools/testing/selftests/kvm/lib/x86.c
    index 78aabcb91de1..e38345252df5 100644
    --- a/tools/testing/selftests/kvm/lib/x86.c
    +++ b/tools/testing/selftests/kvm/lib/x86.c
@@ -736,6 +736,10 @@ struct kvm_x86_state {
 	struct kvm_xcrs xcrs;
 	struct kvm_sregs sregs;
 	struct kvm_debugregs debugregs;
+	union {
+		struct kvm_nested_state nested;
+		char nested_[16384];
+	};
 	struct kvm_msrs msrs;
 };
 
@@ -764,6 +762,14 @@ struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid)
 	struct kvm_msr_list *list;
 	struct kvm_x86_state *state;
 	int nmsrs, r, i;
+	static int nested_size = -1;
+
+	if (nested_size == -1) {
+		nested_size = kvm_check_cap(KVM_CAP_NESTED_STATE);
+		TEST_ASSERT(nested_size <= sizeof(state->nested_),
+			    "Nested state size too big, %i > %zi",
+			    nested_size, sizeof(state->nested_));
+	}
 
 	nmsrs = kvm_get_num_msrs(vm);
 	list = malloc(sizeof(*list) + nmsrs * sizeof(list->indices[0]));
@@ -797,6 +803,17 @@ struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid)
 	TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_SREGS, r: %i",
 		    r);
 
+	if (nested_size) {
+		state->nested.size = sizeof(state->nested_);
+		r = ioctl(vcpu->fd, KVM_GET_NESTED_STATE, &state->nested);
+		TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_NESTED_STATE, r: %i",
+			    r);
+		TEST_ASSERT(state->nested.size <= nested_size,
+			    "Nested state size too big, %i (KVM_CHECK_CAP gave %i)",
+			    state->nested.size, nested_size);
+	} else
+		state->nested.size = 0;
+
 	state->msrs.nmsrs = nmsrs;
 	for (i = 0; i < nmsrs; i++)
 		state->msrs.entries[i].index = list->indices[i];
@@ -817,6 +834,12 @@ void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *s
 	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
 	int r;
 
+	if (state->nested.size) {
+		r = ioctl(vcpu->fd, KVM_SET_NESTED_STATE, &state->nested);
+		TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_NESTED_STATE, r: %i",
+			    r);
+	}
+
 	r = ioctl(vcpu->fd, KVM_SET_XSAVE, &state->xsave);
 	TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XSAVE, r: %i",
 		    r);
    diff --git a/tools/testing/selftests/kvm/state_test.c b/tools/testing/selftests/kvm/state_test.c
    index e47b27b5406e..447da4f76b56 100644
    --- a/tools/testing/selftests/kvm/state_test.c
    +++ b/tools/testing/selftests/kvm/state_test.c
@@ -18,6 +18,7 @@
 
 #include "kvm_util.h"
 #include "x86.h"
+#include "vmx.h"
 
 #define VCPU_ID 5
 #define PORT_SYNC 0x1000
@@ -45,16 +46,75 @@ static inline void __exit_to_l0(uint16_t port, uint64_t arg0, uint64_t arg1)
 
 static bool have_nested_state;
 
-void guest_code(void)
+void l2_guest_code(void)
+{
+	GUEST_SYNC(5);
+
+	/* Exit to L1 */
+	vmcall();
+
+	GUEST_SYNC(7);
+
+	/* Done, exit to L1 and never come back. */
+	vmcall();
+}
+
+void l1_guest_code(struct vmx_pages *vmx_pages)
+{
+#define L2_GUEST_STACK_SIZE 64
+	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+
+	GUEST_ASSERT(vmx_pages->vmcs_gpa);
+	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
+	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
+
+	GUEST_SYNC(3);
+	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
+
+	prepare_vmcs(vmx_pages, l2_guest_code,
+		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+
+	GUEST_SYNC(4);
+	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
+	GUEST_ASSERT(!vmlaunch());
+	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
+	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
+
+	/* Check that the launched state is preserved. */
+	GUEST_ASSERT(vmlaunch());
+
+	GUEST_ASSERT(!vmresume());
+	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
+
+	GUEST_SYNC(6);
+	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
+
+	GUEST_ASSERT(!vmresume());
+	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
+
+	vmwrite(GUEST_RIP, vmreadz(GUEST_RIP) + 3);
+
+	GUEST_ASSERT(!vmresume());
+	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
+	GUEST_SYNC(8);
+}
+
+void guest_code(struct vmx_pages *vmx_pages)
 {
 	GUEST_SYNC(1);
 	GUEST_SYNC(2);
 
+	if (vmx_pages)
+		l1_guest_code(vmx_pages);
+
 	exit_to_l0(PORT_DONE, 0, 0);
 }
 
 int main(int argc, char *argv[])
 {
+	struct vmx_pages *vmx_pages = NULL;
+	vm_vaddr_t vmx_pages_gva = 0;
+
 	struct kvm_regs regs1, regs2;
 	struct kvm_vm *vm;
 	struct kvm_run *run;
@@ -69,6 +133,15 @@ int main(int argc, char *argv[])
 	run = vcpu_state(vm, VCPU_ID);
 
 	vcpu_regs_get(vm, VCPU_ID, &regs1);
+
+	if (kvm_check_cap(KVM_CAP_NESTED_STATE)) {
+		vmx_pages = vcpu_alloc_vmx(vm, &vmx_pages_gva);
+		vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
+	} else {
+		printf("will skip nested state checks\n");
+		vcpu_args_set(vm, VCPU_ID, 1, 0);
+	}
+
 	for (stage = 1;; stage++) {
 		_vcpu_run(vm, VCPU_ID);
 		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
    --
    1.8.3.1