From: Zhang Yi Z <yi.z.zhang@linux.intel.com>
Date: Fri, 13 Oct 2017
Subject: [PATCH RFC 07/10] KVM: VMX: Introduce ioctls to set/get Sub-Page Write Protection

Introduce two ioctls that let a user application set/get the sub-page
write-protection bitmap per gfn; each gfn corresponds to one bitmap.

The user application, e.g. QEMU or some other security control daemon,
will set the protection bitmap via these ioctls.

The API is defined as:

struct kvm_subpage {
	__u64 base_gfn;
	__u64 npages;
	/* sub-page write-access bitmap array */
	__u32 access_map[SUBPAGE_MAX_BITMAP];
} sp;

kvm_vm_ioctl(s, KVM_SUBPAGES_SET_ACCESS, &sp);
kvm_vm_ioctl(s, KVM_SUBPAGES_GET_ACCESS, &sp);
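
For illustration only (not part of this patch), a minimal userspace
sketch of the SET path. It assumes a kernel with this series applied, a
VM fd obtained via KVM_CREATE_VM, and a memslot that already covers
base_gfn; the helper name write_protect_range() is made up for this
example and error handling is elided. In the SPP design each 4KB guest
page is divided into 32 sub-pages of 128 bytes, so one 32-bit
access_map entry describes one page, and kvm_mmu_set_subpages() below
applies access_map[0] to every gfn in the range.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int write_protect_range(int vm_fd, __u64 base_gfn,
			       __u64 npages, __u32 bitmap)
{
	struct kvm_subpage sp;

	memset(&sp, 0, sizeof(sp));
	sp.base_gfn = base_gfn;
	sp.npages = npages;
	/* A set bit keeps the corresponding 128-byte sub-page
	 * writable; KVM_SUBPAGES_SET_ACCESS applies this one bitmap
	 * to all npages gfns. */
	sp.access_map[0] = bitmap;

	return ioctl(vm_fd, KVM_SUBPAGES_SET_ACCESS, &sp);
}

The GET path is symmetric: fill in base_gfn and npages, call
KVM_SUBPAGES_GET_ACCESS on the same struct, and read the per-gfn
bitmaps back from access_map[0..npages-1].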

    Signed-off-by: Zhang Yi Z <yi.z.zhang@linux.intel.com>
    Signed-off-by: He Chen <he.chen@linux.intel.com>
    ---
 arch/x86/include/asm/kvm_host.h |  8 ++++
 arch/x86/kvm/mmu.c              | 49 ++++++++++++++++++++
 arch/x86/kvm/vmx.c              | 19 ++++++++
 arch/x86/kvm/x86.c              | 99 ++++++++++++++++++++++++++++++++++++++++-
 include/linux/kvm_host.h        |  5 +++
 include/uapi/linux/kvm.h        | 11 +++++
 virt/kvm/kvm_main.c             | 26 +++++++++++
 7 files changed, 216 insertions(+), 1 deletion(-)

    diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
    index 5e8fdda..763cd7e 100644
    --- a/arch/x86/include/asm/kvm_host.h
    +++ b/arch/x86/include/asm/kvm_host.h
    @@ -345,6 +345,8 @@ struct kvm_mmu {
	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
	void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			   u64 *spte, const void *pte);
+	int (*get_subpages)(struct kvm *kvm, struct kvm_subpage *spp_info);
+	int (*set_subpages)(struct kvm *kvm, struct kvm_subpage *spp_info);
	hpa_t root_hpa;
	hpa_t sppt_root;
	union kvm_mmu_page_role base_role;
    @@ -703,6 +705,7 @@ struct kvm_lpage_info {

struct kvm_arch_memory_slot {
	struct kvm_rmap_head *rmap[KVM_NR_PAGE_SIZES];
+	u32 *subpage_wp_info;
	struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
	unsigned short *gfn_track[KVM_PAGE_TRACK_MAX];
};
    @@ -1063,6 +1066,8 @@ struct kvm_x86_ops {
	void (*cancel_hv_timer)(struct kvm_vcpu *vcpu);

	void (*setup_mce)(struct kvm_vcpu *vcpu);
+	int (*get_subpages)(struct kvm *kvm, struct kvm_subpage *spp_info);
+	int (*set_subpages)(struct kvm *kvm, struct kvm_subpage *spp_info);
};

    struct kvm_arch_async_pf {
    @@ -1254,6 +1259,9 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u64 error_code,
    void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
    void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu);

    +int kvm_mmu_get_subpages(struct kvm *kvm, struct kvm_subpage *spp_info);
    +int kvm_mmu_set_subpages(struct kvm *kvm, struct kvm_subpage *spp_info);
    +
    void kvm_enable_tdp(void);
    void kvm_disable_tdp(void);

    diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
    index 1fbe467..6c92d19 100644
    --- a/arch/x86/kvm/mmu.c
    +++ b/arch/x86/kvm/mmu.c
    @@ -1342,6 +1342,15 @@ static u64 *rmap_get_next(struct rmap_iterator *iter)
	return sptep;
}

+static u32 *gfn_to_subpage_wp_info(struct kvm_memory_slot *slot,
+				   gfn_t gfn)
+{
+	unsigned long idx;
+
+	idx = gfn_to_index(gfn, slot->base_gfn, PT_PAGE_TABLE_LEVEL);
+	return &slot->arch.subpage_wp_info[idx];
+}
+
#define for_each_rmap_spte(_rmap_head_, _iter_, _spte_)			\
	for (_spte_ = rmap_get_first(_rmap_head_, _iter_);		\
	     _spte_; _spte_ = rmap_get_next(_iter_))
    @@ -3971,6 +3980,44 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
	return 0;
}

+int kvm_mmu_get_subpages(struct kvm *kvm, struct kvm_subpage *spp_info)
+{
+	u32 *access = spp_info->access_map;
+	gfn_t gfn = spp_info->base_gfn;
+	int npages = spp_info->npages;
+	struct kvm_memory_slot *slot;
+	int i;
+
+	for (i = 0; i < npages; i++, gfn++) {
+		slot = gfn_to_memslot(kvm, gfn);
+		if (!slot)
+			return -EFAULT;
+		access[i] = *gfn_to_subpage_wp_info(slot, gfn);
+	}
+
+	return i;
+}
+
+int kvm_mmu_set_subpages(struct kvm *kvm, struct kvm_subpage *spp_info)
+{
+	u32 access = spp_info->access_map[0];
+	gfn_t gfn = spp_info->base_gfn;
+	int npages = spp_info->npages;
+	struct kvm_memory_slot *slot;
+	u32 *wp_map;
+	int i;
+
+	for (i = 0; i < npages; i++, gfn++) {
+		slot = gfn_to_memslot(kvm, gfn);
+		if (!slot)
+			return -EFAULT;
+		wp_map = gfn_to_subpage_wp_info(slot, gfn);
+		*wp_map = access;
+	}
+
+	return i;
+}
+
static void nonpaging_init_context(struct kvm_vcpu *vcpu,
				   struct kvm_mmu *context)
{
    @@ -4523,6 +4570,8 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
	context->get_cr3 = get_cr3;
	context->get_pdptr = kvm_pdptr_read;
	context->inject_page_fault = kvm_inject_page_fault;
+	context->get_subpages = kvm_x86_ops->get_subpages;
+	context->set_subpages = kvm_x86_ops->set_subpages;

	if (!is_paging(vcpu)) {
		context->nx = false;
    diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
    index fa4f548..9116b53 100644
    --- a/arch/x86/kvm/vmx.c
    +++ b/arch/x86/kvm/vmx.c
    @@ -6879,6 +6879,11 @@ static __init int hardware_setup(void)
		kvm_x86_ops->enable_log_dirty_pt_masked = NULL;
	}

+	if (!enable_ept_spp) {
+		kvm_x86_ops->get_subpages = NULL;
+		kvm_x86_ops->set_subpages = NULL;
+	}
+
	if (cpu_has_vmx_preemption_timer() && enable_preemption_timer) {
		u64 vmx_msr;

    @@ -12014,6 +12019,18 @@ static void vmx_setup_mce(struct kvm_vcpu *vcpu)
			~FEATURE_CONTROL_LMCE;
}

+static int vmx_get_subpages(struct kvm *kvm,
+			    struct kvm_subpage *spp_info)
+{
+	return kvm_get_subpages(kvm, spp_info);
+}
+
+static int vmx_set_subpages(struct kvm *kvm,
+			    struct kvm_subpage *spp_info)
+{
+	return kvm_set_subpages(kvm, spp_info);
+}
+
static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
	.cpu_has_kvm_support = cpu_has_kvm_support,
	.disabled_by_bios = vmx_disabled_by_bios,
    @@ -12139,6 +12156,8 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
    #endif

	.setup_mce = vmx_setup_mce,
+	.get_subpages = vmx_get_subpages,
+	.set_subpages = vmx_set_subpages,
};

    static int __init vmx_init(void)
    diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
    index cd17b7d..9c6fc52 100644
    --- a/arch/x86/kvm/x86.c
    +++ b/arch/x86/kvm/x86.c
    @@ -4010,6 +4010,18 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
	return r;
}

+static int kvm_vm_ioctl_get_subpages(struct kvm *kvm,
+				     struct kvm_subpage *spp_info)
+{
+	return kvm_arch_get_subpages(kvm, spp_info);
+}
+
+static int kvm_vm_ioctl_set_subpages(struct kvm *kvm,
+				     struct kvm_subpage *spp_info)
+{
+	return kvm_arch_set_subpages(kvm, spp_info);
+}
+
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
    @@ -4270,6 +4282,40 @@ long kvm_arch_vm_ioctl(struct file *filp,
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
+	case KVM_SUBPAGES_GET_ACCESS: {
+		struct kvm_subpage spp_info;
+
+		r = -EFAULT;
+		if (copy_from_user(&spp_info, argp, sizeof(spp_info)))
+			goto out;
+
+		r = -EINVAL;
+		if (spp_info.npages == 0 ||
+		    spp_info.npages > SUBPAGE_MAX_BITMAP)
+			goto out;
+
+		r = kvm_vm_ioctl_get_subpages(kvm, &spp_info);
+		if (copy_to_user(argp, &spp_info, sizeof(spp_info))) {
+			r = -EFAULT;
+			goto out;
+		}
+		break;
+	}
+	case KVM_SUBPAGES_SET_ACCESS: {
+		struct kvm_subpage spp_info;
+
+		r = -EFAULT;
+		if (copy_from_user(&spp_info, argp, sizeof(spp_info)))
+			goto out;
+
+		r = -EINVAL;
+		if (spp_info.npages == 0 ||
+		    spp_info.npages > SUBPAGE_MAX_BITMAP)
+			goto out;
+
+		r = kvm_vm_ioctl_set_subpages(kvm, &spp_info);
+		break;
+	}
	default:
		r = -ENOTTY;
	}
    @@ -8240,6 +8286,34 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
	kvm_page_track_cleanup(kvm);
}

+int kvm_subpage_create_memslot(struct kvm_memory_slot *slot,
+			       unsigned long npages)
+{
+	int lpages;
+
+	lpages = gfn_to_index(slot->base_gfn + npages - 1,
+			      slot->base_gfn, 1) + 1;
+
+	slot->arch.subpage_wp_info =
+		kvzalloc(lpages * sizeof(*slot->arch.subpage_wp_info),
+			 GFP_KERNEL);
+
+	if (!slot->arch.subpage_wp_info)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void kvm_subpage_free_memslot(struct kvm_memory_slot *free,
+			      struct kvm_memory_slot *dont)
+{
+	if (!dont || free->arch.subpage_wp_info !=
+	    dont->arch.subpage_wp_info) {
+		kvfree(free->arch.subpage_wp_info);
+		free->arch.subpage_wp_info = NULL;
+	}
+}
+
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
    @@ -8261,6 +8335,7 @@ void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
	}

	kvm_page_track_free_memslot(free, dont);
+	kvm_subpage_free_memslot(free, dont);
}

    int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
    @@ -8312,8 +8387,12 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
	if (kvm_page_track_create_memslot(slot, npages))
		goto out_free;

-	return 0;
+	if (kvm_subpage_create_memslot(slot, npages))
+		goto out_free_page_track;

+	return 0;
+out_free_page_track:
+	kvm_page_track_free_memslot(slot, NULL);
out_free:
	for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
		kvfree(slot->arch.rmap[i]);
    @@ -8790,6 +8869,24 @@ int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
	return kvm_x86_ops->update_pi_irte(kvm, host_irq, guest_irq, set);
}

+int kvm_arch_get_subpages(struct kvm *kvm,
+			  struct kvm_subpage *spp_info)
+{
+	if (!kvm_x86_ops->get_subpages)
+		return -EINVAL;
+
+	return kvm_x86_ops->get_subpages(kvm, spp_info);
+}
+
+int kvm_arch_set_subpages(struct kvm *kvm,
+			  struct kvm_subpage *spp_info)
+{
+	if (!kvm_x86_ops->set_subpages)
+		return -EINVAL;
+
+	return kvm_x86_ops->set_subpages(kvm, spp_info);
+}
+
bool kvm_vector_hashing_enabled(void)
{
	return vector_hashing;
    diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
    index 6882538..9f33a57 100644
    --- a/include/linux/kvm_host.h
    +++ b/include/linux/kvm_host.h
    @@ -803,6 +803,11 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
    bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
    int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);

    +int kvm_get_subpages(struct kvm *kvm, struct kvm_subpage *spp_info);
    +int kvm_set_subpages(struct kvm *kvm, struct kvm_subpage *spp_info);
    +int kvm_arch_get_subpages(struct kvm *kvm, struct kvm_subpage *spp_info);
    +int kvm_arch_set_subpages(struct kvm *kvm, struct kvm_subpage *spp_info);
    +
    #ifndef __KVM_HAVE_ARCH_VM_ALLOC
    static inline struct kvm *kvm_arch_alloc_vm(void)
    {
    diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
    index 0cd821e..fca4dc7 100644
    --- a/include/uapi/linux/kvm.h
    +++ b/include/uapi/linux/kvm.h
    @@ -101,6 +101,15 @@ struct kvm_userspace_memory_region {
	__u64 userspace_addr; /* start of the userspace allocated memory */
};

+/* for KVM_SUBPAGES_GET_ACCESS and KVM_SUBPAGES_SET_ACCESS */
+#define SUBPAGE_MAX_BITMAP 128
+struct kvm_subpage {
+	__u64 base_gfn;
+	__u64 npages;
+	/* sub-page write-access bitmap array */
+	__u32 access_map[SUBPAGE_MAX_BITMAP];
+};
+
/*
 * The bit 0 ~ bit 15 of kvm_memory_region::flags are visible for userspace,
 * other bits are reserved for kvm internal use which are defined in
    @@ -1184,6 +1193,8 @@ struct kvm_vfio_spapr_tce {
					struct kvm_userspace_memory_region)
#define KVM_SET_TSS_ADDR          _IO(KVMIO,  0x47)
#define KVM_SET_IDENTITY_MAP_ADDR _IOW(KVMIO, 0x48, __u64)
+#define KVM_SUBPAGES_GET_ACCESS   _IOR(KVMIO, 0x49, __u64)
+#define KVM_SUBPAGES_SET_ACCESS   _IOW(KVMIO, 0x4a, __u64)

    /* enable ucontrol for s390 */
    struct kvm_s390_ucas_mapping {
    diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
    index 9deb5a2..9a51ee4 100644
    --- a/virt/kvm/kvm_main.c
    +++ b/virt/kvm/kvm_main.c
    @@ -1104,6 +1104,32 @@ int kvm_get_dirty_log(struct kvm *kvm,
    }
    EXPORT_SYMBOL_GPL(kvm_get_dirty_log);

+int kvm_get_subpages(struct kvm *kvm,
+		     struct kvm_subpage *spp_info)
+{
+	int ret;
+
+	mutex_lock(&kvm->slots_lock);
+	ret = kvm_mmu_get_subpages(kvm, spp_info);
+	mutex_unlock(&kvm->slots_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(kvm_get_subpages);
+
+int kvm_set_subpages(struct kvm *kvm,
+		     struct kvm_subpage *spp_info)
+{
+	int ret;
+
+	mutex_lock(&kvm->slots_lock);
+	ret = kvm_mmu_set_subpages(kvm, spp_info);
+	mutex_unlock(&kvm->slots_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(kvm_set_subpages);
+
    #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
    /**
 * kvm_get_dirty_log_protect - get a snapshot of dirty pages, and if any pages
    --
    2.7.4