Date: Fri, 25 Sep 2020
From: Ben Gardon <bgardon@google.com>
Subject: [PATCH 19/22] kvm: mmu: Support write protection for nesting in tdp MMU

To support nested virtualization, KVM will sometimes need to write
protect pages which are part of a shadowed paging structure or are not
writable in the shadowed paging structure. Add a function to write
protect GFN mappings for this purpose.
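For illustration, the intended calling pattern is unchanged by this patch:
a caller that shadows a guest paging structure write protects the backing
GFN through the existing entry point and flushes TLBs if the helper reports
that an SPTE was changed. A minimal sketch follows (the wrapper name is
hypothetical and not part of this patch):

	/*
	 * Illustrative only: write protect the GFN backing a shadowed
	 * page table page and flush remote TLBs if any SPTE changed.
	 * With this patch, kvm_mmu_slot_gfn_write_protect() covers
	 * mappings installed by the TDP MMU as well as the rmaps.
	 */
	static void write_protect_shadowed_gfn(struct kvm *kvm,
					       struct kvm_memory_slot *slot,
					       gfn_t gfn)
	{
		if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn))
			kvm_flush_remote_tlbs(kvm);
	}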

Tested by running kvm-unit-tests and KVM selftests on an Intel Haswell
machine. This series introduced no new failures.

This series can be viewed in Gerrit at:
https://linux-review.googlesource.com/c/virt/kvm/kvm/+/2538

Signed-off-by: Ben Gardon <bgardon@google.com>
---
 arch/x86/kvm/mmu/mmu.c     |  5 ++++
 arch/x86/kvm/mmu/tdp_mmu.c | 57 ++++++++++++++++++++++++++++++++++++++
 arch/x86/kvm/mmu/tdp_mmu.h |  3 ++
 3 files changed, 65 insertions(+)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 12892fc4f146d..e6f5093ba8f6f 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1667,6 +1667,11 @@ bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
 		write_protected |= __rmap_write_protect(kvm, rmap_head, true);
 	}
 
+	if (kvm->arch.tdp_mmu_enabled)
+		write_protected =
+			kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn) ||
+			write_protected;
+
 	return write_protected;
 }
 
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index a2895119655ac..931cb469b1f2f 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -1161,3 +1161,60 @@ void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
 		put_tdp_mmu_root(kvm, root);
 	}
 }
+
+/*
+ * Removes write access on the last level SPTE mapping this GFN and unsets the
+ * SPTE_MMU_WRITEABLE bit to ensure future writes continue to be intercepted.
+ * Returns true if an SPTE was set and a TLB flush is needed.
+ */
+static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
+			      gfn_t gfn)
+{
+	struct tdp_iter iter;
+	u64 new_spte;
+	bool spte_set = false;
+	int as_id = kvm_mmu_page_as_id(root);
+
+	for_each_tdp_pte_root(iter, root, gfn, gfn + 1) {
+		if (!is_shadow_present_pte(iter.old_spte) ||
+		    !is_last_spte(iter.old_spte, iter.level))
+			continue;
+
+		if (!is_writable_pte(iter.old_spte))
+			break;
+
+		new_spte = iter.old_spte &
+			~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE);
+
+		*iter.sptep = new_spte;
+		handle_changed_spte(kvm, as_id, iter.gfn, iter.old_spte,
+				    new_spte, iter.level);
+		spte_set = true;
+	}
+
+	return spte_set;
+}
+
+/*
+ * Removes write access on the last level SPTE mapping this GFN and unsets the
+ * SPTE_MMU_WRITEABLE bit to ensure future writes continue to be intercepted.
+ * Returns true if an SPTE was set and a TLB flush is needed.
+ */
+bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
+				   struct kvm_memory_slot *slot, gfn_t gfn)
+{
+	struct kvm_mmu_page *root;
+	int root_as_id;
+	bool spte_set = false;
+
+	lockdep_assert_held(&kvm->mmu_lock);
+	for_each_tdp_mmu_root(kvm, root) {
+		root_as_id = kvm_mmu_page_as_id(root);
+		if (root_as_id != slot->as_id)
+			continue;
+
+		spte_set = write_protect_gfn(kvm, root, gfn) || spte_set;
+	}
+	return spte_set;
+}
+
diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
index 10e70699c5372..2ecb047211a6d 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.h
+++ b/arch/x86/kvm/mmu/tdp_mmu.h
@@ -40,4 +40,7 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
 bool kvm_tdp_mmu_slot_set_dirty(struct kvm *kvm, struct kvm_memory_slot *slot);
 void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
 				       const struct kvm_memory_slot *slot);
+
+bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
+				   struct kvm_memory_slot *slot, gfn_t gfn);
 #endif /* __KVM_X86_MMU_TDP_MMU_H */
--
2.28.0.709.gb0816b6eb0-goog