From: Xiao Guangrong <guangrong.xiao@linux.intel.com>
Subject: [PATCH 12/15] KVM: MTRR: introduce mtrr_for_each_mem_type
Date: Sat, 30 May 2015
It walks all MTRRs and gets all the memory cache type settings for the
specified range; it also checks whether the range is fully covered by
MTRRs.
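
A sketch of the intended usage follows. The caller below is
illustrative only (example_range_type and its default_type parameter
are made up for this example), and conflict resolution between
overlapping types is left out:

	static int example_range_type(struct kvm_vcpu *vcpu, u64 start,
				      u64 end, int default_type)
	{
		struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
		struct mtrr_looker looker;
		int type = -1;

		mtrr_for_each_mem_type(&looker, mtrr_state, start, end) {
			/* looker.mem_type is the type of one MTRR range
			 * overlapping [start, end); the first hit wins
			 * in this sketch, real users must follow the
			 * SDM precedence rules. */
			if (type == -1)
				type = looker.mem_type;
		}

		/* part of [start, end) hit no MTRR: use the default. */
		if (looker.partial_map)
			type = default_type;

		return type;
	}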

Signed-off-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
---
arch/x86/kvm/mtrr.c | 183 ++++++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 183 insertions(+)

diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
index e59d138..35f86303 100644
--- a/arch/x86/kvm/mtrr.c
+++ b/arch/x86/kvm/mtrr.c
@@ -395,6 +395,189 @@ void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu)
INIT_LIST_HEAD(&vcpu->arch.mtrr_state.head);
}

+struct mtrr_looker {
+ /* input fields. */
+ struct kvm_mtrr *mtrr_state;
+ u64 start;
+ u64 end;
+
+ /* output fields. */
+ int mem_type;
+ /* true if [start, end) is not fully covered in MTRRs. */
+ bool partial_map;
+
+ /* private fields. */
+ union {
+ /* used for fixed MTRRs. */
+ struct {
+ int index;
+ int seg;
+ };
+
+ /* used for var MTRRs. */
+ struct {
+ struct kvm_mtrr_range *range;
+ /* the max address that has been covered in var MTRRs. */
+ u64 start_max;
+ };
+ };
+
+ bool fixed;
+};
+
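+/*
+ * initialize the input fields only; the output and private fields are
+ * set up later by mtrr_lookup_start().
+ */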
+static void mtrr_lookup_init(struct mtrr_looker *looker,
+ struct kvm_mtrr *mtrr_state, u64 start, u64 end)
+{
+ looker->mtrr_state = mtrr_state;
+ looker->start = start;
+ looker->end = end;
+}
+
+static u64 fixed_mtrr_range_end_addr(int seg, int index)
+{
+ struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
+
+ return mtrr_seg->start + mtrr_seg->range_size * index;
+}
+
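+/*
+ * try to begin the walk in the fixed MTRRs; returns false if they are
+ * disabled or the start address falls in no fixed segment, in which
+ * case the walk begins in the variable MTRRs instead.
+ */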
+static bool mtrr_lookup_fixed_start(struct mtrr_looker *looker)
+{
+ int seg, index;
+
+ if (!looker->mtrr_state->fixed_mtrr_enabled)
+ return false;
+
+ seg = fixed_mtrr_addr_to_seg(looker->start);
+ if (seg < 0)
+ return false;
+
+ looker->fixed = true;
+ index = fixed_mtrr_addr_seg_to_range_index(looker->start, seg);
+ looker->index = index;
+ looker->seg = seg;
+ looker->mem_type = looker->mtrr_state->fixed_ranges[index];
+ looker->start = fixed_mtrr_range_end_addr(seg, index);
+ return true;
+}
+
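+/*
+ * check whether the variable MTRR @range overlaps with
+ * [looker->start, looker->end); on overlap, record the range, its
+ * memory type and whether an uncovered hole precedes it.
+ */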
+static bool match_var_range(struct mtrr_looker *looker,
+ struct kvm_mtrr_range *range)
+{
+ u64 start, end;
+
+ var_mtrr_range(range, &start, &end);
+ if (!(start >= looker->end || end <= looker->start)) {
+ looker->range = range;
+ looker->mem_type = range->base & 0xff;
+
+ /*
+ * this function is called while walking the sorted
+ * kvm_mtrr.head list, so @range has the minimum base
+ * address among all ranges overlapping
+ * [looker->start_max, looker->end).
+ */
+ looker->partial_map |= looker->start_max < start;
+
+ /* update the max address that has been covered. */
+ looker->start_max = max(looker->start_max, end);
+ return true;
+ }
+
+ return false;
+}
+
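+/*
+ * begin the walk in the variable MTRRs; if no range overlaps the
+ * region, the mapping is partial.
+ */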
+static void mtrr_lookup_var_start(struct mtrr_looker *looker)
+{
+ struct kvm_mtrr *mtrr_state = looker->mtrr_state;
+ struct kvm_mtrr_range *range;
+
+ looker->fixed = false;
+ looker->partial_map = false;
+ looker->start_max = looker->start;
+ looker->mem_type = -1;
+
+ list_for_each_entry(range, &mtrr_state->head, node)
+ if (match_var_range(looker, range))
+ return;
+
+ looker->partial_map = true;
+}
+
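+/*
+ * move to the next fixed range; once the fixed MTRRs are exhausted,
+ * the walk continues in the variable MTRRs.
+ */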
+static void mtrr_lookup_fixed_next(struct mtrr_looker *looker)
+{
+ struct fixed_mtrr_segment *eseg = &fixed_seg_table[looker->seg];
+ struct kvm_mtrr *mtrr_state = looker->mtrr_state;
+ u64 end;
+
+ if (looker->start >= looker->end) {
+ looker->mem_type = -1;
+ looker->partial_map = false;
+ return;
+ }
+
+ WARN_ON(!looker->fixed);
+
+ looker->index++;
+ end = fixed_mtrr_range_end_addr(looker->seg, looker->index);
+
+ /* switch to next segment. */
+ if (end >= eseg->end) {
+ looker->seg++;
+ looker->index = 0;
+
+ /* all fixed MTRRs have been looked up. */
+ if (looker->seg >= ARRAY_SIZE(fixed_seg_table))
+ return mtrr_lookup_var_start(looker);
+
+ end = fixed_mtrr_range_end_addr(looker->seg, looker->index);
+ }
+
+ looker->mem_type = mtrr_state->fixed_ranges[looker->index];
+ looker->start = end;
+}
+
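+/* move to the next variable MTRR overlapping the region, if any. */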
+static void mtrr_lookup_var_next(struct mtrr_looker *looker)
+{
+ struct kvm_mtrr *mtrr_state = looker->mtrr_state;
+
+ WARN_ON(looker->fixed);
+
+ looker->mem_type = -1;
+
+ list_for_each_entry_continue(looker->range, &mtrr_state->head, node)
+ if (match_var_range(looker, looker->range))
+ return;
+
+ looker->partial_map |= looker->start_max < looker->end;
+}
+
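+/*
+ * start the walk: if MTRRs are disabled, terminate immediately with a
+ * partial mapping; otherwise begin in the fixed MTRRs and fall back
+ * to the variable MTRRs.
+ */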
+static void mtrr_lookup_start(struct mtrr_looker *looker)
+{
+ looker->mem_type = -1;
+
+ if (!looker->mtrr_state->mtrr_enabled) {
+ looker->partial_map = true;
+ return;
+ }
+
+ if (!mtrr_lookup_fixed_start(looker))
+ mtrr_lookup_var_start(looker);
+}
+
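+/* advance the walk in whichever MTRR table is currently being used. */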
+static void mtrr_lookup_next(struct mtrr_looker *looker)
+{
+ WARN_ON(looker->mem_type == -1);
+
+ if (looker->fixed)
+ mtrr_lookup_fixed_next(looker);
+ else
+ mtrr_lookup_var_next(looker);
+}
+
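+/*
+ * walk each memory type set on [_gpa_start_, _gpa_end_); after the
+ * loop, (_looker_)->partial_map tells whether the whole range was
+ * covered by MTRRs.
+ */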
+#define mtrr_for_each_mem_type(_looker_, _mtrr_, _gpa_start_, _gpa_end_) \
+ for (mtrr_lookup_init(_looker_, _mtrr_, _gpa_start_, _gpa_end_), \
+ mtrr_lookup_start(_looker_); (_looker_)->mem_type != -1; \
+ mtrr_lookup_next(_looker_))
+
u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
{
struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
--
2.1.0

