From: Jérôme Glisse <jglisse@redhat.com>
Subject: [RFC PATCH 3/3] mm/mmu_notifier: keep track of ranges being invalidated

This keeps a list of all virtual address ranges being invalidated (i.e.
inside a mmu_notifier_invalidate_range_start/end section), and adds a
helper to check whether a range is undergoing such an invalidation. With
this it is easy for a concurrent thread to ignore invalidations that do
not affect the virtual address range it is working on.
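
For illustration, here is a minimal sketch of how a driver fault
handler might consult the new helper before committing a device page
table update. struct my_dev_mirror, my_dev_map_page() and the retry
policy are hypothetical driver pieces, not part of this patch:

#include <linux/mmu_notifier.h>
#include <linux/mutex.h>
#include <linux/sched.h>

/* Hypothetical per-mm mirror state for a device. */
struct my_dev_mirror {
	struct mm_struct *mm;
	struct mutex lock;	/* also taken in invalidate_range_start() */
};

/* Hypothetical device page table update. */
void my_dev_map_page(struct my_dev_mirror *mirror, unsigned long addr);

int my_dev_handle_fault(struct my_dev_mirror *mirror, unsigned long addr)
{
	mutex_lock(&mirror->lock);
	while (!mmu_notifier_addr_valid(mirror->mm, addr)) {
		/*
		 * addr overlaps a range sitting between
		 * invalidate_range_start() and invalidate_range_end();
		 * anything mapped now could be stale once the
		 * invalidation finishes, so back off and retry.
		 */
		mutex_unlock(&mirror->lock);
		cond_resched();
		mutex_lock(&mirror->lock);
	}
	my_dev_map_page(mirror, addr);
	mutex_unlock(&mirror->lock);
	return 0;
}

Note this is only race free if the driver's own
invalidate_range_start() callback serializes on the same lock, so that
a new invalidation cannot slip in between the check and the device
page table update.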

Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Christian König <christian.koenig@amd.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Leon Romanovsky <leonro@mellanox.com>
Cc: Artemy Kovalyov <artemyko@mellanox.com>
Cc: Evgeny Baskakov <ebaskakov@nvidia.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Mark Hairgrove <mhairgrove@nvidia.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Mike Marciniszyn <mike.marciniszyn@intel.com>
Cc: Dennis Dalessandro <dennis.dalessandro@intel.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: Sudeep Dutt <sudeep.dutt@intel.com>
Cc: Ashutosh Dixit <ashutosh.dixit@intel.com>
Cc: Dimitri Sivanich <sivanich@sgi.com>
---
 include/linux/mmu_notifier.h | 38 ++++++++++++++++++++++++++++++++++++++
 mm/mmu_notifier.c            | 28 ++++++++++++++++++++++++++++
 2 files changed, 66 insertions(+)

diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index e59db7a1e86d..4bda68499f43 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -47,16 +47,20 @@ struct mmu_notifier_mm {
 	struct hlist_head list;
 	/* to serialize the list modifications and hlist_unhashed */
 	spinlock_t lock;
+	/* list of all active invalidation ranges */
+	struct list_head ranges;
 };
 
 /*
  * struct mmu_notifier_range - range being invalidated with range_start/end
+ * @list: used to track the list of active invalidations
  * @mm: mm_struct invalidation is against
  * @start: start address of range (inclusive)
  * @end: end address of range (exclusive)
  * @event: type of invalidation (see enum mmu_notifier_event)
  */
 struct mmu_notifier_range {
+	struct list_head list;
 	struct mm_struct *mm;
 	unsigned long start;
 	unsigned long end;
@@ -268,6 +272,9 @@ extern void __mmu_notifier_invalidate_range_end(
 extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
 				  unsigned long start, unsigned long end);
 extern bool mm_has_blockable_invalidate_notifiers(struct mm_struct *mm);
+extern bool __mmu_notifier_range_valid(struct mm_struct *mm,
+				       unsigned long start,
+				       unsigned long end);
 
 static inline void mmu_notifier_release(struct mm_struct *mm)
 {
@@ -275,6 +282,24 @@ static inline void mmu_notifier_release(struct mm_struct *mm)
 		__mmu_notifier_release(mm);
 }
 
+static inline bool mmu_notifier_range_valid(struct mm_struct *mm,
+					    unsigned long start,
+					    unsigned long end)
+{
+	if (mm_has_notifiers(mm))
+		return __mmu_notifier_range_valid(mm, start, end);
+	return false;
+}
+
+static inline bool mmu_notifier_addr_valid(struct mm_struct *mm,
+					   unsigned long addr)
+{
+	addr &= PAGE_MASK;
+	if (mm_has_notifiers(mm))
+		return __mmu_notifier_range_valid(mm, addr, addr + PAGE_SIZE);
+	return false;
+}
+
 static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
 					  unsigned long start,
 					  unsigned long end)
@@ -487,6 +512,19 @@ static inline void mmu_notifier_release(struct mm_struct *mm)
 {
 }
 
+static inline bool mmu_notifier_range_valid(struct mm_struct *mm,
+					    unsigned long start,
+					    unsigned long end)
+{
+	return true;
+}
+
+static inline bool mmu_notifier_addr_valid(struct mm_struct *mm,
+					   unsigned long addr)
+{
+	return true;
+}
+
 static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
 					  unsigned long start,
 					  unsigned long end)
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 91a614b9636e..d7c46eaa5d42 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -180,6 +180,10 @@ void __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
 	struct mmu_notifier *mn;
 	int id;
 
+	spin_lock(&mm->mmu_notifier_mm->lock);
+	list_add_rcu(&range->list, &mm->mmu_notifier_mm->ranges);
+	spin_unlock(&mm->mmu_notifier_mm->lock);
+
 	id = srcu_read_lock(&srcu);
 	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
 		if (mn->ops->invalidate_range_start)
@@ -218,6 +222,10 @@ void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range,
 			mn->ops->invalidate_range_end(mn, range);
 	}
 	srcu_read_unlock(&srcu, id);
+
+	spin_lock(&mm->mmu_notifier_mm->lock);
+	list_del_rcu(&range->list);
+	spin_unlock(&mm->mmu_notifier_mm->lock);
 }
 EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_end);
 
@@ -288,6 +296,7 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
 		goto out_clean;
 
 	if (!mm_has_notifiers(mm)) {
+		INIT_LIST_HEAD(&mmu_notifier_mm->ranges);
 		INIT_HLIST_HEAD(&mmu_notifier_mm->list);
 		spin_lock_init(&mmu_notifier_mm->lock);
 
@@ -424,3 +433,22 @@ void mmu_notifier_unregister_no_release(struct mmu_notifier *mn,
 	mmdrop(mm);
 }
 EXPORT_SYMBOL_GPL(mmu_notifier_unregister_no_release);
+
+bool __mmu_notifier_range_valid(struct mm_struct *mm,
+				unsigned long start,
+				unsigned long end)
+{
+	struct mmu_notifier_range *range;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(range, &mm->mmu_notifier_mm->ranges, list) {
+		if (end <= range->start || start >= range->end)
+			continue;
+		rcu_read_unlock();
+		return false;
+	}
+	rcu_read_unlock();
+
+	return true;
+}
+EXPORT_SYMBOL_GPL(__mmu_notifier_range_valid);
--
2.14.3