Subject: [PATCH] mm/rmap: Convert from atomic_t to refcount_t on anon_vma->refcount
The refcount_t type and its corresponding API protect reference counters
from accidental underflow and overflow, and from the use-after-free bugs
that can follow. Convert anon_vma->refcount from atomic_t to refcount_t
and switch its users over to the refcount_*() API.
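
For illustration only (not part of the patch): the sketch below shows the
same get/put lifecycle on a refcount_t. struct foo and the foo_*() helpers
are hypothetical names, but each refcount_*() call is one this patch
substitutes for its atomic_t counterpart.

#include <linux/refcount.h>
#include <linux/slab.h>

struct foo {
	refcount_t refcount;
};

static struct foo *foo_alloc(void)
{
	struct foo *foo = kmalloc(sizeof(*foo), GFP_KERNEL);

	if (foo)
		refcount_set(&foo->refcount, 1);	/* first reference */
	return foo;
}

/* Take an extra reference on an object we already safely hold. */
static void foo_get(struct foo *foo)
{
	refcount_inc(&foo->refcount);
}

/* Speculative get (e.g. under RCU): fails once the count has hit zero. */
static bool foo_get_unless_zero(struct foo *foo)
{
	return refcount_inc_not_zero(&foo->refcount);
}

/* Drop a reference; free the object on the last put. */
static void foo_put(struct foo *foo)
{
	if (refcount_dec_and_test(&foo->refcount))
		kfree(foo);
}

Unlike the atomic_t operations, these helpers saturate and WARN on overflow
or underflow instead of wrapping, so a refcounting bug gets reported rather
than silently turning into a use-after-free.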

Signed-off-by: Xiyu Yang <xiyuyang19@fudan.edu.cn>
Signed-off-by: Xin Tan <tanxin.ctf@gmail.com>
---
 include/linux/rmap.h |  8 +++++---
 mm/rmap.c            | 14 +++++++-------
 2 files changed, 12 insertions(+), 10 deletions(-)

diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index c976cc6de257..38151efe1a59 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -12,6 +12,8 @@
 #include <linux/memcontrol.h>
 #include <linux/highmem.h>
 
+#include <linux/refcount.h>
+
 /*
  * The anon_vma heads a list of private "related" vmas, to scan if
  * an anonymous page pointing to this anon_vma needs to be unmapped:
@@ -36,7 +38,7 @@ struct anon_vma {
 	 * the reference is responsible for clearing up the
 	 * anon_vma if they are the last user on release
 	 */
-	atomic_t refcount;
+	refcount_t refcount;
 
 	/*
 	 * Count of child anon_vmas and VMAs which points to this anon_vma.
@@ -100,14 +102,14 @@ enum ttu_flags {
 #ifdef CONFIG_MMU
 static inline void get_anon_vma(struct anon_vma *anon_vma)
 {
-	atomic_inc(&anon_vma->refcount);
+	refcount_inc(&anon_vma->refcount);
 }
 
 void __put_anon_vma(struct anon_vma *anon_vma);
 
 static inline void put_anon_vma(struct anon_vma *anon_vma)
 {
-	if (atomic_dec_and_test(&anon_vma->refcount))
+	if (refcount_dec_and_test(&anon_vma->refcount))
 		__put_anon_vma(anon_vma);
 }
 
diff --git a/mm/rmap.c b/mm/rmap.c
index b9eb5c12f3fe..7badd786e095 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -88,7 +88,7 @@ static inline struct anon_vma *anon_vma_alloc(void)
 
 	anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
 	if (anon_vma) {
-		atomic_set(&anon_vma->refcount, 1);
+		refcount_set(&anon_vma->refcount, 1);
 		anon_vma->degree = 1;	/* Reference for first vma */
 		anon_vma->parent = anon_vma;
 		/*
@@ -103,7 +103,7 @@ static inline struct anon_vma *anon_vma_alloc(void)
 
 static inline void anon_vma_free(struct anon_vma *anon_vma)
 {
-	VM_BUG_ON(atomic_read(&anon_vma->refcount));
+	VM_BUG_ON(refcount_read(&anon_vma->refcount));
 
 	/*
 	 * Synchronize against page_lock_anon_vma_read() such that
@@ -445,7 +445,7 @@ static void anon_vma_ctor(void *data)
 	struct anon_vma *anon_vma = data;
 
 	init_rwsem(&anon_vma->rwsem);
-	atomic_set(&anon_vma->refcount, 0);
+	refcount_set(&anon_vma->refcount, 0);
 	anon_vma->rb_root = RB_ROOT_CACHED;
 }
 
@@ -495,7 +495,7 @@ struct anon_vma *page_get_anon_vma(struct page *page)
 		goto out;
 
 	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
-	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
+	if (!refcount_inc_not_zero(&anon_vma->refcount)) {
 		anon_vma = NULL;
 		goto out;
 	}
@@ -554,7 +554,7 @@ struct anon_vma *page_lock_anon_vma_read(struct page *page)
 	}
 
 	/* trylock failed, we got to sleep */
-	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
+	if (!refcount_inc_not_zero(&anon_vma->refcount)) {
 		anon_vma = NULL;
 		goto out;
 	}
@@ -569,7 +569,7 @@ struct anon_vma *page_lock_anon_vma_read(struct page *page)
 	rcu_read_unlock();
 	anon_vma_lock_read(anon_vma);
 
-	if (atomic_dec_and_test(&anon_vma->refcount)) {
+	if (refcount_dec_and_test(&anon_vma->refcount)) {
 		/*
 		 * Oops, we held the last refcount, release the lock
 		 * and bail -- can't simply use put_anon_vma() because
@@ -2221,7 +2221,7 @@ void __put_anon_vma(struct anon_vma *anon_vma)
 	struct anon_vma *root = anon_vma->root;
 
 	anon_vma_free(anon_vma);
-	if (root != anon_vma && atomic_dec_and_test(&root->refcount))
+	if (root != anon_vma && refcount_dec_and_test(&root->refcount))
 		anon_vma_free(root);
 }
 

--
2.7.4