Subject: [PATCH 03/37] mm: kill get_user_pages_locked()

From: Dave Hansen <dave.hansen@linux.intel.com>

We have no remaining users of get_user_pages_locked(). Kill it.

Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
---

 b/include/linux/mm.h |    4 ----
 b/mm/gup.c           |   31 -------------------------------
 2 files changed, 35 deletions(-)

diff -puN include/linux/mm.h~kill-get_user_pages_locked include/linux/mm.h
--- a/include/linux/mm.h~kill-get_user_pages_locked	2015-11-16 12:35:35.684187540 -0800
+++ b/include/linux/mm.h	2015-11-16 12:35:35.689187767 -0800
@@ -1195,10 +1195,6 @@ long get_user_pages(struct task_struct *
 		    unsigned long start, unsigned long nr_pages,
 		    int write, int force, struct page **pages,
 		    struct vm_area_struct **vmas);
-long get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm,
-		    unsigned long start, unsigned long nr_pages,
-		    int write, int force, struct page **pages,
-		    int *locked);
 long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
 			       unsigned long start, unsigned long nr_pages,
 			       int write, int force, struct page **pages,
diff -puN mm/gup.c~kill-get_user_pages_locked mm/gup.c
--- a/mm/gup.c~kill-get_user_pages_locked	2015-11-16 12:35:35.686187631 -0800
+++ b/mm/gup.c	2015-11-16 12:35:35.690187812 -0800
@@ -715,37 +715,6 @@ static __always_inline long __get_user_p
 }
 
 /*
- * We can leverage the VM_FAULT_RETRY functionality in the page fault
- * paths better by using either get_user_pages_locked() or
- * get_user_pages_unlocked().
- *
- * get_user_pages_locked() is suitable to replace the form:
- *
- *      down_read(&mm->mmap_sem);
- *      do_something()
- *      get_user_pages(tsk, mm, ..., pages, NULL);
- *      up_read(&mm->mmap_sem);
- *
- * to:
- *
- *      int locked = 1;
- *      down_read(&mm->mmap_sem);
- *      do_something()
- *      get_user_pages_locked(tsk, mm, ..., pages, &locked);
- *      if (locked)
- *              up_read(&mm->mmap_sem);
- */
-long get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm,
-			   unsigned long start, unsigned long nr_pages,
-			   int write, int force, struct page **pages,
-			   int *locked)
-{
-	return __get_user_pages_locked(tsk, mm, start, nr_pages, write, force,
-				       pages, NULL, locked, true, FOLL_TOUCH);
-}
-EXPORT_SYMBOL(get_user_pages_locked);
-
-/*
  * Same as get_user_pages_unlocked(...., FOLL_TOUCH) but it allows to
  * pass additional gup_flags as last parameter (like FOLL_HWPOISON).
  *
_
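
For reference, the comment deleted above documented the conversion the locked
variant was meant to enable: take mmap_sem, call the _locked variant, and only
drop the semaphore if the fault path did not already drop it on VM_FAULT_RETRY.
A minimal caller sketch of that pattern follows (hypothetical function name,
written against the pre-removal prototype shown in the hunk above; it is not
code from this patch):

	/*
	 * Hypothetical example, not part of this patch: the caller
	 * pattern the removed comment documented, using the
	 * pre-removal get_user_pages_locked() prototype.
	 */
	#include <linux/mm.h>
	#include <linux/sched.h>

	static long pin_one_user_page(struct task_struct *tsk,
				      struct mm_struct *mm,
				      unsigned long start,
				      struct page **pages)
	{
		int locked = 1;
		long ret;

		down_read(&mm->mmap_sem);
		/* read-only (write=0), no force; gup may drop mmap_sem */
		ret = get_user_pages_locked(tsk, mm, start, 1, 0, 0,
					    pages, &locked);
		/* only unlock if the fault path did not already drop it */
		if (locked)
			up_read(&mm->mmap_sem);
		return ret;
	}

The conditional unlock is the whole point of the API: when the fault path has
to retry, it releases mmap_sem itself and reports that by clearing 'locked',
so an unconditional up_read() would underflow the semaphore.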
