Subject: Re: [PATCH v2 01/17] mm/gup: Fixup p*_access_permitted()
On Thu, Dec 14, 2017 at 09:04:56PM -0800, Dave Hansen wrote:
>
> I've got some additions to the selftests and a fix where we pass FOLL_*
> flags around a bit more instead of just 'write'. I'll get those out as
> soon as I do a bit more testing.

Try the below; I have more in the works, but this already fixes a whole
bunch of obvious fail and should fix the case I described.

The thing is, you should _never_ return NULL for an access error; that's
complete crap. NULL means "no page here", so the GUP loop responds by
faulting the page in and retrying the lookup; if the access keeps being
denied, a permanent permission failure turns into an endless fault loop
instead of the -EFAULT the caller should get.
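
For reference, the caller side in __get_user_pages() looks roughly like
this (paraphrased from mm/gup.c, not verbatim):

	page = follow_page_mask(vma, start, foll_flags, &page_mask);
	if (!page) {
		/*
		 * NULL means "nothing mapped here": fault the page in
		 * and retry the lookup.  If the fault keeps "succeeding"
		 * while the access stays denied, this loops forever.
		 */
		ret = faultin_page(tsk, vma, start, &foll_flags,
				   nonblocking);
		if (!ret)
			goto retry;
	} else if (IS_ERR(page)) {
		/* a hard failure: stop and report it to the caller */
		return i ? i : PTR_ERR(page);
	}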

You should also not blindly change every pte_write() test to
pte_access_permitted(); that's also wrong, because then you're still
missing the read-access tests.
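
To illustrate with two lines that are mine, not from the series: a
blind conversion like

	if (!pte_access_permitted(pte, WRITE))

only ever refuses write access, so a read-only GUP sails through with
no permission check at all, whereas the follow_*() paths want the test
keyed to the access actually being performed, along the lines of:

	if (!pte_access_permitted(pte, !!(flags & FOLL_WRITE)))
		return ERR_PTR(-EFAULT);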

Basically you need to very carefully audit each and every
p??_access_permitted() call; they're currently mostly wrong.

--- a/mm/gup.c
+++ b/mm/gup.c
@@ -66,7 +66,7 @@ static int follow_pfn_pte(struct vm_area_struct *vma,
*/
static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
{
- return pte_access_permitted(pte, WRITE) ||
+ return pte_write(pte) ||
((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
}

@@ -153,6 +153,11 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
}

if (flags & FOLL_GET) {
+ if (!pte_access_permitted(pte, !!(flags & FOLL_WRITE))) {
+ page = ERR_PTR(-EFAULT);
+ goto out;
+ }
+
get_page(page);

/* drop the pgmap reference now that we hold the page */
@@ -244,6 +249,13 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
 		pmd_migration_entry_wait(mm, pmd);
 		goto retry;
 	}
+
+	if (flags & FOLL_GET) {
+		/* no pmd lock is held yet, so we can just return */
+		if (!pmd_access_permitted(*pmd, !!(flags & FOLL_WRITE)))
+			return ERR_PTR(-EFAULT);
+	}
+
if (pmd_devmap(*pmd)) {
ptl = pmd_lock(mm, pmd);
page = follow_devmap_pmd(vma, address, pmd, flags);
@@ -326,6 +338,13 @@ static struct page *follow_pud_mask(struct vm_area_struct *vma,
 			return page;
 		return no_page_table(vma, flags);
 	}
+
+	if (flags & FOLL_GET) {
+		/* likewise, no pud lock held at this point */
+		if (!pud_access_permitted(*pud, !!(flags & FOLL_WRITE)))
+			return ERR_PTR(-EFAULT);
+	}
+
if (pud_devmap(*pud)) {
ptl = pud_lock(mm, pud);
page = follow_devmap_pud(vma, address, pud, flags);
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -870,9 +870,6 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma,
*/
WARN_ONCE(flags & FOLL_COW, "mm: In follow_devmap_pmd with FOLL_COW set");

- if (!pmd_access_permitted(*pmd, flags & FOLL_WRITE))
- return NULL;
-
if (pmd_present(*pmd) && pmd_devmap(*pmd))
/* pass */;
else
@@ -1012,9 +1009,6 @@ struct page *follow_devmap_pud(struct vm_area_struct *vma,

assert_spin_locked(pud_lockptr(mm, pud));

- if (!pud_access_permitted(*pud, flags & FOLL_WRITE))
- return NULL;
-
if (pud_present(*pud) && pud_devmap(*pud))
/* pass */;
else
@@ -1386,7 +1380,7 @@ int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
*/
static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
{
- return pmd_access_permitted(pmd, WRITE) ||
+ return pmd_write(pmd) ||
((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
}
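
For completeness, a quick userspace sketch of how to poke at this; it
is mine, untested, and not Dave's selftests. It assumes an x86 machine
with protection keys and a glibc that has the pkey_*() wrappers. It
revokes all access to a page via a pkey and then has the kernel GUP
that page through /proc/self/mem; with the above applied the read
should come back -EFAULT instead of the kernel looping:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	char *buf = mmap(NULL, page, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	int pkey = pkey_alloc(0, PKEY_DISABLE_ACCESS);
	int fd;
	char c;

	if (buf == MAP_FAILED || pkey < 0) {
		perror("setup");
		return 1;
	}
	if (pkey_mprotect(buf, page, PROT_READ | PROT_WRITE, pkey)) {
		perror("pkey_mprotect");
		return 1;
	}

	/* reads of /proc/self/mem go through get_user_pages() */
	fd = open("/proc/self/mem", O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (pread(fd, &c, 1, (off_t)(unsigned long)buf) < 0)
		perror("pread");	/* expect EFAULT with the patch */
	else
		printf("read unexpectedly succeeded\n");
	return 0;
}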
