    From: Kees Cook <keescook@chromium.org>
    Date: Thu, 4 Feb 2021
    Subject: Re: [PATCH v19 18/25] mm: Update can_follow_write_pte() for shadow stack
    On Wed, Feb 03, 2021 at 02:55:40PM -0800, Yu-cheng Yu wrote:
    > can_follow_write_pte() ensures a read-only page has been COWed by
    > checking the FOLL_COW flag, and uses pte_dirty() to verify that the
    > flag is still valid.
    >
    > Like a writable data page, a shadow stack page is writable and
    > becomes read-only during copy-on-write, but unlike a normal data
    > page it is always dirty. Thus, in the can_follow_write_pte() check,
    > it belongs to the writable page case and should be excluded from the
    > read-only page pte_dirty() check. Apply the same changes to
    > can_follow_write_pmd().
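
    That "always dirty" property is the crux: for a normal mapping,
    pte_dirty() on a read-only PTE is evidence that a COW cycle has
    already happened, but a shadow stack PTE is dirty by construction,
    so dirtiness proves nothing there. A minimal standalone sketch of
    the resulting predicate (the FOLL_* values and bool parameters here
    are illustrative stand-ins, not the kernel definitions):

	#include <stdbool.h>

	#define FOLL_FORCE	0x10	/* illustrative values only */
	#define FOLL_COW	0x4000

	/* Sketch: mirrors the patched can_follow_write_pte() decision. */
	static bool can_follow_write(bool writable, bool dirty,
				     bool shstk_vma, unsigned int flags)
	{
		if (writable)		/* ordinary writable PTE */
			return true;
		/*
		 * FOLL_FORCE may write through a read-only PTE only
		 * after a COW cycle, which dirtiness normally proves.
		 * Shadow stack PTEs are always dirty, so they must be
		 * excluded explicitly.
		 */
		return (flags & FOLL_FORCE) && (flags & FOLL_COW) &&
		       dirty && !shstk_vma;
	}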

    Does this need the vma passed down? Should it just pass vm_flags? I
    suppose it doesn't really matter, though.
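
    Something like this is what I had in mind (untested sketch, not
    part of the patch; arch_shadow_stack_mapping() as introduced
    earlier in this series):

	static inline bool can_follow_write_pte(pte_t pte, unsigned int flags,
						unsigned long vm_flags)
	{
		return pte_write(pte) ||
		       ((flags & FOLL_FORCE) && (flags & FOLL_COW) &&
			pte_dirty(pte) && !arch_shadow_stack_mapping(vm_flags));
	}

    with the call sites passing vma->vm_flags instead of vma.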

    Reviewed-by: Kees Cook <keescook@chromium.org>

    -Kees

    >
    > Signed-off-by: Yu-cheng Yu <yu-cheng.yu@intel.com>
    > ---
    >  mm/gup.c         | 8 +++++---
    >  mm/huge_memory.c | 8 +++++---
    >  2 files changed, 10 insertions(+), 6 deletions(-)
    >
    > diff --git a/mm/gup.c b/mm/gup.c
    > index e4c224cd9661..66ab67626f57 100644
    > --- a/mm/gup.c
    > +++ b/mm/gup.c
    > @@ -357,10 +357,12 @@ static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
    >   * FOLL_FORCE can write to even unwritable pte's, but only
    >   * after we've gone through a COW cycle and they are dirty.
    >   */
    > -static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
    > +static inline bool can_follow_write_pte(pte_t pte, unsigned int flags,
    > +					struct vm_area_struct *vma)
    >  {
    >  	return pte_write(pte) ||
    > -		((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
    > +		((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte) &&
    > +		 !arch_shadow_stack_mapping(vma->vm_flags));
    >  }
    >
    >  static struct page *follow_page_pte(struct vm_area_struct *vma,
    > @@ -403,7 +405,7 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
    >  	}
    >  	if ((flags & FOLL_NUMA) && pte_protnone(pte))
    >  		goto no_page;
    > -	if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
    > +	if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags, vma)) {
    >  		pte_unmap_unlock(ptep, ptl);
    >  		return NULL;
    >  	}
    > diff --git a/mm/huge_memory.c b/mm/huge_memory.c
    > index bfec65c9308b..eb64e2b56bc9 100644
    > --- a/mm/huge_memory.c
    > +++ b/mm/huge_memory.c
    > @@ -1337,10 +1337,12 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
    >   * FOLL_FORCE can write to even unwritable pmd's, but only
    >   * after we've gone through a COW cycle and they are dirty.
    >   */
    > -static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
    > +static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags,
    > +					struct vm_area_struct *vma)
    >  {
    >  	return pmd_write(pmd) ||
    > -		((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
    > +		((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd) &&
    > +		 !arch_shadow_stack_mapping(vma->vm_flags));
    >  }
    >
    >  struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
    > @@ -1353,7 +1355,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
    >
    >  	assert_spin_locked(pmd_lockptr(mm, pmd));
    >
    > -	if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, flags))
    > +	if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, flags, vma))
    >  		goto out;
    >
    >  	/* Avoid dumping huge zero page */
    > --
    > 2.21.0
    >
    >

    --
    Kees Cook
