Subject: [RFC PATCH v4 16/27] mm: Update can_follow_write_pte/pmd for shadow stack

    can_follow_write_pte() and can_follow_write_pmd() look for a
    (RO & DIRTY) PTE/PMD to verify that an exclusive RO page still
    exists after a COW has been broken.

    A shadow stack PTE is RO & PAGE_DIRTY_SW when it is shared,
    otherwise RO & PAGE_DIRTY_HW.
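
    (For context only, a rough sketch of the dirty-bit helpers this
    relies on; they are introduced earlier in this series and the real
    definitions there may differ:)

        /* Rough sketch, for illustration only -- not part of this patch. */
        static inline bool pte_dirty_hw(pte_t pte)
        {
                return pte_flags(pte) & _PAGE_DIRTY_HW;  /* hardware dirty bit */
        }

        static inline bool pte_dirty_sw(pte_t pte)
        {
                return pte_flags(pte) & _PAGE_DIRTY_SW;  /* software dirty bit */
        }

        static inline bool pte_dirty(pte_t pte)
        {
                /* either flavor of dirty counts as "dirty" */
                return pte_dirty_hw(pte) || pte_dirty_sw(pte);
        }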

    Introduce pte_exclusive() and pmd_exclusive() to also verify that a
    shadow stack PTE is exclusive: for a shadow stack VMA, pte_dirty()
    is true for both the shared and the exclusive case, so only the
    hardware dirty bit proves the page is no longer shared.

    Also rename can_follow_write_pte/pmd() to can_follow_write() to
    make their meaning clear; i.e. "Can we write to the page?", not
    "Is the PTE writable?"

    Signed-off-by: Yu-cheng Yu <yu-cheng.yu@intel.com>
    ---
 arch/x86/mm/pgtable.c         | 19 +++++++++++++++++++
 include/asm-generic/pgtable.h |  4 ++++
 mm/gup.c                      |  8 +++++---
 mm/huge_memory.c              |  8 +++++---
 4 files changed, 33 insertions(+), 6 deletions(-)

diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index ccdfd3dd7163..e13a020e37db 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -894,4 +894,23 @@ inline bool arch_copy_pte_mapping(vm_flags_t vm_flags)
 {
         return (vm_flags & VM_SHSTK);
 }
+
+inline bool pte_exclusive(pte_t pte, struct vm_area_struct *vma)
+{
+        if (vma->vm_flags & VM_SHSTK)
+                return pte_dirty_hw(pte);
+        else
+                return pte_dirty(pte);
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+inline bool pmd_exclusive(pmd_t pmd, struct vm_area_struct *vma)
+{
+        if (vma->vm_flags & VM_SHSTK)
+                return pmd_dirty_hw(pmd);
+        else
+                return pmd_dirty(pmd);
+}
+#endif
+
 #endif /* CONFIG_X86_INTEL_SHADOW_STACK_USER */
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index a91f07454ced..6223017929be 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -1131,10 +1131,14 @@ static inline bool arch_has_pfn_modify_check(void)
 #define pte_set_vma_features(pte, vma) pte
 #define pmd_set_vma_features(pmd, vma) pmd
 #define arch_copy_pte_mapping(vma_flags) false
+#define pte_exclusive(pte, vma) pte_dirty(pte)
+#define pmd_exclusive(pmd, vma) pmd_dirty(pmd)
 #else
 inline pte_t pte_set_vma_features(pte_t pte, struct vm_area_struct *vma);
 inline pmd_t pmd_set_vma_features(pmd_t pmd, struct vm_area_struct *vma);
 bool arch_copy_pte_mapping(vm_flags_t vm_flags);
+bool pte_exclusive(pte_t pte, struct vm_area_struct *vma);
+bool pmd_exclusive(pmd_t pmd, struct vm_area_struct *vma);
 #endif
 
 #endif /* _ASM_GENERIC_PGTABLE_H */
diff --git a/mm/gup.c b/mm/gup.c
index 1abc8b4afff6..03cb2e331f80 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -64,10 +64,12 @@ static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
  * FOLL_FORCE can write to even unwritable pte's, but only
  * after we've gone through a COW cycle and they are dirty.
  */
-static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
+static inline bool can_follow_write(pte_t pte, unsigned int flags,
+                                    struct vm_area_struct *vma)
 {
         return pte_write(pte) ||
-                ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
+                ((flags & FOLL_FORCE) && (flags & FOLL_COW) &&
+                 pte_exclusive(pte, vma));
 }
 
 static struct page *follow_page_pte(struct vm_area_struct *vma,
@@ -105,7 +107,7 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
         }
         if ((flags & FOLL_NUMA) && pte_protnone(pte))
                 goto no_page;
-        if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
+        if ((flags & FOLL_WRITE) && !can_follow_write(pte, flags, vma)) {
                 pte_unmap_unlock(ptep, ptl);
                 return NULL;
         }
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index df39ae20fe40..c70aa8fa4cb2 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1387,10 +1387,12 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd)
  * FOLL_FORCE can write to even unwritable pmd's, but only
  * after we've gone through a COW cycle and they are dirty.
  */
-static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
+static inline bool can_follow_write(pmd_t pmd, unsigned int flags,
+                                    struct vm_area_struct *vma)
 {
         return pmd_write(pmd) ||
-                ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
+                ((flags & FOLL_FORCE) && (flags & FOLL_COW) &&
+                 pmd_exclusive(pmd, vma));
 }
 
 struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
@@ -1403,7 +1405,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 
         assert_spin_locked(pmd_lockptr(mm, pmd));
 
-        if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, flags))
+        if (flags & FOLL_WRITE && !can_follow_write(*pmd, flags, vma))
                 goto out;
 
         /* Avoid dumping huge zero page */
    --
    2.17.1