    From: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
    Subject: [PATCHv4 09/12] x86/mm: Provide pmdp_establish() helper
    Date: 13 Dec 2017
    We need an atomic way to set up a pmd page table entry, avoiding races
    with the CPU setting dirty/accessed bits. This is required to implement
    pmdp_invalidate() in a way that doesn't lose these bits.

    On PAE we can avoid the expensive cmpxchg8b for the case when the new
    page table entry is not present. If it is present, fall back to a
    cmpxchg loop.
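
    For reference, pmdp_invalidate() can then be built on top of this
    helper along the following lines (a sketch only: the actual conversion
    belongs to a follow-up patch; pmd_mknotpresent(), flush_pmd_tlb_range()
    and HPAGE_PMD_SIZE are the existing generic-MM primitives):

    	/* Sketch: invalidate a pmd without losing dirty/accessed bits. */
    	pmd_t pmdp_invalidate(struct vm_area_struct *vma,
    			unsigned long address, pmd_t *pmdp)
    	{
    		/* Atomically install a non-present copy of the old entry. */
    		pmd_t old = pmdp_establish(vma, address, pmdp,
    				pmd_mknotpresent(*pmdp));
    		/* Flush so no CPU can set A/D bits via a stale TLB entry. */
    		flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
    		return old;
    	}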

    Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
    Cc: Ingo Molnar <mingo@kernel.org>
    Cc: H. Peter Anvin <hpa@zytor.com>
    Cc: Thomas Gleixner <tglx@linutronix.de>
    ---
    arch/x86/include/asm/pgtable-3level.h | 37 ++++++++++++++++++++++++++++++++++++-
    arch/x86/include/asm/pgtable.h        | 15 +++++++++++++++
    2 files changed, 51 insertions(+), 1 deletion(-)

    diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
    index bc4af5453802..2c874cfc7789 100644
    --- a/arch/x86/include/asm/pgtable-3level.h
    +++ b/arch/x86/include/asm/pgtable-3level.h
    @@ -158,7 +158,6 @@ static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
     #define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
     #endif
     
    -#ifdef CONFIG_SMP
     union split_pmd {
     	struct {
     		u32 pmd_low;
    @@ -166,6 +165,8 @@ union split_pmd {
     	};
     	pmd_t pmd;
     };
    +
    +#ifdef CONFIG_SMP
     static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp)
     {
     	union split_pmd res, *orig = (union split_pmd *)pmdp;
    @@ -181,6 +182,40 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp)
     #define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
     #endif
     
    +#ifndef pmdp_establish
    +#define pmdp_establish pmdp_establish
    +static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
    +		unsigned long address, pmd_t *pmdp, pmd_t pmd)
    +{
    +	pmd_t old;
    +
    +	/*
    +	 * If pmd has present bit cleared we can get away without expensive
    +	 * cmpxchg64: we can update pmdp half-by-half without racing with
    +	 * anybody.
    +	 */
    +	if (!(pmd_val(pmd) & _PAGE_PRESENT)) {
    +		union split_pmd old, new, *ptr;
    +
    +		ptr = (union split_pmd *)pmdp;
    +
    +		new.pmd = pmd;
    +
    +		/* xchg acts as a barrier before setting of the high bits */
    +		old.pmd_low = xchg(&ptr->pmd_low, new.pmd_low);
    +		old.pmd_high = ptr->pmd_high;
    +		ptr->pmd_high = new.pmd_high;
    +		return old.pmd;
    +	}
    +
    +	do {
    +		old = *pmdp;
    +	} while (cmpxchg64(&pmdp->pmd, old.pmd, pmd.pmd) != old.pmd);
    +
    +	return old;
    +}
    +#endif
    +
     #ifdef CONFIG_SMP
     union split_pud {
     	struct {
    diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
    index 95e2dfd75521..099ebe0053c3 100644
    --- a/arch/x86/include/asm/pgtable.h
    +++ b/arch/x86/include/asm/pgtable.h
    @@ -1094,6 +1094,21 @@ static inline int pud_write(pud_t pud)
     	return pud_flags(pud) & _PAGE_RW;
     }
     
    +#ifndef pmdp_establish
    +#define pmdp_establish pmdp_establish
    +static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
    +		unsigned long address, pmd_t *pmdp, pmd_t pmd)
    +{
    +	if (IS_ENABLED(CONFIG_SMP)) {
    +		return xchg(pmdp, pmd);
    +	} else {
    +		pmd_t old = *pmdp;
    +		*pmdp = pmd;
    +		return old;
    +	}
    +}
    +#endif
    +
     /*
      * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
      *
    --
    2.15.0