    From: Zi Yan <ziy@nvidia.com>
    Subject: [RFC PATCH 13/16] mm: thp: add a knob to enable/disable 1GB THPs.

    Add a sysfs knob, enabled_1gb, alongside the existing transparent_hugepage
    controls so that 1GB THP creation at page fault time can be switched
    between always, madvise, and never. Like the 2MB knob, changing it only
    affects future faults; 1GB THPs that are already mapped are left untouched.
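
    A minimal userspace sketch of how the madvise policy is expected to be
    exercised (not part of this patch; the sysfs path, the 1GB alignment and
    the error handling are assumptions for illustration -- the path follows
    from enabled_1gb being added to hugepage_attr[], and the knob only gates
    whether create_huge_pud() is attempted, it does not guarantee a 1GB page
    is actually allocated):

    /*
     * Sketch only: exercise the 1GB THP "madvise" policy from userspace.
     * Assumes the new knob appears as
     * /sys/kernel/mm/transparent_hugepage/enabled_1gb and that a
     * 1GB-aligned range is needed for a PUD-sized fault.
     */
    #define _GNU_SOURCE
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/mman.h>

    #define GB (1UL << 30)

    int main(void)
    {
    	size_t len = 2 * GB;

    	/* Select the "madvise" policy for 1GB THPs (needs root). */
    	FILE *f = fopen("/sys/kernel/mm/transparent_hugepage/enabled_1gb", "w");
    	if (f) {
    		fputs("madvise", f);
    		fclose(f);
    	}

    	/* Over-map by 1GB so a 1GB-aligned region can be carved out. */
    	char *raw = mmap(NULL, len + GB, PROT_READ | PROT_WRITE,
    			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    	if (raw == MAP_FAILED) {
    		perror("mmap");
    		return 1;
    	}
    	char *buf = (char *)(((uintptr_t)raw + GB - 1) & ~(GB - 1));

    	/* Under "madvise", only VMAs with VM_HUGEPAGE are eligible, so
    	 * opt this range in explicitly. */
    	if (madvise(buf, len, MADV_HUGEPAGE)) {
    		perror("madvise");
    		return 1;
    	}

    	/* First touch takes the fault path; with this patch the kernel
    	 * may now try create_huge_pud() for this VMA. */
    	buf[0] = 1;

    	return 0;
    }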

    Signed-off-by: Zi Yan <ziy@nvidia.com>
    ---
    include/linux/huge_mm.h | 14 ++++++++++++++
    mm/huge_memory.c | 40 ++++++++++++++++++++++++++++++++++++++++
    mm/memory.c | 2 +-
    3 files changed, 55 insertions(+), 1 deletion(-)

    diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
    index c7bc40c4a5e2..3bf8d8a09f08 100644
    --- a/include/linux/huge_mm.h
    +++ b/include/linux/huge_mm.h
    @@ -119,6 +119,8 @@ enum transparent_hugepage_flag {
    #ifdef CONFIG_DEBUG_VM
    TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
    #endif
    + TRANSPARENT_PUD_HUGEPAGE_FLAG,
    + TRANSPARENT_PUD_HUGEPAGE_REQ_MADV_FLAG,
    };

    struct kobject;
    @@ -184,6 +186,18 @@ static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
    }

    bool transparent_hugepage_enabled(struct vm_area_struct *vma);
    +static inline bool transparent_pud_hugepage_enabled(struct vm_area_struct *vma)
    +{
    + if (transparent_hugepage_enabled(vma)) {
    + if (transparent_hugepage_flags & (1 << TRANSPARENT_PUD_HUGEPAGE_FLAG))
    + return true;
    + if (transparent_hugepage_flags &
    + (1 << TRANSPARENT_PUD_HUGEPAGE_REQ_MADV_FLAG))
    + return !!(vma->vm_flags & VM_HUGEPAGE);
    + }
    +
    + return false;
    +}

    #define HPAGE_CACHE_INDEX_MASK (HPAGE_PMD_NR - 1)

    diff --git a/mm/huge_memory.c b/mm/huge_memory.c
    index e209c2dfc5b7..e1440a13da63 100644
    --- a/mm/huge_memory.c
    +++ b/mm/huge_memory.c
    @@ -49,9 +49,11 @@
    unsigned long transparent_hugepage_flags __read_mostly =
    #ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
    (1<<TRANSPARENT_HUGEPAGE_FLAG)|
    + (1<<TRANSPARENT_PUD_HUGEPAGE_FLAG)|
    #endif
    #ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
    (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
    + (1<<TRANSPARENT_PUD_HUGEPAGE_REQ_MADV_FLAG)|
    #endif
    (1<<TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG)|
    (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
    @@ -199,6 +201,43 @@ static ssize_t enabled_store(struct kobject *kobj,
    static struct kobj_attribute enabled_attr =
    __ATTR(enabled, 0644, enabled_show, enabled_store);

    +static ssize_t enabled_1gb_show(struct kobject *kobj,
    + struct kobj_attribute *attr, char *buf)
    +{
    + if (test_bit(TRANSPARENT_PUD_HUGEPAGE_FLAG, &transparent_hugepage_flags))
    + return sprintf(buf, "[always] madvise never\n");
    + else if (test_bit(TRANSPARENT_PUD_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags))
    + return sprintf(buf, "always [madvise] never\n");
    + else
    + return sprintf(buf, "always madvise [never]\n");
    +}
    +
    +static ssize_t enabled_1gb_store(struct kobject *kobj,
    + struct kobj_attribute *attr,
    + const char *buf, size_t count)
    +{
    + ssize_t ret = count;
    +
    + if (!memcmp("always", buf,
    + min(sizeof("always")-1, count))) {
    + clear_bit(TRANSPARENT_PUD_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
    + set_bit(TRANSPARENT_PUD_HUGEPAGE_FLAG, &transparent_hugepage_flags);
    + } else if (!memcmp("madvise", buf,
    + min(sizeof("madvise")-1, count))) {
    + clear_bit(TRANSPARENT_PUD_HUGEPAGE_FLAG, &transparent_hugepage_flags);
    + set_bit(TRANSPARENT_PUD_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
    + } else if (!memcmp("never", buf,
    + min(sizeof("never")-1, count))) {
    + clear_bit(TRANSPARENT_PUD_HUGEPAGE_FLAG, &transparent_hugepage_flags);
    + clear_bit(TRANSPARENT_PUD_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
    + } else
    + ret = -EINVAL;
    +
    + return ret;
    +}
    +static struct kobj_attribute enabled_1gb_attr =
    + __ATTR(enabled_1gb, 0644, enabled_1gb_show, enabled_1gb_store);
    +
    ssize_t single_hugepage_flag_show(struct kobject *kobj,
    struct kobj_attribute *attr, char *buf,
    enum transparent_hugepage_flag flag)
    @@ -305,6 +344,7 @@ static struct kobj_attribute hpage_pmd_size_attr =

    static struct attribute *hugepage_attr[] = {
    &enabled_attr.attr,
    + &enabled_1gb_attr.attr,
    &defrag_attr.attr,
    &use_zero_page_attr.attr,
    &hpage_pmd_size_attr.attr,
    diff --git a/mm/memory.c b/mm/memory.c
    index 184d8eb2d060..518f29a5903e 100644
    --- a/mm/memory.c
    +++ b/mm/memory.c
    @@ -4305,7 +4305,7 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
    if (!vmf.pud)
    return VM_FAULT_OOM;
    retry_pud:
    - if (pud_none(*vmf.pud) && __transparent_hugepage_enabled(vma)) {
    + if (pud_none(*vmf.pud) && transparent_pud_hugepage_enabled(vma)) {
    ret = create_huge_pud(&vmf);
    if (!(ret & VM_FAULT_FALLBACK))
    return ret;
    --
    2.28.0