Subject: [tip:x86/mm] x86/tlb: fall back to flush all when meet a THP large page
    Commit-ID:  fd460eec063227223e9b1151e36c846e2a87b619
    Gitweb: http://git.kernel.org/tip/fd460eec063227223e9b1151e36c846e2a87b619
    Author: Alex Shi <alex.shi@intel.com>
    AuthorDate: Mon, 25 Jun 2012 14:08:21 +0800
    Committer: H. Peter Anvin <hpa@zytor.com>
    CommitDate: Mon, 25 Jun 2012 20:53:16 -0700

    x86/tlb: fall back to flush all when meet a THP large page

    We do not need to flush large pages in PAGE_SIZE steps; that just wastes
    time. And in fact, according to our macro benchmark, large pages do not
    benefit from the 'invlpg' optimization. So simply flushing the whole TLB
    is enough for them.
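
    For scale, assuming the usual x86 THP geometry of 2MB huge pages built
    from 4KB base pages: a single huge page spans 2MB / 4KB = 512 PAGE_SIZE
    steps, so flushing it page by page issues 512 invlpg instructions to
    invalidate what is typically a single large-page TLB entry, while one
    full TLB flush covers it in a single operation.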

    The following results were measured on a 2-socket * 4-core * 2-HT NHM EP
    machine, with THP set to 'always'.

    Multi-thread testing, the '-t' parameter is the thread count:

                           without this patch    with this patch
    ./mprotect -t 1              14ns                 13ns
    ./mprotect -t 2              13ns                 13ns
    ./mprotect -t 4              12ns                 11ns
    ./mprotect -t 8              14ns                 10ns
    ./mprotect -t 16             28ns                 28ns
    ./mprotect -t 32             54ns                 52ns
    ./mprotect -t 128           200ns                200ns
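
    The mprotect microbenchmark binary itself is not part of this message.
    For context, a minimal sketch of what such a per-call latency test might
    look like follows; the region size, iteration count, and timing method
    are assumptions, not the actual tool behind the numbers above.

    /*
     * Hypothetical sketch of a per-call mprotect latency microbenchmark in
     * the spirit of the "./mprotect -t N" runs above.  The actual tool used
     * for those numbers is not included in this message; region size,
     * iteration count and timing method here are assumptions.
     * Build: gcc -O2 -o mprotect_bench mprotect_bench.c -lpthread
     */
    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/mman.h>
    #include <time.h>

    #define ITERATIONS  100000
    #define REGION_SIZE (2UL * 1024 * 1024)         /* one THP-sized region */

    static void *worker(void *arg)
    {
            struct timespec t0, t1;
            long long i, ns;
            char *buf = mmap(NULL, REGION_SIZE, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (buf == MAP_FAILED) {
                    perror("mmap");
                    return NULL;
            }
            buf[0] = 1;                             /* fault the region in */

            clock_gettime(CLOCK_MONOTONIC, &t0);
            for (i = 0; i < ITERATIONS; i++) {
                    /* toggle protections so every call has PTEs to change */
                    mprotect(buf, REGION_SIZE, PROT_READ);
                    mprotect(buf, REGION_SIZE, PROT_READ | PROT_WRITE);
            }
            clock_gettime(CLOCK_MONOTONIC, &t1);

            ns = (t1.tv_sec - t0.tv_sec) * 1000000000LL +
                 (t1.tv_nsec - t0.tv_nsec);
            printf("avg %lld ns per mprotect call\n", ns / (2LL * ITERATIONS));
            munmap(buf, REGION_SIZE);
            return NULL;
    }

    int main(int argc, char **argv)
    {
            int i, nthreads = argc > 1 ? atoi(argv[1]) : 1;  /* like '-t N' */
            pthread_t *tids = calloc(nthreads, sizeof(*tids));

            for (i = 0; i < nthreads; i++)
                    pthread_create(&tids[i], NULL, worker, NULL);
            for (i = 0; i < nthreads; i++)
                    pthread_join(tids[i], NULL);
            free(tids);
            return 0;
    }

    Run, for example, as "./mprotect_bench 8" to approximate the '-t 8' row.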

    Signed-off-by: Alex Shi <alex.shi@intel.com>
    Link: http://lkml.kernel.org/r/1340604507-11214-4-git-send-email-alex.shi@intel.com
    Signed-off-by: H. Peter Anvin <hpa@zytor.com>
    ---
    arch/x86/mm/tlb.c | 34 ++++++++++++++++++++++++++++++++++
    1 files changed, 34 insertions(+), 0 deletions(-)

    diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
    index 3b91c98..184a02a 100644
    --- a/arch/x86/mm/tlb.c
    +++ b/arch/x86/mm/tlb.c
    @@ -318,12 +318,42 @@ void flush_tlb_mm(struct mm_struct *mm)
     
     #define FLUSHALL_BAR 16
     
    +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
    +static inline unsigned long has_large_page(struct mm_struct *mm,
    +                               unsigned long start, unsigned long end)
    +{
    +       pgd_t *pgd;
    +       pud_t *pud;
    +       pmd_t *pmd;
    +       unsigned long addr = ALIGN(start, HPAGE_SIZE);
    +       for (; addr < end; addr += HPAGE_SIZE) {
    +               pgd = pgd_offset(mm, addr);
    +               if (likely(!pgd_none(*pgd))) {
    +                       pud = pud_offset(pgd, addr);
    +                       if (likely(!pud_none(*pud))) {
    +                               pmd = pmd_offset(pud, addr);
    +                               if (likely(!pmd_none(*pmd)))
    +                                       if (pmd_large(*pmd))
    +                                               return addr;
    +                       }
    +               }
    +       }
    +       return 0;
    +}
    +#else
    +static inline unsigned long has_large_page(struct mm_struct *mm,
    +                               unsigned long start, unsigned long end)
    +{
    +       return 0;
    +}
    +#endif
     void flush_tlb_range(struct vm_area_struct *vma,
                                        unsigned long start, unsigned long end)
     {
            struct mm_struct *mm;
     
            if (!cpu_has_invlpg || vma->vm_flags & VM_HUGETLB) {
    +flush_all:
                    flush_tlb_mm(vma->vm_mm);
                    return;
            }
    @@ -346,6 +376,10 @@ void flush_tlb_range(struct vm_area_struct *vma,
                    if ((end - start)/PAGE_SIZE > act_entries/FLUSHALL_BAR)
                            local_flush_tlb();
                    else {
    +                       if (has_large_page(mm, start, end)) {
    +                               preempt_enable();
    +                               goto flush_all;
    +                       }
                            for (addr = start; addr < end;
                                            addr += PAGE_SIZE)
                                    __flush_tlb_single(addr);
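
    To see the resulting policy in isolation, the following is a hypothetical
    standalone sketch (not kernel code) of the choice flush_tlb_range() makes
    after this patch. choose_flush() and the 512-entry act_entries value are
    illustrative assumptions; only the PAGE_SIZE/FLUSHALL_BAR arithmetic and
    the fallback order mirror the patch.

    /*
     * Illustrative only: a standalone model of the flush decision made by
     * flush_tlb_range() after this patch.  choose_flush() is a hypothetical
     * helper; the 512-entry TLB size is an assumed example value.
     * Build: gcc -O2 -o flush_policy flush_policy.c
     */
    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_SIZE       4096UL
    #define FLUSHALL_BAR    16

    enum flush_kind { FLUSH_PER_PAGE, FLUSH_LOCAL_ALL, FLUSH_MM_ALL };

    static enum flush_kind choose_flush(unsigned long start, unsigned long end,
                                        unsigned long act_entries,
                                        bool range_has_thp)
    {
            /* too many pages: per-page invlpg costs more than a full flush */
            if ((end - start) / PAGE_SIZE > act_entries / FLUSHALL_BAR)
                    return FLUSH_LOCAL_ALL;
            /* a THP large page in the range: fall back to flushing the mm */
            if (range_has_thp)
                    return FLUSH_MM_ALL;
            return FLUSH_PER_PAGE;
    }

    int main(void)
    {
            static const char *name[] = {
                    "per-page invlpg", "full local flush", "flush_tlb_mm",
            };

            /* with a 512-entry TLB, the per-page path covers at most 32 pages */
            printf("8 pages,  no THP: %s\n",
                   name[choose_flush(0, 8 * PAGE_SIZE, 512, false)]);
            printf("8 pages,  THP   : %s\n",
                   name[choose_flush(0, 8 * PAGE_SIZE, 512, true)]);
            printf("64 pages, no THP: %s\n",
                   name[choose_flush(0, 64 * PAGE_SIZE, 512, false)]);
            return 0;
    }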
