From: Aaron Lu <aaron.lu@intel.com>
Subject: [PATCH v2 5/5] mm: add debugfs interface for parallel free tuning
Date: 2017-03-15
    Make it possible to set different values for async_free_threshold and
    max_gather_batch_count through debugfs.

    With this, we can run tests for different purposes (example commands are
    shown after this list):
    1 Restore vanilla kernel behaviour for performance comparison.
    Set max_gather_batch_count to a small value like 20 so that the number of
    pages gathered always stays below async_free_threshold; this effectively
    disables parallel free and restores the behaviour of the vanilla kernel;
    2 Debugging.
    Set async_free_threshold to a very small value (like 128) to trigger
    parallel free even for ordinary processes, which is handy for debugging
    with a virtual machine that does not have much memory assigned to it;
    3 Performance tuning.
    Use values for async_free_threshold and max_gather_batch_count other than
    the defaults to test whether parallel free performs better or worse.
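
    As a usage sketch: assuming debugfs is mounted at its conventional
    /sys/kernel/debug location, the two knobs created by this patch can be
    read and written at runtime (the values below are examples only):

      # echo 128 > /sys/kernel/debug/parallel_free/async_free_threshold
      # echo 20 > /sys/kernel/debug/parallel_free/max_gather_batch_count
      # cat /sys/kernel/debug/parallel_free/max_gather_batch_count
      20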

    Signed-off-by: Aaron Lu <aaron.lu@intel.com>
    ---
    mm/memory.c | 33 +++++++++++++++++++++++++++++++--
    1 file changed, 31 insertions(+), 2 deletions(-)

    diff --git a/mm/memory.c b/mm/memory.c
    index 83b38823aaba..3a971cc1fc3b 100644
    --- a/mm/memory.c
    +++ b/mm/memory.c
    @@ -183,6 +183,35 @@ static void check_sync_rss_stat(struct task_struct *task)
     
     #ifdef HAVE_GENERIC_MMU_GATHER
     
    +static unsigned long async_free_threshold = ASYNC_FREE_THRESHOLD;
    +static unsigned long max_gather_batch_count = MAX_GATHER_BATCH_COUNT;
    +
    +#ifdef CONFIG_DEBUG_FS
    +static int __init tlb_mmu_parallel_free_debugfs(void)
    +{
    +	umode_t mode = 0644;
    +	struct dentry *dir;
    +
    +	dir = debugfs_create_dir("parallel_free", NULL);
    +	if (!dir)
    +		return -ENOMEM;
    +
    +	if (!debugfs_create_ulong("async_free_threshold", mode, dir,
    +				  &async_free_threshold))
    +		goto fail;
    +	if (!debugfs_create_ulong("max_gather_batch_count", mode, dir,
    +				  &max_gather_batch_count))
    +		goto fail;
    +
    +	return 0;
    +
    +fail:
    +	debugfs_remove_recursive(dir);
    +	return -ENOMEM;
    +}
    +late_initcall(tlb_mmu_parallel_free_debugfs);
    +#endif
    +
     static bool tlb_next_batch(struct mmu_gather *tlb)
     {
     	struct mmu_gather_batch *batch;
    @@ -193,7 +222,7 @@ static bool tlb_next_batch(struct mmu_gather *tlb)
     		return true;
     	}
     
    -	if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
    +	if (tlb->batch_count == max_gather_batch_count)
     		return false;
     
     	batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
    @@ -307,7 +336,7 @@ static void tlb_flush_mmu_free(struct mmu_gather *tlb)
     {
     	struct batch_free_struct *batch_free = NULL;
     
    -	if (tlb->page_nr >= ASYNC_FREE_THRESHOLD)
    +	if (tlb->page_nr >= async_free_threshold)
     		batch_free = kmalloc(sizeof(*batch_free),
     				     GFP_NOWAIT | __GFP_NOWARN);
     
    --
    2.7.4