 
From: Greg Thelen <gthelen@google.com>
Subject: [PATCH v3 11/11] memcg: check memcg dirty limits in page writeback
Date: Mon, 18 Oct 2010
If the current process is in a non-root memcg, then global_dirty_limits()
will consider the memcg dirty limit.  This allows different cgroups to have
distinct dirty limits, which trigger direct and background writeback at
different levels.

Signed-off-by: Andrea Righi <arighi@develer.com>
Signed-off-by: Greg Thelen <gthelen@google.com>
---

Changelog since v1:
- Removed unnecessary get_ prefix from get_xxx() functions.
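
As a note for reviewers, the net effect of the dirtyable_memory() clamp and
the reworked global_dirty_limits() can be sketched outside the kernel.  The
program below is a standalone userspace illustration, not kernel code; the
helper name, the sample page counts, and the ratios are all invented for
the example.

/*
 * Standalone sketch (not kernel code): with a memcg dirty limit in
 * effect, the available memory is the smaller of the global and memcg
 * dirtyable page counts, and both writeback thresholds are then derived
 * from that clamped value.  All numbers here are hypothetical.
 */
#include <stdio.h>

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned long global_pages = 1000000;	/* global dirtyable pages */
	unsigned long memcg_pages = 100000;	/* memcg dirtyable pages */
	unsigned long dirty_ratio = 20;		/* percent, as in dirty_param */
	unsigned long background_ratio = 10;	/* percent */

	/* dirtyable_memory(): clamp to the memcg pool when it has a limit */
	unsigned long available = min_ul(memcg_pages, global_pages);

	/* global_dirty_limits(): thresholds scale with the clamped pool */
	unsigned long dirty = dirty_ratio * available / 100;
	unsigned long background = background_ratio * available / 100;

	if (background >= dirty)
		background = dirty / 2;

	/* prints: dirty=20000 background=10000 */
	printf("dirty=%lu background=%lu\n", dirty, background);
	return 0;
}

With the memcg pool a tenth the size of the global pool, both thresholds
shrink by the same factor, which is what lets writeback trigger earlier
for a constrained cgroup than the global limits alone would.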

 mm/page-writeback.c |   89 +++++++++++++++++++++++++++++++++++++++++---------
 1 files changed, 73 insertions(+), 16 deletions(-)
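
A second standalone sketch (again not kernel code; the numbers are
invented) illustrates the 10% slack that the throttle_vm_writeout() hunk
below preserves: the loop keeps waiting only while the unstable + writeback
count, now possibly memcg-scoped via dirty_writeback_pages(), exceeds 110%
of the dirty threshold.

#include <stdio.h>

int main(void)
{
	unsigned long dirty_thresh = 1000;	/* from global_dirty_limits() */
	unsigned long dirty = 1050;		/* dirty_writeback_pages() */

	dirty_thresh += dirty_thresh / 10;	/* wheeee... now 1100 */

	if (dirty <= dirty_thresh)		/* 1050 <= 1100: stop */
		printf("stop throttling (%lu <= %lu)\n", dirty, dirty_thresh);
	else
		printf("keep waiting (%lu > %lu)\n", dirty, dirty_thresh);
	return 0;
}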

diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index a0bb3e2..9b34f01 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -180,7 +180,7 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
  * Returns the number of pages that can currently be freed and used
  * by the kernel for direct mappings.
  */
-static unsigned long determine_dirtyable_memory(void)
+static unsigned long global_dirtyable_memory(void)
 {
 	unsigned long x;

@@ -192,6 +192,58 @@ static unsigned long determine_dirtyable_memory(void)
 	return x + 1;	/* Ensure that we never return 0 */
 }

+static unsigned long dirtyable_memory(void)
+{
+	unsigned long memory;
+	s64 memcg_memory;
+
+	memory = global_dirtyable_memory();
+	if (!mem_cgroup_has_dirty_limit())
+		return memory;
+	memcg_memory = mem_cgroup_page_stat(MEMCG_NR_DIRTYABLE_PAGES);
+	BUG_ON(memcg_memory < 0);
+
+	return min((unsigned long)memcg_memory, memory);
+}
+
+static long reclaimable_pages(void)
+{
+	s64 ret;
+
+	if (!mem_cgroup_has_dirty_limit())
+		return global_page_state(NR_FILE_DIRTY) +
+			global_page_state(NR_UNSTABLE_NFS);
+	ret = mem_cgroup_page_stat(MEMCG_NR_RECLAIM_PAGES);
+	BUG_ON(ret < 0);
+
+	return ret;
+}
+
+static long writeback_pages(void)
+{
+	s64 ret;
+
+	if (!mem_cgroup_has_dirty_limit())
+		return global_page_state(NR_WRITEBACK);
+	ret = mem_cgroup_page_stat(MEMCG_NR_WRITEBACK);
+	BUG_ON(ret < 0);
+
+	return ret;
+}
+
+static unsigned long dirty_writeback_pages(void)
+{
+	s64 ret;
+
+	if (!mem_cgroup_has_dirty_limit())
+		return global_page_state(NR_UNSTABLE_NFS) +
+			global_page_state(NR_WRITEBACK);
+	ret = mem_cgroup_page_stat(MEMCG_NR_DIRTY_WRITEBACK_PAGES);
+	BUG_ON(ret < 0);
+
+	return ret;
+}
+
 /*
  * couple the period to the dirty_ratio:
  *
@@ -204,8 +256,8 @@ static int calc_period_shift(void)
 	if (vm_dirty_bytes)
 		dirty_total = vm_dirty_bytes / PAGE_SIZE;
 	else
-		dirty_total = (vm_dirty_ratio * determine_dirtyable_memory()) /
-				100;
+		dirty_total = (vm_dirty_ratio * global_dirtyable_memory()) /
+				100;
 	return 2 + ilog2(dirty_total - 1);
 }

@@ -410,18 +462,23 @@ void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
 {
 	unsigned long background;
 	unsigned long dirty;
-	unsigned long available_memory = determine_dirtyable_memory();
+	unsigned long available_memory = dirtyable_memory();
 	struct task_struct *tsk;
+	struct vm_dirty_param dirty_param;

-	if (vm_dirty_bytes)
-		dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE);
+	vm_dirty_param(&dirty_param);
+
+	if (dirty_param.dirty_bytes)
+		dirty = DIV_ROUND_UP(dirty_param.dirty_bytes, PAGE_SIZE);
 	else
-		dirty = (vm_dirty_ratio * available_memory) / 100;
+		dirty = (dirty_param.dirty_ratio * available_memory) / 100;

-	if (dirty_background_bytes)
-		background = DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE);
+	if (dirty_param.dirty_background_bytes)
+		background = DIV_ROUND_UP(dirty_param.dirty_background_bytes,
+					  PAGE_SIZE);
 	else
-		background = (dirty_background_ratio * available_memory) / 100;
+		background = (dirty_param.dirty_background_ratio *
+				available_memory) / 100;

 	if (background >= dirty)
 		background = dirty / 2;
@@ -493,9 +550,8 @@ static void balance_dirty_pages(struct address_space *mapping,
 		.range_cyclic	= 1,
 	};

-	nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
-				global_page_state(NR_UNSTABLE_NFS);
-	nr_writeback = global_page_state(NR_WRITEBACK);
+	nr_reclaimable = reclaimable_pages();
+	nr_writeback = writeback_pages();

 	global_dirty_limits(&background_thresh, &dirty_thresh);

@@ -652,6 +708,7 @@ void throttle_vm_writeout(gfp_t gfp_mask)
 {
 	unsigned long background_thresh;
 	unsigned long dirty_thresh;
+	unsigned long dirty;

 	for ( ; ; ) {
 		global_dirty_limits(&background_thresh, &dirty_thresh);
@@ -662,9 +719,9 @@ void throttle_vm_writeout(gfp_t gfp_mask)
 		 */
 		dirty_thresh += dirty_thresh / 10;	/* wheeee... */

-		if (global_page_state(NR_UNSTABLE_NFS) +
-			global_page_state(NR_WRITEBACK) <= dirty_thresh)
-			break;
+		dirty = dirty_writeback_pages();
+		if (dirty <= dirty_thresh)
+			break;
 		congestion_wait(BLK_RW_ASYNC, HZ/10);

 		/*
    --
    1.7.1

