 
From: Andy Whitcroft <apw@shadowen.org>
Subject: [PATCH 1/2] hugetlb reservations: move region tracking earlier
Date: Fri, 20 Jun 2008
    Move the region tracking code much earlier so we can use it for page
    presence tracking later on. No code is changed, just its location.

    Signed-off-by: Andy Whitcroft <apw@shadowen.org>
    ---
    mm/hugetlb.c | 246 +++++++++++++++++++++++++++++----------------------------
    1 files changed, 125 insertions(+), 121 deletions(-)
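
    For readers unfamiliar with the reserve map: the block being moved keeps,
    per mapping, a sorted list of [from, to) offsets (in huge-page units) and
    merges overlapping or abutting ranges as reservations are recorded.  The
    sketch below is a minimal userspace illustration of that behaviour only;
    the names (struct range, range_add) are hypothetical, it uses plain
    pointers instead of the kernel's struct list_head, and it is not part of
    this patch.

    /*
     * Illustrative sketch of the reserve-map semantics: a sorted list of
     * [from, to) ranges, merging anything the new range overlaps or abuts.
     */
    #include <stdio.h>
    #include <stdlib.h>

    struct range {
            long from, to;          /* [from, to) in huge-page-sized units */
            struct range *next;     /* kept sorted by 'from' */
    };

    /* Record [f, t), absorbing any ranges it touches; returns new head. */
    static struct range *range_add(struct range *head, long f, long t)
    {
            struct range **pp = &head, *rg;

            /* Skip ranges that end strictly before the new one begins. */
            while (*pp && (*pp)->to < f)
                    pp = &(*pp)->next;

            /* Absorb every range that overlaps or abuts [f, t). */
            while ((rg = *pp) && rg->from <= t) {
                    if (rg->from < f)
                            f = rg->from;
                    if (rg->to > t)
                            t = rg->to;
                    *pp = rg->next;
                    free(rg);
            }

            rg = malloc(sizeof(*rg));
            rg->from = f;
            rg->to = t;
            rg->next = *pp;
            *pp = rg;
            return head;
    }

    int main(void)
    {
            struct range *map = NULL;

            map = range_add(map, 0, 4);     /* reserve pages 0-3 */
            map = range_add(map, 6, 8);     /* reserve pages 6-7 */
            map = range_add(map, 3, 7);     /* overlaps both: merges to [0, 8) */

            for (struct range *rg = map; rg; rg = rg->next)
                    printf("[%ld, %ld)\n", rg->from, rg->to);
            return 0;
    }

    Note that the kernel splits this work in two: region_chg() computes how
    many pages still need reserving and, where necessary, pre-allocates a
    zero-size placeholder region, so that the later region_add() can commit
    the range without allocating and therefore cannot fail.  The sketch folds
    both steps into one call for brevity.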

    diff --git a/mm/hugetlb.c b/mm/hugetlb.c
    index 0f76ed1..d701e39 100644
    --- a/mm/hugetlb.c
    +++ b/mm/hugetlb.c
    @@ -47,6 +47,131 @@ static unsigned long __initdata default_hstate_size;
    static DEFINE_SPINLOCK(hugetlb_lock);

    /*
    + * Region tracking -- allows tracking of reservations and instantiated pages
    + * across the pages in a mapping.
    + */
    +struct file_region {
    + struct list_head link;
    + long from;
    + long to;
    +};
    +
    +static long region_add(struct list_head *head, long f, long t)
    +{
    + struct file_region *rg, *nrg, *trg;
    +
    + /* Locate the region we are either in or before. */
    + list_for_each_entry(rg, head, link)
    + if (f <= rg->to)
    + break;
    +
    + /* Round our left edge to the current segment if it encloses us. */
    + if (f > rg->from)
    + f = rg->from;
    +
    + /* Check for and consume any regions we now overlap with. */
    + nrg = rg;
    + list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
    + if (&rg->link == head)
    + break;
    + if (rg->from > t)
    + break;
    +
    + /* If this area reaches higher then extend our area to
    + * include it completely. If this is not the first area
    + * which we intend to reuse, free it. */
    + if (rg->to > t)
    + t = rg->to;
    + if (rg != nrg) {
    + list_del(&rg->link);
    + kfree(rg);
    + }
    + }
    + nrg->from = f;
    + nrg->to = t;
    + return 0;
    +}
    +
    +static long region_chg(struct list_head *head, long f, long t)
    +{
    + struct file_region *rg, *nrg;
    + long chg = 0;
    +
    + /* Locate the region we are before or in. */
    + list_for_each_entry(rg, head, link)
    + if (f <= rg->to)
    + break;
    +
    + /* If we are below the current region then a new region is required.
    + * Subtle, allocate a new region at the position but make it zero
    + * size such that we can guarantee to record the reservation. */
    + if (&rg->link == head || t < rg->from) {
    + nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
    + if (!nrg)
    + return -ENOMEM;
    + nrg->from = f;
    + nrg->to = f;
    + INIT_LIST_HEAD(&nrg->link);
    + list_add(&nrg->link, rg->link.prev);
    +
    + return t - f;
    + }
    +
    + /* Round our left edge to the current segment if it encloses us. */
    + if (f > rg->from)
    + f = rg->from;
    + chg = t - f;
    +
    + /* Check for and consume any regions we now overlap with. */
    + list_for_each_entry(rg, rg->link.prev, link) {
    + if (&rg->link == head)
    + break;
    + if (rg->from > t)
    + return chg;
    +
    + /* We overlap with this area, if it extends further than
    + * us then we must extend ourselves. Account for its
    + * existing reservation. */
    + if (rg->to > t) {
    + chg += rg->to - t;
    + t = rg->to;
    + }
    + chg -= rg->to - rg->from;
    + }
    + return chg;
    +}
    +
    +static long region_truncate(struct list_head *head, long end)
    +{
    + struct file_region *rg, *trg;
    + long chg = 0;
    +
    + /* Locate the region we are either in or before. */
    + list_for_each_entry(rg, head, link)
    + if (end <= rg->to)
    + break;
    + if (&rg->link == head)
    + return 0;
    +
    + /* If we are in the middle of a region then adjust it. */
    + if (end > rg->from) {
    + chg = rg->to - end;
    + rg->to = end;
    + rg = list_entry(rg->link.next, typeof(*rg), link);
    + }
    +
    + /* Drop any remaining regions. */
    + list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
    + if (&rg->link == head)
    + break;
    + chg += rg->to - rg->from;
    + list_del(&rg->link);
    + kfree(rg);
    + }
    + return chg;
    +}
    +
    +/*
    * Convert the address within this vma to the page offset within
    * the mapping, in base page units.
    */
    @@ -649,127 +774,6 @@ static void return_unused_surplus_pages(struct hstate *h,
    }
    }

    -struct file_region {
    - struct list_head link;
    - long from;
    - long to;
    -};
    -
    -static long region_add(struct list_head *head, long f, long t)
    -{
    - struct file_region *rg, *nrg, *trg;
    -
    - /* Locate the region we are either in or before. */
    - list_for_each_entry(rg, head, link)
    - if (f <= rg->to)
    - break;
    -
    - /* Round our left edge to the current segment if it encloses us. */
    - if (f > rg->from)
    - f = rg->from;
    -
    - /* Check for and consume any regions we now overlap with. */
    - nrg = rg;
    - list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
    - if (&rg->link == head)
    - break;
    - if (rg->from > t)
    - break;
    -
    - /* If this area reaches higher then extend our area to
    - * include it completely. If this is not the first area
    - * which we intend to reuse, free it. */
    - if (rg->to > t)
    - t = rg->to;
    - if (rg != nrg) {
    - list_del(&rg->link);
    - kfree(rg);
    - }
    - }
    - nrg->from = f;
    - nrg->to = t;
    - return 0;
    -}
    -
    -static long region_chg(struct list_head *head, long f, long t)
    -{
    - struct file_region *rg, *nrg;
    - long chg = 0;
    -
    - /* Locate the region we are before or in. */
    - list_for_each_entry(rg, head, link)
    - if (f <= rg->to)
    - break;
    -
    - /* If we are below the current region then a new region is required.
    - * Subtle, allocate a new region at the position but make it zero
    - * size such that we can guarantee to record the reservation. */
    - if (&rg->link == head || t < rg->from) {
    - nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
    - if (!nrg)
    - return -ENOMEM;
    - nrg->from = f;
    - nrg->to = f;
    - INIT_LIST_HEAD(&nrg->link);
    - list_add(&nrg->link, rg->link.prev);
    -
    - return t - f;
    - }
    -
    - /* Round our left edge to the current segment if it encloses us. */
    - if (f > rg->from)
    - f = rg->from;
    - chg = t - f;
    -
    - /* Check for and consume any regions we now overlap with. */
    - list_for_each_entry(rg, rg->link.prev, link) {
    - if (&rg->link == head)
    - break;
    - if (rg->from > t)
    - return chg;
    -
    - /* We overlap with this area, if it extends further than
    - * us then we must extend ourselves. Account for its
    - * existing reservation. */
    - if (rg->to > t) {
    - chg += rg->to - t;
    - t = rg->to;
    - }
    - chg -= rg->to - rg->from;
    - }
    - return chg;
    -}
    -
    -static long region_truncate(struct list_head *head, long end)
    -{
    - struct file_region *rg, *trg;
    - long chg = 0;
    -
    - /* Locate the region we are either in or before. */
    - list_for_each_entry(rg, head, link)
    - if (end <= rg->to)
    - break;
    - if (&rg->link == head)
    - return 0;
    -
    - /* If we are in the middle of a region then adjust it. */
    - if (end > rg->from) {
    - chg = rg->to - end;
    - rg->to = end;
    - rg = list_entry(rg->link.next, typeof(*rg), link);
    - }
    -
    - /* Drop any remaining regions. */
    - list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
    - if (&rg->link == head)
    - break;
    - chg += rg->to - rg->from;
    - list_del(&rg->link);
    - kfree(rg);
    - }
    - return chg;
    -}
    -
    /*
    * Determine if the huge page at addr within the vma has an associated
    * reservation. Where it does not we will need to logically increase
    --
    1.5.6.205.g7ca3a
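
    The third helper moved above, region_truncate(), trims the reserve map at
    a given offset and returns how many reserved pages were released so the
    caller can hand them back to the pool.  Below is a minimal userspace
    sketch of those semantics, again with hypothetical simplified names
    rather than the kernel's list_head-based implementation.

    /*
     * Illustrative sketch of region_truncate(): drop everything at or beyond
     * 'end' and report how many reserved pages that released.
     */
    #include <stdio.h>
    #include <stdlib.h>

    struct range {
            long from, to;          /* [from, to) */
            struct range *next;     /* kept sorted by 'from' */
    };

    static long range_truncate(struct range **pp, long end)
    {
            struct range *rg;
            long released = 0;

            while ((rg = *pp)) {
                    if (rg->to <= end) {            /* wholly below the cut */
                            pp = &rg->next;
                            continue;
                    }
                    if (rg->from < end) {           /* straddles the cut: trim */
                            released += rg->to - end;
                            rg->to = end;
                            pp = &rg->next;
                            continue;
                    }
                    /* wholly at or above the cut: drop it */
                    released += rg->to - rg->from;
                    *pp = rg->next;
                    free(rg);
            }
            return released;
    }

    static struct range *mk(long f, long t, struct range *next)
    {
            struct range *rg = malloc(sizeof(*rg));
            rg->from = f;
            rg->to = t;
            rg->next = next;
            return rg;
    }

    int main(void)
    {
            /* reserve map: [0, 4) and [6, 10) */
            struct range *map = mk(0, 4, mk(6, 10, NULL));

            /* Truncate at page 2: trims [0, 4) to [0, 2), drops [6, 10). */
            printf("released %ld pages\n", range_truncate(&map, 2));
            return 0;
    }

    Running this prints "released 6 pages": two pages trimmed off [0, 4) plus
    the four pages of the dropped [6, 10) region, matching what the kernel's
    region_truncate() would return for the same map.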

