Date: 3 Apr 2023
Subject: Re: [PATCHv9 02/14] mm: Add support for unaccepted memory
On 4/3/23 12:02, Kirill A. Shutemov wrote:
> On Mon, Apr 03, 2023 at 11:26:53AM +0200, Vlastimil Babka wrote:
>> > Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
>> > Acked-by: Mike Rapoport <rppt@linux.ibm.com> # memblock
>>
>> Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
>
> Thanks!
>
>> Just a small suggestion below:
>>
>> > +
>> > +static bool try_to_accept_memory(struct zone *zone, unsigned int order)
>> > +{
>> > +	long to_accept;
>> > +	int ret = false;
>> > +
>> > +	if (!static_branch_unlikely(&zones_with_unaccepted_pages))
>> > +		return false;
>>
>>
>> This potentially (depending on what the compiler decides) means we'll call
>> this function just to skip the static branch. OTOH, forcing it to be inline
>> would be wasteful too. So I'd split that part out and make the callers do
>> the static branch check inline, just as deferred_pages_enabled() is used.
>
> Like this?

Yep.

> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index 07e16e9b49c4..80fe5e4b6cca 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -390,6 +390,7 @@ EXPORT_SYMBOL(nr_online_nodes);
>  static bool page_contains_unaccepted(struct page *page, unsigned int order);
>  static void accept_page(struct page *page, unsigned int order);
>  static bool try_to_accept_memory(struct zone *zone, unsigned int order);
> +static inline bool has_unaccepted_memory(void);
>  static bool __free_unaccepted(struct page *page);
>
>  int page_group_by_mobility_disabled __read_mostly;
> @@ -3464,8 +3465,10 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
>  				       gfp_mask)) {
>  			int ret;
>
> -			if (try_to_accept_memory(zone, order))
> -				goto try_this_zone;
> +			if (has_unaccepted_memory()) {
> +				if (try_to_accept_memory(zone, order))
> +					goto try_this_zone;
> +			}
>
>  #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
>  			/*
> @@ -3519,8 +3522,10 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
>
>  			return page;
>  		} else {
> -			if (try_to_accept_memory(zone, order))
> -				goto try_this_zone;
> +			if (has_unaccepted_memory()) {
> +				if (try_to_accept_memory(zone, order))
> +					goto try_this_zone;
> +			}
>
>  #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
>  			/* Try again if zone has deferred pages */
> @@ -7302,9 +7307,6 @@ static bool try_to_accept_memory(struct zone *zone, unsigned int order)
>  	long to_accept;
>  	int ret = false;
>
> -	if (!static_branch_unlikely(&zones_with_unaccepted_pages))
> -		return false;
> -
>  	/* How much to accept to get to high watermark? */
>  	to_accept = high_wmark_pages(zone) -
>  		    (zone_page_state(zone, NR_FREE_PAGES) -
> @@ -7321,6 +7323,11 @@ static bool try_to_accept_memory(struct zone *zone, unsigned int order)
>  	return ret;
>  }
>
> +static inline bool has_unaccepted_memory(void)
> +{
> +	return static_branch_unlikely(&zones_with_unaccepted_pages);
> +}
> +
>  static bool __free_unaccepted(struct page *page)
>  {
>  	struct zone *zone = page_zone(page);
> @@ -7398,6 +7405,11 @@ static bool try_to_accept_memory(struct zone *zone, unsigned int order)
>  	return false;
>  }
>
> +static inline bool has_unaccepted_memory(void)
> +{
> +	return false;
> +}
> +
>  static bool __free_unaccepted(struct page *page)
>  {
>  	BUILD_BUG();
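
The reshuffle above follows the same shape as the existing deferred_pages_enabled()
handling in mm/page_alloc.c: a tiny inline helper wraps the static branch, so callers
only pay for the (runtime-patched) branch check and the out-of-line slow path is
entered only when there may actually be work to do. As a rough, self-contained
illustration of that caller-side pattern, here is a sketch in plain userspace C, with
an ordinary boolean standing in for the kernel's static key and the function bodies
reduced to stubs (it is not the kernel code itself):

#include <stdbool.h>
#include <stdio.h>

/*
 * Stand-in for the kernel's static key.  In the kernel this is
 * DEFINE_STATIC_KEY_FALSE(zones_with_unaccepted_pages), and the check
 * compiles down to a patched jump/nop rather than a memory load.
 */
static bool zones_with_unaccepted_pages;

/* Cheap gate the callers check inline before taking the slow path. */
static inline bool has_unaccepted_memory(void)
{
	return zones_with_unaccepted_pages;
}

/* Out-of-line slow path, only reached when the gate above fires. */
static bool try_to_accept_memory(void)
{
	/* ...accept enough pages to reach the high watermark... */
	return false;
}

int main(void)
{
	/* Caller-side pattern from the diff above. */
	if (has_unaccepted_memory()) {
		if (try_to_accept_memory())
			printf("retry this zone\n");
	}
	return 0;
}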
