Subject: Re: [PATCH net-next v3 10/13] mm: page_frag: introduce prepare/probe/commit API
On Wed, 8 May 2024, Yunsheng Lin wrote:

> There are many use cases that need a minimum amount of memory in
> order to make forward progress, but perform better if more memory
> is available, or that need to probe the cache info in order to use
> any available memory for frag coalescing.
>
> Currently the skb_page_frag_refill() API is used to handle the
> above use cases, but the caller needs to know about the internal
> details and access the data fields of 'struct page_frag' directly
> to meet those requirements, and its implementation is similar to
> the one in the mm subsystem.
>
> To unify those two page_frag implementations, introduce a prepare
> API that ensures the minimum amount of memory is satisfied and
> returns how much memory is actually available to the caller, and a
> probe API that reports the currently available memory to the
> caller without refilling the cache. The caller then either calls
> the commit API to report how much memory it actually used, or
> skips the commit entirely if it decided not to use any memory.
>
> As the next patch is about to replace 'struct page_frag' with
> 'struct page_frag_cache' in linux/sched.h, which is included by
> asm-offsets.s, using virt_to_page() in an inline helper of
> page_frag_cache.h causes a "'vmemmap' undeclared" compile error
> for asm-offsets.s; use a macro for the probe API to avoid that
> compile error.
>
> CC: Alexander Duyck <alexander.duyck@gmail.com>
> Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
> ---
> include/linux/page_frag_cache.h | 86 ++++++++++++++++++++++++
> mm/page_frag_cache.c | 113 ++++++++++++++++++++++++++++++++
> 2 files changed, 199 insertions(+)
>
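Here is a rough sketch of how I read the intended prepare/commit
flow from the description above. The function names come from this
patch; nc, MIN_SZ, and do_copy() are placeholders made up for
illustration:

	unsigned int fragsz = MIN_SZ;	/* minimum needed for forward progress */
	unsigned int used;
	void *va;

	/* Refill the cache if needed so that at least MIN_SZ bytes are
	 * available; on success, fragsz is updated to the full amount
	 * of space actually available.
	 */
	va = page_frag_alloc_va_prepare(nc, &fragsz, GFP_KERNEL);
	if (!va)
		return -ENOMEM;

	/* Consume any amount up to fragsz... */
	used = do_copy(va, fragsz);

	/* ...and report back how much was actually taken, or skip the
	 * commit entirely if nothing was used.
	 */
	if (used)
		page_frag_alloc_commit(nc, used);
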
> diff --git a/include/linux/page_frag_cache.h b/include/linux/page_frag_cache.h
> index 88e91ee57b91..30893638155b 100644
> --- a/include/linux/page_frag_cache.h
> +++ b/include/linux/page_frag_cache.h
> @@ -71,6 +71,21 @@ static inline bool page_frag_cache_is_pfmemalloc(struct page_frag_cache *nc)
> return encoded_page_pfmemalloc(nc->encoded_va);
> }
>
> +static inline unsigned int page_frag_cache_page_size(struct encoded_va *encoded_va)
> +{
> +#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
> + return PAGE_SIZE << encoded_page_order(encoded_va);
> +#else
> + return PAGE_SIZE;
> +#endif
> +}
> +
> +static inline unsigned int __page_frag_cache_page_offset(struct encoded_va *encoded_va,
> + unsigned int remaining)
> +{
> + return page_frag_cache_page_size(encoded_va) - remaining;
> +}
> +
> void page_frag_cache_drain(struct page_frag_cache *nc);
> void __page_frag_cache_drain(struct page *page, unsigned int count);
> void *__page_frag_alloc_va_align(struct page_frag_cache *nc,
> @@ -85,12 +100,83 @@ static inline void *page_frag_alloc_va_align(struct page_frag_cache *nc,
> return __page_frag_alloc_va_align(nc, fragsz, gfp_mask, -align);
> }
>
> +static inline unsigned int page_frag_cache_page_offset(const struct page_frag_cache *nc)
> +{
> + return __page_frag_cache_page_offset(nc->encoded_va, nc->remaining);
> +}
> +
> static inline void *page_frag_alloc_va(struct page_frag_cache *nc,
> unsigned int fragsz, gfp_t gfp_mask)
> {
> return __page_frag_alloc_va_align(nc, fragsz, gfp_mask, ~0u);
> }
>
> +void *page_frag_alloc_va_prepare(struct page_frag_cache *nc, unsigned int *fragsz,
> + gfp_t gfp);
> +
> +static inline void *page_frag_alloc_va_prepare_align(struct page_frag_cache *nc,
> + unsigned int *fragsz,
> + gfp_t gfp,
> + unsigned int align)
> +{
> + WARN_ON_ONCE(!is_power_of_2(align) || align > PAGE_SIZE);
> + nc->remaining = nc->remaining & -align;
> + return page_frag_alloc_va_prepare(nc, fragsz, gfp);
> +}
> +
> +struct page *page_frag_alloc_pg_prepare(struct page_frag_cache *nc,
> + unsigned int *offset,
> + unsigned int *fragsz, gfp_t gfp);
> +
> +struct page *page_frag_alloc_prepare(struct page_frag_cache *nc,
> + unsigned int *offset,
> + unsigned int *fragsz,
> + void **va, gfp_t gfp);
> +
> +static inline struct encoded_va *__page_frag_alloc_probe(struct page_frag_cache *nc,
> + unsigned int *offset,
> + unsigned int *fragsz,
> + void **va)
> +{
> + struct encoded_va *encoded_va;
> +
> + *fragsz = nc->remaining;
> + encoded_va = nc->encoded_va;
> + *offset = __page_frag_cache_page_offset(encoded_va, *fragsz);
> + *va = encoded_page_address(encoded_va) + *offset;
> +
> + return encoded_va;
> +}
> +
> +#define page_frag_alloc_probe(nc, offset, fragsz, va) \
> +({ \
> + struct encoded_va *__encoded_va; \
> + struct page *__page = NULL; \
> + \

Hi Yunsheng -

I made this suggestion for patch 13 (documentation), but want to clarify
my request here:

> + if (likely((nc)->remaining)) \

I think it would be more useful to change this line to

if ((nc)->remaining >= *fragsz)

That way the caller can use this function to "probe" for a specific amount
of available space, rather than "nonzero" space. If the caller wants to
check for available space, they can set *fragsz = 1.

In other words, I think the functionality you described in the
documentation is better and the code should be changed to match!
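
To make that concrete, with the changed check a caller could do
something like this (a rough sketch; NEEDED_SZ and do_consume() are
placeholders made up for illustration):

	unsigned int offset, fragsz = NEEDED_SZ;
	unsigned int used;
	struct page *page;
	void *va;

	/* Probe only: succeeds when at least NEEDED_SZ bytes are
	 * already cached, without triggering a refill; on success,
	 * fragsz is updated to the full remaining space.
	 */
	page = page_frag_alloc_probe(nc, &offset, &fragsz, &va);
	if (!page)
		return -ENOMEM;	/* or fall back to the prepare API */

	used = do_consume(va, fragsz);	/* take up to fragsz bytes */
	if (used)
		page_frag_alloc_commit(nc, used);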

- Mat

> + __page = virt_to_page(__page_frag_alloc_probe(nc, \
> + offset, \
> + fragsz, \
> + va)); \
> + \
> + __page; \
> +})
> +
> +static inline void page_frag_alloc_commit(struct page_frag_cache *nc,
> + unsigned int fragsz)
> +{
> + VM_BUG_ON(fragsz > nc->remaining || !nc->pagecnt_bias);
> + nc->pagecnt_bias--;
> + nc->remaining -= fragsz;
> +}
> +
> +static inline void page_frag_alloc_commit_noref(struct page_frag_cache *nc,
> + unsigned int fragsz)
> +{
> + VM_BUG_ON(fragsz > nc->remaining);
> + nc->remaining -= fragsz;
> +}
> +
> void page_frag_free_va(void *addr);
>
> #endif

