Subject: Re: [PATCH net-next v4 6/7] page_pool: check for DMA sync shortcut earlier
From: Alexander Lobakin <aleksander.lobakin@intel.com>
Date: Tue, 23 Apr 2024 15:58:31 +0200

> We can save a couple more function calls in the Page Pool code if we
> check for dma_need_sync() earlier, at the same point where we test
> pp->p.dma_sync. Move both checks into an inline wrapper and call the
> PP wrapper around the generic DMA sync function only when both are true.
> You can't cache the result of dma_need_sync() in &page_pool, as it may
> change at any time if a SWIOTLB buffer is allocated or mapped.
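
That last point is the crux: the result of dma_need_sync() depends on
per-device state that can change at runtime. Roughly, the device-level
check added earlier in this series has the shape sketched below
(paraphrased from memory, not quoted from the tree; the exact field and
helper names may differ):

	/* Sketch of the per-device fast-path check (assumed shape). */
	static inline bool dma_dev_need_sync(const struct device *dev)
	{
		/* Always sync when DMA API debugging is enabled. */
		return !dev->dma_skip_sync || IS_ENABLED(CONFIG_DMA_API_DEBUG);
	}

dev->dma_skip_sync gets cleared again as soon as a mapping falls back
to a SWIOTLB bounce buffer, which is why page_pool can't snapshot the
answer at create time and has to re-check on every sync.
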
>
> Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
> ---
> net/core/page_pool.c | 31 +++++++++++++++++--------------
> 1 file changed, 17 insertions(+), 14 deletions(-)
>
> diff --git a/net/core/page_pool.c b/net/core/page_pool.c
> index 6cf26a68fa91..87319c6365e0 100644
> --- a/net/core/page_pool.c
> +++ b/net/core/page_pool.c
> @@ -398,16 +398,24 @@ static struct page *__page_pool_get_cached(struct page_pool *pool)
>  	return page;
>  }
> 
> -static void page_pool_dma_sync_for_device(const struct page_pool *pool,
> -					  const struct page *page,
> -					  unsigned int dma_sync_size)
> +static void __page_pool_dma_sync_for_device(const struct page_pool *pool,
> +					    const struct page *page,
> +					    u32 dma_sync_size)
>  {
>  	dma_addr_t dma_addr = page_pool_get_dma_addr(page);
> 
>  	dma_sync_size = min(dma_sync_size, pool->p.max_len);
> -	dma_sync_single_range_for_device(pool->p.dev, dma_addr,
> -					 pool->p.offset, dma_sync_size,
> -					 pool->p.dma_dir);
> +	__dma_sync_single_for_device(pool->p.dev, dma_addr + pool->p.offset,
> +				     dma_sync_size, pool->p.dma_dir);

Breh, this breaks builds on !DMA_NEED_SYNC systems, as
__dma_sync_single_for_device() isn't defined there, and my CI didn't
catch it in time... I'll ifdef this in the next spin, after some
reviews come in for this one.
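
One shape that ifdef could take (a sketch of the direction only, not
necessarily what the next spin will do): compile the body out when
CONFIG_DMA_NEED_SYNC is not set, so the helper collapses to an empty
function there.

	static void __page_pool_dma_sync_for_device(const struct page_pool *pool,
						    const struct page *page,
						    u32 dma_sync_size)
	{
	#if defined(CONFIG_DMA_NEED_SYNC)
		dma_addr_t dma_addr = page_pool_get_dma_addr(page);

		/* Sync no more than the area the driver said HW can touch. */
		dma_sync_size = min(dma_sync_size, pool->p.max_len);
		__dma_sync_single_for_device(pool->p.dev, dma_addr + pool->p.offset,
					     dma_sync_size, pool->p.dma_dir);
	#endif
	}

On !DMA_NEED_SYNC configs, dma_dev_need_sync() should be constant false
anyway, so the __always_inline wrapper lets the compiler drop the whole
call chain.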

> +}
> +
> +static __always_inline void
> +page_pool_dma_sync_for_device(const struct page_pool *pool,
> +			      const struct page *page,
> +			      u32 dma_sync_size)
> +{
> +	if (pool->dma_sync && dma_dev_need_sync(pool->p.dev))
> +		__page_pool_dma_sync_for_device(pool, page, dma_sync_size);
>  }
>
>  static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
> @@ -429,8 +437,7 @@ static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
>  	if (page_pool_set_dma_addr(page, dma))
>  		goto unmap_failed;
> 
> -	if (pool->dma_sync)
> -		page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
> +	page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
> 
>  	return true;
> 
> @@ -699,9 +706,7 @@ __page_pool_put_page(struct page_pool *pool, struct page *page,
>  	if (likely(__page_pool_page_can_be_recycled(page))) {
>  		/* Read barrier done in page_ref_count / READ_ONCE */
> 
> -		if (pool->dma_sync)
> -			page_pool_dma_sync_for_device(pool, page,
> -						      dma_sync_size);
> +		page_pool_dma_sync_for_device(pool, page, dma_sync_size);
> 
>  		if (allow_direct && page_pool_recycle_in_cache(page, pool))
>  			return NULL;
> @@ -840,9 +845,7 @@ static struct page *page_pool_drain_frag(struct page_pool *pool,
>  		return NULL;
> 
>  	if (__page_pool_page_can_be_recycled(page)) {
> -		if (pool->dma_sync)
> -			page_pool_dma_sync_for_device(pool, page, -1);
> -
> +		page_pool_dma_sync_for_device(pool, page, -1);
>  		return page;
>  	}

Thanks,
Olek
