Subject: [PATCH 4/4] xsk: use dma_need_sync instead of reimplementing it
Use the dma_need_sync helper instead of (not always entirely correctly)
poking into the dma-mapping internals.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
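[Note, not part of the commit: for anyone who has not seen the new helper yet, below is a minimal, illustrative sketch of the dma_need_sync() usage pattern this series introduces. All my_* names are made up for the example; the real xsk conversion is in the diff that follows. The point is that the DMA API answers "does this mapping ever need cache syncs?" once at map time, so the per-packet hot path only tests a cached flag instead of re-deriving swiotlb/coherency details itself.]

/* Illustrative only; all my_* names are hypothetical. */
#include <linux/dma-mapping.h>
#include <linux/mm.h>

struct my_rx_ring {
        struct device *dev;
        dma_addr_t dma;
        bool dma_need_sync;     /* cached once at map time */
};

static int my_ring_map(struct my_rx_ring *ring, struct page *page)
{
        ring->dma = dma_map_page(ring->dev, page, 0, PAGE_SIZE,
                                 DMA_FROM_DEVICE);
        if (dma_mapping_error(ring->dev, ring->dma))
                return -ENOMEM;

        /* Ask the DMA API instead of poking at dma_map_ops/swiotlb. */
        ring->dma_need_sync = dma_need_sync(ring->dev, ring->dma);
        return 0;
}

static void my_ring_rx(struct my_rx_ring *ring, size_t len)
{
        /* Hot path: only pay for the sync when the mapping needs it. */
        if (ring->dma_need_sync)
                dma_sync_single_for_cpu(ring->dev, ring->dma, len,
                                        DMA_FROM_DEVICE);
}
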
net/xdp/xsk_buff_pool.c | 50 +++--------------------------------------
1 file changed, 3 insertions(+), 47 deletions(-)

diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index 6733e2c59e4835..08b80669f64955 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -2,9 +2,6 @@
 
 #include <net/xsk_buff_pool.h>
 #include <net/xdp_sock.h>
-#include <linux/dma-direct.h>
-#include <linux/dma-noncoherent.h>
-#include <linux/swiotlb.h>
 
 #include "xsk_queue.h"
 
@@ -124,48 +121,6 @@ static void xp_check_dma_contiguity(struct xsk_buff_pool *pool)
         }
 }
 
-static bool __maybe_unused xp_check_swiotlb_dma(struct xsk_buff_pool *pool)
-{
-#if defined(CONFIG_SWIOTLB)
-        phys_addr_t paddr;
-        u32 i;
-
-        for (i = 0; i < pool->dma_pages_cnt; i++) {
-                paddr = dma_to_phys(pool->dev, pool->dma_pages[i]);
-                if (is_swiotlb_buffer(paddr))
-                        return false;
-        }
-#endif
-        return true;
-}
-
-static bool xp_check_cheap_dma(struct xsk_buff_pool *pool)
-{
-#if defined(CONFIG_HAS_DMA)
-        const struct dma_map_ops *ops = get_dma_ops(pool->dev);
-
-        if (ops) {
-                return !ops->sync_single_for_cpu &&
-                        !ops->sync_single_for_device;
-        }
-
-        if (!dma_is_direct(ops))
-                return false;
-
-        if (!xp_check_swiotlb_dma(pool))
-                return false;
-
-        if (!dev_is_dma_coherent(pool->dev)) {
-#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
-    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
-    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE)
-                return false;
-#endif
-        }
-#endif
-        return true;
-}
-
 int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
                unsigned long attrs, struct page **pages, u32 nr_pages)
 {
@@ -179,6 +134,7 @@ int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
 
         pool->dev = dev;
         pool->dma_pages_cnt = nr_pages;
+        pool->dma_need_sync = false;
 
         for (i = 0; i < pool->dma_pages_cnt; i++) {
                 dma = dma_map_page_attrs(dev, pages[i], 0, PAGE_SIZE,
@@ -187,13 +143,13 @@ int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
                         xp_dma_unmap(pool, attrs);
                         return -ENOMEM;
                 }
+                if (dma_need_sync(dev, dma))
+                        pool->dma_need_sync = true;
                 pool->dma_pages[i] = dma;
         }
 
         if (pool->unaligned)
                 xp_check_dma_contiguity(pool);
-
-        pool->dma_need_sync = !xp_check_cheap_dma(pool);
         return 0;
 }
 EXPORT_SYMBOL(xp_dma_map);
--
2.26.2