Subject: [PATCH v2 6/8] staging: octeon: Remove USE_ASYNC_IOBDMA macro.

The previous patch sets USE_ASYNC_IOBDMA to 1 unconditionally.  Remove
USE_ASYNC_IOBDMA from all if statements and remove the dead code that
results from the change.

Signed-off-by: David Daney <david.daney@cavium.com>
---
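A minimal before/after sketch of the transformation this patch applies,
using the rx-path scratch save as the example (the identifiers are the
driver's existing ones; this is illustrative, not a complete function):

	/* Before: the asynchronous IOBDMA path was guarded by the macro. */
	if (USE_ASYNC_IOBDMA) {
		/* Save scratch in case userspace is using it */
		CVMX_SYNCIOBDMA;
		old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
	}

	/* After: USE_ASYNC_IOBDMA is always 1, so the guard goes away
	 * and the asynchronous path runs unconditionally.
	 */
	CVMX_SYNCIOBDMA;
	old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
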
 drivers/staging/octeon/ethernet-defines.h |  6 ---
 drivers/staging/octeon/ethernet-rx.c      | 25 ++++-----
 drivers/staging/octeon/ethernet-tx.c      | 85 ++++++++++---------------------
 3 files changed, 37 insertions(+), 79 deletions(-)

diff --git a/drivers/staging/octeon/ethernet-defines.h b/drivers/staging/octeon/ethernet-defines.h
index e898df25b87f..21438c804a43 100644
--- a/drivers/staging/octeon/ethernet-defines.h
+++ b/drivers/staging/octeon/ethernet-defines.h
@@ -10,10 +10,6 @@

/*
* A few defines are used to control the operation of this driver:
- * USE_ASYNC_IOBDMA
- * Use asynchronous IO access to hardware. This uses Octeon's asynchronous
- * IOBDMAs to issue IO accesses without stalling. Set this to zero
- * to disable this. Note that IOBDMAs require CVMSEG.
* REUSE_SKBUFFS_WITHOUT_FREE
* Allows the TX path to free an skbuff into the FPA hardware pool. This
* can significantly improve performance for forwarding and bridging, but
@@ -32,8 +28,6 @@
#define REUSE_SKBUFFS_WITHOUT_FREE 1
#endif

-#define USE_ASYNC_IOBDMA 1
-
/* Maximum number of SKBs to try to free per xmit packet. */
#define MAX_OUT_QUEUE_DEPTH 1000

diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index 1a44291318ee..dd76c99d5ae0 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -201,11 +201,9 @@ static int cvm_oct_poll(struct oct_rx_group *rx_group, int budget)
/* Prefetch cvm_oct_device since we know we need it soon */
prefetch(cvm_oct_device);

- if (USE_ASYNC_IOBDMA) {
- /* Save scratch in case userspace is using it */
- CVMX_SYNCIOBDMA;
- old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
- }
+ /* Save scratch in case userspace is using it */
+ CVMX_SYNCIOBDMA;
+ old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);

/* Only allow work for our group (and preserve priorities) */
if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
@@ -220,10 +218,8 @@ static int cvm_oct_poll(struct oct_rx_group *rx_group, int budget)
BIT(rx_group->group));
}

- if (USE_ASYNC_IOBDMA) {
- cvmx_pow_work_request_async(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
- did_work_request = 1;
- }
+ cvmx_pow_work_request_async(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
+ did_work_request = 1;

while (rx_count < budget) {
struct sk_buff *skb = NULL;
@@ -232,7 +228,7 @@ static int cvm_oct_poll(struct oct_rx_group *rx_group, int budget)
cvmx_wqe_t *work;
int port;

- if (USE_ASYNC_IOBDMA && did_work_request)
+ if (did_work_request)
work = cvmx_pow_work_response_async(CVMX_SCR_SCRATCH);
else
work = cvmx_pow_work_request_sync(CVMX_POW_NO_WAIT);
@@ -260,7 +256,7 @@ static int cvm_oct_poll(struct oct_rx_group *rx_group, int budget)
sizeof(void *));
prefetch(pskb);

- if (USE_ASYNC_IOBDMA && rx_count < (budget - 1)) {
+ if (rx_count < (budget - 1)) {
cvmx_pow_work_request_async_nocheck(CVMX_SCR_SCRATCH,
CVMX_POW_NO_WAIT);
did_work_request = 1;
@@ -403,10 +399,9 @@ static int cvm_oct_poll(struct oct_rx_group *rx_group, int budget)
cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask);
}

- if (USE_ASYNC_IOBDMA) {
- /* Restore the scratch area */
- cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
- }
+ /* Restore the scratch area */
+ cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
+
cvm_oct_rx_refill_pool(0);

return rx_count;
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index 31f35025d19e..2eede0907924 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -179,23 +179,18 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
qos = 0;
}

- if (USE_ASYNC_IOBDMA) {
- /* Save scratch in case userspace is using it */
- CVMX_SYNCIOBDMA;
- old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
- old_scratch2 = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);
-
- /*
- * Fetch and increment the number of packets to be
- * freed.
- */
- cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH + 8,
- FAU_NUM_PACKET_BUFFERS_TO_FREE,
- 0);
- cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH,
- priv->fau + qos * 4,
- MAX_SKB_TO_FREE);
- }
+ /* Save scratch in case userspace is using it */
+ CVMX_SYNCIOBDMA;
+ old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
+ old_scratch2 = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);
+
+ /* Fetch and increment the number of packets to be freed. */
+ cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH + 8,
+ FAU_NUM_PACKET_BUFFERS_TO_FREE,
+ 0);
+ cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH,
+ priv->fau + qos * 4,
+ MAX_SKB_TO_FREE);

/*
* We have space for 6 segment pointers, If there will be more
@@ -204,22 +199,11 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(skb_shinfo(skb)->nr_frags > 5)) {
if (unlikely(__skb_linearize(skb))) {
queue_type = QUEUE_DROP;
- if (USE_ASYNC_IOBDMA) {
- /*
- * Get the number of skbuffs in use
- * by the hardware
- */
- CVMX_SYNCIOBDMA;
- skb_to_free =
- cvmx_scratch_read64(CVMX_SCR_SCRATCH);
- } else {
- /*
- * Get the number of skbuffs in use
- * by the hardware
- */
- skb_to_free = cvmx_fau_fetch_and_add32(
- priv->fau + qos * 4, MAX_SKB_TO_FREE);
- }
+ /* Get the number of skbuffs in use by the
+ * hardware
+ */
+ CVMX_SYNCIOBDMA;
+ skb_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
priv->fau +
qos * 4);
@@ -387,18 +371,10 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
pko_command.s.ipoffp1 = skb_network_offset(skb) + 1;
}

- if (USE_ASYNC_IOBDMA) {
- /* Get the number of skbuffs in use by the hardware */
- CVMX_SYNCIOBDMA;
- skb_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
- buffers_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);
- } else {
- /* Get the number of skbuffs in use by the hardware */
- skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
- MAX_SKB_TO_FREE);
- buffers_to_free =
- cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
- }
+ /* Get the number of skbuffs in use by the hardware */
+ CVMX_SYNCIOBDMA;
+ skb_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
+ buffers_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);

skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
priv->fau + qos * 4);
@@ -416,9 +392,7 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
} else {
queue_type = QUEUE_HW;
}
- if (USE_ASYNC_IOBDMA)
- cvmx_fau_async_fetch_and_add32(
- CVMX_SCR_SCRATCH, FAU_TOTAL_TX_TO_CLEAN, 1);
+ cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH, FAU_TOTAL_TX_TO_CLEAN, 1);

spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);

@@ -488,16 +462,11 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
dev_kfree_skb_any(t);
}

- if (USE_ASYNC_IOBDMA) {
- CVMX_SYNCIOBDMA;
- total_to_clean = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
- /* Restore the scratch area */
- cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
- cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2);
- } else {
- total_to_clean = cvmx_fau_fetch_and_add32(
- FAU_TOTAL_TX_TO_CLEAN, 1);
- }
+ CVMX_SYNCIOBDMA;
+ total_to_clean = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
+ /* Restore the scratch area */
+ cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
+ cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2);

if (total_to_clean & 0x3ff) {
/*
--
2.13.6