    Subject: [PATCH 4.10 23/62] sh_eth: unmap DMA buffers when freeing rings
    4.10-stable review patch.  If anyone has any objections, please let me know.

    ------------------

    From: Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>


    [ Upstream commit 1debdc8f9ebd07daf140e417b3841596911e0066 ]

    The DMA API debugging (when enabled) causes:

    WARNING: CPU: 0 PID: 1445 at lib/dma-debug.c:519 add_dma_entry+0xe0/0x12c
    DMA-API: exceeded 7 overlapping mappings of cacheline 0x01b2974d

    to be printed after repeated initialization of the Ether device, e.g. on
    suspend/resume or 'ifconfig' up/down. This is because the DMA buffers mapped
    with dma_map_single() in sh_eth_ring_format() and sh_eth_start_xmit() are
    never unmapped. Resolve this problem by unmapping the buffers when the
    descriptor rings are freed; doing that correctly requires an extra parameter
    to sh_eth_txfree(), which is renamed to sh_eth_tx_free() while we're at it.

    Based on the commit a47b70ea86bd ("ravb: unmap descriptors when freeing
    rings").

    Signed-off-by: Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
    Signed-off-by: David S. Miller <davem@davemloft.net>
    Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
    ---
    drivers/net/ethernet/renesas/sh_eth.c | 122 ++++++++++++++++++----------------
    1 file changed, 67 insertions(+), 55 deletions(-)

    --- a/drivers/net/ethernet/renesas/sh_eth.c
    +++ b/drivers/net/ethernet/renesas/sh_eth.c
    @@ -1061,12 +1061,70 @@ static struct mdiobb_ops bb_ops = {
     	.get_mdio_data = sh_get_mdio,
     };
     
    +/* free Tx skb function */
    +static int sh_eth_tx_free(struct net_device *ndev, bool sent_only)
    +{
    +	struct sh_eth_private *mdp = netdev_priv(ndev);
    +	struct sh_eth_txdesc *txdesc;
    +	int free_num = 0;
    +	int entry;
    +	bool sent;
    +
    +	for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
    +		entry = mdp->dirty_tx % mdp->num_tx_ring;
    +		txdesc = &mdp->tx_ring[entry];
    +		sent = !(txdesc->status & cpu_to_le32(TD_TACT));
    +		if (sent_only && !sent)
    +			break;
    +		/* TACT bit must be checked before all the following reads */
    +		dma_rmb();
    +		netif_info(mdp, tx_done, ndev,
    +			   "tx entry %d status 0x%08x\n",
    +			   entry, le32_to_cpu(txdesc->status));
    +		/* Free the original skb. */
    +		if (mdp->tx_skbuff[entry]) {
    +			dma_unmap_single(&ndev->dev, le32_to_cpu(txdesc->addr),
    +					 le32_to_cpu(txdesc->len) >> 16,
    +					 DMA_TO_DEVICE);
    +			dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
    +			mdp->tx_skbuff[entry] = NULL;
    +			free_num++;
    +		}
    +		txdesc->status = cpu_to_le32(TD_TFP);
    +		if (entry >= mdp->num_tx_ring - 1)
    +			txdesc->status |= cpu_to_le32(TD_TDLE);
    +
    +		if (sent) {
    +			ndev->stats.tx_packets++;
    +			ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16;
    +		}
    +	}
    +	return free_num;
    +}
    +
     /* free skb and descriptor buffer */
     static void sh_eth_ring_free(struct net_device *ndev)
     {
     	struct sh_eth_private *mdp = netdev_priv(ndev);
     	int ringsize, i;
     
    +	if (mdp->rx_ring) {
    +		for (i = 0; i < mdp->num_rx_ring; i++) {
    +			if (mdp->rx_skbuff[i]) {
    +				struct sh_eth_rxdesc *rxdesc = &mdp->rx_ring[i];
    +
    +				dma_unmap_single(&ndev->dev,
    +						 le32_to_cpu(rxdesc->addr),
    +						 ALIGN(mdp->rx_buf_sz, 32),
    +						 DMA_FROM_DEVICE);
    +			}
    +		}
    +		ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
    +		dma_free_coherent(NULL, ringsize, mdp->rx_ring,
    +				  mdp->rx_desc_dma);
    +		mdp->rx_ring = NULL;
    +	}
    +
     	/* Free Rx skb ringbuffer */
     	if (mdp->rx_skbuff) {
     		for (i = 0; i < mdp->num_rx_ring; i++)
    @@ -1075,27 +1133,18 @@ static void sh_eth_ring_free(struct net_
     	kfree(mdp->rx_skbuff);
     	mdp->rx_skbuff = NULL;
     
    -	/* Free Tx skb ringbuffer */
    -	if (mdp->tx_skbuff) {
    -		for (i = 0; i < mdp->num_tx_ring; i++)
    -			dev_kfree_skb(mdp->tx_skbuff[i]);
    -	}
    -	kfree(mdp->tx_skbuff);
    -	mdp->tx_skbuff = NULL;
    -
    -	if (mdp->rx_ring) {
    -		ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
    -		dma_free_coherent(NULL, ringsize, mdp->rx_ring,
    -				  mdp->rx_desc_dma);
    -		mdp->rx_ring = NULL;
    -	}
    -
     	if (mdp->tx_ring) {
    +		sh_eth_tx_free(ndev, false);
    +
     		ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
     		dma_free_coherent(NULL, ringsize, mdp->tx_ring,
     				  mdp->tx_desc_dma);
     		mdp->tx_ring = NULL;
     	}
    +
    +	/* Free Tx skb ringbuffer */
    +	kfree(mdp->tx_skbuff);
    +	mdp->tx_skbuff = NULL;
     }
     
     /* format skb and descriptor buffer */
    @@ -1343,43 +1392,6 @@ static void sh_eth_dev_exit(struct net_d
     	update_mac_address(ndev);
     }
     
    -/* free Tx skb function */
    -static int sh_eth_txfree(struct net_device *ndev)
    -{
    -	struct sh_eth_private *mdp = netdev_priv(ndev);
    -	struct sh_eth_txdesc *txdesc;
    -	int free_num = 0;
    -	int entry;
    -
    -	for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
    -		entry = mdp->dirty_tx % mdp->num_tx_ring;
    -		txdesc = &mdp->tx_ring[entry];
    -		if (txdesc->status & cpu_to_le32(TD_TACT))
    -			break;
    -		/* TACT bit must be checked before all the following reads */
    -		dma_rmb();
    -		netif_info(mdp, tx_done, ndev,
    -			   "tx entry %d status 0x%08x\n",
    -			   entry, le32_to_cpu(txdesc->status));
    -		/* Free the original skb. */
    -		if (mdp->tx_skbuff[entry]) {
    -			dma_unmap_single(&ndev->dev, le32_to_cpu(txdesc->addr),
    -					 le32_to_cpu(txdesc->len) >> 16,
    -					 DMA_TO_DEVICE);
    -			dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
    -			mdp->tx_skbuff[entry] = NULL;
    -			free_num++;
    -		}
    -		txdesc->status = cpu_to_le32(TD_TFP);
    -		if (entry >= mdp->num_tx_ring - 1)
    -			txdesc->status |= cpu_to_le32(TD_TDLE);
    -
    -		ndev->stats.tx_packets++;
    -		ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16;
    -	}
    -	return free_num;
    -}
    -
     /* Packet receive function */
     static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
     {
    @@ -1622,7 +1634,7 @@ ignore_link:
     			   intr_status, mdp->cur_tx, mdp->dirty_tx,
     			   (u32)ndev->state, edtrr);
     		/* dirty buffer free */
    -		sh_eth_txfree(ndev);
    +		sh_eth_tx_free(ndev, true);
     
     		/* SH7712 BUG */
     		if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
    @@ -1681,7 +1693,7 @@ static irqreturn_t sh_eth_interrupt(int
     		/* Clear Tx interrupts */
     		sh_eth_write(ndev, intr_status & cd->tx_check, EESR);
     
    -		sh_eth_txfree(ndev);
    +		sh_eth_tx_free(ndev, true);
     		netif_wake_queue(ndev);
     	}
     
    @@ -2309,7 +2321,7 @@ static int sh_eth_start_xmit(struct sk_b
     
     	spin_lock_irqsave(&mdp->lock, flags);
     	if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
    -		if (!sh_eth_txfree(ndev)) {
    +		if (!sh_eth_tx_free(ndev, true)) {
     			netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n");
     			netif_stop_queue(ndev);
     			spin_unlock_irqrestore(&mdp->lock, flags);
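
    In short, the renamed helper now has two calling conventions (a summary
    of the hunks above, not additional driver code):

    	sh_eth_tx_free(ndev, true);	/* IRQ/xmit paths: reclaim only entries
    					 * the hardware has released (TD_TACT
    					 * clear) */
    	sh_eth_tx_free(ndev, false);	/* ring teardown: unmap and free every
    					 * outstanding buffer, sent or not */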
