Subject: [PATCH 4.9 065/108] ravb: unmap descriptors when freeing rings

    4.9-stable review patch.  If anyone has any objections, please let me know.

    ------------------

    From: Kazuya Mizuguchi <kazuya.mizuguchi.ks@renesas.com>


    [ Upstream commit a47b70ea86bdeb3091341f5ae3ef580f1a1ad822 ]

    "swiotlb buffer is full" errors occur after repeated initialisation of a
device - e.g. suspend/resume or ip link set up/down. This is because memory
    mapped using dma_map_single() in ravb_ring_format() and ravb_start_xmit()
    is not released. Resolve this problem by unmapping descriptors when
    freeing rings.
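
    The rule being restored here is the usual streaming-DMA pairing: every
    dma_map_single() must eventually be matched by a dma_unmap_single() with
    the same handle, size and direction before the backing memory is reused
    or freed. A minimal sketch of that pairing for an RX ring (illustrative
    only; the my_desc and my_free_rx_ring names are hypothetical and not part
    of the ravb driver):

	#include <linux/dma-mapping.h>

	struct my_desc {
		dma_addr_t dma_addr;	/* handle returned by dma_map_single() */
		size_t len;		/* mapped length */
	};

	/* Unmap every successfully mapped RX buffer before the ring is freed */
	static void my_free_rx_ring(struct device *dev, struct my_desc *ring,
				    int num)
	{
		int i;

		for (i = 0; i < num; i++) {
			/* Skip entries whose mapping failed or was never made */
			if (dma_mapping_error(dev, ring[i].dma_addr))
				continue;
			/* Undo the dma_map_single() done when the ring was filled */
			dma_unmap_single(dev, ring[i].dma_addr, ring[i].len,
					 DMA_FROM_DEVICE);
		}
	}

    In the patch below, ravb_ring_free() does exactly this for the RX ring,
    and reuses ravb_tx_free(ndev, q, false) to perform the equivalent unmap
    for the TX ring before the descriptor memory is returned with
    dma_free_coherent().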

    Fixes: c156633f1353 ("Renesas Ethernet AVB driver proper")
    Signed-off-by: Kazuya Mizuguchi <kazuya.mizuguchi.ks@renesas.com>
    [simon: reworked]
    Signed-off-by: Simon Horman <horms+renesas@verge.net.au>
    Acked-by: Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
    Signed-off-by: David S. Miller <davem@davemloft.net>

    Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
    Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
    ---
    drivers/net/ethernet/renesas/ravb_main.c | 112 +++++++++++++++++--------------
    1 file changed, 64 insertions(+), 48 deletions(-)

    --- a/drivers/net/ethernet/renesas/ravb_main.c
    +++ b/drivers/net/ethernet/renesas/ravb_main.c
    @@ -179,6 +179,49 @@ static struct mdiobb_ops bb_ops = {
 	.get_mdio_data = ravb_get_mdio_data,
 };
 
+/* Free TX skb function for AVB-IP */
+static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
+{
+	struct ravb_private *priv = netdev_priv(ndev);
+	struct net_device_stats *stats = &priv->stats[q];
+	struct ravb_tx_desc *desc;
+	int free_num = 0;
+	int entry;
+	u32 size;
+
+	for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
+		bool txed;
+
+		entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
+					     NUM_TX_DESC);
+		desc = &priv->tx_ring[q][entry];
+		txed = desc->die_dt == DT_FEMPTY;
+		if (free_txed_only && !txed)
+			break;
+		/* Descriptor type must be checked before all other reads */
+		dma_rmb();
+		size = le16_to_cpu(desc->ds_tagl) & TX_DS;
+		/* Free the original skb. */
+		if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
+			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
+					 size, DMA_TO_DEVICE);
+			/* Last packet descriptor? */
+			if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
+				entry /= NUM_TX_DESC;
+				dev_kfree_skb_any(priv->tx_skb[q][entry]);
+				priv->tx_skb[q][entry] = NULL;
+				if (txed)
+					stats->tx_packets++;
+			}
+			free_num++;
+		}
+		if (txed)
+			stats->tx_bytes += size;
+		desc->die_dt = DT_EEMPTY;
+	}
+	return free_num;
+}
+
 /* Free skb's and DMA buffers for Ethernet AVB */
 static void ravb_ring_free(struct net_device *ndev, int q)
 {
    @@ -194,19 +237,21 @@ static void ravb_ring_free(struct net_de
 	kfree(priv->rx_skb[q]);
 	priv->rx_skb[q] = NULL;
 
-	/* Free TX skb ringbuffer */
-	if (priv->tx_skb[q]) {
-		for (i = 0; i < priv->num_tx_ring[q]; i++)
-			dev_kfree_skb(priv->tx_skb[q][i]);
-	}
-	kfree(priv->tx_skb[q]);
-	priv->tx_skb[q] = NULL;
-
 	/* Free aligned TX buffers */
 	kfree(priv->tx_align[q]);
 	priv->tx_align[q] = NULL;
 
 	if (priv->rx_ring[q]) {
+		for (i = 0; i < priv->num_rx_ring[q]; i++) {
+			struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
+
+			if (!dma_mapping_error(ndev->dev.parent,
+					       le32_to_cpu(desc->dptr)))
+				dma_unmap_single(ndev->dev.parent,
+						 le32_to_cpu(desc->dptr),
+						 PKT_BUF_SZ,
+						 DMA_FROM_DEVICE);
+		}
 		ring_size = sizeof(struct ravb_ex_rx_desc) *
 			    (priv->num_rx_ring[q] + 1);
 		dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
    @@ -215,12 +260,20 @@ static void ravb_ring_free(struct net_de
 	}
 
 	if (priv->tx_ring[q]) {
+		ravb_tx_free(ndev, q, false);
+
 		ring_size = sizeof(struct ravb_tx_desc) *
 			    (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
 		dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
 				  priv->tx_desc_dma[q]);
 		priv->tx_ring[q] = NULL;
 	}
+
+	/* Free TX skb ringbuffer.
+	 * SKBs are freed by ravb_tx_free() call above.
+	 */
+	kfree(priv->tx_skb[q]);
+	priv->tx_skb[q] = NULL;
 }
 
 /* Format skb and descriptor buffer for Ethernet AVB */
    @@ -431,44 +484,6 @@ static int ravb_dmac_init(struct net_dev
 	return 0;
 }
 
-/* Free TX skb function for AVB-IP */
-static int ravb_tx_free(struct net_device *ndev, int q)
-{
-	struct ravb_private *priv = netdev_priv(ndev);
-	struct net_device_stats *stats = &priv->stats[q];
-	struct ravb_tx_desc *desc;
-	int free_num = 0;
-	int entry;
-	u32 size;
-
-	for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
-		entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
-					     NUM_TX_DESC);
-		desc = &priv->tx_ring[q][entry];
-		if (desc->die_dt != DT_FEMPTY)
-			break;
-		/* Descriptor type must be checked before all other reads */
-		dma_rmb();
-		size = le16_to_cpu(desc->ds_tagl) & TX_DS;
-		/* Free the original skb. */
-		if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
-			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
-					 size, DMA_TO_DEVICE);
-			/* Last packet descriptor? */
-			if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
-				entry /= NUM_TX_DESC;
-				dev_kfree_skb_any(priv->tx_skb[q][entry]);
-				priv->tx_skb[q][entry] = NULL;
-				stats->tx_packets++;
-			}
-			free_num++;
-		}
-		stats->tx_bytes += size;
-		desc->die_dt = DT_EEMPTY;
-	}
-	return free_num;
-}
-
 static void ravb_get_tx_tstamp(struct net_device *ndev)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
    @@ -902,7 +917,7 @@ static int ravb_poll(struct napi_struct
 			spin_lock_irqsave(&priv->lock, flags);
 			/* Clear TX interrupt */
 			ravb_write(ndev, ~mask, TIS);
-			ravb_tx_free(ndev, q);
+			ravb_tx_free(ndev, q, true);
 			netif_wake_subqueue(ndev, q);
 			mmiowb();
 			spin_unlock_irqrestore(&priv->lock, flags);
    @@ -1571,7 +1586,8 @@ static netdev_tx_t ravb_start_xmit(struc

 	priv->cur_tx[q] += NUM_TX_DESC;
 	if (priv->cur_tx[q] - priv->dirty_tx[q] >
-	    (priv->num_tx_ring[q] - 1) * NUM_TX_DESC && !ravb_tx_free(ndev, q))
+	    (priv->num_tx_ring[q] - 1) * NUM_TX_DESC &&
+	    !ravb_tx_free(ndev, q, true))
 		netif_stop_subqueue(ndev, q);
 
 exit:
