Subject: [PATCH 02/44] ibmveth: properly unwind on init errors
That way the driver doesn't have to rely on DMA_ERROR_CODE, which
is not a public API and is going away.

    Signed-off-by: Christoph Hellwig <hch@lst.de>
    Acked-by: David S. Miller <davem@davemloft.net>
    ---
    drivers/net/ethernet/ibm/ibmveth.c | 159 +++++++++++++++++--------------------
    1 file changed, 74 insertions(+), 85 deletions(-)
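
[ Reviewer note, not part of the commit message: the patch replaces the
  catch-all ibmveth_cleanup(), which had to probe every resource with
  NULL checks and dma_mapping_error() against the DMA_ERROR_CODE
  sentinel, with labelled unwind targets, so each failure path releases
  exactly what was already set up, in reverse order of acquisition.
  Below is a minimal userspace sketch of that idiom; the names
  (ctx_init, buf_a, buf_b) are hypothetical and plain malloc()/free()
  stand in for the page allocations and DMA mappings:

#include <stdio.h>
#include <stdlib.h>

struct ctx {
	void *buf_a;
	void *buf_b;
};

static int ctx_init(struct ctx *c)
{
	c->buf_a = malloc(4096);
	if (!c->buf_a)
		goto out;

	c->buf_b = malloc(4096);
	if (!c->buf_b)
		goto out_free_buf_a;

	return 0;

out_free_buf_a:
	/* unwind only what was acquired before the failure */
	free(c->buf_a);
out:
	return -1;	/* a kernel driver would return -ENOMEM here */
}

int main(void)
{
	struct ctx c;

	if (ctx_init(&c))
		return 1;
	/* teardown mirrors the unwind order, cf. ibmveth_close() */
	free(c.buf_b);
	free(c.buf_a);
	puts("init and teardown ok");
	return 0;
}

  Because each label frees only what was acquired before the jump, no
  sentinel such as DMA_ERROR_CODE is needed to remember whether a
  given mapping succeeded. ]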

    diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
    index 72ab7b6bf20b..3ac27f59e595 100644
    --- a/drivers/net/ethernet/ibm/ibmveth.c
    +++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -467,56 +467,6 @@ static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
 	}
 }
 
-static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
-{
-	int i;
-	struct device *dev = &adapter->vdev->dev;
-
-	if (adapter->buffer_list_addr != NULL) {
-		if (!dma_mapping_error(dev, adapter->buffer_list_dma)) {
-			dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
-					 DMA_BIDIRECTIONAL);
-			adapter->buffer_list_dma = DMA_ERROR_CODE;
-		}
-		free_page((unsigned long)adapter->buffer_list_addr);
-		adapter->buffer_list_addr = NULL;
-	}
-
-	if (adapter->filter_list_addr != NULL) {
-		if (!dma_mapping_error(dev, adapter->filter_list_dma)) {
-			dma_unmap_single(dev, adapter->filter_list_dma, 4096,
-					 DMA_BIDIRECTIONAL);
-			adapter->filter_list_dma = DMA_ERROR_CODE;
-		}
-		free_page((unsigned long)adapter->filter_list_addr);
-		adapter->filter_list_addr = NULL;
-	}
-
-	if (adapter->rx_queue.queue_addr != NULL) {
-		dma_free_coherent(dev, adapter->rx_queue.queue_len,
-				  adapter->rx_queue.queue_addr,
-				  adapter->rx_queue.queue_dma);
-		adapter->rx_queue.queue_addr = NULL;
-	}
-
-	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
-		if (adapter->rx_buff_pool[i].active)
-			ibmveth_free_buffer_pool(adapter,
-						 &adapter->rx_buff_pool[i]);
-
-	if (adapter->bounce_buffer != NULL) {
-		if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
-			dma_unmap_single(&adapter->vdev->dev,
-					 adapter->bounce_buffer_dma,
-					 adapter->netdev->mtu + IBMVETH_BUFF_OH,
-					 DMA_BIDIRECTIONAL);
-			adapter->bounce_buffer_dma = DMA_ERROR_CODE;
-		}
-		kfree(adapter->bounce_buffer);
-		adapter->bounce_buffer = NULL;
-	}
-}
-
 static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
 		union ibmveth_buf_desc rxq_desc, u64 mac_address)
 {
@@ -573,14 +523,17 @@ static int ibmveth_open(struct net_device *netdev)
 	for(i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
 		rxq_entries += adapter->rx_buff_pool[i].size;
 
+	rc = -ENOMEM;
 	adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
-	adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
+	if (!adapter->buffer_list_addr) {
+		netdev_err(netdev, "unable to allocate list pages\n");
+		goto out;
+	}
 
-	if (!adapter->buffer_list_addr || !adapter->filter_list_addr) {
-		netdev_err(netdev, "unable to allocate filter or buffer list "
-			   "pages\n");
-		rc = -ENOMEM;
-		goto err_out;
+	adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
+	if (!adapter->filter_list_addr) {
+		netdev_err(netdev, "unable to allocate filter pages\n");
+		goto out_free_buffer_list;
 	}
 
 	dev = &adapter->vdev->dev;
@@ -590,22 +543,21 @@ static int ibmveth_open(struct net_device *netdev)
 	adapter->rx_queue.queue_addr =
 		dma_alloc_coherent(dev, adapter->rx_queue.queue_len,
 				   &adapter->rx_queue.queue_dma, GFP_KERNEL);
-	if (!adapter->rx_queue.queue_addr) {
-		rc = -ENOMEM;
-		goto err_out;
-	}
+	if (!adapter->rx_queue.queue_addr)
+		goto out_free_filter_list;
 
 	adapter->buffer_list_dma = dma_map_single(dev,
 			adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(dev, adapter->buffer_list_dma)) {
+		netdev_err(netdev, "unable to map buffer list pages\n");
+		goto out_free_queue_mem;
+	}
+
 	adapter->filter_list_dma = dma_map_single(dev,
 			adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
-
-	if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
-	    (dma_mapping_error(dev, adapter->filter_list_dma))) {
-		netdev_err(netdev, "unable to map filter or buffer list "
-			   "pages\n");
-		rc = -ENOMEM;
-		goto err_out;
+	if (dma_mapping_error(dev, adapter->filter_list_dma)) {
+		netdev_err(netdev, "unable to map filter list pages\n");
+		goto out_unmap_buffer_list;
 	}
 
 	adapter->rx_queue.index = 0;
@@ -636,7 +588,7 @@ static int ibmveth_open(struct net_device *netdev)
 			   rxq_desc.desc,
 			   mac_address);
 		rc = -ENONET;
-		goto err_out;
+		goto out_unmap_filter_list;
 	}
 
 	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
@@ -646,7 +598,7 @@ static int ibmveth_open(struct net_device *netdev)
 			netdev_err(netdev, "unable to alloc pool\n");
 			adapter->rx_buff_pool[i].active = 0;
 			rc = -ENOMEM;
-			goto err_out;
+			goto out_free_buffer_pools;
 		}
 	}
 
@@ -660,22 +612,21 @@ static int ibmveth_open(struct net_device *netdev)
 			lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
 		} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));
 
-		goto err_out;
+		goto out_free_buffer_pools;
 	}
 
+	rc = -ENOMEM;
 	adapter->bounce_buffer =
 		kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
-	if (!adapter->bounce_buffer) {
-		rc = -ENOMEM;
-		goto err_out_free_irq;
-	}
+	if (!adapter->bounce_buffer)
+		goto out_free_irq;
+
 	adapter->bounce_buffer_dma =
 		dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
 			       netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
 		netdev_err(netdev, "unable to map bounce buffer\n");
-		rc = -ENOMEM;
-		goto err_out_free_irq;
+		goto out_free_bounce_buffer;
 	}
 
 	netdev_dbg(netdev, "initial replenish cycle\n");
@@ -687,10 +638,31 @@ static int ibmveth_open(struct net_device *netdev)
 
 	return 0;
 
-err_out_free_irq:
+out_free_bounce_buffer:
+	kfree(adapter->bounce_buffer);
+out_free_irq:
 	free_irq(netdev->irq, netdev);
-err_out:
-	ibmveth_cleanup(adapter);
+out_free_buffer_pools:
+	while (--i >= 0) {
+		if (adapter->rx_buff_pool[i].active)
+			ibmveth_free_buffer_pool(adapter,
+						 &adapter->rx_buff_pool[i]);
+	}
+out_unmap_filter_list:
+	dma_unmap_single(dev, adapter->filter_list_dma, 4096,
+			 DMA_BIDIRECTIONAL);
+out_unmap_buffer_list:
+	dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
+			 DMA_BIDIRECTIONAL);
+out_free_queue_mem:
+	dma_free_coherent(dev, adapter->rx_queue.queue_len,
+			  adapter->rx_queue.queue_addr,
+			  adapter->rx_queue.queue_dma);
+out_free_filter_list:
+	free_page((unsigned long)adapter->filter_list_addr);
+out_free_buffer_list:
+	free_page((unsigned long)adapter->buffer_list_addr);
+out:
 	napi_disable(&adapter->napi);
 	return rc;
 }
@@ -698,7 +670,9 @@ static int ibmveth_open(struct net_device *netdev)
 static int ibmveth_close(struct net_device *netdev)
 {
 	struct ibmveth_adapter *adapter = netdev_priv(netdev);
+	struct device *dev = &adapter->vdev->dev;
 	long lpar_rc;
+	int i;
 
 	netdev_dbg(netdev, "close starting\n");
 
@@ -722,7 +696,27 @@ static int ibmveth_close(struct net_device *netdev)
 
 	ibmveth_update_rx_no_buffer(adapter);
 
-	ibmveth_cleanup(adapter);
+	dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
+			 DMA_BIDIRECTIONAL);
+	free_page((unsigned long)adapter->buffer_list_addr);
+
+	dma_unmap_single(dev, adapter->filter_list_dma, 4096,
+			 DMA_BIDIRECTIONAL);
+	free_page((unsigned long)adapter->filter_list_addr);
+
+	dma_free_coherent(dev, adapter->rx_queue.queue_len,
+			  adapter->rx_queue.queue_addr,
+			  adapter->rx_queue.queue_dma);
+
+	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
+		if (adapter->rx_buff_pool[i].active)
+			ibmveth_free_buffer_pool(adapter,
+						 &adapter->rx_buff_pool[i]);
+
+	dma_unmap_single(&adapter->vdev->dev, adapter->bounce_buffer_dma,
+			 adapter->netdev->mtu + IBMVETH_BUFF_OH,
+			 DMA_BIDIRECTIONAL);
+	kfree(adapter->bounce_buffer);
 
 	netdev_dbg(netdev, "close complete\n");
 
@@ -1648,11 +1642,6 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
 	}
 
 	netdev_dbg(netdev, "adapter @ 0x%p\n", adapter);
-
-	adapter->buffer_list_dma = DMA_ERROR_CODE;
-	adapter->filter_list_dma = DMA_ERROR_CODE;
-	adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
-
 	netdev_dbg(netdev, "registering netdev...\n");
 
 	ibmveth_set_features(netdev, netdev->features);
    --
    2.11.0