    Subject: [PATCH v2 02/11] soc/fsl/qbman: Use shared-dma-pool for QMan private memory allocations
    Use the shared-dma-pool mechanism for the frame queue descriptor (FQD) and
    packed frame descriptor record (PFDR) area allocations.

    Signed-off-by: Roy Pledge <roy.pledge@nxp.com>
    ---
    drivers/soc/fsl/qbman/qman_ccsr.c | 136 +++++++++++++++++++++++++++++---------
    drivers/soc/fsl/qbman/qman_priv.h | 4 +-
    drivers/soc/fsl/qbman/qman_test.h | 2 -
    3 files changed, 107 insertions(+), 35 deletions(-)
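
    For context, the reservations that the new non-PPC probe path consumes could be
    described roughly as in the device-tree sketch below. This fragment is purely
    illustrative and is not part of the patch: the node labels, sizes, alignments and
    the qman unit address are placeholder values, and the real reservations belong in
    the SoC/board dts per the QMan binding. The two memory-region entries are listed
    in FQD-then-PFDR order to match the indices used in fsl_qman_probe().

        reserved-memory {
            #address-cells = <2>;
            #size-cells = <2>;
            ranges;

            qman_fqd: qman-fqd {            /* index 0: FQD area */
                compatible = "shared-dma-pool";
                size = <0 0x400000>;
                alignment = <0 0x400000>;
                no-map;
            };

            qman_pfdr: qman-pfdr {          /* index 1: PFDR area */
                compatible = "shared-dma-pool";
                size = <0 0x2000000>;
                alignment = <0 0x2000000>;
                no-map;
            };
        };

        qman: qman@1880000 {
            /* ... reg, interrupts, etc. ... */
            memory-region = <&qman_fqd &qman_pfdr>;
        };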

    diff --git a/drivers/soc/fsl/qbman/qman_ccsr.c b/drivers/soc/fsl/qbman/qman_ccsr.c
    index 90bc40c..39ad7c4 100644
    --- a/drivers/soc/fsl/qbman/qman_ccsr.c
    +++ b/drivers/soc/fsl/qbman/qman_ccsr.c
    @@ -401,21 +401,42 @@ static int qm_init_pfdr(struct device *dev, u32 pfdr_start, u32 num)
    }

    /*
    - * Ideally we would use the DMA API to turn rmem->base into a DMA address
    - * (especially if iommu translations ever get involved). Unfortunately, the
    - * DMA API currently does not allow mapping anything that is not backed with
    - * a struct page.
    + * QMan needs two global memory areas initialized at boot time:
    + * 1) FQD: Frame Queue Descriptors used to manage frame queues
    + * 2) PFDR: Packed Frame Descriptor Records used to store frames
    + * Both areas are reserved using the device tree reserved memory framework
    + * and the addresses and sizes are initialized when the QMan device is probed
    */
    static dma_addr_t fqd_a, pfdr_a;
    static size_t fqd_sz, pfdr_sz;

    +#ifdef CONFIG_PPC
    +/*
    + * Support for PPC Device Tree backward compatibility when the compatible
    + * string is set to fsl,qman-fqd or fsl,qman-pfdr
    + */
    +static int zero_priv_mem(phys_addr_t addr, size_t sz)
    +{
    + /* map as cacheable, non-guarded */
    + void __iomem *tmpp = ioremap_prot(addr, sz, 0);
    +
    + if (!tmpp)
    + return -ENOMEM;
    +
    + memset_io(tmpp, 0, sz);
    + flush_dcache_range((unsigned long)tmpp,
    + (unsigned long)tmpp + sz);
    + iounmap(tmpp);
    +
    + return 0;
    +}
    +
    static int qman_fqd(struct reserved_mem *rmem)
    {
    fqd_a = rmem->base;
    fqd_sz = rmem->size;

    WARN_ON(!(fqd_a && fqd_sz));
    -
    return 0;
    }
    RESERVEDMEM_OF_DECLARE(qman_fqd, "fsl,qman-fqd", qman_fqd);
    @@ -431,32 +452,13 @@ static int qman_pfdr(struct reserved_mem *rmem)
    }
    RESERVEDMEM_OF_DECLARE(qman_pfdr, "fsl,qman-pfdr", qman_pfdr);

    +#endif
    +
    static unsigned int qm_get_fqid_maxcnt(void)
    {
    return fqd_sz / 64;
    }

    -/*
    - * Flush this memory range from data cache so that QMAN originated
    - * transactions for this memory region could be marked non-coherent.
    - */
    -static int zero_priv_mem(struct device *dev, struct device_node *node,
    - phys_addr_t addr, size_t sz)
    -{
    - /* map as cacheable, non-guarded */
    - void __iomem *tmpp = ioremap_prot(addr, sz, 0);
    -
    - if (!tmpp)
    - return -ENOMEM;
    -
    - memset_io(tmpp, 0, sz);
    - flush_dcache_range((unsigned long)tmpp,
    - (unsigned long)tmpp + sz);
    - iounmap(tmpp);
    -
    - return 0;
    -}
    -
    static void log_edata_bits(struct device *dev, u32 bit_count)
    {
    u32 i, j, mask = 0xffffffff;
    @@ -687,11 +689,12 @@ static int qman_resource_init(struct device *dev)
    static int fsl_qman_probe(struct platform_device *pdev)
    {
    struct device *dev = &pdev->dev;
    - struct device_node *node = dev->of_node;
    + struct device_node *mem_node, *node = dev->of_node;
    struct resource *res;
    int ret, err_irq;
    u16 id;
    u8 major, minor;
    + u64 size;

    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    if (!res) {
    @@ -727,10 +730,81 @@ static int fsl_qman_probe(struct platform_device *pdev)
    qm_channel_caam = QMAN_CHANNEL_CAAM_REV3;
    }

    - ret = zero_priv_mem(dev, node, fqd_a, fqd_sz);
    - WARN_ON(ret);
    - if (ret)
    - return -ENODEV;
    + if (fqd_a) {
    +#ifdef CONFIG_PPC
    + /*
    + * For PPC backward DT compatibility,
    + * FQD memory MUST be zeroed by software
    + */
    + zero_priv_mem(fqd_a, fqd_sz);
    +#endif
    + } else {
    + /*
    + * The memory-region order is assumed to be FQD followed by PFDR;
    + * to ensure allocations come from the correct regions, the driver
    + * initializes and then allocates each area in that order
    + */
    + ret = of_reserved_mem_device_init_by_idx(dev, dev->of_node, 0);
    + if (ret) {
    + dev_err(dev, "of_reserved_mem_device_init_by_idx(0) failed 0x%x\n",
    + ret);
    + return -ENODEV;
    + }
    + mem_node = of_parse_phandle(dev->of_node, "memory-region", 0);
    + if (mem_node) {
    + ret = of_property_read_u64(mem_node, "size", &size);
    + if (ret) {
    + dev_err(dev, "FQD: of_address_to_resource fails 0x%x\n",
    + ret);
    + return -ENODEV;
    + }
    + fqd_sz = size;
    + } else {
    + dev_err(dev, "No memory-region found for FQD\n");
    + return -ENODEV;
    + }
    + if (!dma_zalloc_coherent(dev, fqd_sz, &fqd_a, 0)) {
    + dev_err(dev, "Alloc FQD memory failed\n");
    + return -ENODEV;
    + }
    +
    + /*
    + * Disassociate the FQD reserved memory area from the device
    + * because a device can only have one DMA memory area. This
    + * should be fine since the memory is allocated and initialized
    + * and only ever accessed by the QMan device from now on
    + */
    + of_reserved_mem_device_release(dev);
    + }
    + dev_info(dev, "Allocated FQD 0x%llx 0x%zx\n", fqd_a, fqd_sz);
    +
    + if (!pfdr_a) {
    + /* Setup PFDR memory */
    + ret = of_reserved_mem_device_init_by_idx(dev, dev->of_node, 1);
    + if (ret) {
    + dev_err(dev, "of_reserved_mem_device_init(1) failed 0x%x\n",
    + ret);
    + return -ENODEV;
    + }
    + mem_node = of_parse_phandle(dev->of_node, "memory-region", 1);
    + if (mem_node) {
    + ret = of_property_read_u64(mem_node, "size", &size);
    + if (ret) {
    + dev_err(dev, "PFDR: of_address_to_resource fails 0x%x\n",
    + ret);
    + return -ENODEV;
    + }
    + pfdr_sz = size;
    + } else {
    + dev_err(dev, "No memory-region found for PFDR\n");
    + return -ENODEV;
    + }
    + if (!dma_zalloc_coherent(dev, pfdr_sz, &pfdr_a, 0)) {
    + dev_err(dev, "Alloc PFDR Failed size 0x%zx\n", pfdr_sz);
    + return -ENODEV;
    + }
    + }
    + dev_info(dev, "Allocated PFDR 0x%llx 0x%zx\n", pfdr_a, pfdr_sz);

    ret = qman_init_ccsr(dev);
    if (ret) {
    diff --git a/drivers/soc/fsl/qbman/qman_priv.h b/drivers/soc/fsl/qbman/qman_priv.h
    index 22725bd..1e998ea5 100644
    --- a/drivers/soc/fsl/qbman/qman_priv.h
    +++ b/drivers/soc/fsl/qbman/qman_priv.h
    @@ -28,12 +28,12 @@
    * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
    */

    -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
    -
    #include "dpaa_sys.h"

    #include <soc/fsl/qman.h>
    #include <linux/iommu.h>
    +#include <linux/dma-contiguous.h>
    +#include <linux/of_address.h>

    #if defined(CONFIG_FSL_PAMU)
    #include <asm/fsl_pamu_stash.h>
    diff --git a/drivers/soc/fsl/qbman/qman_test.h b/drivers/soc/fsl/qbman/qman_test.h
    index d5f8cb2..41bdbc48 100644
    --- a/drivers/soc/fsl/qbman/qman_test.h
    +++ b/drivers/soc/fsl/qbman/qman_test.h
    @@ -30,7 +30,5 @@

    #include "qman_priv.h"

    -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
    -
    int qman_test_stash(void);
    int qman_test_api(void);
    --
    2.7.4