Subject: [PATCH 4.14 17/77] intel_th: msu: Fix single mode with IOMMU
    From: Alexander Shishkin <alexander.shishkin@linux.intel.com>

    commit 4e0eaf239fb33ebc671303e2b736fa043462e2f4 upstream.

Currently, the pages that are allocated for the single mode of MSC are not
mapped into the device's DMA space, and the code incorrectly uses
*_to_phys() in place of a DMA address. This fails when an IOMMU is enabled
and is otherwise bad practice.
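
For illustration only (this is a simplified sketch, not an excerpt of the
driver; the helper name and signature are made up), the old single-mode
allocation boils down to handing the hardware a CPU physical address:

static int single_buf_alloc_nodma(size_t size, void **base,
				  phys_addr_t *hw_addr)
{
	/*
	 * Pre-patch pattern: the buffer's CPU physical address is used as
	 * the hardware write address.  That only works when DMA and
	 * physical addresses happen to coincide; with an IOMMU translating
	 * the device's accesses it points at the wrong (or an unmapped)
	 * location.  Needs <linux/gfp.h>, <linux/io.h>, <linux/mm.h>.
	 */
	struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(size));

	if (!page)
		return -ENOMEM;

	*base = page_address(page);
	*hw_addr = page_to_phys(page);	/* physical address, not a dma_addr_t */

	return 0;
}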

    Fix the single mode buffer allocation to map the pages into the device's
    DMA space.
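
Below is a minimal sketch of the corrected approach, roughly what the hunks
that follow do (standalone and simplified; `dev` is assumed to be the
DMA-capable parent device, and the driver's split_page() bookkeeping and its
exact error-path ordering are omitted):

static int single_buf_alloc_dma(struct device *dev, struct sg_table *sgt,
				size_t size, void **base, dma_addr_t *hw_addr)
{
	/*
	 * Allocate a contiguous buffer and map it into the device's DMA
	 * space through a one-entry sg_table, then program the device with
	 * the address returned by the DMA API instead of the physical one.
	 * Needs <linux/dma-mapping.h>, <linux/gfp.h>, <linux/mm.h>,
	 * <linux/scatterlist.h>.
	 */
	unsigned int order = get_order(size);
	struct page *page;
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (ret)
		return ret;

	page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!page) {
		sg_free_table(sgt);
		return -ENOMEM;
	}

	sg_set_buf(sgt->sgl, page_address(page), size);

	/* dma_map_sg() returns the number of mapped entries; 0 means failure */
	if (dma_map_sg(dev, sgt->sgl, 1, DMA_FROM_DEVICE) != 1) {
		__free_pages(page, order);
		sg_free_table(sgt);
		return -ENOMEM;
	}

	*base = page_address(page);
	*hw_addr = sg_dma_address(sgt->sgl);	/* device-visible address */

	return 0;
}

Teardown mirrors this: dma_unmap_sg(), then sg_free_table(), then free the
pages, which is what the msc_buffer_contig_free() hunk below adds.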

    Signed-off-by: Alexander Shishkin <alexander.shishkin@linux.intel.com>
    Fixes: ba82664c134e ("intel_th: Add Memory Storage Unit driver")
    Cc: stable@vger.kernel.org # v4.4+
    Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

    ---
    drivers/hwtracing/intel_th/msu.c | 35 ++++++++++++++++++++++++++++++++---
    1 file changed, 32 insertions(+), 3 deletions(-)

    --- a/drivers/hwtracing/intel_th/msu.c
    +++ b/drivers/hwtracing/intel_th/msu.c
@@ -92,6 +92,7 @@ struct msc_iter {
  * @reg_base:	register window base address
  * @thdev:	intel_th_device pointer
  * @win_list:	list of windows in multiblock mode
+ * @single_sgt:	single mode buffer
  * @nr_pages:	total number of pages allocated for this buffer
  * @single_sz:	amount of data in single mode
  * @single_wrap:	single mode wrap occurred
@@ -112,6 +113,7 @@ struct msc {
 	struct intel_th_device	*thdev;
 
 	struct list_head	win_list;
+	struct sg_table		single_sgt;
 	unsigned long		nr_pages;
 	unsigned long		single_sz;
 	unsigned int		single_wrap : 1;
@@ -625,22 +627,45 @@ static void intel_th_msc_deactivate(stru
  */
 static int msc_buffer_contig_alloc(struct msc *msc, unsigned long size)
 {
+	unsigned long nr_pages = size >> PAGE_SHIFT;
 	unsigned int order = get_order(size);
 	struct page *page;
+	int ret;
 
 	if (!size)
 		return 0;
 
+	ret = sg_alloc_table(&msc->single_sgt, 1, GFP_KERNEL);
+	if (ret)
+		goto err_out;
+
+	ret = -ENOMEM;
 	page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
 	if (!page)
-		return -ENOMEM;
+		goto err_free_sgt;
 
 	split_page(page, order);
-	msc->nr_pages = size >> PAGE_SHIFT;
+	sg_set_buf(msc->single_sgt.sgl, page_address(page), size);
+
+	ret = dma_map_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl, 1,
+			 DMA_FROM_DEVICE);
+	if (ret < 0)
+		goto err_free_pages;
+
+	msc->nr_pages = nr_pages;
 	msc->base = page_address(page);
-	msc->base_addr = page_to_phys(page);
+	msc->base_addr = sg_dma_address(msc->single_sgt.sgl);
 
 	return 0;
+
+err_free_pages:
+	__free_pages(page, order);
+
+err_free_sgt:
+	sg_free_table(&msc->single_sgt);
+
+err_out:
+	return ret;
 }
 
 /**
@@ -651,6 +676,10 @@ static void msc_buffer_contig_free(struc
 {
 	unsigned long off;
 
+	dma_unmap_sg(msc_dev(msc)->parent->parent, msc->single_sgt.sgl,
+		     1, DMA_FROM_DEVICE);
+	sg_free_table(&msc->single_sgt);
+
 	for (off = 0; off < msc->nr_pages << PAGE_SHIFT; off += PAGE_SIZE) {
 		struct page *page = virt_to_page(msc->base + off);
 
