Subject: [PATCH 5.4 057/434] drm/ttm: return -EBUSY on pipelining with no_gpu_wait (v2)
From: Christian König <christian.koenig@amd.com>

[ Upstream commit 3084cf46cf8110826a42de8c8ef30e8fa48974c2 ]

Setting the no_wait_gpu flag means that the allocated BO must be available
immediately and we can't wait for any GPU operation to finish.
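
For illustration only (not part of the upstream commit message): a minimal
caller sketch, assuming a TTM user that builds its own struct
ttm_operation_ctx; the helper try_validate_nowait() is hypothetical.

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>

/*
 * Hypothetical caller sketch: with no_wait_gpu set, a placement whose
 * memory manager still has a move fence pending is skipped, and
 * validation fails rather than pipelining behind the GPU.
 */
static int try_validate_nowait(struct ttm_buffer_object *bo,
			       struct ttm_placement *placement)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = true,	/* BO must be placeable immediately */
	};

	return ttm_bo_validate(bo, placement, &ctx);
}

Callers that are willing to wait as before simply leave no_wait_gpu clear.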

v2: squash in mem leak fix, rebase

Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 drivers/gpu/drm/ttm/ttm_bo.c | 44 +++++++++++++++++++++---------------
 1 file changed, 26 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 98819462f025..f07803699809 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -926,7 +926,8 @@ EXPORT_SYMBOL(ttm_bo_mem_put);
  */
 static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
 				 struct ttm_mem_type_manager *man,
-				 struct ttm_mem_reg *mem)
+				 struct ttm_mem_reg *mem,
+				 bool no_wait_gpu)
 {
 	struct dma_fence *fence;
 	int ret;
@@ -935,19 +936,22 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
 	fence = dma_fence_get(man->move);
 	spin_unlock(&man->move_lock);
 
-	if (fence) {
-		dma_resv_add_shared_fence(bo->base.resv, fence);
+	if (!fence)
+		return 0;
 
-		ret = dma_resv_reserve_shared(bo->base.resv, 1);
-		if (unlikely(ret)) {
-			dma_fence_put(fence);
-			return ret;
-		}
+	if (no_wait_gpu)
+		return -EBUSY;
+
+	dma_resv_add_shared_fence(bo->base.resv, fence);
 
-		dma_fence_put(bo->moving);
-		bo->moving = fence;
+	ret = dma_resv_reserve_shared(bo->base.resv, 1);
+	if (unlikely(ret)) {
+		dma_fence_put(fence);
+		return ret;
 	}
 
+	dma_fence_put(bo->moving);
+	bo->moving = fence;
 	return 0;
 }
 
@@ -978,7 +982,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 			return ret;
 	} while (1);
 
-	return ttm_bo_add_move_fence(bo, man, mem);
+	return ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
 }
 
 static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
@@ -1120,14 +1124,18 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		if (unlikely(ret))
 			goto error;
 
-		if (mem->mm_node) {
-			ret = ttm_bo_add_move_fence(bo, man, mem);
-			if (unlikely(ret)) {
-				(*man->func->put_node)(man, mem);
-				goto error;
-			}
-			return 0;
+		if (!mem->mm_node)
+			continue;
+
+		ret = ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
+		if (unlikely(ret)) {
+			(*man->func->put_node)(man, mem);
+			if (ret == -EBUSY)
+				continue;
+
+			goto error;
 		}
+		return 0;
 	}
 
 	for (i = 0; i < placement->num_busy_placement; ++i) {
--
2.20.1

