Subject: [135/244] drm/radeon/kms: Make GPU/CPU page size handling consistent in blit code (v2)
    3.0-stable review patch.  If anyone has any objections, please let us know.

    ------------------

    From: Alex Deucher <alexander.deucher@amd.com>

    commit 003cefe0c238e683a29d2207dba945b508cd45b7 upstream.

The BO blit code inconsistently handled the page size. This wasn't
an issue on systems with 4k pages since the GPU's page size is 4k as
well. Switch the driver blit callbacks to take num pages in GPU
page units.

    Fixes lemote mipsel systems using AMD rs780/rs880 chipsets.

    v2: incorporate suggestions from Michel.

    Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
    Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
    Signed-off-by: Dave Airlie <airlied@redhat.com>
    Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>

    ---
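
To make the unit bug concrete: the TTM move path hands the copy callbacks
new_mem->num_pages, which is counted in CPU pages, while the r600/evergreen
blit paths multiply that count by RADEON_GPU_PAGE_SIZE (4k), so on a kernel
with 16k CPU pages the blit was asked to copy only a quarter of each buffer.
The stand-alone sketch below is not part of the patch; CPU_PAGE_SIZE and
cpu_pages_to_gpu_pages are illustrative names, and 16k is only an assumed
CPU page size. It shows the conversion radeon_ttm.c now performs before
calling radeon_copy():

	/* Stand-alone illustration of the CPU-page -> GPU-page conversion. */
	#include <stdio.h>

	#define RADEON_GPU_PAGE_SIZE	4096	/* GPU page size, from radeon.h */
	#define CPU_PAGE_SIZE		16384	/* assumed CPU page size (stand-in for PAGE_SIZE) */

	/* Each CPU page covers CPU_PAGE_SIZE / RADEON_GPU_PAGE_SIZE GPU pages. */
	static unsigned cpu_pages_to_gpu_pages(unsigned num_cpu_pages)
	{
		return num_cpu_pages * (CPU_PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
	}

	int main(void)
	{
		/* 3 CPU pages -> 12 GPU pages; both describe the same 49152 bytes. */
		unsigned gpu_pages = cpu_pages_to_gpu_pages(3);

		printf("3 CPU pages -> %u GPU pages (%u bytes)\n",
		       gpu_pages, gpu_pages * RADEON_GPU_PAGE_SIZE);
		return 0;
	}
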
 drivers/gpu/drm/radeon/evergreen.c   |   10 ++++++----
 drivers/gpu/drm/radeon/r100.c        |   12 ++++++------
 drivers/gpu/drm/radeon/r200.c        |    4 ++--
 drivers/gpu/drm/radeon/r600.c        |   10 ++++++----
 drivers/gpu/drm/radeon/radeon.h      |    7 ++++---
 drivers/gpu/drm/radeon/radeon_asic.h |    8 ++++----
 drivers/gpu/drm/radeon/radeon_ttm.c  |    7 ++++++-
 7 files changed, 34 insertions(+), 24 deletions(-)

--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -3170,21 +3170,23 @@ int evergreen_suspend(struct radeon_devi
 }
 
 int evergreen_copy_blit(struct radeon_device *rdev,
-			uint64_t src_offset, uint64_t dst_offset,
-			unsigned num_pages, struct radeon_fence *fence)
+			uint64_t src_offset,
+			uint64_t dst_offset,
+			unsigned num_gpu_pages,
+			struct radeon_fence *fence)
 {
 	int r;
 
 	mutex_lock(&rdev->r600_blit.mutex);
 	rdev->r600_blit.vb_ib = NULL;
-	r = evergreen_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
+	r = evergreen_blit_prepare_copy(rdev, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
 	if (r) {
 		if (rdev->r600_blit.vb_ib)
 			radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
 		mutex_unlock(&rdev->r600_blit.mutex);
 		return r;
 	}
-	evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
+	evergreen_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
 	evergreen_blit_done_copy(rdev, fence);
 	mutex_unlock(&rdev->r600_blit.mutex);
 	return 0;
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -721,11 +721,11 @@ void r100_fence_ring_emit(struct radeon_
 int r100_copy_blit(struct radeon_device *rdev,
 		   uint64_t src_offset,
 		   uint64_t dst_offset,
-		   unsigned num_pages,
+		   unsigned num_gpu_pages,
 		   struct radeon_fence *fence)
 {
 	uint32_t cur_pages;
-	uint32_t stride_bytes = PAGE_SIZE;
+	uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;
 	uint32_t pitch;
 	uint32_t stride_pixels;
 	unsigned ndw;
@@ -737,7 +737,7 @@ int r100_copy_blit(struct radeon_device
 	/* radeon pitch is /64 */
 	pitch = stride_bytes / 64;
 	stride_pixels = stride_bytes / 4;
-	num_loops = DIV_ROUND_UP(num_pages, 8191);
+	num_loops = DIV_ROUND_UP(num_gpu_pages, 8191);
 
 	/* Ask for enough room for blit + flush + fence */
 	ndw = 64 + (10 * num_loops);
@@ -746,12 +746,12 @@ int r100_copy_blit(struct radeon_device
 		DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
 		return -EINVAL;
 	}
-	while (num_pages > 0) {
-		cur_pages = num_pages;
+	while (num_gpu_pages > 0) {
+		cur_pages = num_gpu_pages;
 		if (cur_pages > 8191) {
 			cur_pages = 8191;
 		}
-		num_pages -= cur_pages;
+		num_gpu_pages -= cur_pages;
 
 		/* pages are in Y direction - height
 		   page width in X direction - width */
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -84,7 +84,7 @@ static int r200_get_vtx_size_0(uint32_t
 int r200_copy_dma(struct radeon_device *rdev,
 		  uint64_t src_offset,
 		  uint64_t dst_offset,
-		  unsigned num_pages,
+		  unsigned num_gpu_pages,
 		  struct radeon_fence *fence)
 {
 	uint32_t size;
@@ -93,7 +93,7 @@ int r200_copy_dma(struct radeon_device *
 	int r = 0;
 
 	/* radeon pitch is /64 */
-	size = num_pages << PAGE_SHIFT;
+	size = num_gpu_pages << RADEON_GPU_PAGE_SHIFT;
 	num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
 	r = radeon_ring_lock(rdev, num_loops * 4 + 64);
 	if (r) {
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2355,21 +2355,23 @@ void r600_fence_ring_emit(struct radeon_
 }
 
 int r600_copy_blit(struct radeon_device *rdev,
-		   uint64_t src_offset, uint64_t dst_offset,
-		   unsigned num_pages, struct radeon_fence *fence)
+		   uint64_t src_offset,
+		   uint64_t dst_offset,
+		   unsigned num_gpu_pages,
+		   struct radeon_fence *fence)
 {
 	int r;
 
 	mutex_lock(&rdev->r600_blit.mutex);
 	rdev->r600_blit.vb_ib = NULL;
-	r = r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
+	r = r600_blit_prepare_copy(rdev, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
 	if (r) {
 		if (rdev->r600_blit.vb_ib)
 			radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
 		mutex_unlock(&rdev->r600_blit.mutex);
 		return r;
 	}
-	r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
+	r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages * RADEON_GPU_PAGE_SIZE);
 	r600_blit_done_copy(rdev, fence);
 	mutex_unlock(&rdev->r600_blit.mutex);
 	return 0;
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -322,6 +322,7 @@ union radeon_gart_table {
 
 #define RADEON_GPU_PAGE_SIZE 4096
 #define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1)
+#define RADEON_GPU_PAGE_SHIFT 12
 
 struct radeon_gart {
 	dma_addr_t			table_addr;
@@ -914,17 +915,17 @@ struct radeon_asic {
 	int (*copy_blit)(struct radeon_device *rdev,
 			 uint64_t src_offset,
 			 uint64_t dst_offset,
-			 unsigned num_pages,
+			 unsigned num_gpu_pages,
 			 struct radeon_fence *fence);
 	int (*copy_dma)(struct radeon_device *rdev,
 			uint64_t src_offset,
 			uint64_t dst_offset,
-			unsigned num_pages,
+			unsigned num_gpu_pages,
 			struct radeon_fence *fence);
 	int (*copy)(struct radeon_device *rdev,
 		    uint64_t src_offset,
 		    uint64_t dst_offset,
-		    unsigned num_pages,
+		    unsigned num_gpu_pages,
 		    struct radeon_fence *fence);
 	uint32_t (*get_engine_clock)(struct radeon_device *rdev);
 	void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -75,7 +75,7 @@ uint32_t r100_pll_rreg(struct radeon_dev
 int r100_copy_blit(struct radeon_device *rdev,
 		   uint64_t src_offset,
 		   uint64_t dst_offset,
-		   unsigned num_pages,
+		   unsigned num_gpu_pages,
 		   struct radeon_fence *fence);
 int r100_set_surface_reg(struct radeon_device *rdev, int reg,
 			 uint32_t tiling_flags, uint32_t pitch,
@@ -143,7 +143,7 @@ extern void r100_post_page_flip(struct r
 extern int r200_copy_dma(struct radeon_device *rdev,
 			 uint64_t src_offset,
 			 uint64_t dst_offset,
-			 unsigned num_pages,
+			 unsigned num_gpu_pages,
 			 struct radeon_fence *fence);
 void r200_set_safe_registers(struct radeon_device *rdev);
 
@@ -311,7 +311,7 @@ void r600_ring_ib_execute(struct radeon_
 int r600_ring_test(struct radeon_device *rdev);
 int r600_copy_blit(struct radeon_device *rdev,
 		   uint64_t src_offset, uint64_t dst_offset,
-		   unsigned num_pages, struct radeon_fence *fence);
+		   unsigned num_gpu_pages, struct radeon_fence *fence);
 void r600_hpd_init(struct radeon_device *rdev);
 void r600_hpd_fini(struct radeon_device *rdev);
 bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@ -403,7 +403,7 @@ void evergreen_bandwidth_update(struct r
 void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
 int evergreen_copy_blit(struct radeon_device *rdev,
 			uint64_t src_offset, uint64_t dst_offset,
-			unsigned num_pages, struct radeon_fence *fence);
+			unsigned num_gpu_pages, struct radeon_fence *fence);
 void evergreen_hpd_init(struct radeon_device *rdev);
 void evergreen_hpd_fini(struct radeon_device *rdev);
 bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -277,7 +277,12 @@ static int radeon_move_blit(struct ttm_b
 		DRM_ERROR("Trying to move memory with CP turned off.\n");
 		return -EINVAL;
 	}
-	r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence);
+
+	BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
+
+	r = radeon_copy(rdev, old_start, new_start,
+			new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
+			fence);
 	/* FIXME: handle copy error */
 	r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
 				      evict, no_wait_reserve, no_wait_gpu, new_mem);
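
A note on the BUILD_BUG_ON() in the hunk above: it is a compile-time
assertion that breaks the build when its condition is true, so it guarantees
that the CPU page size is an exact multiple of the GPU page size and the
PAGE_SIZE / RADEON_GPU_PAGE_SIZE factor cannot truncate. A rough user-space
equivalent, as a sketch only (CPU_PAGE_SIZE stands in for the kernel's
PAGE_SIZE and 16k is an assumed value):

	#include <stdio.h>

	#define CPU_PAGE_SIZE		16384	/* assumed CPU page size */
	#define RADEON_GPU_PAGE_SIZE	4096	/* GPU page size, from radeon.h */

	/* C11 compile-time check mirroring BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0). */
	_Static_assert((CPU_PAGE_SIZE % RADEON_GPU_PAGE_SIZE) == 0,
		       "CPU page size must be a multiple of the GPU page size");

	int main(void)
	{
		printf("GPU pages per CPU page: %d\n",
		       CPU_PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
		return 0;
	}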

    --
    To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
    the body of a message to majordomo@vger.kernel.org
    More majordomo info at http://vger.kernel.org/majordomo-info.html
    Please read the FAQ at http://www.tux.org/lkml/
