From: Christian König <christian.koenig@amd.com>
Subject: [PATCH 2/6] drm/ttm: use new ww_mutex_(un)lock_for_each macros
Date: Fri, 14 Jun 2019
Use the provided macros instead of implementing deadlock handling on our own.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
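For reference, the open-coded deadlock handling that this patch removes is the usual wound/wait acquire-backoff-retry cycle described in Documentation/locking/ww-mutex-design.rst. A minimal sketch of that pattern follows; the obj_entry type and ww_class are illustrative placeholders, not TTM code:

#include <linux/list.h>
#include <linux/ww_mutex.h>

struct obj_entry {
	struct ww_mutex lock;
	struct list_head head;
};

static DEFINE_WW_CLASS(ww_class);

static int lock_objs(struct list_head *list, struct ww_acquire_ctx *ctx)
{
	struct obj_entry *contended = NULL, *failed, *entry;
	int ret;

	ww_acquire_init(ctx, &ww_class);
retry:
	list_for_each_entry(entry, list, head) {
		if (entry == contended) {
			/* already taken via ww_mutex_lock_slow() below */
			contended = NULL;
			continue;
		}
		ret = ww_mutex_lock(&entry->lock, ctx);
		if (ret) {
			/* typically -EDEADLK: we lost the acquire race */
			failed = entry;
			goto err;
		}
	}
	ww_acquire_done(ctx);
	return 0;

err:
	/* back off: release everything taken so far, in reverse order */
	list_for_each_entry_continue_reverse(entry, list, head)
		ww_mutex_unlock(&entry->lock);
	if (contended)
		ww_mutex_unlock(&contended->lock);

	if (ret == -EDEADLK) {
		/* sleep on the contended lock, then restart the whole pass */
		ww_mutex_lock_slow(&failed->lock, ctx);
		contended = failed;
		goto retry;
	}
	ww_acquire_fini(ctx);
	return ret;
}

Every ww_mutex user ends up duplicating some variant of this boilerplate; the macros from patch 1/6 pull the backoff and slow-path relock out of the callers.
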
drivers/gpu/drm/ttm/ttm_execbuf_util.c | 87 +++++++++-----------------
1 file changed, 30 insertions(+), 57 deletions(-)
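
The ww_mutex_(un)lock_for_each macros themselves come from patch 1/6 of this series and are not part of this mail; the description here is inferred from their use below. The lock macro wraps a caller-supplied iterator statement, takes each lock interruptibly or not depending on intr, handles the -EDEADLK backoff and slow-path relock internally through the contended cursor, and hands each element's lock result to the loop body in ret (including -EALREADY for duplicates, which the dups handling below relies on). A hypothetical minimal user, mirroring the call shape in the diff:

	struct ww_mutex *contended;
	int ret;

	ww_mutex_lock_for_each(list_for_each_entry(entry, list, head),
			       &entry->lock, contended, ret, intr, ctx)
	{
		if (unlikely(ret))
			goto error;	/* e.g. -EINTR from an interruptible lock */
		/* entry->lock is held here; per-object setup goes here */
	}
	return 0;

error:
	ww_mutex_unlock_for_each(list_for_each_entry(entry, list, head),
				 &entry->lock, contended);
	return ret;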

diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 957ec375a4ba..3c3ac6c94d7f 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -33,16 +33,6 @@
 #include <linux/sched.h>
 #include <linux/module.h>
 
-static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
-					       struct ttm_validate_buffer *entry)
-{
-	list_for_each_entry_continue_reverse(entry, list, head) {
-		struct ttm_buffer_object *bo = entry->bo;
-
-		reservation_object_unlock(bo->resv);
-	}
-}
-
 static void ttm_eu_del_from_lru_locked(struct list_head *list)
 {
 	struct ttm_validate_buffer *entry;
@@ -96,8 +86,9 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
 			   struct list_head *list, bool intr,
 			   struct list_head *dups, bool del_lru)
 {
-	struct ttm_bo_global *glob;
 	struct ttm_validate_buffer *entry;
+	struct ww_mutex *contended;
+	struct ttm_bo_global *glob;
 	int ret;
 
 	if (list_empty(list))
@@ -109,68 +100,39 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
 	if (ticket)
 		ww_acquire_init(ticket, &reservation_ww_class);
 
-	list_for_each_entry(entry, list, head) {
+	ww_mutex_lock_for_each(list_for_each_entry(entry, list, head),
+			       &entry->bo->resv->lock, contended, ret,
+			       intr, ticket)
+	{
 		struct ttm_buffer_object *bo = entry->bo;
 
-		ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
 		if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) {
 			reservation_object_unlock(bo->resv);
-
 			ret = -EBUSY;
+			goto error;
+		}
 
-		} else if (ret == -EALREADY && dups) {
+		if (ret == -EALREADY && dups) {
 			struct ttm_validate_buffer *safe = entry;
+
 			entry = list_prev_entry(entry, head);
 			list_del(&safe->head);
 			list_add(&safe->head, dups);
 			continue;
 		}
 
-		if (!ret) {
-			if (!entry->num_shared)
-				continue;
-
-			ret = reservation_object_reserve_shared(bo->resv,
-								entry->num_shared);
-			if (!ret)
-				continue;
-		}
+		if (unlikely(ret))
+			goto error;
 
-		/* uh oh, we lost out, drop every reservation and try
-		 * to only reserve this buffer, then start over if
-		 * this succeeds.
-		 */
-		ttm_eu_backoff_reservation_reverse(list, entry);
-
-		if (ret == -EDEADLK) {
-			if (intr) {
-				ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
-								       ticket);
-			} else {
-				ww_mutex_lock_slow(&bo->resv->lock, ticket);
-				ret = 0;
-			}
-		}
+		if (!entry->num_shared)
+			continue;
 
-		if (!ret && entry->num_shared)
-			ret = reservation_object_reserve_shared(bo->resv,
-								entry->num_shared);
-
-		if (unlikely(ret != 0)) {
-			if (ret == -EINTR)
-				ret = -ERESTARTSYS;
-			if (ticket) {
-				ww_acquire_done(ticket);
-				ww_acquire_fini(ticket);
-			}
-			return ret;
+		ret = reservation_object_reserve_shared(bo->resv,
							entry->num_shared);
+		if (unlikely(ret)) {
+			reservation_object_unlock(bo->resv);
+			goto error;
 		}
-
-		/* move this item to the front of the list,
-		 * forces correct iteration of the loop without keeping track
-		 */
-		list_del(&entry->head);
-		list_add(&entry->head, list);
 	}
 
 	if (del_lru) {
@@ -179,6 +141,17 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
 		spin_unlock(&glob->lru_lock);
 	}
 	return 0;
+
+error:
+	ww_mutex_unlock_for_each(list_for_each_entry(entry, list, head),
+				 &entry->bo->resv->lock, contended);
+	if (ret == -EINTR)
+		ret = -ERESTARTSYS;
+	if (ticket) {
+		ww_acquire_done(ticket);
+		ww_acquire_fini(ticket);
+	}
+	return ret;
 }
 EXPORT_SYMBOL(ttm_eu_reserve_buffers);

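For completeness, a hypothetical caller of the reworked function, based only on the signature visible above; validate_list setup and the actual validation step are elided:

	struct ww_acquire_ctx ticket;
	LIST_HEAD(dups);
	int ret;

	/* interruptible reservation: duplicates get moved to "dups",
	 * successfully reserved BOs are deleted from the LRU
	 */
	ret = ttm_eu_reserve_buffers(&ticket, &validate_list, true, &dups, true);
	if (ret)
		return ret;

	/* ... validate buffers, build and submit the command stream ... */

	ttm_eu_backoff_reservation(&ticket, &validate_list);

ttm_eu_backoff_reservation() is the existing TTM helper that drops the reservations again and finishes the acquire context; it is not touched by the hunks above.
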
--
2.17.1