Subject: Re: [PATCH v9 12/15] drm/msm: Add deadline based boost support
From: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
On 03/03/2023 19:03, Rob Clark wrote:
> On Fri, Mar 3, 2023 at 2:10 AM Dmitry Baryshkov
> <dmitry.baryshkov@linaro.org> wrote:
>>
>> On 03/03/2023 01:53, Rob Clark wrote:
>>> From: Rob Clark <robdclark@chromium.org>
>>>
>>> Track the nearest deadline on a fence timeline and set a timer to expire
>>> shortly before to trigger boost if the fence has not yet been signaled.
>>>
>>> v2: rebase
>>>
>>> Signed-off-by: Rob Clark <robdclark@chromium.org>
>>> ---
>>> drivers/gpu/drm/msm/msm_fence.c | 74 +++++++++++++++++++++++++++++++++
>>> drivers/gpu/drm/msm/msm_fence.h | 20 +++++++++
>>> 2 files changed, 94 insertions(+)
>>
>> Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
>>
>> A small question: do we boost to fit into the deadline or to miss the
>> deadline for as little as possible? If the former is the case, we might
>> need to adjust 3ms depending on the workload.
>
> The goal is as much to run with higher clock on the next frame as it
> is to not miss a deadline. Ie. we don't want devfreq to come to the
> conclusion that running at <50% clks is best due to the amount of
> utilization caused by missing every other vblank.

Ack, thanks for the explanation.

>
> But 3ms is mostly just a "seems like a good compromise" value. It might change.
>
> BR,
> -R
>
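To make the intended flow concrete, here is a minimal sketch of how this hook
ends up being exercised, assuming the dma_fence_set_deadline() entry point
added earlier in this series; the wrapper function and its arguments below are
hypothetical and not part of this patch. A display-side caller hands the fence
the time of the upcoming vblank, and the core dispatches through the new
.set_deadline op to msm_fence_set_deadline():

#include <linux/dma-fence.h>
#include <linux/ktime.h>

/*
 * Hypothetical caller: pass the upcoming vblank time as the deadline.
 * If the fence is still unsignaled ~3ms before that point, the msm
 * implementation below queues a devfreq boost from its hrtimer.
 */
static void example_set_vblank_deadline(struct dma_fence *in_fence,
                                        ktime_t vblank_time)
{
        dma_fence_set_deadline(in_fence, vblank_time);
}

In the rest of the series, the atomic helper's vblank wait is the sort of
caller this is aimed at.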
>>>
>>> diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c
>>> index 56641408ea74..51b461f32103 100644
>>> --- a/drivers/gpu/drm/msm/msm_fence.c
>>> +++ b/drivers/gpu/drm/msm/msm_fence.c
>>> @@ -8,6 +8,35 @@
>>>
>>> #include "msm_drv.h"
>>> #include "msm_fence.h"
>>> +#include "msm_gpu.h"
>>> +
>>> +static struct msm_gpu *fctx2gpu(struct msm_fence_context *fctx)
>>> +{
>>> +        struct msm_drm_private *priv = fctx->dev->dev_private;
>>> +        return priv->gpu;
>>> +}
>>> +
>>> +static enum hrtimer_restart deadline_timer(struct hrtimer *t)
>>> +{
>>> +        struct msm_fence_context *fctx = container_of(t,
>>> +                        struct msm_fence_context, deadline_timer);
>>> +
>>> +        kthread_queue_work(fctx2gpu(fctx)->worker, &fctx->deadline_work);
>>> +
>>> +        return HRTIMER_NORESTART;
>>> +}
>>> +
>>> +static void deadline_work(struct kthread_work *work)
>>> +{
>>> +        struct msm_fence_context *fctx = container_of(work,
>>> +                        struct msm_fence_context, deadline_work);
>>> +
>>> +        /* If deadline fence has already passed, nothing to do: */
>>> +        if (msm_fence_completed(fctx, fctx->next_deadline_fence))
>>> +                return;
>>> +
>>> +        msm_devfreq_boost(fctx2gpu(fctx), 2);
>>> +}
>>>
>>>
>>> struct msm_fence_context *
>>> @@ -36,6 +65,13 @@ msm_fence_context_alloc(struct drm_device *dev, volatile uint32_t *fenceptr,
>>>         fctx->completed_fence = fctx->last_fence;
>>>         *fctx->fenceptr = fctx->last_fence;
>>>
>>> +        hrtimer_init(&fctx->deadline_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
>>> +        fctx->deadline_timer.function = deadline_timer;
>>> +
>>> +        kthread_init_work(&fctx->deadline_work, deadline_work);
>>> +
>>> +        fctx->next_deadline = ktime_get();
>>> +
>>>         return fctx;
>>> }
>>>
>>> @@ -62,6 +98,8 @@ void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence)
>>>         spin_lock_irqsave(&fctx->spinlock, flags);
>>>         if (fence_after(fence, fctx->completed_fence))
>>>                 fctx->completed_fence = fence;
>>> +        if (msm_fence_completed(fctx, fctx->next_deadline_fence))
>>> +                hrtimer_cancel(&fctx->deadline_timer);
>>>         spin_unlock_irqrestore(&fctx->spinlock, flags);
>>> }
>>>
>>> @@ -92,10 +130,46 @@ static bool msm_fence_signaled(struct dma_fence *fence)
>>> return msm_fence_completed(f->fctx, f->base.seqno);
>>> }
>>>
>>> +static void msm_fence_set_deadline(struct dma_fence *fence, ktime_t deadline)
>>> +{
>>> +        struct msm_fence *f = to_msm_fence(fence);
>>> +        struct msm_fence_context *fctx = f->fctx;
>>> +        unsigned long flags;
>>> +        ktime_t now;
>>> +
>>> +        spin_lock_irqsave(&fctx->spinlock, flags);
>>> +        now = ktime_get();
>>> +
>>> +        if (ktime_after(now, fctx->next_deadline) ||
>>> +                        ktime_before(deadline, fctx->next_deadline)) {
>>> +                fctx->next_deadline = deadline;
>>> +                fctx->next_deadline_fence =
>>> +                        max(fctx->next_deadline_fence, (uint32_t)fence->seqno);
>>> +
>>> +                /*
>>> +                 * Set timer to trigger boost 3ms before deadline, or
>>> +                 * if we are already less than 3ms before the deadline
>>> +                 * schedule boost work immediately.
>>> +                 */
>>> +                deadline = ktime_sub(deadline, ms_to_ktime(3));
>>> +
>>> +                if (ktime_after(now, deadline)) {
>>> +                        kthread_queue_work(fctx2gpu(fctx)->worker,
>>> +                                        &fctx->deadline_work);
>>> +                } else {
>>> +                        hrtimer_start(&fctx->deadline_timer, deadline,
>>> +                                        HRTIMER_MODE_ABS);
>>> +                }
>>> +        }
>>> +
>>> +        spin_unlock_irqrestore(&fctx->spinlock, flags);
>>> +}
>>> +
>>> static const struct dma_fence_ops msm_fence_ops = {
>>>         .get_driver_name = msm_fence_get_driver_name,
>>>         .get_timeline_name = msm_fence_get_timeline_name,
>>>         .signaled = msm_fence_signaled,
>>> +        .set_deadline = msm_fence_set_deadline,
>>> };
>>>
>>> struct dma_fence *
>>> diff --git a/drivers/gpu/drm/msm/msm_fence.h b/drivers/gpu/drm/msm/msm_fence.h
>>> index 7f1798c54cd1..cdaebfb94f5c 100644
>>> --- a/drivers/gpu/drm/msm/msm_fence.h
>>> +++ b/drivers/gpu/drm/msm/msm_fence.h
>>> @@ -52,6 +52,26 @@ struct msm_fence_context {
>>>         volatile uint32_t *fenceptr;
>>>
>>>         spinlock_t spinlock;
>>> +
>>> +        /*
>>> +         * TODO this doesn't really deal with multiple deadlines, like
>>> +         * if userspace got multiple frames ahead.. OTOH atomic updates
>>> +         * don't queue, so maybe that is ok
>>> +         */
>>> +
>>> +        /** next_deadline: Time of next deadline */
>>> +        ktime_t next_deadline;
>>> +
>>> +        /**
>>> +         * next_deadline_fence:
>>> +         *
>>> +         * Fence value for next pending deadline. The deadline timer is
>>> +         * canceled when this fence is signaled.
>>> +         */
>>> +        uint32_t next_deadline_fence;
>>> +
>>> +        struct hrtimer deadline_timer;
>>> +        struct kthread_work deadline_work;
>>> };
>>>
>>> struct msm_fence_context * msm_fence_context_alloc(struct drm_device *dev,
>>
>> --
>> With best wishes
>> Dmitry
>>

--
With best wishes
Dmitry
