Subject: Re: Why can't I set the priority of softirq-hrt? (Re: 2.6.17-rt1)
From: Esben Nielsen
Date: Wed, 21 Jun 2006
    On Wed, 21 Jun 2006, Esben Nielsen wrote:

    >
    >
    > On Wed, 21 Jun 2006, Steven Rostedt wrote:
    >
    >>
    >> On Tue, 20 Jun 2006, Esben Nielsen wrote:
    >>
    >> >
>> > > I have to check whether the priority is propagated when the softirq
>> > > is blocked on a lock. If not, it's a bug and has to be fixed.
    >> >
    >> > I think the simplest solution would be to add
    >> >
>> > 	if (p->blocked_on)
>> > 		wake_up_process(p);
    >> >
    >> > in __setscheduler().
    >> >
    >>
>> Except that wake_up_process() calls try_to_wake_up(), which grabs the
>> runqueue lock; unfortunately that lock is already held when
>> __setscheduler() is called.
    >>
    >
> Yeah, I saw. Move it out into setscheduler() then. I'll try to fix it,
> but I am not sure I can test whether it works.

What about the patch below? It compiles and my UP laptop runs fine, but I
haven't otherwise tested it. My laptop has been running RTExec without
hiccups so far.
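
In short, the setscheduler() side does the wakeup only after the runqueue
lock has been dropped, so that try_to_wake_up() can retake it without
recursing. Roughly (just a sketch, using the same names as in the patch
below):

	/* tail of setscheduler(), after __setscheduler() has run */
	task_rq_unlock(rq, &flags);		/* drop the runqueue lock first */

	if (p->pi_blocked_on) {			/* blocked on an rt-mutex? */
		if (p->pi_blocked_on->save_state)
			wake_up_process_mutex(p);	/* rt "spinlock" waiter */
		else
			wake_up_process(p);		/* ordinary rt-mutex waiter */
	}

	spin_unlock_irqrestore(&p->pi_lock, flags);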

    Esben

    Index: linux-2.6.17-rt1/kernel/rtmutex.c
    ===================================================================
    --- linux-2.6.17-rt1.orig/kernel/rtmutex.c
    +++ linux-2.6.17-rt1/kernel/rtmutex.c
@@ -625,6 +625,7 @@ rt_lock_slowlock(struct rt_mutex *lock _
 
 	debug_rt_mutex_init_waiter(&waiter);
 	waiter.task = NULL;
+	waiter.save_state = 1;
 
 	spin_lock(&lock->wait_lock);

@@ -687,6 +688,19 @@ rt_lock_slowlock(struct rt_mutex *lock _
 		state = xchg(&current->state, TASK_UNINTERRUPTIBLE);
 		if (unlikely(state == TASK_RUNNING))
 			saved_state = TASK_RUNNING;
+
+		if (unlikely(waiter.task) &&
+		    waiter.list_entry.prio != current->prio) {
+			/*
+			 * We still don't have the lock, but we were woken up
+			 * with a different prio than the one we originally
+			 * waited with. We remove the wait entry now and then
+			 * reinsert ourselves with the right priority.
+			 */
+			remove_waiter(lock, &waiter __IP__);
+			waiter.task = NULL;
+		}
+
 	}
 
 	state = xchg(&current->state, saved_state);
@@ -798,6 +812,7 @@ rt_mutex_slowlock(struct rt_mutex *lock,
 
 	debug_rt_mutex_init_waiter(&waiter);
 	waiter.task = NULL;
+	waiter.save_state = 0;
 
 	spin_lock(&lock->wait_lock);

@@ -877,6 +892,18 @@ rt_mutex_slowlock(struct rt_mutex *lock,
 
 		current->flags |= saved_flags;
 		set_current_state(state);
+
+		if (unlikely(waiter.task) &&
+		    waiter.list_entry.prio != current->prio) {
+			/*
+			 * We still don't have the lock, but we were woken up
+			 * with a different prio than the one we originally
+			 * waited with. We remove the wait entry now and then
+			 * reinsert ourselves with the right priority.
+			 */
+			remove_waiter(lock, &waiter __IP__);
+			waiter.task = NULL;
+		}
 	}
 
 	set_current_state(TASK_RUNNING);
    Index: linux-2.6.17-rt1/kernel/sched.c
    ===================================================================
    --- linux-2.6.17-rt1.orig/kernel/sched.c
    +++ linux-2.6.17-rt1/kernel/sched.c
@@ -57,6 +57,8 @@
 
 #include <asm/unistd.h>
 
+#include "rtmutex_common.h"
+
 /*
  * Convert user-nice values [ -20 ... 0 ... 19 ]
  * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
@@ -646,7 +648,9 @@ static inline void sched_info_switch(tas
 #define sched_info_switch(t, next)	do { } while (0)
 #endif /* CONFIG_SCHEDSTATS */
 
+#ifdef CONFIG_SMP
 static __cacheline_aligned_in_smp atomic_t rt_overload;
+#endif
 
 static inline void inc_rt_tasks(task_t *p, runqueue_t *rq)
 {
@@ -1473,10 +1477,11 @@ static inline int wake_idle(int cpu, tas
 static int try_to_wake_up(task_t *p, unsigned int state, int sync, int mutex)
 {
 	int cpu, this_cpu, success = 0;
-	runqueue_t *this_rq, *rq;
+	runqueue_t *rq;
 	unsigned long flags;
 	long old_state;
 #ifdef CONFIG_SMP
+	runqueue_t *this_rq;
 	unsigned long load, this_load;
 	struct sched_domain *sd, *this_sd = NULL;
 	int new_cpu;
@@ -4351,6 +4356,18 @@ int setscheduler(struct task_struct *p,
 		resched_task(rq->curr);
 	}
 	task_rq_unlock(rq, &flags);
+
+	/*
+	 * If the process is blocked on an rt-mutex, it will now wake up and
+	 * reinsert itself into the wait list and boost the owner correctly.
+	 */
+	if (p->pi_blocked_on) {
+		if (p->pi_blocked_on->save_state)
+			wake_up_process_mutex(p);
+		else
+			wake_up_process(p);
+	}
+
 	spin_unlock_irqrestore(&p->pi_lock, flags);
 	return 0;
 }
@@ -7086,4 +7103,3 @@ void notrace preempt_enable_no_resched(v
 
 EXPORT_SYMBOL(preempt_enable_no_resched);
 #endif
-
    Index: linux-2.6.17-rt1/kernel/rtmutex_common.h
    ===================================================================
    --- linux-2.6.17-rt1.orig/kernel/rtmutex_common.h
    +++ linux-2.6.17-rt1/kernel/rtmutex_common.h
@@ -49,6 +49,7 @@ struct rt_mutex_waiter {
 	struct plist_node pi_list_entry;
 	struct task_struct *task;
 	struct rt_mutex *lock;
+	int save_state;
#ifdef CONFIG_DEBUG_RT_MUTEXES
 	unsigned long ip;
 	pid_t deadlock_task_pid;
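
As for testing: something along these lines should at least exercise the
new path from user space, by changing the priority of a thread that is
blocked on a priority-inheriting lock. This is an untested sketch - it
assumes a glibc with PTHREAD_PRIO_INHERIT support, needs root for
SCHED_FIFO, and only checks that nothing hangs, not that the boost really
reaches the lock owner:

	/* build: gcc -o pi-test pi-test.c -lpthread, run as root */
	#include <pthread.h>
	#include <sched.h>
	#include <stdio.h>
	#include <unistd.h>

	static pthread_mutex_t m;

	static void *owner(void *arg)
	{
		pthread_mutex_lock(&m);
		sleep(2);		/* hold the lock while the waiter's prio changes */
		pthread_mutex_unlock(&m);
		return NULL;
	}

	static void *waiter(void *arg)
	{
		pthread_mutex_lock(&m);	/* blocks on the PI lock */
		pthread_mutex_unlock(&m);
		return NULL;
	}

	int main(void)
	{
		pthread_mutexattr_t ma;
		pthread_t o, w;
		struct sched_param sp = { .sched_priority = 10 };

		pthread_mutexattr_init(&ma);
		pthread_mutexattr_setprotocol(&ma, PTHREAD_PRIO_INHERIT);
		pthread_mutex_init(&m, &ma);

		if (pthread_setschedparam(pthread_self(), SCHED_FIFO, &sp) != 0) {
			fprintf(stderr, "pthread_setschedparam failed (need root?)\n");
			return 1;
		}
		pthread_create(&o, NULL, owner, NULL);	/* inherits SCHED_FIFO/10 (NPTL default) */
		sleep(1);				/* let the owner take the lock */
		pthread_create(&w, NULL, waiter, NULL);
		sleep(1);				/* let the waiter block on it */

		/*
		 * The interesting step: setscheduler() on a task whose
		 * pi_blocked_on is set.
		 */
		sp.sched_priority = 20;
		pthread_setschedparam(w, SCHED_FIFO, &sp);

		pthread_join(w, NULL);
		pthread_join(o, NULL);
		printf("done - no hang\n");
		return 0;
	}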