Subject: [patch 07/24] Blackfin: fix deadlock in SMP IPI handler
2.6.30-stable review patch.  If anyone has any objections, please let us know.

------------------

From: Sonic Zhang <sonic.zhang@analog.com>

commit 86f2008bf546af9a434f480710e8d33891616bf5 upstream.

When a low-priority interrupt (such as ethernet) is triggered between two
high-priority IPI messages, the second IPI handler hits a deadlock in
disable_irq(). This happens because the second IPI message is queued while
the first IPI handler is still running, but that handler does not process
all pending messages, and new messages are inserted at the head of the list
rather than appended to the tail. Fix this by processing all pending
messages in the handler and appending new ones to the tail of the pending
list.

URL: http://blackfin.uclinux.org/gf/tracker/5226

Signed-off-by: Sonic Zhang <sonic.zhang@analog.com>
Signed-off-by: Mike Frysinger <vapier@gentoo.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>

---
arch/blackfin/mach-common/smp.c | 13 +++++++------
1 file changed, 7 insertions(+), 6 deletions(-)

--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -139,7 +139,7 @@ static void ipi_call_function(unsigned i

static irqreturn_t ipi_handler(int irq, void *dev_instance)
{
- struct ipi_message *msg, *mg;
+ struct ipi_message *msg;
struct ipi_message_queue *msg_queue;
unsigned int cpu = smp_processor_id();

@@ -149,7 +149,8 @@ static irqreturn_t ipi_handler(int irq,
msg_queue->count++;

spin_lock(&msg_queue->lock);
- list_for_each_entry_safe(msg, mg, &msg_queue->head, list) {
+ while (!list_empty(&msg_queue->head)) {
+ msg = list_entry(msg_queue->head.next, typeof(*msg), list);
list_del(&msg->list);
switch (msg->type) {
case BFIN_IPI_RESCHEDULE:
@@ -216,7 +217,7 @@ int smp_call_function(void (*func)(void
for_each_cpu_mask(cpu, callmap) {
msg_queue = &per_cpu(ipi_msg_queue, cpu);
spin_lock_irqsave(&msg_queue->lock, flags);
- list_add(&msg->list, &msg_queue->head);
+ list_add_tail(&msg->list, &msg_queue->head);
spin_unlock_irqrestore(&msg_queue->lock, flags);
platform_send_ipi_cpu(cpu);
}
@@ -256,7 +257,7 @@ int smp_call_function_single(int cpuid,

msg_queue = &per_cpu(ipi_msg_queue, cpu);
spin_lock_irqsave(&msg_queue->lock, flags);
- list_add(&msg->list, &msg_queue->head);
+ list_add_tail(&msg->list, &msg_queue->head);
spin_unlock_irqrestore(&msg_queue->lock, flags);
platform_send_ipi_cpu(cpu);

@@ -287,7 +288,7 @@ void smp_send_reschedule(int cpu)

msg_queue = &per_cpu(ipi_msg_queue, cpu);
spin_lock_irqsave(&msg_queue->lock, flags);
- list_add(&msg->list, &msg_queue->head);
+ list_add_tail(&msg->list, &msg_queue->head);
spin_unlock_irqrestore(&msg_queue->lock, flags);
platform_send_ipi_cpu(cpu);

@@ -315,7 +316,7 @@ void smp_send_stop(void)
for_each_cpu_mask(cpu, callmap) {
msg_queue = &per_cpu(ipi_msg_queue, cpu);
spin_lock_irqsave(&msg_queue->lock, flags);
- list_add(&msg->list, &msg_queue->head);
+ list_add_tail(&msg->list, &msg_queue->head);
spin_unlock_irqrestore(&msg_queue->lock, flags);
platform_send_ipi_cpu(cpu);
}
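
For illustration, here is a minimal, standalone userspace sketch of the
queue discipline the patch switches to: senders append messages at the
tail, and the handler keeps popping from the head until the queue is
empty, so a message queued while earlier ones are being handled is still
picked up in the same pass. The names (ipi_msg, msg_queue, queue_msg,
handle_ipis) and the pthread locking are assumptions made for this sketch
and are not taken from the Blackfin code.

/*
 * Sketch only: tail-append producers plus a drain-until-empty consumer.
 * The real handler uses the kernel list API and a spinlock; this sketch
 * does not reproduce that locking discipline.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct ipi_msg {
	int type;
	struct ipi_msg *next;
};

struct msg_queue {
	pthread_mutex_t lock;
	struct ipi_msg *head;
	struct ipi_msg *tail;
};

/* Append at the tail, i.e. the list_add_tail() side of the fix. */
static void queue_msg(struct msg_queue *q, int type)
{
	struct ipi_msg *m = malloc(sizeof(*m));

	if (!m)
		return;
	m->type = type;
	m->next = NULL;

	pthread_mutex_lock(&q->lock);
	if (q->tail)
		q->tail->next = m;
	else
		q->head = m;
	q->tail = m;
	pthread_mutex_unlock(&q->lock);
}

/* Drain from the head until empty, i.e. the while (!list_empty()) side. */
static void handle_ipis(struct msg_queue *q)
{
	pthread_mutex_lock(&q->lock);
	while (q->head) {
		struct ipi_msg *m = q->head;

		q->head = m->next;
		if (!q->head)
			q->tail = NULL;

		/*
		 * Drop the lock while "handling" the message so that a
		 * concurrently queued message can land and be picked up
		 * on the next pass of the loop.
		 */
		pthread_mutex_unlock(&q->lock);
		printf("handling IPI type %d\n", m->type);
		free(m);
		pthread_mutex_lock(&q->lock);
	}
	pthread_mutex_unlock(&q->lock);
}

int main(void)
{
	struct msg_queue q = { .lock = PTHREAD_MUTEX_INITIALIZER };

	queue_msg(&q, 1);
	queue_msg(&q, 2);
	handle_ipis(&q);
	return 0;
}

Because the loop re-checks the head of the queue on every iteration rather
than walking a snapshot of the list, and because producers append in FIFO
order, nothing queued before the drain finishes can be left behind.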


