Subject: [tip:irq/urgent] genirq: Split irq_set_affinity() so it can be called with lock held.
Commit-ID:  c2d0c555c22242c3a76e366074c4d83ef9fa3b8c
Gitweb: http://git.kernel.org/tip/c2d0c555c22242c3a76e366074c4d83ef9fa3b8c
Author: David Daney <ddaney@caviumnetworks.com>
AuthorDate: Fri, 25 Mar 2011 12:38:50 -0700
Committer: Thomas Gleixner <tglx@linutronix.de>
CommitDate: Sun, 27 Mar 2011 17:45:59 +0200

genirq: Split irq_set_affinity() so it can be called with lock held.

The .irq_cpu_online() and .irq_cpu_offline() functions may need to
adjust affinity, but they are called with the descriptor lock held.
Create __irq_set_affinity_locked() which is called with the lock held.
Make irq_set_affinity() just a wrapper that acquires the lock.

[ tglx: Changed the argument to irq_data, added a !desc check and
moved the !irq_set_affinity check where it belongs ]

Signed-off-by: David Daney <ddaney@caviumnetworks.com>
Cc: linux-mips@linux-mips.org
Cc: ralf@linux-mips.org
LKML-Reference: <1301081931-11240-4-git-send-email-ddaney@caviumnetworks.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/irq.h | 1 +
kernel/irq/manage.c | 48 ++++++++++++++++++++++++++++++------------------
2 files changed, 31 insertions(+), 18 deletions(-)

diff --git a/include/linux/irq.h b/include/linux/irq.h
index 76e948f..a10717e 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -382,6 +382,7 @@ extern void remove_irq(unsigned int irq, struct irqaction *act);
 
 extern void irq_cpu_online(void);
 extern void irq_cpu_offline(void);
+extern int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *cpumask);
 
 #ifdef CONFIG_GENERIC_HARDIRQS

diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 0a2aa73..3d151fd 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -139,35 +139,26 @@ static inline void
 irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
 #endif
 
-/**
- * irq_set_affinity - Set the irq affinity of a given irq
- * @irq: Interrupt to set affinity
- * @cpumask: cpumask
- *
- */
-int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
+int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
 {
-	struct irq_desc *desc = irq_to_desc(irq);
-	struct irq_chip *chip = desc->irq_data.chip;
-	unsigned long flags;
+	struct irq_chip *chip = irq_data_get_irq_chip(data);
+	struct irq_desc *desc = irq_data_to_desc(data);
 	int ret = 0;
 
-	if (!chip->irq_set_affinity)
+	if (!chip || !chip->irq_set_affinity)
 		return -EINVAL;
 
-	raw_spin_lock_irqsave(&desc->lock, flags);
-
-	if (irq_can_move_pcntxt(desc)) {
-		ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
+	if (irqd_can_move_in_process_context(data)) {
+		ret = chip->irq_set_affinity(data, mask, false);
 		switch (ret) {
 		case IRQ_SET_MASK_OK:
-			cpumask_copy(desc->irq_data.affinity, mask);
+			cpumask_copy(data->affinity, mask);
 		case IRQ_SET_MASK_OK_NOCOPY:
			irq_set_thread_affinity(desc);
 			ret = 0;
 		}
 	} else {
-		irqd_set_move_pending(&desc->irq_data);
+		irqd_set_move_pending(data);
 		irq_copy_pending(desc, mask);
 	}

@@ -176,7 +167,28 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
 		schedule_work(&desc->affinity_notify->work);
 	}
 	irq_compat_set_affinity(desc);
-	irqd_set(&desc->irq_data, IRQD_AFFINITY_SET);
+	irqd_set(data, IRQD_AFFINITY_SET);
+
+	return ret;
+}
+
+/**
+ * irq_set_affinity - Set the irq affinity of a given irq
+ * @irq: Interrupt to set affinity
+ * @cpumask: cpumask
+ *
+ */
+int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	unsigned long flags;
+	int ret;
+
+	if (!desc)
+		return -EINVAL;
+
+	raw_spin_lock_irqsave(&desc->lock, flags);
+	ret = __irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask);
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 	return ret;
 }
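
For context, a hedged sketch (not part of the patch) of the kind of caller the changelog describes: an irq_chip whose .irq_cpu_online() hook adjusts affinity while irq_cpu_online() already holds the descriptor lock, so it must use the new locked variant rather than irq_set_affinity(). The chip name, callback name, and re-targeting policy below are hypothetical; only __irq_set_affinity_locked() comes from this patch.

/*
 * Hypothetical example: a chip driver that re-targets an interrupt from
 * its .irq_cpu_online() hook.  Per the changelog, that hook is invoked
 * with the descriptor lock held, so calling irq_set_affinity() here
 * would deadlock; the locked variant is what such a caller needs.
 */
static void my_chip_irq_cpu_online(struct irq_data *data)
{
	/* Pull the irq onto the CPU that just came online (illustrative policy). */
	if (!cpumask_test_cpu(smp_processor_id(), data->affinity))
		__irq_set_affinity_locked(data, cpumask_of(smp_processor_id()));
}

static struct irq_chip my_chip = {
	.name		= "MY-CHIP",
	/* a real chip also needs .irq_set_affinity, or the locked call returns -EINVAL */
	.irq_cpu_online	= my_chip_irq_cpu_online,
};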
