From: Chen Yu <yu.c.chen@intel.com>
Subject: [PATCH 2/4][RFC v2] x86/apic: Record the number of vectors assigned on a CPU
Date: Fri, 1 Sep 2017
Update the number of vectors assigned on each CPU during the
vector allocation/free process. This prepares for the upcoming
vector spreading work, which needs to find the CPU with the
fewest vectors assigned.
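
As an illustration only (not part of this patch), the spreading step could
later pick a target CPU by scanning the counter maintained here. The helper
below is a hypothetical sketch; pick_least_loaded_cpu() is not an existing
function, and it assumes the caller holds vector_lock so the per-CPU counts
cannot change underneath it:

	/* Hypothetical sketch: return the online CPU in @mask with the
	 * fewest vectors currently assigned, or -1 if none is online.
	 */
	static int pick_least_loaded_cpu(const struct cpumask *mask)
	{
		int cpu, best_cpu = -1;
		int best = INT_MAX;

		for_each_cpu_and(cpu, mask, cpu_online_mask) {
			/* Per-CPU count maintained by inc/dec_vector_alloc(). */
			int alloc = per_cpu(vector_irq, cpu).alloc;

			if (alloc < best) {
				best = alloc;
				best_cpu = cpu;
			}
		}
		return best_cpu;
	}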

Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
Cc: Len Brown <lenb@kernel.org>
Cc: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Chen Yu <yu.c.chen@intel.com>
---
arch/x86/include/asm/hw_irq.h | 8 ++++++++
arch/x86/kernel/apic/vector.c | 45 ++++++++++++++++++++++++++++++++++++++++++-
arch/x86/kernel/irq.c | 5 ++++-
arch/x86/kernel/irqinit.c | 1 +
arch/x86/lguest/boot.c | 1 +
5 files changed, 58 insertions(+), 2 deletions(-)

diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index b2243fe..d1b3c61 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -151,6 +151,10 @@ extern struct irq_cfg *irqd_cfg(struct irq_data *irq_data);
extern void lock_vector_lock(void);
extern void unlock_vector_lock(void);
extern void setup_vector_irq(int cpu);
+extern void inc_vector_alloc(const struct cpumask *mask,
+			     int count);
+extern void dec_vector_alloc(const struct cpumask *mask,
+			     int count);
#ifdef CONFIG_SMP
extern void send_cleanup_vector(struct irq_cfg *);
extern void irq_complete_move(struct irq_cfg *cfg);
@@ -163,6 +167,10 @@ extern void apic_ack_edge(struct irq_data *data);
#else /* CONFIG_X86_LOCAL_APIC */
static inline void lock_vector_lock(void) {}
static inline void unlock_vector_lock(void) {}
+static inline void inc_vector_alloc(const struct cpumask *mask,
+				    int count) {}
+static inline void dec_vector_alloc(const struct cpumask *mask,
+				    int count) {}
#endif /* CONFIG_X86_LOCAL_APIC */

/* Statistics */
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 2ce1021..4ff84c0 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -50,6 +50,36 @@ void unlock_vector_lock(void)
raw_spin_unlock(&vector_lock);
}

+static void update_vectors_alloc(const struct cpumask *mask,
+				 int count, bool add)
+{
+	int cpu;
+
+	for_each_cpu(cpu, mask) {
+		int cur_alloc = per_cpu(vector_irq, cpu).alloc;
+
+		/* Update the number of vectors assigned on this CPU. */
+		if (add && (cur_alloc + count <= NR_VECTORS))
+			per_cpu(vector_irq, cpu).alloc += count;
+		else if (!add && cur_alloc >= count)
+			per_cpu(vector_irq, cpu).alloc -= count;
+		else
+			continue;
+	}
+}
+
+void inc_vector_alloc(const struct cpumask *mask,
+		      int count)
+{
+	update_vectors_alloc(mask, count, true);
+}
+
+void dec_vector_alloc(const struct cpumask *mask,
+		      int count)
+{
+	update_vectors_alloc(mask, count, false);
+}
+
static struct apic_chip_data *apic_chip_data(struct irq_data *irq_data)
{
if (!irq_data)
@@ -191,6 +221,7 @@ static int __assign_irq_vector(int irq, struct apic_chip_data *d,
for_each_cpu(new_cpu, vector_searchmask)
per_cpu(vector_irq, new_cpu).desc[vector] = irq_to_desc(irq);

+ inc_vector_alloc(vector_searchmask, 1);
goto update;

next_cpu:
@@ -263,6 +294,7 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
{
struct irq_desc *desc;
int cpu, vector;
+ struct cpumask mask;

if (!data->cfg.vector)
return;
@@ -271,6 +303,9 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
for_each_cpu_and(cpu, data->domain, cpu_online_mask)
per_cpu(vector_irq, cpu).desc[vector] = VECTOR_UNUSED;

+ cpumask_and(&mask, data->domain, cpu_online_mask);
+ dec_vector_alloc(&mask, 1);
+
data->cfg.vector = 0;
cpumask_clear(data->domain);

@@ -289,6 +324,7 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
if (per_cpu(vector_irq, cpu).desc[vector] != desc)
continue;
per_cpu(vector_irq, cpu).desc[vector] = VECTOR_UNUSED;
+ dec_vector_alloc(cpumask_of(cpu), 1);
break;
}
}
@@ -483,6 +519,7 @@ static void __setup_vector_irq(int cpu)
continue;
vector = data->cfg.vector;
per_cpu(vector_irq, cpu).desc[vector] = desc;
+ inc_vector_alloc(cpumask_of(cpu), 1);
}
/* Mark the free vectors */
for (vector = 0; vector < NR_VECTORS; ++vector) {
@@ -491,8 +528,10 @@ static void __setup_vector_irq(int cpu)
continue;

data = apic_chip_data(irq_desc_get_irq_data(desc));
-		if (!cpumask_test_cpu(cpu, data->domain))
+		if (!cpumask_test_cpu(cpu, data->domain)) {
 			per_cpu(vector_irq, cpu).desc[vector] = VECTOR_UNUSED;
+			dec_vector_alloc(cpumask_of(cpu), 1);
+		}
}
}

@@ -514,6 +553,7 @@ void setup_vector_irq(int cpu)
for (irq = 0; irq < nr_legacy_irqs(); irq++)
per_cpu(vector_irq, cpu).desc[ISA_IRQ_VECTOR(irq)] = irq_to_desc(irq);

+ inc_vector_alloc(cpumask_of(cpu), irq);
__setup_vector_irq(cpu);
}

@@ -649,6 +689,7 @@ asmlinkage __visible void __irq_entry smp_irq_move_cleanup_interrupt(void)
goto unlock;
}
__this_cpu_write(vector_irq.desc[vector], VECTOR_UNUSED);
+ dec_vector_alloc(cpumask_of(me), 1);
cpumask_clear_cpu(me, data->old_domain);
unlock:
raw_spin_unlock(&desc->lock);
@@ -784,6 +825,8 @@ void irq_force_complete_move(struct irq_desc *desc)
for_each_cpu(cpu, data->old_domain)
per_cpu(vector_irq, cpu).desc[cfg->old_vector] = VECTOR_UNUSED;

+ dec_vector_alloc(data->old_domain, 1);
+
/* Cleanup the left overs of the (half finished) move */
cpumask_clear(data->old_domain);
data->move_in_progress = 0;
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index ae11e86..67c01b8 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -250,6 +250,7 @@ __visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
vector);
} else {
__this_cpu_write(vector_irq.desc[vector], VECTOR_UNUSED);
+ dec_vector_alloc(cpumask_of(smp_processor_id()), 1);
}
}

@@ -491,8 +492,10 @@ void fixup_irqs(void)
}
raw_spin_unlock(&desc->lock);
}
-		if (__this_cpu_read(vector_irq.desc[vector]) != VECTOR_RETRIGGERED)
+		if (__this_cpu_read(vector_irq.desc[vector]) != VECTOR_RETRIGGERED) {
 			__this_cpu_write(vector_irq.desc[vector], VECTOR_UNUSED);
+			dec_vector_alloc(cpumask_of(smp_processor_id()), 1);
+		}
}
}
#endif
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index 734b54f..dd618c1 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -97,6 +97,7 @@ void __init init_IRQ(void)
for (i = 0; i < nr_legacy_irqs(); i++)
per_cpu(vector_irq, 0).desc[ISA_IRQ_VECTOR(i)] = irq_to_desc(i);

+ inc_vector_alloc(cpumask_of(0), i);
x86_init.irqs.intr_init();
}

diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index e80758a..0696354 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -847,6 +847,7 @@ static int lguest_setup_irq(unsigned int irq)
/* Some systems map "vectors" to interrupts weirdly. Not us! */
desc = irq_to_desc(irq);
__this_cpu_write(vector_irq.desc[FIRST_EXTERNAL_VECTOR + irq], desc);
+ inc_vector_alloc(cpumask_of(smp_processor_id()), 1);
return 0;
}

--
2.7.4