Date: Mon, 30 Apr 2007
From: William Lee Irwin III <wli@holomorphy.com>
Subject: [5/6] dynamically allocate IRQ stacks (was: Re: [-mm patch] i386: enable 4k stacks by default)
    On Mon, Apr 30, 2007 at 10:38:19AM -0700, William Lee Irwin III wrote:
    > Here's what I did for i386 for someone concerned about blowing the stack.

Fix up the conflict between IRQ stacks and deep stacks by allocating
the IRQ stacks dynamically instead of reserving static
NR_CPUS * THREAD_SIZE arrays in the kernel image; this also lets
64KSTACKS drop its dependency on !IRQSTACKS.

    Signed-off-by: William Irwin <wli@holomorphy.com>

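For anyone unfamiliar with the per-CPU API the patch switches to: the
static NR_CPUS-sized arrays become per-CPU variables. A minimal,
self-contained sketch of the pattern (the identifiers are illustrative
only, not from the patch):

#include <linux/percpu.h>

/* one copy per CPU, replacing a "void *example_stack[NR_CPUS]" array */
static DEFINE_PER_CPU(void *, example_stack);

static void example_set_stack(int cpu, void *stk)
{
	/* per_cpu() names a specific CPU's copy by number */
	per_cpu(example_stack, cpu) = stk;
}

static void *example_current_stack(void)
{
	/*
	 * __get_cpu_var() is the local CPU's copy; the caller must
	 * keep preemption disabled so the CPU can't change under it.
	 */
	return __get_cpu_var(example_stack);
}

The real space win in the patch comes from the stacks themselves: the
NR_CPUS * THREAD_SIZE arrays leave the kernel image entirely and are
replaced by per-CPU pointers to dynamically allocated stacks.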

Index: stack-paranoia/arch/i386/Kconfig.debug
===================================================================
--- stack-paranoia.orig/arch/i386/Kconfig.debug	2007-04-30 10:34:07.058721717 -0700
+++ stack-paranoia/arch/i386/Kconfig.debug	2007-04-30 10:36:14.553987258 -0700
@@ -107,7 +107,6 @@
 
 config 64KSTACKS
 	bool "Use 64KB for kernel stacks"
-	depends on !IRQSTACKS
 	help
 	  If you say Y here, the kernel will use a 64KB stacksize.
 	  This impedes running more threads on a system and also increases
Index: stack-paranoia/arch/i386/kernel/irq.c
===================================================================
--- stack-paranoia.orig/arch/i386/kernel/irq.c	2007-04-30 10:34:07.058721717 -0700
+++ stack-paranoia/arch/i386/kernel/irq.c	2007-04-30 10:36:14.553987258 -0700
@@ -17,9 +17,11 @@
 #include <linux/notifier.h>
 #include <linux/cpu.h>
 #include <linux/delay.h>
+#include <linux/bootmem.h>
 
 #include <asm/apic.h>
 #include <asm/uaccess.h>
+#include <asm/pgtable.h>
 
 DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_internodealigned_in_smp;
 EXPORT_PER_CPU_SYMBOL(irq_stat);
@@ -56,8 +58,8 @@
 	u32 stack[THREAD_SIZE/sizeof(u32)];
 };
 
-static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
-static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
+static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
+static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
 #endif
 
 /*
@@ -102,7 +104,7 @@
 #ifdef CONFIG_IRQSTACKS
 
 	curctx = (union irq_ctx *) current_thread_info();
-	irqctx = hardirq_ctx[smp_processor_id()];
+	irqctx = per_cpu(hardirq_ctx, smp_processor_id());
 
 	/*
	 * this is where we switch to the IRQ stack. However, if we are
@@ -150,11 +152,44 @@
  * These should really be __section__(".bss.page_aligned") as well, but
  * gcc's 3.0 and earlier don't handle that correctly.
  */
-static char softirq_stack[NR_CPUS * THREAD_SIZE]
-		__attribute__((__aligned__(THREAD_SIZE)));
+static DEFINE_PER_CPU(char *, softirq_stack);
+static DEFINE_PER_CPU(char *, hardirq_stack);
 
-static char hardirq_stack[NR_CPUS * THREAD_SIZE]
-		__attribute__((__aligned__(THREAD_SIZE)));
+#ifdef CONFIG_VMALLOC_STACK
+static void * __init __alloc_irqstack(int cpu)
+{
+	int i;
+	struct page *pages[THREAD_SIZE/PAGE_SIZE], **tmp = pages;
+	struct vm_struct *area;
+
+	if (!cpu)
+		return __alloc_bootmem(THREAD_SIZE, THREAD_SIZE,
+					__pa(MAX_DMA_ADDRESS));
+
+	/* failures here are unrecoverable anyway */
+	area = get_vm_area(THREAD_SIZE, VM_IOREMAP);
+	for (i = 0; i < ARRAY_SIZE(pages); ++i)
+		pages[i] = alloc_page(GFP_HIGHUSER);
+	map_vm_area(area, PAGE_KERNEL, &tmp);
+	return area->addr;
+}
+#else /* !CONFIG_VMALLOC_STACK */
+static void * __init __alloc_irqstack(int cpu)
+{
+	if (!cpu)
+		return __alloc_bootmem(THREAD_SIZE, THREAD_SIZE,
+					__pa(MAX_DMA_ADDRESS));
+
+	return (void *)__get_free_pages(GFP_KERNEL,
+					ilog2(THREAD_SIZE/PAGE_SIZE));
+}
+#endif /* !CONFIG_VMALLOC_STACK */
+
+static void __init alloc_irqstacks(int cpu)
+{
+	per_cpu(softirq_stack, cpu) = __alloc_irqstack(cpu);
+	per_cpu(hardirq_stack, cpu) = __alloc_irqstack(cpu);
+}
 
 /*
  * allocate per-cpu stacks for hardirq and for softirq processing
@@ -163,34 +198,36 @@
 {
 	union irq_ctx *irqctx;
 
-	if (hardirq_ctx[cpu])
+	if (per_cpu(hardirq_ctx, cpu))
 		return;
 
-	irqctx = (union irq_ctx*) &hardirq_stack[cpu*THREAD_SIZE];
+	alloc_irqstacks(cpu);
+
+	irqctx = (union irq_ctx*)per_cpu(hardirq_stack, cpu);
 	irqctx->tinfo.task = NULL;
 	irqctx->tinfo.exec_domain = NULL;
 	irqctx->tinfo.cpu = cpu;
 	irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
 	irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
 
-	hardirq_ctx[cpu] = irqctx;
+	per_cpu(hardirq_ctx, cpu) = irqctx;
 
-	irqctx = (union irq_ctx*) &softirq_stack[cpu*THREAD_SIZE];
+	irqctx = (union irq_ctx*)per_cpu(softirq_stack, cpu);
 	irqctx->tinfo.task = NULL;
 	irqctx->tinfo.exec_domain = NULL;
 	irqctx->tinfo.cpu = cpu;
 	irqctx->tinfo.preempt_count = 0;
 	irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
 
-	softirq_ctx[cpu] = irqctx;
+	per_cpu(softirq_ctx, cpu) = irqctx;
 
 	printk("CPU %u irqstacks, hard=%p soft=%p\n",
-	       cpu,hardirq_ctx[cpu],softirq_ctx[cpu]);
+	       cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
 }
 
 void irq_ctx_exit(int cpu)
 {
-	hardirq_ctx[cpu] = NULL;
+	per_cpu(hardirq_ctx, cpu) = NULL;
 }
 
 extern asmlinkage void __do_softirq(void);
@@ -209,7 +246,7 @@
 
 	if (local_softirq_pending()) {
 		curctx = current_thread_info();
-		irqctx = softirq_ctx[smp_processor_id()];
+		irqctx = per_cpu(softirq_ctx, smp_processor_id());
 		irqctx->tinfo.task = curctx->task;
 		irqctx->tinfo.previous_esp = current_stack_pointer;
 
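A note on the CONFIG_VMALLOC_STACK path above, since it compresses
several steps: here is the same allocation written out with the failure
checks it elides (as the comment in the patch says, failures at this
point are unrecoverable anyway). A sketch only, not part of the patch;
alloc_vmapped_stack() is a made-up name and cleanup on failure is
deliberately omitted:

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <asm/pgtable.h>
#include <asm/thread_info.h>

static void *alloc_vmapped_stack(void)
{
	struct page *pages[THREAD_SIZE/PAGE_SIZE], **tmp = pages;
	struct vm_struct *area;
	int i;

	/* reserve a THREAD_SIZE window in vmalloc space */
	area = get_vm_area(THREAD_SIZE, VM_IOREMAP);
	if (!area)
		return NULL;

	/* the backing pages need not be physically contiguous */
	for (i = 0; i < ARRAY_SIZE(pages); i++) {
		pages[i] = alloc_page(GFP_HIGHUSER);
		if (!pages[i])
			return NULL;	/* leaks; sketch only */
	}

	/* wire the pages into the reserved virtual window */
	if (map_vm_area(area, PAGE_KERNEL, &tmp))
		return NULL;
	return area->addr;
}

Two properties fall out of going through get_vm_area(): the backing
pages need not be physically contiguous, so GFP_HIGHUSER highmem pages
are fine (the kernel only ever touches the stack through the vmalloc
mapping), and areas in vmalloc space are separated by an unmapped guard
page, so overrunning the stack faults immediately instead of silently
corrupting a neighbouring allocation. The boot CPU is special-cased to
__alloc_bootmem() because irq_ctx_init() for CPU 0 runs before the page
allocator is up.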