Subject: Re: [PATCH] sparse_irq aka dyn_irq v14
On Thu, 13 Nov 2008 22:29:21 -0800 Yinghai Lu <yinghai@kernel.org> wrote:

> address some of Andrew's concerns.
>
> ...
>
> +static struct irq_pin_list *get_one_free_irq_2_pin(int cpu)
> +{
> +        struct irq_pin_list *pin;
> +        int node;
> +
> +        node = cpu_to_node(cpu);
> +
> +        pin = kzalloc_node(sizeof(*pin), GFP_ATOMIC, node);
> +        printk(KERN_DEBUG " alloc irq_2_pin on cpu %d node %d\n", cpu, node);
> +        BUG_ON(!pin);
> +
> +        return pin;
> +}

GFP_ATOMIC allocation attempts are unreliable - much more so than
GFP_KERNEL. GFP_ATOMIC allocations can and do fail. With the above
code, such a failure will crash the machine.

The code should handle this error and recover gracefully.
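
Something like this (untested), returning NULL and letting the caller
decide how to recover:

static struct irq_pin_list *get_one_free_irq_2_pin(int cpu)
{
        struct irq_pin_list *pin;
        int node = cpu_to_node(cpu);

        pin = kzalloc_node(sizeof(*pin), GFP_ATOMIC, node);
        if (!pin)
                return NULL;    /* caller decides how to recover */

        printk(KERN_DEBUG " alloc irq_2_pin on cpu %d node %d\n", cpu, node);
        return pin;
}

and at the (hypothetical) call site:

        entry = get_one_free_irq_2_pin(cpu);
        if (!entry)
                return -ENOMEM; /* propagate rather than oops */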

>
> ...
>
> +static struct irq_cfg *get_one_free_irq_cfg(int cpu)
> +{
> +        struct irq_cfg *cfg;
> +        int node;
> +
> +        node = cpu_to_node(cpu);
> +
> +        cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node);
> +        printk(KERN_DEBUG " alloc irq_cfg on cpu %d node %d\n", cpu, node);
> +        BUG_ON(!cfg);
> +
> +        return cfg;
> +}

Ditto.

> ...
>
> +static struct irq_2_iommu *get_one_free_irq_2_iommu(int cpu)
> +{
> +        struct irq_2_iommu *iommu;
> +        int node;
> +
> +        node = cpu_to_node(cpu);
> +
> +        iommu = kzalloc_node(sizeof(*iommu), GFP_ATOMIC, node);
> +        printk(KERN_DEBUG "alloc irq_2_iommu on cpu %d node %d\n", cpu, node);
> +
> +        return iommu;
> +}

I spent some time trying to work out whether the callers handle failure
here but I got lost in a twisty maze.
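
If they don't, each level of that chain needs the usual check -
something like (illustrative only):

        iommu = get_one_free_irq_2_iommu(cpu);
        if (!iommu)
                return NULL;    /* or propagate -ENOMEM upwards */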

> ...
>
> +static void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
> +{
> +        unsigned long bytes;
> +        char *ptr;
> +        int node;
> +
> +        /* Compute how many bytes we need per irq and allocate them */
> +        bytes = nr * sizeof(unsigned int);
> +
> +        node = cpu_to_node(cpu);
> +        ptr = kzalloc_node(bytes, GFP_ATOMIC, node);
> +        printk(KERN_DEBUG " alloc kstat_irqs on cpu %d node %d\n", cpu, node);
> +        BUG_ON(!ptr);
> +
> +        desc->kstat_irqs = (unsigned int *)ptr;
> +}

Ditto.

>
> ...
>
> +struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
> +{
> +        struct irq_desc *desc;
> +        struct list_head *hash_head;
> +        unsigned long flags;
> +        int node;
> +
> +        desc = irq_to_desc(irq);
> +        if (desc)
> +                return desc;
> +
> +        hash_head = sparseirqhashentry(irq);
> +
> +        spin_lock_irqsave(&sparse_irq_lock, flags);
> +
> +        /*
> +         * We have to do the hash-walk again, to avoid races
> +         * with another CPU:
> +         */
> +        list_for_each_entry(desc, hash_head, hash_entry) {
> +                if (desc->irq == irq)
> +                        goto out_unlock;
> +        }
> +
> +        node = cpu_to_node(cpu);
> +        desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
> +        printk(KERN_DEBUG " alloc irq_desc for %d aka %#x on cpu %d node %d\n",
> +                irq, irq, cpu, node);
> +        BUG_ON(!desc);

Ditto.
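
Also note that this allocation happens with sparse_irq_lock held, so the
failure path must drop the lock on the way out. Something like
(untested):

        desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
        if (!desc) {
                spin_unlock_irqrestore(&sparse_irq_lock, flags);
                return NULL;    /* callers must check for this */
        }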

> +        init_one_irq_desc(irq, desc, cpu);
> +
> +        /*
> +         * We use RCU's safe list-add method to make
> +         * parallel walking of the hash-list safe:
> +         */
> +        list_add_tail_rcu(&desc->hash_entry, hash_head);
> +        /*
> +         * Add it to the global list:
> +         */
> +        list_add_tail_rcu(&desc->list, &sparse_irqs_head);
> +
> +out_unlock:
> +        spin_unlock_irqrestore(&sparse_irq_lock, flags);
> +
> +        return desc;
> +}
> +
>
> ...
>
> +static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
> +                                             int cpu)
> +{
> +        struct irq_desc *desc;
> +        unsigned int irq;
> +        struct list_head *hash_head;
> +        unsigned long flags;
> +        int node;
> +
> +        irq = old_desc->irq;
> +
> +        hash_head = sparseirqhashentry(irq);
> +
> +        spin_lock_irqsave(&sparse_irq_lock, flags);
> +        /*
> +         * We have to do the hash-walk again, to avoid races
> +         * with another CPU:
> +         */
> +        list_for_each_entry(desc, hash_head, hash_entry) {
> +                if (desc->irq == irq && old_desc != desc)
> +                        goto out_unlock;
> +        }
> +
> +        node = cpu_to_node(cpu);
> +        desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
> +        printk(KERN_DEBUG " move irq_desc for %d aka %#x to cpu %d node %d\n",
> +                irq, irq, cpu, node);
> +        BUG_ON(!desc);

Ditto.
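
Although here a failed allocation needn't be fatal at all: old_desc is
still perfectly good, so the function could simply abandon the move.
Something like (untested):

        desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
        if (!desc) {
                printk(KERN_ERR "can not get new irq_desc for moving\n");
                desc = old_desc;        /* keep using the old one */
                goto out_unlock;
        }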

> +        init_copy_one_irq_desc(irq, old_desc, desc, cpu);
> +
> +        list_replace_rcu(&old_desc->hash_entry, &desc->hash_entry);
> +        list_replace_rcu(&old_desc->list, &desc->list);
> +
> +        /* free the old one */
> +        free_one_irq_desc(old_desc);
> +        kfree(old_desc);
> +
> +out_unlock:
> +        spin_unlock_irqrestore(&sparse_irq_lock, flags);
> +
> +        return desc;
> +}
> +



