irq: only update affinity if ->set_affinity() is successful
irq_set_affinity() and move_masked_irq() try to assign affinity
before calling chip set_affinity(). Some archs assign it again in
->set_affinity(). We do something like:

	cpumask_copy(desc->affinity, mask);
	desc->chip->set_affinity(irq, mask);

But in the failure path, affinity should not be touched - otherwise
we'll end up with a different affinity mask despite the failure to
migrate the IRQ. So update the affinity only if set_affinity()
returns 0, and call irq_set_thread_affinity() accordingly.

v2: update after "irq, x86: Remove IRQ_DISABLED check in process
    context IRQ move"
v3: according to Ingo, change set_affinity() in irq_chip to return int.
v4: update comments by removing moving irq_desc code.

[ Impact: fix /proc/irq/*/smp_affinity setting corner case bug ]

Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
LKML-Reference: <49F65509.60307@kernel.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
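To make the ordering bug concrete, here is a minimal, self-contained C
sketch of the pre-patch and post-patch behaviour. The toy_desc/toy_chip
types and the hard-coded online mask are illustrative stand-ins, not the
kernel's real irq_desc/irq_chip:

/*
 * Illustrative stand-ins only: toy_desc/toy_chip are not the kernel's
 * irq_desc/irq_chip, and the "online" mask is hard-coded for the demo.
 */
#include <stdio.h>

struct toy_chip {
	/* as of v3: ->set_affinity() returns 0 on success, -errno on failure */
	int (*set_affinity)(unsigned int irq, unsigned long mask);
};

struct toy_desc {
	unsigned long affinity;		/* cached affinity mask */
	struct toy_chip *chip;
};

/* toy chip: refuses any mask that selects no online CPU */
static int toy_set_affinity(unsigned int irq, unsigned long mask)
{
	const unsigned long online = 0x0f;	/* pretend CPUs 0-3 are online */

	if (!(mask & online))
		return -22;			/* -EINVAL: migration failed */
	return 0;				/* hardware reprogrammed */
}

/* pre-patch ordering: cache first, ask the chip second */
static void set_affinity_before(struct toy_desc *desc, unsigned int irq,
				unsigned long mask)
{
	desc->affinity = mask;			/* committed even on failure */
	desc->chip->set_affinity(irq, mask);	/* return value ignored */
}

/* post-patch ordering: commit the cache only if the chip succeeded */
static void set_affinity_after(struct toy_desc *desc, unsigned int irq,
			       unsigned long mask)
{
	if (!desc->chip->set_affinity(irq, mask))
		desc->affinity = mask;
}

int main(void)
{
	struct toy_chip chip = { .set_affinity = toy_set_affinity };
	struct toy_desc desc = { .affinity = 0x01, .chip = &chip };

	set_affinity_before(&desc, 5, 0xf0);	/* fails: CPUs 4-7 "offline" */
	printf("pre-patch:  cached 0x%02lx (stale)\n", desc.affinity);

	desc.affinity = 0x01;
	set_affinity_after(&desc, 5, 0xf0);	/* fails: cache untouched */
	printf("post-patch: cached 0x%02lx (still valid)\n", desc.affinity);
	return 0;
}

With the old ordering the cached mask changes even when the chip rejects
the move, so /proc/irq/*/smp_affinity would report a mask the hardware
never accepted; gating the copy on a zero return keeps the cache and the
hardware in agreement.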
parent d5dedd4507
commit 57b150cce8

3 changed files with 23 additions and 11 deletions
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -42,6 +42,9 @@ static inline void unregister_handler_proc(unsigned int irq,
 
 extern int irq_select_affinity_usr(unsigned int irq);
 
+extern void
+irq_set_thread_affinity(struct irq_desc *desc, const struct cpumask *cpumask);
+
 /*
  * Debugging printout:
  */
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -80,7 +80,7 @@ int irq_can_set_affinity(unsigned int irq)
 	return 1;
 }
 
-static void
+void
 irq_set_thread_affinity(struct irq_desc *desc, const struct cpumask *cpumask)
 {
 	struct irqaction *action = desc->action;
@@ -109,17 +109,22 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 	spin_lock_irqsave(&desc->lock, flags);
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
-	if (desc->status & IRQ_MOVE_PCNTXT)
-		desc->chip->set_affinity(irq, cpumask);
+	if (desc->status & IRQ_MOVE_PCNTXT) {
+		if (!desc->chip->set_affinity(irq, cpumask)) {
+			cpumask_copy(desc->affinity, cpumask);
+			irq_set_thread_affinity(desc, cpumask);
+		}
+	}
 	else {
 		desc->status |= IRQ_MOVE_PENDING;
 		cpumask_copy(desc->pending_mask, cpumask);
 	}
 #else
-	cpumask_copy(desc->affinity, cpumask);
-	desc->chip->set_affinity(irq, cpumask);
+	if (!desc->chip->set_affinity(irq, cpumask)) {
+		cpumask_copy(desc->affinity, cpumask);
+		irq_set_thread_affinity(desc, cpumask);
+	}
 #endif
 	desc->status |= IRQ_AFFINITY_SET;
 	spin_unlock_irqrestore(&desc->lock, flags);
 	return 0;
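The manage.c hunk splits irq_set_affinity() into an immediate path
(IRQ_MOVE_PCNTXT) and a deferred one that parks the request in
pending_mask. A toy model of that split, with simplified stand-ins for
irq_desc and the status bits (not kernel-buildable code):

/*
 * Toy model of the two paths in the patched irq_set_affinity() under
 * CONFIG_GENERIC_PENDING_IRQ. toy_desc and the flag values are
 * simplified stand-ins for the kernel's irq_desc and status bits.
 */
#include <stdio.h>

#define IRQ_MOVE_PCNTXT		0x1	/* safe to reprogram right now */
#define IRQ_MOVE_PENDING	0x2	/* a move request is parked */

struct toy_desc {
	unsigned int status;
	unsigned long affinity;		/* what we believe hw uses */
	unsigned long pending_mask;	/* deferred request, if any */
	int (*chip_set_affinity)(unsigned long mask);
};

static int toy_chip_set_affinity(unsigned long mask)
{
	return mask ? 0 : -22;		/* toy chip: reject empty masks */
}

static void toy_irq_set_affinity(struct toy_desc *desc, unsigned long mask)
{
	if (desc->status & IRQ_MOVE_PCNTXT) {
		/* immediate path: commit the cache only on success */
		if (!desc->chip_set_affinity(mask))
			desc->affinity = mask;
	} else {
		/* deferred path: park the request for move_masked_irq() */
		desc->status |= IRQ_MOVE_PENDING;
		desc->pending_mask = mask;
	}
}

int main(void)
{
	struct toy_desc d = {
		.affinity = 0x1,
		.chip_set_affinity = toy_chip_set_affinity,
	};

	toy_irq_set_affinity(&d, 0x4);	/* no PCNTXT: request is parked */
	printf("parked: affinity=0x%lx pending=0x%lx status=0x%x\n",
	       d.affinity, d.pending_mask, d.status);

	d.status |= IRQ_MOVE_PCNTXT;
	toy_irq_set_affinity(&d, 0x2);	/* immediate: applied and cached */
	printf("moved:  affinity=0x%lx\n", d.affinity);
	return 0;
}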
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -1,5 +1,8 @@
 
 #include <linux/irq.h>
+#include <linux/interrupt.h>
+
+#include "internals.h"
 
 void move_masked_irq(int irq)
 {
@@ -39,11 +42,12 @@ void move_masked_irq(int irq)
 	 * masking the irqs.
 	 */
 	if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
-		   < nr_cpu_ids)) {
-		cpumask_and(desc->affinity,
-			    desc->pending_mask, cpu_online_mask);
-		desc->chip->set_affinity(irq, desc->affinity);
-	}
+		   < nr_cpu_ids))
+		if (!desc->chip->set_affinity(irq, desc->pending_mask)) {
+			cpumask_copy(desc->affinity, desc->pending_mask);
+			irq_set_thread_affinity(desc, desc->pending_mask);
+		}
+
 	cpumask_clear(desc->pending_mask);
 }
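The migration.c half consumes the parked request on the next masked
interrupt. A matching sketch of the patched flow, again with toy
stand-ins; note that, as in the real code, the pending mask is cleared
whether or not the chip accepted the move:

/* Toy stand-ins again; mirrors the patched move_masked_irq() flow. */
#include <stdio.h>

#define IRQ_MOVE_PENDING	0x2

struct toy_desc {
	unsigned int status;
	unsigned long affinity;
	unsigned long pending_mask;
	int (*chip_set_affinity)(unsigned long mask);
};

static int toy_chip_set_affinity(unsigned long mask)
{
	return mask ? 0 : -22;
}

static void toy_move_masked_irq(struct toy_desc *desc)
{
	if (!(desc->status & IRQ_MOVE_PENDING))
		return;

	/* as in the patch: cached affinity follows only on chip success */
	if (!desc->chip_set_affinity(desc->pending_mask))
		desc->affinity = desc->pending_mask;

	/* the request is consumed either way */
	desc->pending_mask = 0;
	desc->status &= ~IRQ_MOVE_PENDING;
}

int main(void)
{
	struct toy_desc d = {
		.status = IRQ_MOVE_PENDING,
		.affinity = 0x1,
		.pending_mask = 0x4,
		.chip_set_affinity = toy_chip_set_affinity,
	};

	toy_move_masked_irq(&d);	/* normally run from the next interrupt */
	printf("affinity=0x%lx pending=0x%lx\n", d.affinity, d.pending_mask);
	return 0;
}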