switch the protection of percpu_counter list to spinlock

... making percpu_counter_destroy() non-blocking
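
For illustration only (not part of the patch): a hedged sketch of the kind of
caller this enables.  The names (struct foo, foo_teardown) are hypothetical;
the point is that once the list is guarded by a spinlock instead of a mutex,
percpu_counter_destroy() no longer takes a sleeping lock, so a teardown path
that must not block can call it directly rather than deferring to a workqueue.

	#include <linux/percpu_counter.h>

	struct foo {				/* hypothetical object */
		struct percpu_counter events;
	};

	/* teardown path that is not allowed to sleep */
	static void foo_teardown(struct foo *f)
	{
		percpu_counter_destroy(&f->events);	/* non-blocking after this change */
	}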

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Author: Al Viro <viro@zeniv.linux.org.uk>
Date:   2012-07-31 09:28:31 +04:00
Parent: 4a55c1017b
Commit: d87aae2f3c

--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -12,7 +12,7 @@
 
 #ifdef CONFIG_HOTPLUG_CPU
 static LIST_HEAD(percpu_counters);
-static DEFINE_MUTEX(percpu_counters_lock);
+static DEFINE_SPINLOCK(percpu_counters_lock);
 #endif
 
 #ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER
@@ -123,9 +123,9 @@ int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
 
 #ifdef CONFIG_HOTPLUG_CPU
 	INIT_LIST_HEAD(&fbc->list);
-	mutex_lock(&percpu_counters_lock);
+	spin_lock(&percpu_counters_lock);
 	list_add(&fbc->list, &percpu_counters);
-	mutex_unlock(&percpu_counters_lock);
+	spin_unlock(&percpu_counters_lock);
 #endif
 	return 0;
 }
@@ -139,9 +139,9 @@ void percpu_counter_destroy(struct percpu_counter *fbc)
 	debug_percpu_counter_deactivate(fbc);
 
 #ifdef CONFIG_HOTPLUG_CPU
-	mutex_lock(&percpu_counters_lock);
+	spin_lock(&percpu_counters_lock);
 	list_del(&fbc->list);
-	mutex_unlock(&percpu_counters_lock);
+	spin_unlock(&percpu_counters_lock);
 #endif
 	free_percpu(fbc->counters);
 	fbc->counters = NULL;
@@ -170,7 +170,7 @@ static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb,
 		return NOTIFY_OK;
 
 	cpu = (unsigned long)hcpu;
-	mutex_lock(&percpu_counters_lock);
+	spin_lock(&percpu_counters_lock);
 	list_for_each_entry(fbc, &percpu_counters, list) {
 		s32 *pcount;
 		unsigned long flags;
@@ -181,7 +181,7 @@ static int __cpuinit percpu_counter_hotcpu_callback(struct notifier_block *nb,
 		*pcount = 0;
 		raw_spin_unlock_irqrestore(&fbc->lock, flags);
 	}
-	mutex_unlock(&percpu_counters_lock);
+	spin_unlock(&percpu_counters_lock);
 #endif
 	return NOTIFY_OK;
 }
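
Not part of the commit, only an editor's sketch (hypothetical names) of the
locking pattern the conversion relies on: every critical section on the
percpu_counters list is short and never sleeps -- an O(1) add/del at
init/destroy time and a bounded walk in the CPU-hotplug callback -- and all
of them run in process context, so a plain spin_lock()/spin_unlock() pair
(no irqsave variant) is assumed to be sufficient.

	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct item {				/* hypothetical list element */
		struct list_head list;
	};

	static LIST_HEAD(item_list);
	static DEFINE_SPINLOCK(item_list_lock);	/* replaces the old mutex */

	static void item_register(struct item *it)
	{
		spin_lock(&item_list_lock);	/* short, non-sleeping section */
		list_add(&it->list, &item_list);
		spin_unlock(&item_list_lock);
	}

	static void item_unregister(struct item *it)
	{
		spin_lock(&item_list_lock);	/* keeps teardown non-blocking */
		list_del(&it->list);
		spin_unlock(&item_list_lock);
	}

	static void item_walk(void)		/* analogue of the hotplug callback */
	{
		struct item *it;

		spin_lock(&item_list_lock);
		list_for_each_entry(it, &item_list, list)
			;			/* per-item work must not sleep */
		spin_unlock(&item_list_lock);
	}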