modules: remove modlist_lock

Now that we always use stop_machine for module insertion and deletion, we no
longer need the modlist_lock: merely disabling preemption is sufficient to
block against list manipulation.  This also avoids a deadlock on OOPSen, where
we could potentially grab the lock twice.

Bug: 8695
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Tobias Oed <tobiasoed@hotmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Rusty Russell 2007-07-15 23:41:46 -07:00 committed by Linus Torvalds
parent 6c675bd43c
commit 24da1cbff9

View file

@ -61,10 +61,8 @@ extern int module_sysfs_initialized;
/* If this is set, the section belongs in the init part of the module */
#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))

/* List of modules, protected by module_mutex or preempt_disable
 * (add/delete uses stop_machine). */
static DEFINE_MUTEX(module_mutex);
static LIST_HEAD(modules);
@ -760,14 +758,13 @@ static void print_unload_info(struct seq_file *m, struct module *mod)
/*
 * Drop the reference taken on a symbol's owning module.
 * preempt_disable() is enough to pin the module list: add/delete
 * goes through stop_machine, so no module can vanish under us.
 * BUG() if the symbol is not found — callers must hold a reference.
 */
void __symbol_put(const char *symbol)
{
	struct module *owner;
	const unsigned long *crc;

	preempt_disable();
	if (!__find_symbol(symbol, &owner, &crc, 1))
		BUG();
	module_put(owner);
	preempt_enable();
}
EXPORT_SYMBOL(__symbol_put);
@ -1228,14 +1225,14 @@ static void free_module(struct module *mod)
/*
 * Look up a symbol and take a reference on its owning module.
 * Returns the symbol's address, or NULL if the symbol is not found
 * or the owning module is going away (strong_try_module_get fails).
 * Preemption disabled is sufficient to block module add/delete
 * (which use stop_machine); caller must later __symbol_put().
 */
void *__symbol_get(const char *symbol)
{
	struct module *owner;
	unsigned long value;
	const unsigned long *crc;

	preempt_disable();
	value = __find_symbol(symbol, &owner, &crc, 1);
	if (value && !strong_try_module_get(owner))
		value = 0;
	preempt_enable();

	return (void *)value;
}
@ -2308,11 +2305,10 @@ const struct seq_operations modules_op = {
/* Given an address, look for it in the module exception tables. */
const struct exception_table_entry *search_module_extables(unsigned long addr)
{
unsigned long flags;
const struct exception_table_entry *e = NULL;
struct module *mod;
spin_lock_irqsave(&modlist_lock, flags);
preempt_disable();
list_for_each_entry(mod, &modules, list) {
if (mod->num_exentries == 0)
continue;
@ -2323,7 +2319,7 @@ const struct exception_table_entry *search_module_extables(unsigned long addr)
if (e)
break;
}
spin_unlock_irqrestore(&modlist_lock, flags);
preempt_enable();
/* Now, if we found one, we are running inside it now, hence
we cannot unload the module, hence no refcnt needed. */
@ -2335,25 +2331,24 @@ const struct exception_table_entry *search_module_extables(unsigned long addr)
*/
/*
 * Is this address inside any module's core text/data?
 * Returns 1 if so, 0 otherwise.  Safe under preempt_disable only:
 * module add/delete runs under stop_machine, so the list is stable.
 */
int is_module_address(unsigned long addr)
{
	struct module *mod;

	preempt_disable();

	list_for_each_entry(mod, &modules, list) {
		if (within(addr, mod->module_core, mod->core_size)) {
			preempt_enable();
			return 1;
		}
	}

	preempt_enable();

	return 0;
}
/* Is this a valid kernel address? We don't grab the lock: we are oopsing. */
/* Is this a valid kernel address? */
struct module *__module_text_address(unsigned long addr)
{
struct module *mod;
@ -2368,11 +2363,10 @@ struct module *__module_text_address(unsigned long addr)
/*
 * Preemption-safe wrapper around __module_text_address(): returns the
 * module owning the given text address, or NULL.  NOTE(review): the
 * returned pointer is only guaranteed valid while the caller prevents
 * module unload (e.g. holds a reference or stays non-preemptible).
 */
struct module *module_text_address(unsigned long addr)
{
	struct module *mod;

	preempt_disable();
	mod = __module_text_address(addr);
	preempt_enable();

	return mod;
}