threadgroup: rename signal->threadgroup_fork_lock to ->group_rwsem

Make the following renames to prepare for extension of threadgroup
locking.

* s/signal->threadgroup_fork_lock/signal->group_rwsem/
* s/threadgroup_fork_read_lock()/threadgroup_change_begin()/
* s/threadgroup_fork_read_unlock()/threadgroup_change_end()/
* s/threadgroup_fork_write_lock()/threadgroup_lock()/
* s/threadgroup_fork_write_unlock()/threadgroup_unlock()/

This patch doesn't cause any behavior change.
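
For reference, a minimal sketch (not part of this patch; the function name is
hypothetical) of how a write-side user such as cgroups is expected to use the
renamed helpers, assuming CONFIG_CGROUPS=y:

/*
 * Illustrative only: perform a threadgroup-wide operation on @tsk's group.
 * threadgroup_lock()/threadgroup_unlock() write-lock ->group_rwsem, so the
 * threadgroup_change_begin()/threadgroup_change_end() read-side sections
 * around CLONE_THREAD forks in copy_process() are excluded while this runs.
 */
static void example_threadgroup_op(struct task_struct *tsk)
{
	threadgroup_lock(tsk);		/* down_write(&tsk->signal->group_rwsem) */
	/* ... walk tsk's thread group; no new threads can appear ... */
	threadgroup_unlock(tsk);	/* up_write(&tsk->signal->group_rwsem) */
}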

-v2: Rename threadgroup_change_done() to threadgroup_change_end() per
     KAMEZAWA's suggestion.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Li Zefan <lizf@cn.fujitsu.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Paul Menage <paul@paulmenage.org>
Tejun Heo <tj@kernel.org>  2011-12-12 18:12:21 -08:00
commit 257058ae2b, parent e25e2cbb4c
4 changed files with 29 additions and 31 deletions

include/linux/init_task.h

@@ -23,11 +23,10 @@ extern struct files_struct init_files;
 extern struct fs_struct init_fs;

 #ifdef CONFIG_CGROUPS
-#define INIT_THREADGROUP_FORK_LOCK(sig)					\
-	.threadgroup_fork_lock =					\
-		__RWSEM_INITIALIZER(sig.threadgroup_fork_lock),
+#define INIT_GROUP_RWSEM(sig)						\
+	.group_rwsem = __RWSEM_INITIALIZER(sig.group_rwsem),
 #else
-#define INIT_THREADGROUP_FORK_LOCK(sig)
+#define INIT_GROUP_RWSEM(sig)
 #endif

 #define INIT_SIGNALS(sig) {						\
@@ -46,7 +45,7 @@ extern struct fs_struct init_fs;
 	},								\
 	.cred_guard_mutex =						\
		 __MUTEX_INITIALIZER(sig.cred_guard_mutex),		\
-	INIT_THREADGROUP_FORK_LOCK(sig)					\
+	INIT_GROUP_RWSEM(sig)						\
 }

 extern struct nsproxy init_nsproxy;

include/linux/sched.h

@@ -635,13 +635,13 @@ struct signal_struct {
 #endif
 #ifdef CONFIG_CGROUPS
 	/*
-	 * The threadgroup_fork_lock prevents threads from forking with
+	 * The group_rwsem prevents threads from forking with
 	 * CLONE_THREAD while held for writing. Use this for fork-sensitive
 	 * threadgroup-wide operations. It's taken for reading in fork.c in
 	 * copy_process().
 	 * Currently only needed write-side by cgroups.
 	 */
-	struct rw_semaphore threadgroup_fork_lock;
+	struct rw_semaphore group_rwsem;
 #endif

 	int oom_adj;		/* OOM kill score adjustment (bit shift) */
@@ -2371,29 +2371,29 @@ static inline void unlock_task_sighand(struct task_struct *tsk,
 	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
 }

-/* See the declaration of threadgroup_fork_lock in signal_struct. */
+/* See the declaration of group_rwsem in signal_struct. */
 #ifdef CONFIG_CGROUPS
-static inline void threadgroup_fork_read_lock(struct task_struct *tsk)
+static inline void threadgroup_change_begin(struct task_struct *tsk)
 {
-	down_read(&tsk->signal->threadgroup_fork_lock);
+	down_read(&tsk->signal->group_rwsem);
 }
-static inline void threadgroup_fork_read_unlock(struct task_struct *tsk)
+static inline void threadgroup_change_end(struct task_struct *tsk)
 {
-	up_read(&tsk->signal->threadgroup_fork_lock);
+	up_read(&tsk->signal->group_rwsem);
 }
-static inline void threadgroup_fork_write_lock(struct task_struct *tsk)
+static inline void threadgroup_lock(struct task_struct *tsk)
 {
-	down_write(&tsk->signal->threadgroup_fork_lock);
+	down_write(&tsk->signal->group_rwsem);
 }
-static inline void threadgroup_fork_write_unlock(struct task_struct *tsk)
+static inline void threadgroup_unlock(struct task_struct *tsk)
 {
-	up_write(&tsk->signal->threadgroup_fork_lock);
+	up_write(&tsk->signal->group_rwsem);
 }
 #else
-static inline void threadgroup_fork_read_lock(struct task_struct *tsk) {}
-static inline void threadgroup_fork_read_unlock(struct task_struct *tsk) {}
-static inline void threadgroup_fork_write_lock(struct task_struct *tsk) {}
-static inline void threadgroup_fork_write_unlock(struct task_struct *tsk) {}
+static inline void threadgroup_change_begin(struct task_struct *tsk) {}
+static inline void threadgroup_change_end(struct task_struct *tsk) {}
+static inline void threadgroup_lock(struct task_struct *tsk) {}
+static inline void threadgroup_unlock(struct task_struct *tsk) {}
 #endif

 #ifndef __HAVE_THREAD_FUNCTIONS

kernel/cgroup.c

@@ -2003,8 +2003,8 @@ static int css_set_prefetch(struct cgroup *cgrp, struct css_set *cg,
  * @cgrp: the cgroup to attach to
  * @leader: the threadgroup leader task_struct of the group to be attached
  *
- * Call holding cgroup_mutex and the threadgroup_fork_lock of the leader. Will
- * take task_lock of each thread in leader's threadgroup individually in turn.
+ * Call holding cgroup_mutex and the group_rwsem of the leader. Will take
+ * task_lock of each thread in leader's threadgroup individually in turn.
  */
 int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 {
@@ -2030,8 +2030,8 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 	 * step 0: in order to do expensive, possibly blocking operations for
 	 * every thread, we cannot iterate the thread group list, since it needs
 	 * rcu or tasklist locked. instead, build an array of all threads in the
-	 * group - threadgroup_fork_lock prevents new threads from appearing,
-	 * and if threads exit, this will just be an over-estimate.
+	 * group - group_rwsem prevents new threads from appearing, and if
+	 * threads exit, this will just be an over-estimate.
 	 */
 	group_size = get_nr_threads(leader);
 	/* flex_array supports very large thread-groups better than kmalloc. */
@@ -2249,7 +2249,6 @@ static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup)
 			cgroup_unlock();
 			return -ESRCH;
 		}
-
 		/*
 		 * even if we're attaching all tasks in the thread group, we
 		 * only need to check permissions on one of them.
@@ -2273,9 +2272,9 @@ static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup)
 	}

 	if (threadgroup) {
-		threadgroup_fork_write_lock(tsk);
+		threadgroup_lock(tsk);
 		ret = cgroup_attach_proc(cgrp, tsk);
-		threadgroup_fork_write_unlock(tsk);
+		threadgroup_unlock(tsk);
 	} else {
 		ret = cgroup_attach_task(cgrp, tsk);
 	}

kernel/fork.c

@@ -972,7 +972,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 	sched_autogroup_fork(sig);

 #ifdef CONFIG_CGROUPS
-	init_rwsem(&sig->threadgroup_fork_lock);
+	init_rwsem(&sig->group_rwsem);
 #endif

 	sig->oom_adj = current->signal->oom_adj;
@@ -1157,7 +1157,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->io_context = NULL;
 	p->audit_context = NULL;
 	if (clone_flags & CLONE_THREAD)
-		threadgroup_fork_read_lock(current);
+		threadgroup_change_begin(current);
 	cgroup_fork(p);
 #ifdef CONFIG_NUMA
 	p->mempolicy = mpol_dup(p->mempolicy);
@@ -1372,7 +1372,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	proc_fork_connector(p);
 	cgroup_post_fork(p);
 	if (clone_flags & CLONE_THREAD)
-		threadgroup_fork_read_unlock(current);
+		threadgroup_change_end(current);
 	perf_event_fork(p);

 	return p;
@@ -1407,7 +1407,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 bad_fork_cleanup_cgroup:
 #endif
 	if (clone_flags & CLONE_THREAD)
-		threadgroup_fork_read_unlock(current);
+		threadgroup_change_end(current);
 	cgroup_exit(p, cgroup_callbacks_done);
 	delayacct_tsk_free(p);
 	module_put(task_thread_info(p)->exec_domain->module);