Allocate KSEs and KSEGRPs separately and remove them from the proc structure.

The next step is to allow more than one of each to be allocated per process,
which would give multi-processor threads (when the rest of the infrastructure
is in place).

While doing this I noticed that libkvm and sys/kern/kern_proc.c:fill_kinfo_proc
are diverging more than they should; corrective action is needed soon.
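In outline, the change turns the KSE and KSEGRP from members embedded in struct proc into separately allocated objects linked off it. A minimal sketch of the before/after pattern, using only what the diffs below introduce (the wrapper function name is hypothetical; the real call site is proc_init() in kern_proc.c):

/* Before: the one kse/ksegrp lived inside struct proc itself:	*/
/*	ke = &p->p_kse;		kg = &p->p_ksegrp;		*/

/* After: both come from UMA zones and are linked onto the proc. */
static void
proc_attach_scheduling(struct proc *p, struct thread *td)
{
	struct ksegrp *kg = ksegrp_alloc();	/* uma_zalloc(ksegrp_zone, M_WAITOK) */
	struct kse *ke = kse_alloc();		/* uma_zalloc(kse_zone, M_WAITOK) */

	proc_linkup(p, kg, ke, td);	/* hang kg, ke and td off p */
}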
This commit is contained in:
Julian Elischer 2002-09-15 23:52:25 +00:00
parent 56c4ff5427
commit 4f0db5e08c
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=103367
20 changed files with 277 additions and 150 deletions

View file

@@ -120,6 +120,8 @@ kvm_proclist(kd, what, arg, p, bp, maxcnt)
struct pstats pstats;
struct ucred ucred;
struct thread mtd;
struct kse mke;
struct ksegrp mkg;
struct proc proc;
struct proc pproc;
struct timeval tv;
@@ -140,6 +142,23 @@ kvm_proclist(kd, what, arg, p, bp, maxcnt)
TAILQ_FIRST(&proc.p_threads));
return (-1);
}
if ((proc.p_flag & P_KSES) == 0) {
if (KREAD(kd,
(u_long)TAILQ_FIRST(&proc.p_ksegrps),
&mkg)) {
_kvm_err(kd, kd->program,
"can't read ksegrp at %x",
TAILQ_FIRST(&proc.p_ksegrps));
return (-1);
}
if (KREAD(kd,
(u_long)TAILQ_FIRST(&mkg.kg_kseq), &mke)) {
_kvm_err(kd, kd->program,
"can't read kse at %x",
TAILQ_FIRST(&mkg.kg_kseq));
return (-1);
}
}
}
if (KREAD(kd, (u_long)proc.p_ucred, &ucred) == 0) {
kp->ki_ruid = ucred.cr_ruid;
@@ -328,24 +347,23 @@ kvm_proclist(kd, what, arg, p, bp, maxcnt)
kp->ki_sigmask = proc.p_sigmask;
kp->ki_xstat = proc.p_xstat;
kp->ki_acflag = proc.p_acflag;
kp->ki_lock = proc.p_lock;
if (proc.p_state != PRS_ZOMBIE) {
kp->ki_pctcpu = proc.p_kse.ke_pctcpu;
kp->ki_estcpu = proc.p_ksegrp.kg_estcpu;
kp->ki_slptime = proc.p_ksegrp.kg_slptime;
kp->ki_swtime = proc.p_swtime;
kp->ki_flag = proc.p_flag;
kp->ki_sflag = proc.p_sflag;
kp->ki_wchan = mtd.td_wchan;
kp->ki_traceflag = proc.p_traceflag;
if (proc.p_state == PRS_NORMAL) {
if (TD_ON_RUNQ(&mtd) ||
TD_CAN_RUN(&mtd) ||
TD_IS_RUNNING(&mtd)) {
kp->ki_stat = SRUN;
} else if (mtd.td_state == TDS_INHIBITED) {
} else if (mtd.td_state ==
TDS_INHIBITED) {
if (P_SHOULDSTOP(&proc)) {
kp->ki_stat = SSTOP;
} else if (TD_IS_SLEEPING(&mtd)) {
} else if (
TD_IS_SLEEPING(&mtd)) {
kp->ki_stat = SSLEEP;
} else if (TD_ON_MUTEX(&mtd)) {
kp->ki_stat = SMTX;
@@ -356,15 +374,30 @@ kvm_proclist(kd, what, arg, p, bp, maxcnt)
} else {
kp->ki_stat = SIDL;
}
kp->ki_pri.pri_class = proc.p_ksegrp.kg_pri_class;
kp->ki_pri.pri_user = proc.p_ksegrp.kg_user_pri;
/* Stuff from the thread */
kp->ki_pri.pri_level = mtd.td_priority;
kp->ki_pri.pri_native = mtd.td_base_pri;
kp->ki_nice = proc.p_ksegrp.kg_nice;
kp->ki_lock = proc.p_lock;
kp->ki_rqindex = proc.p_kse.ke_rqindex;
kp->ki_oncpu = proc.p_kse.ke_oncpu;
kp->ki_lastcpu = mtd.td_lastcpu;
kp->ki_wchan = mtd.td_wchan;
if (!(proc.p_flag & P_KSES)) {
/* stuff from the ksegrp */
kp->ki_slptime = mkg.kg_slptime;
kp->ki_pri.pri_class = mkg.kg_pri_class;
kp->ki_pri.pri_user = mkg.kg_user_pri;
kp->ki_nice = mkg.kg_nice;
kp->ki_estcpu = mkg.kg_estcpu;
/* Stuff from the kse */
kp->ki_pctcpu = mke.ke_pctcpu;
kp->ki_rqindex = mke.ke_rqindex;
kp->ki_oncpu = mke.ke_oncpu;
} else {
kp->ki_oncpu = -1;
kp->ki_lastcpu = -1;
kp->ki_tdflags = -1;
/* All the rest are 0 for now */
}
} else {
kp->ki_stat = SZOMB;
}
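For reference, the pointer chase the new code performs: each KREAD() copies a kernel structure into a local buffer, and TAILQ_FIRST() on the copy then yields the kernel address of the next structure to read. A minimal sketch in terms of the underlying kvm_read() (paddr, the kernel address of the process, is an assumed variable; error handling elided):

struct proc proc;	/* userland copies of the kernel structures */
struct ksegrp mkg;
struct kse mke;

/* the proc itself, then its first ksegrp, then that group's first kse */
kvm_read(kd, (u_long)paddr, &proc, sizeof(proc));
kvm_read(kd, (u_long)TAILQ_FIRST(&proc.p_ksegrps), &mkg, sizeof(mkg));
kvm_read(kd, (u_long)TAILQ_FIRST(&mkg.kg_kseq), &mke, sizeof(mke));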

View file

@@ -884,7 +884,7 @@ alpha_init(pfn, ptb, bim, bip, biv)
}
proc_linkup(&proc0, &proc0.p_ksegrp, &proc0.p_kse, &thread0);
proc_linkup(&proc0, &ksegrp0, &kse0, &thread0);
/*
* Init mapping for u page(s) for proc 0
*/

View file

@@ -1640,7 +1640,7 @@ init386(first)
* This may be done better later if it gets more high level
* components in it. If so just link td->td_proc here.
*/
proc_linkup(&proc0, &proc0.p_ksegrp, &proc0.p_kse, &thread0);
proc_linkup(&proc0, &ksegrp0, &kse0, &thread0);
metadata_missing = 0;
if (bootinfo.bi_modulep) {

View file

@@ -1640,7 +1640,7 @@ init386(first)
* This may be done better later if it gets more high level
* components in it. If so just link td->td_proc here.
*/
proc_linkup(&proc0, &proc0.p_ksegrp, &proc0.p_kse, &thread0);
proc_linkup(&proc0, &ksegrp0, &kse0, &thread0);
metadata_missing = 0;
if (bootinfo.bi_modulep) {

View file

@@ -675,7 +675,7 @@ ia64_init(u_int64_t arg1, u_int64_t arg2)
}
proc_linkup(&proc0, &proc0.p_ksegrp, &proc0.p_kse, &thread0);
proc_linkup(&proc0, &ksegrp0, &kse0, &thread0);
/*
* Init mapping for u page(s) for proc 0
*/

View file

@@ -87,6 +87,8 @@ static struct session session0;
static struct pgrp pgrp0;
struct proc proc0;
struct thread thread0;
struct kse kse0;
struct ksegrp ksegrp0;
static struct procsig procsig0;
static struct filedesc0 filedesc0;
static struct plimit limit0;
@@ -311,6 +313,8 @@ proc0_init(void *dummy __unused)
GIANT_REQUIRED;
p = &proc0;
td = &thread0;
ke = &kse0;
kg = &ksegrp0;
/*
* Initialize magic number.
@@ -357,8 +361,6 @@ proc0_init(void *dummy __unused)
* I would have done it here.. maybe this means this should be
* done earlier too.
*/
ke = &proc0.p_kse; /* XXXKSE */
kg = &proc0.p_ksegrp; /* XXXKSE */
p->p_flag = P_SYSTEM;
p->p_sflag = PS_INMEM;
p->p_state = PRS_NORMAL;
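proc0 and its pieces stay statically allocated rather than coming from the new zones, presumably because proc0 is assembled before the allocator is usable; every machine-dependent startup routine in this commit then links the same four statics. Schematically (declarations as in kern_init.c above):

struct proc	proc0;
struct thread	thread0;
struct kse	kse0;
struct ksegrp	ksegrp0;

/* alpha_init(), init386(), ia64_init(), powerpc_init(), sparc64_init() */
proc_linkup(&proc0, &ksegrp0, &kse0, &thread0);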

View file

@@ -620,7 +620,7 @@ wait1(td, uap, compat)
mtx_lock_spin(&sched_lock);
curthread->td_ksegrp->kg_estcpu =
ESTCPULIM(curthread->td_ksegrp->kg_estcpu +
p->p_ksegrp.kg_estcpu);
FIRST_KSEGRP_IN_PROC(p)->kg_estcpu);
mtx_unlock_spin(&sched_lock);
}
@@ -728,7 +728,7 @@ wait1(td, uap, compat)
/* Free the KSE spare thread. */
if (ke->ke_tdspare != NULL) {
thread_free(ke->ke_tdspare);
p->p_kse.ke_tdspare = NULL;
ke->ke_tdspare = NULL;
}
}
}

View file

@@ -634,12 +634,10 @@ fork1(td, flags, procp)
}
/*
* set priority of child to be that of parent
* XXXKSE hey! copying the estcpu seems dodgy.. should split it..
* set priority of child to be that of parent.
* XXXKSE this needs redefining..
*/
mtx_lock_spin(&sched_lock);
p2->p_ksegrp.kg_estcpu = p1->p_ksegrp.kg_estcpu;
mtx_unlock_spin(&sched_lock);
kg2->kg_estcpu = td->td_ksegrp->kg_estcpu;
/*
* This begins the section where we must prevent the parent

View file

@@ -54,28 +54,22 @@
#include <machine/frame.h>
/*
* Thread related storage.
* KSEGRP related storage.
*/
static uma_zone_t ksegrp_zone;
static uma_zone_t kse_zone;
static uma_zone_t thread_zone;
static int allocated_threads;
static int active_threads;
static int cached_threads;
/* DEBUG ONLY */
SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");
SYSCTL_INT(_kern_threads, OID_AUTO, active, CTLFLAG_RD,
&active_threads, 0, "Number of active threads in system.");
SYSCTL_INT(_kern_threads, OID_AUTO, cached, CTLFLAG_RD,
&cached_threads, 0, "Number of threads in thread cache.");
SYSCTL_INT(_kern_threads, OID_AUTO, allocated, CTLFLAG_RD,
&allocated_threads, 0, "Number of threads in zone.");
static int oiks_debug = 1; /* 0 disable, 1 printf, 2 enter debugger */
SYSCTL_INT(_kern_threads, OID_AUTO, oiks, CTLFLAG_RW,
&oiks_debug, 0, "OIKS thread debug");
static int max_threads_per_proc = 4;
SYSCTL_INT(_kern_threads, OID_AUTO, max_per_proc, CTLFLAG_RW,
&max_threads_per_proc, 0, "Limit on threads per proc");
#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))
struct threadqueue zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
@@ -97,8 +91,6 @@ thread_ctor(void *mem, int size, void *arg)
td = (struct thread *)mem;
td->td_state = TDS_INACTIVE;
td->td_flags |= TDF_UNBOUND;
cached_threads--; /* XXXSMP */
active_threads++; /* XXXSMP */
}
/*
@@ -134,10 +126,6 @@ thread_dtor(void *mem, int size, void *arg)
/* NOTREACHED */
}
#endif
/* Update counters. */
active_threads--; /* XXXSMP */
cached_threads++; /* XXXSMP */
}
/*
@@ -156,8 +144,6 @@ thread_init(void *mem, int size)
pmap_new_thread(td);
mtx_unlock(&Giant);
cpu_thread_setup(td);
cached_threads++; /* XXXSMP */
allocated_threads++; /* XXXSMP */
}
/*
@@ -173,8 +159,6 @@ thread_fini(void *mem, int size)
td = (struct thread *)mem;
pmap_dispose_thread(td);
cached_threads--; /* XXXSMP */
allocated_threads--; /* XXXSMP */
}
/*
@@ -187,6 +171,12 @@ threadinit(void)
thread_zone = uma_zcreate("THREAD", sizeof (struct thread),
thread_ctor, thread_dtor, thread_init, thread_fini,
UMA_ALIGN_CACHE, 0);
ksegrp_zone = uma_zcreate("KSEGRP", sizeof (struct ksegrp),
NULL, NULL, NULL, NULL,
UMA_ALIGN_CACHE, 0);
kse_zone = uma_zcreate("KSE", sizeof (struct kse),
NULL, NULL, NULL, NULL,
UMA_ALIGN_CACHE, 0);
}
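Unlike thread_zone, the two new zones pass NULL for all four UMA callbacks. For orientation, the slots in uma_zcreate() are, in order (the call matches the usage above; the commentary is mine):

kse_zone = uma_zcreate("KSE", sizeof(struct kse),
    NULL,	/* ctor: would run on every uma_zalloc()		*/
    NULL,	/* dtor: would run on every uma_zfree()			*/
    NULL,	/* init: runs once when an item is first slab-backed	*/
    NULL,	/* fini: runs once when the memory returns to the VM	*/
    UMA_ALIGN_CACHE, 0);
/* With no hooks, items come back raw, so callers of kse_alloc() and
   ksegrp_alloc() must initialize every field themselves. */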
/*
@@ -225,6 +215,24 @@ thread_reap(void)
}
}
/*
* Allocate a ksegrp.
*/
struct ksegrp *
ksegrp_alloc(void)
{
return (uma_zalloc(ksegrp_zone, M_WAITOK));
}
/*
* Allocate a kse.
*/
struct kse *
kse_alloc(void)
{
return (uma_zalloc(kse_zone, M_WAITOK));
}
/*
* Allocate a thread.
*/
@@ -235,6 +243,24 @@ thread_alloc(void)
return (uma_zalloc(thread_zone, M_WAITOK));
}
/*
* Deallocate a ksegrp.
*/
void
ksegrp_free(struct ksegrp *kg)
{
uma_zfree(ksegrp_zone, kg);
}
/*
* Deallocate a kse.
*/
void
kse_free(struct kse *ke)
{
uma_zfree(kse_zone, ke);
}
/*
* Deallocate a thread.
*/
@@ -387,7 +413,7 @@ thread_link(struct thread *td, struct ksegrp *kg)
TAILQ_INSERT_HEAD(&kg->kg_threads, td, td_kglist);
p->p_numthreads++;
kg->kg_numthreads++;
if (oiks_debug && p->p_numthreads > 4) {
if (oiks_debug && p->p_numthreads > max_threads_per_proc) {
printf("OIKS %d\n", p->p_numthreads);
if (oiks_debug > 1)
Debugger("OIKS");

View file

@@ -98,10 +98,6 @@ struct mtx pargs_ref_lock;
uma_zone_t proc_zone;
uma_zone_t ithread_zone;
static int active_procs;
static int cached_procs;
static int allocated_procs;
int kstack_pages = KSTACK_PAGES;
int uarea_pages = UAREA_PAGES;
SYSCTL_INT(_kern, OID_AUTO, kstack_pages, CTLFLAG_RD, &kstack_pages, 0, "");
@@ -142,8 +138,6 @@ proc_ctor(void *mem, int size, void *arg)
KASSERT((size == sizeof(struct proc)),
("size mismatch: %d != %d\n", size, (int)sizeof(struct proc)));
p = (struct proc *)mem;
cached_procs--;
active_procs++;
}
/*
@@ -176,10 +170,6 @@ proc_dtor(void *mem, int size, void *arg)
* on the state coming in here from wait4().
*/
proc_linkup(p, kg, ke, td);
/* Stats only */
active_procs--;
cached_procs++;
}
/*
@@ -198,11 +188,9 @@ proc_init(void *mem, int size)
p = (struct proc *)mem;
vm_proc_new(p);
td = thread_alloc();
ke = &p->p_kse;
kg = &p->p_ksegrp;
ke = kse_alloc();
kg = ksegrp_alloc();
proc_linkup(p, kg, ke, td);
cached_procs++;
allocated_procs++;
}
/*
@@ -212,14 +200,25 @@ static void
proc_fini(void *mem, int size)
{
struct proc *p;
struct thread *td;
struct ksegrp *kg;
struct kse *ke;
KASSERT((size == sizeof(struct proc)),
("size mismatch: %d != %d\n", size, (int)sizeof(struct proc)));
p = (struct proc *)mem;
KASSERT((p->p_numthreads == 1),
("bad number of threads in freeing process"));
td = FIRST_THREAD_IN_PROC(p);
KASSERT((td != NULL), ("proc_fini: bad thread pointer"));
kg = FIRST_KSEGRP_IN_PROC(p);
KASSERT((kg != NULL), ("proc_fini: bad kg pointer"));
ke = FIRST_KSE_IN_KSEGRP(kg);
KASSERT((ke != NULL), ("proc_fini: bad ke pointer"));
vm_proc_dispose(p);
cached_procs--;
allocated_procs--;
thread_free(FIRST_THREAD_IN_PROC(p));
thread_free(td);
ksegrp_free(kg);
kse_free(ke);
}
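proc_init() and proc_fini() now bracket the life of a cached proc: the singleton thread, ksegrp and kse are allocated once when the zone item is created and only released when it is destroyed, which is the invariant the new KASSERTs verify. A condensed sketch of the pairing (helper names are hypothetical; the real hooks also call vm_proc_new()/vm_proc_dispose()):

static void
proc_init_sketch(struct proc *p)
{
	/* item created: allocate the singletons and hang them off p */
	proc_linkup(p, ksegrp_alloc(), kse_alloc(), thread_alloc());
}

static void
proc_fini_sketch(struct proc *p)
{
	struct ksegrp *kg = FIRST_KSEGRP_IN_PROC(p);

	/* item destroyed: the singletons go back to their zones */
	thread_free(FIRST_THREAD_IN_PROC(p));
	kse_free(FIRST_KSE_IN_KSEGRP(kg));
	ksegrp_free(kg);
}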
/*
@@ -787,6 +786,8 @@ fill_kinfo_proc(p, kp)
struct kinfo_proc *kp;
{
struct thread *td;
struct kse *ke;
struct ksegrp *kg;
struct tty *tp;
struct session *sp;
struct timeval tv;
@@ -862,13 +863,14 @@ fill_kinfo_proc(p, kp)
}
if (p->p_state == PRS_NORMAL) { /* XXXKSE very approximate */
if ((TD_ON_RUNQ(td)) ||
(TD_IS_RUNNING(td))) {
if (TD_ON_RUNQ(td) ||
TD_CAN_RUN(td) ||
TD_IS_RUNNING(td)) {
kp->ki_stat = SRUN;
} else if (TD_IS_SLEEPING(td)) {
kp->ki_stat = SSLEEP;
} else if (P_SHOULDSTOP(p)) {
kp->ki_stat = SSTOP;
} else if (TD_IS_SLEEPING(td)) {
kp->ki_stat = SSLEEP;
} else if (TD_ON_MUTEX(td)) {
kp->ki_stat = SMTX;
} else {
@@ -883,33 +885,43 @@ fill_kinfo_proc(p, kp)
kp->ki_pid = p->p_pid;
/* vvv XXXKSE */
if (!(p->p_flag & P_KSES)) {
kg = td->td_ksegrp;
ke = td->td_kse;
KASSERT((ke != NULL), ("fill_kinfo_proc: Null KSE"));
bintime2timeval(&p->p_runtime, &tv);
kp->ki_runtime = tv.tv_sec * (u_int64_t)1000000 + tv.tv_usec;
kp->ki_pctcpu = p->p_kse.ke_pctcpu;
kp->ki_estcpu = p->p_ksegrp.kg_estcpu;
kp->ki_slptime = p->p_ksegrp.kg_slptime;
kp->ki_runtime =
tv.tv_sec * (u_int64_t)1000000 + tv.tv_usec;
/* things in the KSE GROUP */
kp->ki_estcpu = kg->kg_estcpu;
kp->ki_slptime = kg->kg_slptime;
kp->ki_pri.pri_user = kg->kg_user_pri;
kp->ki_pri.pri_class = kg->kg_pri_class;
kp->ki_nice = kg->kg_nice;
/* Things in the thread */
kp->ki_wchan = td->td_wchan;
kp->ki_pri.pri_level = td->td_priority;
kp->ki_pri.pri_user = p->p_ksegrp.kg_user_pri;
kp->ki_pri.pri_class = p->p_ksegrp.kg_pri_class;
kp->ki_pri.pri_native = td->td_base_pri;
kp->ki_nice = p->p_ksegrp.kg_nice;
kp->ki_rqindex = p->p_kse.ke_rqindex;
kp->ki_oncpu = p->p_kse.ke_oncpu;
kp->ki_lastcpu = td->td_lastcpu;
kp->ki_tdflags = td->td_flags;
kp->ki_pcb = td->td_pcb;
kp->ki_kstack = (void *)td->td_kstack;
/* Things in the kse */
kp->ki_rqindex = ke->ke_rqindex;
kp->ki_oncpu = ke->ke_oncpu;
kp->ki_pctcpu = ke->ke_pctcpu;
} else {
kp->ki_oncpu = -1;
kp->ki_lastcpu = -1;
kp->ki_tdflags = -1;
/* All the reast are 0 */
/* All the rest are 0 for now */
}
/* ^^^ XXXKSE */
} else {
kp->ki_stat = SZOMB;
}
/* ^^^ XXXKSE */
mtx_unlock_spin(&sched_lock);
sp = NULL;
tp = NULL;
@@ -1255,11 +1267,3 @@ SYSCTL_NODE(_kern_proc, KERN_PROC_PID, pid, CTLFLAG_RD,
SYSCTL_NODE(_kern_proc, KERN_PROC_ARGS, args, CTLFLAG_RW | CTLFLAG_ANYBODY,
sysctl_kern_proc_args, "Process argument list");
SYSCTL_INT(_kern_proc, OID_AUTO, active, CTLFLAG_RD,
&active_procs, 0, "Number of active procs in system.");
SYSCTL_INT(_kern_proc, OID_AUTO, cached, CTLFLAG_RD,
&cached_procs, 0, "Number of procs in proc cache.");
SYSCTL_INT(_kern_proc, OID_AUTO, allocated, CTLFLAG_RD,
&allocated_procs, 0, "Number of procs in zone.");

View file

@@ -87,9 +87,10 @@ getpriority(td, uap)
struct thread *td;
register struct getpriority_args *uap;
{
register struct proc *p;
register int low = PRIO_MAX + 1;
struct proc *p;
int low = PRIO_MAX + 1;
int error = 0;
struct ksegrp *kg;
mtx_lock(&Giant);
@@ -101,8 +102,12 @@ getpriority(td, uap)
p = pfind(uap->who);
if (p == NULL)
break;
if (p_cansee(td, p) == 0)
low = p->p_ksegrp.kg_nice /* XXXKSE */ ;
if (p_cansee(td, p) == 0) {
FOREACH_KSEGRP_IN_PROC(p, kg) {
if (kg->kg_nice < low)
low = kg->kg_nice;
}
}
PROC_UNLOCK(p);
}
break;
@@ -124,8 +129,12 @@ getpriority(td, uap)
sx_sunlock(&proctree_lock);
LIST_FOREACH(p, &pg->pg_members, p_pglist) {
PROC_LOCK(p);
if (!p_cansee(td, p) && p->p_ksegrp.kg_nice /* XXXKSE */ < low)
low = p->p_ksegrp.kg_nice /* XXXKSE */ ;
if (!p_cansee(td, p)) {
FOREACH_KSEGRP_IN_PROC(p, kg) {
if (kg->kg_nice < low)
low = kg->kg_nice;
}
}
PROC_UNLOCK(p);
}
PGRP_UNLOCK(pg);
@@ -139,9 +148,12 @@ getpriority(td, uap)
LIST_FOREACH(p, &allproc, p_list) {
PROC_LOCK(p);
if (!p_cansee(td, p) &&
p->p_ucred->cr_uid == uap->who &&
p->p_ksegrp.kg_nice /* XXXKSE */ < low)
low = p->p_ksegrp.kg_nice /* XXXKSE */ ;
p->p_ucred->cr_uid == uap->who) {
FOREACH_KSEGRP_IN_PROC(p, kg) {
if (kg->kg_nice < low)
low = kg->kg_nice;
}
}
PROC_UNLOCK(p);
}
sx_sunlock(&allproc_lock);
@@ -250,25 +262,41 @@ setpriority(td, uap)
return (error);
}
/*
* Set "nice" for a process.  Doesn't really understand threaded processes
* well, but does try.  Has the unfortunate side effect of making all the
* NICE values for a process's ksegrps the same.  This suggests that NICE
* values should be stored as a process nice and deltas for the ksegrps
* (but not yet).
*/
static int
donice(td, chgp, n)
struct thread *td;
register struct proc *chgp;
register int n;
donice(struct thread *td, struct proc *p, int n)
{
int error;
int low = PRIO_MAX + 1;
struct ksegrp *kg;
PROC_LOCK_ASSERT(chgp, MA_OWNED);
if ((error = p_cansched(td, chgp)))
PROC_LOCK_ASSERT(p, MA_OWNED);
if ((error = p_cansched(td, p)))
return (error);
if (n > PRIO_MAX)
n = PRIO_MAX;
if (n < PRIO_MIN)
n = PRIO_MIN;
if (n < chgp->p_ksegrp.kg_nice /* XXXKSE */ && suser(td))
/*
* Unprivileged users may only set the nice to the lowest existing
* nice or above; e.g. with nices of 4, 3 and 2, nicing to 3 is
* allowed but nicing to 1 is not.
*/
FOREACH_KSEGRP_IN_PROC(p, kg) {
if (kg->kg_nice < low)
low = kg->kg_nice;
}
if (n < low && suser(td))
return (EACCES);
chgp->p_ksegrp.kg_nice /* XXXKSE */ = n;
(void)resetpriority(&chgp->p_ksegrp); /* XXXKSE */
FOREACH_KSEGRP_IN_PROC(p, kg) {
kg->kg_nice = n;
(void)resetpriority(kg);
}
return (0);
}
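The alternative the comment above suggests, as a purely hypothetical sketch (p_nice and kg_nice_delta are invented names, not part of this commit):

/* One nice per process plus a delta per ksegrp, so setting the
   process nice would no longer flatten every group to one value. */
static __inline int
ksegrp_effective_nice(struct proc *p, struct ksegrp *kg)
{
	return (p->p_nice + kg->kg_nice_delta);	/* invented fields */
}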
@@ -317,7 +345,7 @@ rtprio(td, uap)
if ((error = p_cansee(td, p)))
break;
mtx_lock_spin(&sched_lock);
pri_to_rtp(&p->p_ksegrp /* XXXKSE */ , &rtp);
pri_to_rtp(FIRST_KSEGRP_IN_PROC(p), &rtp);
mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(p);
return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));
@@ -348,7 +376,7 @@ rtprio(td, uap)
}
}
mtx_lock_spin(&sched_lock);
error = rtp_to_pri(&rtp, &p->p_ksegrp);
error = rtp_to_pri(&rtp, FIRST_KSEGRP_IN_PROC(p));
mtx_unlock_spin(&sched_lock);
break;
default:

View file

@@ -189,12 +189,19 @@ cursig(struct thread *td)
void
signotify(struct proc *p)
{
struct kse *ke;
struct ksegrp *kg;
PROC_LOCK_ASSERT(p, MA_OWNED);
mtx_lock_spin(&sched_lock);
if (SIGPENDING(p)) {
p->p_sflag |= PS_NEEDSIGCHK;
p->p_kse.ke_flags |= KEF_ASTPENDING; /* XXXKSE */
/* XXXKSE for now punish all KSEs */
FOREACH_KSEGRP_IN_PROC(p, kg) {
FOREACH_KSE_IN_GROUP(kg, ke) {
ke->ke_flags |= KEF_ASTPENDING;
}
}
}
mtx_unlock_spin(&sched_lock);
}

View file

@@ -659,13 +659,10 @@ thread_sanity_check(struct thread *td)
kg = td->td_ksegrp;
ke = td->td_kse;
if (kg != &p->p_ksegrp) {
panic ("wrong ksegrp");
}
if (ke) {
if (ke != &p->p_kse) {
panic("wrong kse");
if (p != ke->ke_proc) {
panic("wrong proc");
}
if (ke->ke_thread != td) {
panic("wrong thread");

View file

@@ -54,28 +54,22 @@
#include <machine/frame.h>
/*
* Thread related storage.
* KSEGRP related storage.
*/
static uma_zone_t ksegrp_zone;
static uma_zone_t kse_zone;
static uma_zone_t thread_zone;
static int allocated_threads;
static int active_threads;
static int cached_threads;
/* DEBUG ONLY */
SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");
SYSCTL_INT(_kern_threads, OID_AUTO, active, CTLFLAG_RD,
&active_threads, 0, "Number of active threads in system.");
SYSCTL_INT(_kern_threads, OID_AUTO, cached, CTLFLAG_RD,
&cached_threads, 0, "Number of threads in thread cache.");
SYSCTL_INT(_kern_threads, OID_AUTO, allocated, CTLFLAG_RD,
&allocated_threads, 0, "Number of threads in zone.");
static int oiks_debug = 1; /* 0 disable, 1 printf, 2 enter debugger */
SYSCTL_INT(_kern_threads, OID_AUTO, oiks, CTLFLAG_RW,
&oiks_debug, 0, "OIKS thread debug");
static int max_threads_per_proc = 4;
SYSCTL_INT(_kern_threads, OID_AUTO, max_per_proc, CTLFLAG_RW,
&max_threads_per_proc, 0, "Limit on threads per proc");
#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))
struct threadqueue zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
@@ -97,8 +91,6 @@ thread_ctor(void *mem, int size, void *arg)
td = (struct thread *)mem;
td->td_state = TDS_INACTIVE;
td->td_flags |= TDF_UNBOUND;
cached_threads--; /* XXXSMP */
active_threads++; /* XXXSMP */
}
/*
@@ -134,10 +126,6 @@ thread_dtor(void *mem, int size, void *arg)
/* NOTREACHED */
}
#endif
/* Update counters. */
active_threads--; /* XXXSMP */
cached_threads++; /* XXXSMP */
}
/*
@@ -156,8 +144,6 @@ thread_init(void *mem, int size)
pmap_new_thread(td);
mtx_unlock(&Giant);
cpu_thread_setup(td);
cached_threads++; /* XXXSMP */
allocated_threads++; /* XXXSMP */
}
/*
@@ -173,8 +159,6 @@ thread_fini(void *mem, int size)
td = (struct thread *)mem;
pmap_dispose_thread(td);
cached_threads--; /* XXXSMP */
allocated_threads--; /* XXXSMP */
}
/*
@@ -187,6 +171,12 @@ threadinit(void)
thread_zone = uma_zcreate("THREAD", sizeof (struct thread),
thread_ctor, thread_dtor, thread_init, thread_fini,
UMA_ALIGN_CACHE, 0);
ksegrp_zone = uma_zcreate("KSEGRP", sizeof (struct ksegrp),
NULL, NULL, NULL, NULL,
UMA_ALIGN_CACHE, 0);
kse_zone = uma_zcreate("KSE", sizeof (struct kse),
NULL, NULL, NULL, NULL,
UMA_ALIGN_CACHE, 0);
}
/*
@@ -225,6 +215,24 @@ thread_reap(void)
}
}
/*
* Allocate a ksegrp.
*/
struct ksegrp *
ksegrp_alloc(void)
{
return (uma_zalloc(ksegrp_zone, M_WAITOK));
}
/*
* Allocate a kse.
*/
struct kse *
kse_alloc(void)
{
return (uma_zalloc(kse_zone, M_WAITOK));
}
/*
* Allocate a thread.
*/
@@ -235,6 +243,24 @@ thread_alloc(void)
return (uma_zalloc(thread_zone, M_WAITOK));
}
/*
* Deallocate a ksegrp.
*/
void
ksegrp_free(struct ksegrp *kg)
{
uma_zfree(ksegrp_zone, kg);
}
/*
* Deallocate a kse.
*/
void
kse_free(struct kse *ke)
{
uma_zfree(kse_zone, ke);
}
/*
* Deallocate a thread.
*/
@@ -387,7 +413,7 @@ thread_link(struct thread *td, struct ksegrp *kg)
TAILQ_INSERT_HEAD(&kg->kg_threads, td, td_kglist);
p->p_numthreads++;
kg->kg_numthreads++;
if (oiks_debug && p->p_numthreads > 4) {
if (oiks_debug && p->p_numthreads > max_threads_per_proc) {
printf("OIKS %d\n", p->p_numthreads);
if (oiks_debug > 1)
Debugger("OIKS");

View file

@@ -1704,7 +1704,7 @@ init386(first)
* This may be done better later if it gets more high level
* components in it. If so just link td->td_proc here.
*/
proc_linkup(&proc0, &proc0.p_ksegrp, &proc0.p_kse, &thread0);
proc_linkup(&proc0, &ksegrp0, &kse0, &thread0);
#ifdef PC98
/*

View file

@@ -1704,7 +1704,7 @@ init386(first)
* This may be done better later if it gets more high level
* components in it. If so just link td->td_proc here.
*/
proc_linkup(&proc0, &proc0.p_ksegrp, &proc0.p_kse, &thread0);
proc_linkup(&proc0, &ksegrp0, &kse0, &thread0);
#ifdef PC98
/*

View file

@@ -392,7 +392,7 @@ powerpc_init(u_int startkernel, u_int endkernel, u_int basekernel, void *mdp)
/*
* Start initializing proc0 and thread0.
*/
proc_linkup(&proc0, &proc0.p_ksegrp, &proc0.p_kse, &thread0);
proc_linkup(&proc0, &ksegrp0, &kse0, &thread0);
proc0.p_uarea = (struct user *)uarea0;
proc0.p_stats = &proc0.p_uarea->u_stats;
thread0.td_frame = &frame0;
@@ -507,7 +507,7 @@ powerpc_init(u_int startkernel, u_int endkernel, u_int basekernel, char *args)
pmap_setavailmem(startkernel, endkernel);
proc_linkup(&proc0, &proc0.p_ksegrp, &proc0.p_kse, &thread0);
proc_linkup(&proc0, &ksegrp0, &kse0, &thread0);
proc0uarea = (struct user *)pmap_steal_memory(UAREA_PAGES * PAGE_SIZE);
proc0kstack = pmap_steal_memory(KSTACK_PAGES * PAGE_SIZE);

View file

@@ -392,7 +392,7 @@ powerpc_init(u_int startkernel, u_int endkernel, u_int basekernel, void *mdp)
/*
* Start initializing proc0 and thread0.
*/
proc_linkup(&proc0, &proc0.p_ksegrp, &proc0.p_kse, &thread0);
proc_linkup(&proc0, &ksegrp0, &kse0, &thread0);
proc0.p_uarea = (struct user *)uarea0;
proc0.p_stats = &proc0.p_uarea->u_stats;
thread0.td_frame = &frame0;
@@ -507,7 +507,7 @@ powerpc_init(u_int startkernel, u_int endkernel, u_int basekernel, char *args)
pmap_setavailmem(startkernel, endkernel);
proc_linkup(&proc0, &proc0.p_ksegrp, &proc0.p_kse, &thread0);
proc_linkup(&proc0, &ksegrp0, &kse0, &thread0);
proc0uarea = (struct user *)pmap_steal_memory(UAREA_PAGES * PAGE_SIZE);
proc0kstack = pmap_steal_memory(KSTACK_PAGES * PAGE_SIZE);

View file

@@ -297,7 +297,7 @@ sparc64_init(caddr_t mdp, u_long o1, u_long o2, u_long o3, ofw_vec_t *vec)
/*
* Initialize proc0 stuff (p_contested needs to be done early).
*/
proc_linkup(&proc0, &proc0.p_ksegrp, &proc0.p_kse, &thread0);
proc_linkup(&proc0, &ksegrp0, &kse0, &thread0);
proc0.p_md.md_sigtramp = NULL;
proc0.p_md.md_utrap = NULL;
proc0.p_uarea = (struct user *)uarea0;

View file

@@ -507,8 +507,8 @@ struct proc {
struct vm_object *p_upages_obj; /* (a) Upages object. */
struct procsig *p_procsig; /* (c) Signal actions, state (CPU). */
struct ksegrp p_ksegrp;
struct kse p_kse;
/*struct ksegrp p_ksegrp;
struct kse p_kse; */
/*
* The following don't make too much sense..
@@ -800,6 +800,8 @@ extern struct sx proctree_lock;
extern struct mtx pargs_ref_lock;
extern struct proc proc0; /* Process slot for swapper. */
extern struct thread thread0; /* Primary thread in proc0 */
extern struct ksegrp ksegrp0; /* Primary ksegrp in proc0 */
extern struct kse kse0; /* Primary kse in proc0 */
extern int hogticks; /* Limit on kernel cpu hogs. */
extern int nprocs, maxproc; /* Current and max number of procs. */
extern int maxprocperuid; /* Max procs per uid. */
@@ -890,6 +892,10 @@ void cpu_set_fork_handler(struct thread *, void (*)(void *), void *);
void cpu_wait(struct proc *);
/* New in KSE. */
struct ksegrp *ksegrp_alloc(void);
void ksegrp_free(struct ksegrp *kg);
struct kse *kse_alloc(void);
void kse_free(struct kse *ke);
struct thread *thread_alloc(void);
void thread_free(struct thread *td);
int cpu_export_context(struct thread *td);