Allow the concurrency level to be reduced.

Reviewed by:	davidxu
This commit is contained in:
Daniel Eischen 2003-08-30 12:09:16 +00:00
parent 59efee01a3
commit 9cd8ed99ee
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=119577
6 changed files with 126 additions and 70 deletions

View file

@@ -78,7 +78,7 @@ int
_thr_setconcurrency(int new_level)
{
struct pthread *curthread;
struct kse *newkse;
struct kse *newkse, *kse;
kse_critical_t crit;
int kse_count;
int i;
@@ -88,37 +88,63 @@ _thr_setconcurrency(int new_level)
curthread = _get_curthread();
/* Race condition, but so what. */
kse_count = _kse_initial->k_kseg->kg_ksecount;
for (i = kse_count; i < new_level; i++) {
newkse = _kse_alloc(curthread, 0);
if (newkse == NULL) {
DBG_MSG("Can't alloc new KSE.\n");
ret = EAGAIN;
break;
}
newkse->k_kseg = _kse_initial->k_kseg;
newkse->k_schedq = _kse_initial->k_schedq;
newkse->k_curthread = NULL;
crit = _kse_critical_enter();
KSE_SCHED_LOCK(curthread->kse, newkse->k_kseg);
TAILQ_INSERT_TAIL(&newkse->k_kseg->kg_kseq,
newkse, k_kgqe);
newkse->k_kseg->kg_ksecount++;
newkse->k_flags |= KF_STARTED;
KSE_SCHED_UNLOCK(curthread->kse, newkse->k_kseg);
if (kse_create(&newkse->k_kcb->kcb_kmbx, 0) != 0) {
if (new_level > kse_count) {
for (i = kse_count; i < new_level; i++) {
newkse = _kse_alloc(curthread, 0);
if (newkse == NULL) {
DBG_MSG("Can't alloc new KSE.\n");
ret = EAGAIN;
break;
}
newkse->k_kseg = _kse_initial->k_kseg;
newkse->k_schedq = _kse_initial->k_schedq;
newkse->k_curthread = NULL;
crit = _kse_critical_enter();
KSE_SCHED_LOCK(curthread->kse, newkse->k_kseg);
TAILQ_REMOVE(&newkse->k_kseg->kg_kseq,
TAILQ_INSERT_TAIL(&newkse->k_kseg->kg_kseq,
newkse, k_kgqe);
newkse->k_kseg->kg_ksecount--;
newkse->k_kseg->kg_ksecount++;
newkse->k_flags |= KF_STARTED;
KSE_SCHED_UNLOCK(curthread->kse, newkse->k_kseg);
_kse_critical_leave(crit);
_kse_free(curthread, newkse);
DBG_MSG("kse_create syscall failed.\n");
ret = EAGAIN;
break;
} else {
_kse_critical_leave(crit);
if (kse_create(&newkse->k_kcb->kcb_kmbx, 0) != 0) {
KSE_SCHED_LOCK(curthread->kse, newkse->k_kseg);
TAILQ_REMOVE(&newkse->k_kseg->kg_kseq,
newkse, k_kgqe);
newkse->k_kseg->kg_ksecount--;
KSE_SCHED_UNLOCK(curthread->kse,
newkse->k_kseg);
_kse_critical_leave(crit);
_kse_free(curthread, newkse);
DBG_MSG("kse_create syscall failed.\n");
ret = EAGAIN;
break;
} else {
_kse_critical_leave(crit);
}
}
} else if (new_level < kse_count) {
kse_count = 0;
crit = _kse_critical_enter();
KSE_SCHED_LOCK(curthread->kse, _kse_initial->k_kseg);
/* Count the number of active KSEs */
TAILQ_FOREACH(kse, &_kse_initial->k_kseg->kg_kseq, k_kgqe) {
if ((kse->k_flags & KF_TERMINATED) == 0)
kse_count++;
}
/* Reduce the number of active KSEs appropriately. */
kse = TAILQ_FIRST(&_kse_initial->k_kseg->kg_kseq);
while ((kse != NULL) && (kse_count > new_level)) {
if ((kse != _kse_initial) &&
((kse->k_flags & KF_TERMINATED) == 0)) {
kse->k_flags |= KF_TERMINATED;
kse_count--;
/* Wake up the KSE in case it is idle. */
kse_wakeup(&kse->k_kcb->kcb_kmbx);
}
kse = TAILQ_NEXT(kse, k_kgqe);
}
KSE_SCHED_UNLOCK(curthread->kse, _kse_initial->k_kseg);
_kse_critical_leave(crit);
}
return (ret);
}

View file

@@ -1047,7 +1047,8 @@ kse_sched_multi(struct kse_mailbox *kmbx)
/* Check if there are no threads ready to run: */
while (((curthread = KSE_RUNQ_FIRST(curkse)) == NULL) &&
(curkse->k_kseg->kg_threadcount != 0)) {
(curkse->k_kseg->kg_threadcount != 0) &&
((curkse->k_flags & KF_TERMINATED) == 0)) {
/*
* Wait for a thread to become active or until there are
* no more threads.
@@ -1059,7 +1060,8 @@ kse_sched_multi(struct kse_mailbox *kmbx)
}
/* Check for no more threads: */
if (curkse->k_kseg->kg_threadcount == 0) {
if ((curkse->k_kseg->kg_threadcount == 0) ||
((curkse->k_flags & KF_TERMINATED) != 0)) {
/*
* Normally this shouldn't return, but it will if there
* are other KSEs running that create new threads that
@@ -1874,12 +1876,12 @@ kse_fini(struct kse *kse)
PANIC("kse_exit()");
#endif
} else {
#ifdef NOT_YET
/*
* In future, we might allow program to kill
* kse in initial group.
* We allow program to kill kse in initial group (by
* lowering the concurrency).
*/
if (kse != _kse_initial) {
if ((kse != _kse_initial) &&
((kse->k_flags & KF_TERMINATED) != 0)) {
KSE_SCHED_LOCK(kse, kse->k_kseg);
TAILQ_REMOVE(&kse->k_kseg->kg_kseq, kse, k_kgqe);
kse->k_kseg->kg_ksecount--;
@@ -1891,7 +1893,6 @@ kse_fini(struct kse *kse)
/* Never returns. */
PANIC("kse_exit() failed for initial kseg");
}
#endif
KSE_SCHED_LOCK(kse, kse->k_kseg);
KSE_SET_IDLE(kse);
kse->k_kseg->kg_idle_kses++;

View file

@@ -192,6 +192,7 @@ struct kse {
int k_flags;
#define KF_STARTED 0x0001 /* kernel kse created */
#define KF_INITIALIZED 0x0002 /* initialized on 1st upcall */
#define KF_TERMINATED 0x0004
int k_idle; /* kse is idle */
int k_error; /* syscall errno in critical */
int k_cpu; /* CPU ID when bound */

View file

@@ -78,7 +78,7 @@ int
_thr_setconcurrency(int new_level)
{
struct pthread *curthread;
struct kse *newkse;
struct kse *newkse, *kse;
kse_critical_t crit;
int kse_count;
int i;
@@ -88,37 +88,63 @@ _thr_setconcurrency(int new_level)
curthread = _get_curthread();
/* Race condition, but so what. */
kse_count = _kse_initial->k_kseg->kg_ksecount;
for (i = kse_count; i < new_level; i++) {
newkse = _kse_alloc(curthread, 0);
if (newkse == NULL) {
DBG_MSG("Can't alloc new KSE.\n");
ret = EAGAIN;
break;
}
newkse->k_kseg = _kse_initial->k_kseg;
newkse->k_schedq = _kse_initial->k_schedq;
newkse->k_curthread = NULL;
crit = _kse_critical_enter();
KSE_SCHED_LOCK(curthread->kse, newkse->k_kseg);
TAILQ_INSERT_TAIL(&newkse->k_kseg->kg_kseq,
newkse, k_kgqe);
newkse->k_kseg->kg_ksecount++;
newkse->k_flags |= KF_STARTED;
KSE_SCHED_UNLOCK(curthread->kse, newkse->k_kseg);
if (kse_create(&newkse->k_kcb->kcb_kmbx, 0) != 0) {
if (new_level > kse_count) {
for (i = kse_count; i < new_level; i++) {
newkse = _kse_alloc(curthread, 0);
if (newkse == NULL) {
DBG_MSG("Can't alloc new KSE.\n");
ret = EAGAIN;
break;
}
newkse->k_kseg = _kse_initial->k_kseg;
newkse->k_schedq = _kse_initial->k_schedq;
newkse->k_curthread = NULL;
crit = _kse_critical_enter();
KSE_SCHED_LOCK(curthread->kse, newkse->k_kseg);
TAILQ_REMOVE(&newkse->k_kseg->kg_kseq,
TAILQ_INSERT_TAIL(&newkse->k_kseg->kg_kseq,
newkse, k_kgqe);
newkse->k_kseg->kg_ksecount--;
newkse->k_kseg->kg_ksecount++;
newkse->k_flags |= KF_STARTED;
KSE_SCHED_UNLOCK(curthread->kse, newkse->k_kseg);
_kse_critical_leave(crit);
_kse_free(curthread, newkse);
DBG_MSG("kse_create syscall failed.\n");
ret = EAGAIN;
break;
} else {
_kse_critical_leave(crit);
if (kse_create(&newkse->k_kcb->kcb_kmbx, 0) != 0) {
KSE_SCHED_LOCK(curthread->kse, newkse->k_kseg);
TAILQ_REMOVE(&newkse->k_kseg->kg_kseq,
newkse, k_kgqe);
newkse->k_kseg->kg_ksecount--;
KSE_SCHED_UNLOCK(curthread->kse,
newkse->k_kseg);
_kse_critical_leave(crit);
_kse_free(curthread, newkse);
DBG_MSG("kse_create syscall failed.\n");
ret = EAGAIN;
break;
} else {
_kse_critical_leave(crit);
}
}
} else if (new_level < kse_count) {
kse_count = 0;
crit = _kse_critical_enter();
KSE_SCHED_LOCK(curthread->kse, _kse_initial->k_kseg);
/* Count the number of active KSEs */
TAILQ_FOREACH(kse, &_kse_initial->k_kseg->kg_kseq, k_kgqe) {
if ((kse->k_flags & KF_TERMINATED) == 0)
kse_count++;
}
/* Reduce the number of active KSEs appropriately. */
kse = TAILQ_FIRST(&_kse_initial->k_kseg->kg_kseq);
while ((kse != NULL) && (kse_count > new_level)) {
if ((kse != _kse_initial) &&
((kse->k_flags & KF_TERMINATED) == 0)) {
kse->k_flags |= KF_TERMINATED;
kse_count--;
/* Wake up the KSE in case it is idle. */
kse_wakeup(&kse->k_kcb->kcb_kmbx);
}
kse = TAILQ_NEXT(kse, k_kgqe);
}
KSE_SCHED_UNLOCK(curthread->kse, _kse_initial->k_kseg);
_kse_critical_leave(crit);
}
return (ret);
}

View file

@@ -1047,7 +1047,8 @@ kse_sched_multi(struct kse_mailbox *kmbx)
/* Check if there are no threads ready to run: */
while (((curthread = KSE_RUNQ_FIRST(curkse)) == NULL) &&
(curkse->k_kseg->kg_threadcount != 0)) {
(curkse->k_kseg->kg_threadcount != 0) &&
((curkse->k_flags & KF_TERMINATED) == 0)) {
/*
* Wait for a thread to become active or until there are
* no more threads.
@@ -1059,7 +1060,8 @@ kse_sched_multi(struct kse_mailbox *kmbx)
}
/* Check for no more threads: */
if (curkse->k_kseg->kg_threadcount == 0) {
if ((curkse->k_kseg->kg_threadcount == 0) ||
((curkse->k_flags & KF_TERMINATED) != 0)) {
/*
* Normally this shouldn't return, but it will if there
* are other KSEs running that create new threads that
@@ -1874,12 +1876,12 @@ kse_fini(struct kse *kse)
PANIC("kse_exit()");
#endif
} else {
#ifdef NOT_YET
/*
* In future, we might allow program to kill
* kse in initial group.
* We allow program to kill kse in initial group (by
* lowering the concurrency).
*/
if (kse != _kse_initial) {
if ((kse != _kse_initial) &&
((kse->k_flags & KF_TERMINATED) != 0)) {
KSE_SCHED_LOCK(kse, kse->k_kseg);
TAILQ_REMOVE(&kse->k_kseg->kg_kseq, kse, k_kgqe);
kse->k_kseg->kg_ksecount--;
@@ -1891,7 +1893,6 @@ kse_fini(struct kse *kse)
/* Never returns. */
PANIC("kse_exit() failed for initial kseg");
}
#endif
KSE_SCHED_LOCK(kse, kse->k_kseg);
KSE_SET_IDLE(kse);
kse->k_kseg->kg_idle_kses++;

View file

@@ -192,6 +192,7 @@ struct kse {
int k_flags;
#define KF_STARTED 0x0001 /* kernel kse created */
#define KF_INITIALIZED 0x0002 /* initialized on 1st upcall */
#define KF_TERMINATED 0x0004
int k_idle; /* kse is idle */
int k_error; /* syscall errno in critical */
int k_cpu; /* CPU ID when bound */