init_main.c subr_autoconf.c:

Add support for "interrupt driven configuration hooks".
	A component of the kernel can register a hook, most likely
	during auto-configuration, and receive a callback once
	interrupt services are available.  This callback will occur before
	the root and dump devices are configured, so the configuration
	task can affect the selection of those two devices or complete
	any tasks that need to be performed prior to launching init.
	System boot is postponed so long as a hook is registered.  The
	hook owner is responsible for removing the hook once its task
	is complete or the system boot can continue.
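	As a rough illustration (the foo_* names are invented; the
	ich_* field names and the establish/disestablish calls follow
	the subr_autoconf.c changes below), a driver that needs
	working interrupts to finish probing might do:

	static struct intr_config_hook foo_hook;

	static void
	foo_intr_config(void *arg)
	{
		/*
		 * Setup that requires interrupt services runs here,
		 * before the root and dump devices are chosen.
		 */
		config_intrhook_disestablish(&foo_hook);
	}

	static int
	foo_attach(void *softc)
	{
		foo_hook.ich_func = foo_intr_config;
		foo_hook.ich_arg = softc;
		/* Returns non-zero if the hook is already established. */
		if (config_intrhook_establish(&foo_hook) != 0)
			return (1);
		return (0);
	}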

kern_acct.c kern_clock.c kern_exit.c kern_synch.c kern_time.c:
	Change the interface and implementation of the kernel callout
	service.  The new implementation is based on the work of
	Adam M. Costello and George Varghese, published in a technical
	report entitled "Redesigning the BSD Callout and Timer Facilities".
	The interface used in FreeBSD is a little different from the one
	outlined in the paper.  The new function prototypes are:

	struct callout_handle timeout(void (*func)(void *),
				      void *arg, int ticks);

	void untimeout(void (*func)(void *), void *arg,
		       struct callout_handle handle);

	If a client wishes to remove a timeout, it must store the
	callout_handle returned by timeout and pass it to untimeout.
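	For example (a minimal sketch; the foo_* names are invented,
	and CALLOUT_HANDLE_INITIALIZER is used as in the kern_acct.c
	changes below), a periodic service would now be written as:

	static struct callout_handle foo_handle
	    = CALLOUT_HANDLE_INITIALIZER(&foo_handle);

	static void
	foo_tick(void *arg)
	{
		/* Do the periodic work, then rearm one second out. */
		foo_handle = timeout(foo_tick, arg, hz);
	}

	static void
	foo_stop(void *arg)
	{
		/* Cancelling requires func, arg, and the saved handle. */
		untimeout(foo_tick, arg, foo_handle);
	}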

	The new implementation gives O(1) insertion and removal of
	callouts, making this interface scale well even for
	applications that keep hundreds of callouts outstanding.
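	The constant-time behavior comes from hashing each callout
	into a "timing wheel" bucket selected by its expiration tick,
	as in this excerpt from the new timeout() in kern_clock.c:

	new->c_time = to_ticks >> callwheelbits;  /* full wheel revolutions */
	new->c_bucket = &callwheel[(ticks + to_ticks) & callwheelmask];
	TAILQ_INSERT_TAIL(new->c_bucket, new, c_links.tqe);

	softclock() sweeps one bucket per tick and runs only the
	entries whose revolution count has reached zero, so neither
	insertion nor expiry ever walks the whole callout list.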

	See the updated timeout.9 man page for more details.
Justin T. Gibbs 1997-09-21 22:00:25 +00:00
parent 3544218335
commit ab36c06737
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=29680
10 changed files with 489 additions and 306 deletions

init_main.c:

@@ -39,7 +39,7 @@
* SUCH DAMAGE.
*
* @(#)init_main.c 8.9 (Berkeley) 1/21/94
* $Id: init_main.c,v 1.70 1997/08/26 18:10:37 peter Exp $
* $Id: init_main.c,v 1.71 1997/09/02 20:05:35 bde Exp $
*/
#include "opt_devfs.h"
@@ -459,7 +459,7 @@ SYSINIT(p0post, SI_SUB_INTRINSIC_POST, SI_ORDER_FIRST, proc0_post, NULL)
****
***************************************************************************
*/
/* ARGSUSED*/
/* ARGSUSED */
static void sched_setup __P((void *dummy));
static void
sched_setup(dummy)
@@ -471,6 +471,26 @@ sched_setup(dummy)
}
SYSINIT(sched_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, sched_setup, NULL)
/* ARGSUSED */
static void root_conf __P((void *dummy));
static void
root_conf(dummy)
void *dummy;
{
cpu_rootconf();
}
SYSINIT(root_conf, SI_SUB_ROOT_CONF, SI_ORDER_FIRST, root_conf, NULL)
/* ARGSUSED */
static void dump_conf __P((void *dummy));
static void
dump_conf(dummy)
void *dummy;
{
cpu_dumpconf();
}
SYSINIT(dump_conf, SI_SUB_DUMP_CONF, SI_ORDER_FIRST, dump_conf, NULL)
/* ARGSUSED*/
static void xxx_vfs_mountroot __P((void *fsnamep));
#ifdef BOOTP
@@ -488,7 +508,8 @@ xxx_vfs_mountroot(fsnamep)
if (vfs_mountrootfs(*((char **) fsnamep)))
panic("cannot mount root");
}
SYSINIT(mountroot, SI_SUB_ROOT, SI_ORDER_FIRST, xxx_vfs_mountroot, &mountrootfsname)
SYSINIT(mountroot, SI_SUB_MOUNT_ROOT, SI_ORDER_FIRST, xxx_vfs_mountroot,
&mountrootfsname)
/* ARGSUSED*/
static void xxx_vfs_root_fdtab __P((void *dummy));

kern_acct.c:

@@ -37,7 +37,7 @@
* SUCH DAMAGE.
*
* @(#)kern_acct.c 8.1 (Berkeley) 6/14/93
* $Id: kern_acct.c,v 1.15 1997/03/24 11:24:34 bde Exp $
* $Id: kern_acct.c,v 1.16 1997/09/02 20:05:36 bde Exp $
*/
#include <sys/param.h>
@@ -75,6 +75,13 @@
static comp_t encode_comp_t __P((u_long, u_long));
static void acctwatch __P((void *));
/*
* Accounting callout handle used for periodic scheduling of
* acctwatch.
*/
static struct callout_handle acctwatch_handle
= CALLOUT_HANDLE_INITIALIZER(&acctwatch_handle);
/*
* Accounting vnode pointer, and saved vnode pointer.
*/
@@ -139,7 +146,7 @@ acct(a1, uap, a3)
* close the file, and (if no new file was specified, leave).
*/
if (acctp != NULLVP || savacctp != NULLVP) {
untimeout(acctwatch, NULL);
untimeout(acctwatch, NULL, acctwatch_handle);
error = vn_close((acctp != NULLVP ? acctp : savacctp), FWRITE,
p->p_ucred, p);
acctp = savacctp = NULLVP;
@@ -310,5 +317,5 @@ acctwatch(a)
log(LOG_NOTICE, "Accounting suspended\n");
}
}
timeout(acctwatch, NULL, acctchkfreq * hz);
acctwatch_handle = timeout(acctwatch, NULL, acctchkfreq * hz);
}

kern_clock.c:

@@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)kern_clock.c 8.5 (Berkeley) 1/21/94
* $Id: kern_clock.c,v 1.39 1997/09/02 20:05:37 bde Exp $
* $Id: kern_clock.c,v 1.40 1997/09/07 05:25:43 bde Exp $
*/
/* Portions of this software are covered by the following: */
@@ -86,9 +86,11 @@ static void initclocks __P((void *dummy));
SYSINIT(clocks, SI_SUB_CLOCKS, SI_ORDER_FIRST, initclocks, NULL)
/* Exported to machdep.c. */
struct callout *callfree, *callout;
struct callout *callout;
struct callout_list callfree;
int callwheelsize, callwheelbits, callwheelmask;
struct callout_tailq *callwheel;
static struct callout calltodo;
/* Some of these don't belong here, but it's easiest to concentrate them. */
static long cp_time[CPUSTATES];
@@ -154,8 +156,10 @@ int stathz;
int profhz;
static int profprocs;
int ticks;
static int psdiv, pscnt; /* prof => stat divider */
int psratio; /* ratio: prof / stat */
static int softticks; /* Like ticks, but for softclock(). */
static struct callout *nextsoftcheck; /* Next callout to be checked. */
static int psdiv, pscnt; /* prof => stat divider */
int psratio; /* ratio: prof / stat */
volatile struct timeval time;
volatile struct timeval mono_time;
@@ -452,26 +456,6 @@ hardclock(frame)
{
register struct callout *p1;
register struct proc *p;
register int needsoft;
/*
* Update real-time timeout queue.
* At front of queue are some number of events which are ``due''.
* The time to these is <= 0 and if negative represents the
* number of ticks which have passed since it was supposed to happen.
* The rest of the q elements (times > 0) are events yet to happen,
* where the time for each is given as a delta from the previous.
* Decrementing just the first of these serves to decrement the time
* to all events.
*/
needsoft = 0;
for (p1 = calltodo.c_next; p1 != NULL; p1 = p1->c_next) {
if (--p1->c_time > 0)
break;
needsoft = 1;
if (p1->c_time == 0)
break;
}
p = curproc;
if (p) {
@@ -677,7 +661,7 @@ hardclock(frame)
* Process callouts at a very low cpu priority, so we don't keep the
* relatively high clock interrupt priority any longer than necessary.
*/
if (needsoft) {
if (TAILQ_FIRST(&callwheel[ticks & callwheelmask]) != NULL) {
if (CLKF_BASEPRI(frame)) {
/*
* Save the overhead of a software interrupt;
@@ -687,9 +671,22 @@ hardclock(frame)
softclock();
} else
setsoftclock();
} else if (softticks + 1 == ticks) {
++softticks;
}
}
/*
* The callout mechanism is based on the work of Adam M. Costello and
* George Varghese, published in a technical report entitled "Redesigning
* the BSD Callout and Timer Facilities" and modified slightly for inclusion
* in FreeBSD by Justin T. Gibbs. The original work on the data structures
* used in this implementation was published by G.Varghese and A. Lauck in
* the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
* the Efficient Implementation of a Timer Facility" in the Proceedings of
* the 11th ACM Annual Symposium on Operating Systems Principles,
* Austin, Texas Nov 1987.
*/
/*
* Software (low priority) clock interrupt.
* Run periodic events from timeout queue.
@@ -699,21 +696,52 @@ void
softclock()
{
register struct callout *c;
register void *arg;
register void (*func) __P((void *));
register int s;
register int steps; /*
* Number of steps taken since
* we last allowed interrupts.
*/
#ifndef MAX_SOFTCLOCK_STEPS
#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
#endif /* MAX_SOFTCLOCK_STEPS */
steps = 0;
s = splhigh();
while ((c = calltodo.c_next) != NULL && c->c_time <= 0) {
func = c->c_func;
arg = c->c_arg;
calltodo.c_next = c->c_next;
c->c_next = callfree;
callfree = c;
splx(s);
(*func)(arg);
(void) splhigh();
while (softticks != ticks) {
c = TAILQ_FIRST(&callwheel[++softticks & callwheelmask]);
while (c) {
if (c->c_time > 0) {
c->c_time--;
c = TAILQ_NEXT(c, c_links.tqe);
++steps;
if (steps >= MAX_SOFTCLOCK_STEPS) {
nextsoftcheck = c;
splx(s);
/* Give hardclock() a chance. */
s = splhigh();
c = nextsoftcheck;
steps = 0;
}
} else {
void (*c_func)(void *);
void *c_arg;
nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
TAILQ_REMOVE(c->c_bucket, c, c_links.tqe);
c_func = c->c_func;
c_arg = c->c_arg;
c->c_func = NULL;
SLIST_INSERT_HEAD(&callfree, c, c_links.sle);
splx(s);
c_func(c_arg);
s = splhigh();
steps = 0;
c = nextsoftcheck;
}
}
}
nextsoftcheck = NULL;
splx(s);
}
@@ -724,80 +752,86 @@ softclock()
* untimeout --
* Cancel previous timeout function call.
*
* callout_handle_init --
* Initialize a handle so that using it with untimeout is benign.
*
* See AT&T BCI Driver Reference Manual for specification. This
* implementation differs from that one in that no identification
* value is returned from timeout, rather, the original arguments
* to timeout are used to identify entries for untimeout.
* implementation differs from that one in that although an
* identification value is returned from timeout, the original
* arguments to timeout as well as the identifier are used to
* identify entries for untimeout.
*/
void
timeout(ftn, arg, ticks)
struct callout_handle
timeout(ftn, arg, to_ticks)
timeout_t ftn;
void *arg;
register int ticks;
register int to_ticks;
{
register struct callout *new, *p, *t;
register int s;
int s;
struct callout *new;
struct callout_handle handle;
if (ticks <= 0)
ticks = 1;
if (to_ticks <= 0)
to_ticks = 1;
/* Lock out the clock. */
s = splhigh();
/* Fill in the next free callout structure. */
if (callfree == NULL)
new = SLIST_FIRST(&callfree);
if (new == NULL)
/* XXX Attempt to malloc first */
panic("timeout table full");
new = callfree;
callfree = new->c_next;
SLIST_REMOVE_HEAD(&callfree, c_links.sle);
new->c_arg = arg;
new->c_func = ftn;
new->c_time = to_ticks >> callwheelbits;
new->c_bucket = &callwheel[(ticks + to_ticks) & callwheelmask];
TAILQ_INSERT_TAIL(new->c_bucket, new, c_links.tqe);
/*
* The time for each event is stored as a difference from the time
* of the previous event on the queue. Walk the queue, correcting
* the ticks argument for queue entries passed. Correct the ticks
* value for the queue entry immediately after the insertion point
* as well. Watch out for negative c_time values; these represent
* overdue events.
*/
for (p = &calltodo;
(t = p->c_next) != NULL && ticks > t->c_time; p = t)
if (t->c_time > 0)
ticks -= t->c_time;
new->c_time = ticks;
if (t != NULL)
t->c_time -= ticks;
/* Insert the new entry into the queue. */
p->c_next = new;
new->c_next = t;
splx(s);
handle.callout = new;
return (handle);
}
void
untimeout(ftn, arg)
untimeout(ftn, arg, handle)
timeout_t ftn;
void *arg;
struct callout_handle handle;
{
register struct callout *p, *t;
register int s;
s = splhigh();
for (p = &calltodo; (t = p->c_next) != NULL; p = t)
if (t->c_func == ftn && t->c_arg == arg) {
/* Increment next entry's tick count. */
if (t->c_next && t->c_time > 0)
t->c_next->c_time += t->c_time;
/*
* Check for a handle that was initialized
* by callout_handle_init, but never used
* for a real timeout.
*/
if (handle.callout == NULL)
return;
/* Move entry from callout queue to callfree queue. */
p->c_next = t->c_next;
t->c_next = callfree;
callfree = t;
break;
s = splhigh();
if ((handle.callout->c_func == ftn)
&& (handle.callout->c_arg == arg)) {
if (nextsoftcheck == handle.callout) {
nextsoftcheck = TAILQ_NEXT(handle.callout, c_links.tqe);
}
TAILQ_REMOVE(handle.callout->c_bucket,
handle.callout, c_links.tqe);
handle.callout->c_func = NULL;
SLIST_INSERT_HEAD(&callfree, handle.callout, c_links.sle);
}
splx(s);
}
void
callout_handle_init(struct callout_handle *handle)
{
handle->callout = NULL;
}
void
gettime(struct timeval *tvp)
{

kern_exit.c:

@@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)kern_exit.c 8.7 (Berkeley) 2/12/94
* $Id: kern_exit.c,v 1.54 1997/09/02 20:05:38 bde Exp $
* $Id: kern_exit.c,v 1.55 1997/09/13 19:42:10 joerg Exp $
*/
#include "opt_ktrace.h"
@@ -176,7 +176,8 @@ exit1(p, rv)
p->p_flag |= P_WEXIT;
p->p_sigignore = ~0;
p->p_siglist = 0;
untimeout(realitexpire, (caddr_t)p);
if (timerisset(&p->p_realtimer.it_value))
untimeout(realitexpire, (caddr_t)p, p->p_ithandle);
/*
* Close open files and release open-file table.

kern_synch.c:

@@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)kern_synch.c 8.9 (Berkeley) 5/19/95
* $Id: kern_synch.c,v 1.37 1997/08/21 20:33:39 bde Exp $
* $Id: kern_synch.c,v 1.38 1997/09/02 20:05:43 bde Exp $
*/
#include "opt_ktrace.h"
@@ -331,6 +331,7 @@ tsleep(ident, priority, wmesg, timo)
{
struct proc *p = curproc;
int s, sig, catch = priority & PCATCH;
struct callout_handle thandle;
#ifdef KTRACE
if (KTRPOINT(p, KTR_CSW))
@@ -363,7 +364,7 @@ tsleep(ident, priority, wmesg, timo)
p->p_priority = priority & PRIMASK;
TAILQ_INSERT_TAIL(&slpque[LOOKUP(ident)], p, p_procq);
if (timo)
timeout(endtsleep, (void *)p, timo);
thandle = timeout(endtsleep, (void *)p, timo);
/*
* We put ourselves on the sleep queue and start our timeout
* before calling CURSIG, as we could stop there, and a wakeup
@@ -404,7 +405,7 @@ tsleep(ident, priority, wmesg, timo)
return (EWOULDBLOCK);
}
} else if (timo)
untimeout(endtsleep, (void *)p);
untimeout(endtsleep, (void *)p, thandle);
if (catch && (sig != 0 || (sig = CURSIG(p)))) {
#ifdef KTRACE
if (KTRPOINT(p, KTR_CSW))

kern_time.c:

@@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)kern_time.c 8.1 (Berkeley) 6/10/93
* $Id: kern_time.c,v 1.33 1997/08/26 00:40:04 bde Exp $
* $Id: kern_time.c,v 1.34 1997/09/02 20:05:49 bde Exp $
*/
#include <sys/param.h>
@@ -586,10 +586,12 @@ setitimer(p, uap, retval)
return (EINVAL);
s = splclock();
if (uap->which == ITIMER_REAL) {
untimeout(realitexpire, (caddr_t)p);
if (timerisset(&p->p_realtimer.it_value))
untimeout(realitexpire, (caddr_t)p, p->p_ithandle);
if (timerisset(&aitv.it_value)) {
timevaladd(&aitv.it_value, &time);
timeout(realitexpire, (caddr_t)p, hzto(&aitv.it_value));
p->p_ithandle = timeout(realitexpire, (caddr_t)p,
hzto(&aitv.it_value));
}
p->p_realtimer = aitv;
} else
@@ -628,8 +630,9 @@ realitexpire(arg)
timevaladd(&p->p_realtimer.it_value,
&p->p_realtimer.it_interval);
if (timercmp(&p->p_realtimer.it_value, &time, >)) {
timeout(realitexpire, (caddr_t)p,
hzto(&p->p_realtimer.it_value) - 1);
p->p_ithandle =
timeout(realitexpire, (caddr_t)p,
hzto(&p->p_realtimer.it_value) - 1);
splx(s);
return;
}

subr_autoconf.c:

@@ -41,10 +41,13 @@
*
* @(#)subr_autoconf.c 8.1 (Berkeley) 6/10/93
*
* $Id$
* $Id: subr_autoconf.c,v 1.4 1997/02/22 09:39:15 peter Exp $
*/
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>
@@ -52,6 +55,7 @@
* Autoconfiguration subroutines.
*/
#ifdef UNUSED
/*
* ioconf.c exports exactly two names: cfdata and cfroots. All system
* devices and drivers are found via these tables.
@@ -340,3 +344,78 @@ evcnt_attach(dev, name, ev)
*nextp = ev;
nextp = &ev->ev_next;
}
#endif
/*
* "Interrupt driven config" functions.
*/
static TAILQ_HEAD(, intr_config_hook) intr_config_hook_list =
TAILQ_HEAD_INITIALIZER(intr_config_hook_list);
/* ARGSUSED */
static void run_interrupt_driven_config_hooks __P((void *dummy));
static void
run_interrupt_driven_config_hooks(dummy)
void *dummy;
{
struct intr_config_hook *hook;
for (hook = intr_config_hook_list.tqh_first; hook != NULL;
hook = hook->ich_links.tqe_next) {
(*hook->ich_func)(hook->ich_arg);
}
while (intr_config_hook_list.tqh_first != NULL) {
tsleep(&intr_config_hook_list, PCONFIG, "conifhk", 0);
}
}
SYSINIT(intr_config_hooks, SI_SUB_INT_CONFIG_HOOKS, SI_ORDER_FIRST,
run_interrupt_driven_config_hooks, NULL)
/*
* Register a hook that will be called after "cold"
* autoconfiguration is complete and interrupts can
* be used to complete initialization.
*/
int
config_intrhook_establish(hook)
struct intr_config_hook *hook;
{
struct intr_config_hook *hook_entry;
for (hook_entry = intr_config_hook_list.tqh_first; hook_entry != NULL;
hook_entry = hook_entry->ich_links.tqe_next)
if (hook_entry == hook)
break;
if (hook_entry != NULL) {
printf("config_intrhook_establish: establishing an "
"already established hook.\n");
return (1);
}
TAILQ_INSERT_TAIL(&intr_config_hook_list, hook, ich_links);
if (cold == 0)
/* XXX Sufficient for LKMs loaded after initial config??? */
run_interrupt_driven_config_hooks(NULL);
return (0);
}
void
config_intrhook_disestablish(hook)
struct intr_config_hook *hook;
{
struct intr_config_hook *hook_entry;
for (hook_entry = intr_config_hook_list.tqh_first; hook_entry != NULL;
hook_entry = hook_entry->ich_links.tqe_next)
if (hook_entry == hook)
break;
if (hook_entry == NULL)
panic("config_intrhook_disestablish: disestablishing an "
"unestablished hook");
TAILQ_REMOVE(&intr_config_hook_list, hook, ich_links);
/* Wakeup anyone watching the list */
wakeup(&intr_config_hook_list);
}

vfs_bio.c:

@@ -18,7 +18,7 @@
* 5. Modifications may be freely made to this file if the above conditions
* are met.
*
* $Id: vfs_bio.c,v 1.126 1997/09/10 20:09:22 phk Exp $
* $Id: vfs_bio.c,v 1.127 1997/09/21 04:49:30 dyson Exp $
*/
/*
@@ -383,40 +383,6 @@ bwrite(struct buf * bp)
curproc->p_stats->p_ru.ru_oublock++;
VOP_STRATEGY(bp);
/*
* Handle ordered writes here.
* If the write was originally flagged as ordered,
* then we check to see if it was converted to async.
* If it was converted to async, and is done now, then
* we release the buffer. Otherwise we clear the
* ordered flag because it is not needed anymore.
*
* Note that biodone has been modified so that it does
* not release ordered buffers. This allows us to have
* a chance to determine whether or not the driver
* has set the async flag in the strategy routine. Otherwise
* if biodone was not modified, then the buffer may have been
* reused before we have had a chance to check the flag.
*/
if ((oldflags & B_ORDERED) == B_ORDERED) {
int s;
s = splbio();
if (bp->b_flags & B_ASYNC) {
if ((bp->b_flags & B_DONE)) {
if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_RELBUF)) != 0)
brelse(bp);
else
bqrelse(bp);
}
splx(s);
return (0);
} else {
bp->b_flags &= ~B_ORDERED;
}
splx(s);
}
if ((oldflags & B_ASYNC) == 0) {
int rtval = biowait(bp);
@@ -489,7 +455,7 @@ bdwrite(struct buf * bp)
* requesting a sync -- there might not be enough memory to do
* the bmap then... So, this is important to do.
*/
if( bp->b_lblkno == bp->b_blkno) {
if (bp->b_lblkno == bp->b_blkno) {
VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
}
@@ -537,6 +503,11 @@ bawrite(struct buf * bp)
int
bowrite(struct buf * bp)
{
/*
* XXX Add in B_ASYNC once the SCSI
* layer can deal with ordered
* writes properly.
*/
bp->b_flags |= B_ORDERED;
return (VOP_BWRITE(bp));
}
@@ -1355,10 +1326,10 @@ getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
bremfree(bp);
/*
* check for size inconsistancies (note that they shouldn't happen
* but do when filesystems don't handle the size changes correctly.)
* We are conservative on metadata and don't just extend the buffer
* but write and re-constitute it.
* check for size inconsistancies (note that they shouldn't
* happen but do when filesystems don't handle the size changes
* correctly.) We are conservative on metadata and don't just
* extend the buffer but write and re-constitute it.
*/
if (bp->b_bcount != size) {
@@ -1901,12 +1872,10 @@ biodone(register struct buf * bp)
*/
if (bp->b_flags & B_ASYNC) {
if ((bp->b_flags & B_ORDERED) == 0) {
if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_RELBUF)) != 0)
brelse(bp);
else
bqrelse(bp);
}
if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_RELBUF)) != 0)
brelse(bp);
else
bqrelse(bp);
} else {
bp->b_flags &= ~B_WANTED;
wakeup(bp);