Use atomic loads/stores when updating td->td_state

KCSAN complains about racy accesses in the locking code. Those races are
fine since they are inside a TD_SET_RUNNING() loop that expects the value
to be changed by another CPU.

Use relaxed atomic stores/loads to indicate that this variable can be
written/read by multiple CPUs at the same time. This will also prevent
the compiler from doing unexpected re-ordering.

Reported by:	GENERIC-KCSAN
Test Plan:	KCSAN no longer complains, kernel still runs fine.
Reviewed By:	markj, mjg (earlier version)
Differential Revision: https://reviews.freebsd.org/D28569
This commit is contained in:
Alex Richardson 2021-02-18 10:25:10 +00:00
parent df093aa946
commit fa2528ac64
15 changed files with 50 additions and 38 deletions

View file

@@ -426,7 +426,7 @@ kvm_proclist(kvm_t *kd, int what, int arg, struct proc *p,
TD_CAN_RUN(&mtd) ||
TD_IS_RUNNING(&mtd)) {
kp->ki_stat = SRUN;
} else if (mtd.td_state ==
} else if (TD_GET_STATE(&mtd) ==
TDS_INHIBITED) {
if (P_SHOULDSTOP(&proc)) {
kp->ki_stat = SSTOP;

View file

@@ -1054,7 +1054,7 @@ linprocfs_doprocstatus(PFS_FILL_ARGS)
state = "X (exiting)";
break;
}
switch(td2->td_state) {
switch(TD_GET_STATE(td2)) {
case TDS_INHIBITED:
state = "S (sleeping)";
break;

View file

@@ -854,7 +854,7 @@ _db_stack_trace_all(bool active_only)
for (td = kdb_thr_first(); td != NULL; td = kdb_thr_next(td)) {
prev_jb = kdb_jmpbuf(jb);
if (setjmp(jb) == 0) {
if (td->td_state == TDS_RUNNING)
if (TD_IS_RUNNING(td))
db_printf("\nTracing command %s pid %d"
" tid %ld td %p (CPU %d)\n",
td->td_proc->p_comm, td->td_proc->p_pid,

View file

@@ -173,9 +173,9 @@ db_ps_proc(struct proc *p)
*/
rflag = sflag = dflag = lflag = wflag = 0;
FOREACH_THREAD_IN_PROC(p, td) {
if (td->td_state == TDS_RUNNING ||
td->td_state == TDS_RUNQ ||
td->td_state == TDS_CAN_RUN)
if (TD_GET_STATE(td) == TDS_RUNNING ||
TD_GET_STATE(td) == TDS_RUNQ ||
TD_GET_STATE(td) == TDS_CAN_RUN)
rflag++;
if (TD_ON_LOCK(td))
lflag++;
@@ -267,7 +267,7 @@ dumpthread(volatile struct proc *p, volatile struct thread *td, int all)
if (all) {
db_printf("%6d ", td->td_tid);
switch (td->td_state) {
switch (TD_GET_STATE(td)) {
case TDS_RUNNING:
snprintf(state, sizeof(state), "Run");
break;
@@ -367,7 +367,7 @@ DB_SHOW_COMMAND(thread, db_show_thread)
db_printf(" flags: %#x ", td->td_flags);
db_printf(" pflags: %#x\n", td->td_pflags);
db_printf(" state: ");
switch (td->td_state) {
switch (TD_GET_STATE(td)) {
case TDS_INACTIVE:
db_printf("INACTIVE\n");
break;
@@ -413,7 +413,7 @@ DB_SHOW_COMMAND(thread, db_show_thread)
db_printf("}\n");
break;
default:
db_printf("??? (%#x)\n", td->td_state);
db_printf("??? (%#x)\n", TD_GET_STATE(td));
break;
}
if (TD_ON_LOCK(td))

View file

@@ -510,11 +510,11 @@ do_qXfer_threads_read(void)
sbuf_putc(&ctx.qXfer.sb, '>');
if (ctx.iter->td_state == TDS_RUNNING)
if (TD_GET_STATE(ctx.iter) == TDS_RUNNING)
sbuf_cat(&ctx.qXfer.sb, "Running");
else if (ctx.iter->td_state == TDS_RUNQ)
else if (TD_GET_STATE(ctx.iter) == TDS_RUNQ)
sbuf_cat(&ctx.qXfer.sb, "RunQ");
else if (ctx.iter->td_state == TDS_CAN_RUN)
else if (TD_GET_STATE(ctx.iter) == TDS_CAN_RUN)
sbuf_cat(&ctx.qXfer.sb, "CanRun");
else if (TD_ON_LOCK(ctx.iter))
sbuf_cat(&ctx.qXfer.sb, "Blocked");

View file

@@ -499,7 +499,7 @@ proc0_init(void *dummy __unused)
p->p_nice = NZERO;
td->td_tid = THREAD0_TID;
tidhash_add(td);
td->td_state = TDS_RUNNING;
TD_SET_STATE(td, TDS_RUNNING);
td->td_pri_class = PRI_TIMESHARE;
td->td_user_pri = PUSER;
td->td_base_user_pri = PUSER;

View file

@@ -992,7 +992,7 @@ intr_event_schedule_thread(struct intr_event *ie)
sched_add(td, SRQ_INTR);
} else {
CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
__func__, td->td_proc->p_pid, td->td_name, it->it_need, td->td_state);
__func__, td->td_proc->p_pid, td->td_name, it->it_need, TD_GET_STATE(td));
thread_unlock(td);
}

View file

@@ -1955,7 +1955,7 @@ credbatch_add(struct credbatch *crb, struct thread *td)
MPASS(td->td_realucred != NULL);
MPASS(td->td_realucred == td->td_ucred);
MPASS(td->td_state == TDS_INACTIVE);
MPASS(TD_GET_STATE(td) == TDS_INACTIVE);
cr = td->td_realucred;
KASSERT(cr->cr_users > 0, ("%s: users %d not > 0 on cred %p",
__func__, cr->cr_users, cr));

View file

@@ -1147,7 +1147,7 @@ racct_proc_throttle(struct proc *p, int timeout)
thread_lock(td);
td->td_flags |= TDF_ASTPENDING;
switch (td->td_state) {
switch (TD_GET_STATE(td)) {
case TDS_RUNQ:
/*
* If the thread is on the scheduler run-queue, we can

View file

@@ -570,7 +570,7 @@ setrunnable(struct thread *td, int srqflags)
("setrunnable: pid %d is a zombie", td->td_proc->p_pid));
swapin = 0;
switch (td->td_state) {
switch (TD_GET_STATE(td)) {
case TDS_RUNNING:
case TDS_RUNQ:
break;
@@ -593,7 +593,7 @@ setrunnable(struct thread *td, int srqflags)
}
break;
default:
panic("setrunnable: state 0x%x", td->td_state);
panic("setrunnable: state 0x%x", TD_GET_STATE(td));
}
if ((srqflags & (SRQ_HOLD | SRQ_HOLDTD)) == 0)
thread_unlock(td);

View file

@@ -346,7 +346,7 @@ thread_ctor(void *mem, int size, void *arg, int flags)
struct thread *td;
td = (struct thread *)mem;
td->td_state = TDS_INACTIVE;
TD_SET_STATE(td, TDS_INACTIVE);
td->td_lastcpu = td->td_oncpu = NOCPU;
/*
@@ -379,7 +379,7 @@ thread_dtor(void *mem, int size, void *arg)
#ifdef INVARIANTS
/* Verify that this thread is in a safe state to free. */
switch (td->td_state) {
switch (TD_GET_STATE(td)) {
case TDS_INHIBITED:
case TDS_RUNNING:
case TDS_CAN_RUN:
@@ -944,7 +944,7 @@ thread_exit(void)
rucollect(&p->p_ru, &td->td_ru);
PROC_STATUNLOCK(p);
td->td_state = TDS_INACTIVE;
TD_SET_STATE(td, TDS_INACTIVE);
#ifdef WITNESS
witness_thread_exit(td);
#endif
@@ -993,7 +993,7 @@ thread_link(struct thread *td, struct proc *p)
* its lock has been created.
* PROC_LOCK_ASSERT(p, MA_OWNED);
*/
td->td_state = TDS_INACTIVE;
TD_SET_STATE(td, TDS_INACTIVE);
td->td_proc = p;
td->td_flags = TDF_INMEM;

View file

@@ -1764,7 +1764,7 @@ sched_affinity(struct thread *td)
if (td->td_pinned != 0 || td->td_flags & TDF_BOUND)
return;
switch (td->td_state) {
switch (TD_GET_STATE(td)) {
case TDS_RUNQ:
/*
* If we are on a per-CPU runqueue that is in the set,

View file

@@ -288,7 +288,7 @@ propagate_priority(struct thread *td)
*/
KASSERT(TD_ON_LOCK(td), (
"thread %d(%s):%d holds %s but isn't blocked on a lock\n",
td->td_tid, td->td_name, td->td_state,
td->td_tid, td->td_name, TD_GET_STATE(td),
ts->ts_lockobj->lo_name));
/*
@@ -1185,7 +1185,7 @@ print_lockchain(struct thread *td, const char *prefix)
}
db_printf("%sthread %d (pid %d, %s) is ", prefix, td->td_tid,
td->td_proc->p_pid, td->td_name);
switch (td->td_state) {
switch (TD_GET_STATE(td)) {
case TDS_INACTIVE:
db_printf("inactive\n");
return;
@@ -1224,7 +1224,7 @@ print_lockchain(struct thread *td, const char *prefix)
db_printf("inhibited: %s\n", KTDSTATE(td));
return;
default:
db_printf("??? (%#x)\n", td->td_state);
db_printf("??? (%#x)\n", TD_GET_STATE(td));
return;
}
}

View file

@@ -347,6 +347,7 @@ struct thread {
TDS_RUNQ,
TDS_RUNNING
} td_state; /* (t) thread state */
/* Note: td_state must be accessed using TD_{GET,SET}_STATE(). */
union {
register_t tdu_retval[2];
off_t tdu_off;
@ -540,10 +541,15 @@ do { \
#define TD_IS_SWAPPED(td) ((td)->td_inhibitors & TDI_SWAPPED)
#define TD_ON_LOCK(td) ((td)->td_inhibitors & TDI_LOCK)
#define TD_AWAITING_INTR(td) ((td)->td_inhibitors & TDI_IWAIT)
#define TD_IS_RUNNING(td) ((td)->td_state == TDS_RUNNING)
#define TD_ON_RUNQ(td) ((td)->td_state == TDS_RUNQ)
#define TD_CAN_RUN(td) ((td)->td_state == TDS_CAN_RUN)
#define TD_IS_INHIBITED(td) ((td)->td_state == TDS_INHIBITED)
#ifdef _KERNEL
#define TD_GET_STATE(td) atomic_load_int(&(td)->td_state)
#else
#define TD_GET_STATE(td) ((td)->td_state)
#endif
#define TD_IS_RUNNING(td) (TD_GET_STATE(td) == TDS_RUNNING)
#define TD_ON_RUNQ(td) (TD_GET_STATE(td) == TDS_RUNQ)
#define TD_CAN_RUN(td) (TD_GET_STATE(td) == TDS_CAN_RUN)
#define TD_IS_INHIBITED(td) (TD_GET_STATE(td) == TDS_INHIBITED)
#define TD_ON_UPILOCK(td) ((td)->td_flags & TDF_UPIBLOCKED)
#define TD_IS_IDLETHREAD(td) ((td)->td_flags & TDF_IDLETD)
@@ -557,15 +563,15 @@ do { \
((td)->td_inhibitors & TDI_LOCK) != 0 ? "blocked" : \
((td)->td_inhibitors & TDI_IWAIT) != 0 ? "iwait" : "yielding")
#define TD_SET_INHIB(td, inhib) do { \
(td)->td_state = TDS_INHIBITED; \
(td)->td_inhibitors |= (inhib); \
#define TD_SET_INHIB(td, inhib) do { \
TD_SET_STATE(td, TDS_INHIBITED); \
(td)->td_inhibitors |= (inhib); \
} while (0)
#define TD_CLR_INHIB(td, inhib) do { \
if (((td)->td_inhibitors & (inhib)) && \
(((td)->td_inhibitors &= ~(inhib)) == 0)) \
(td)->td_state = TDS_CAN_RUN; \
TD_SET_STATE(td, TDS_CAN_RUN); \
} while (0)
#define TD_SET_SLEEPING(td) TD_SET_INHIB((td), TDI_SLEEPING)
@@ -581,9 +587,15 @@ do { \
#define TD_CLR_SUSPENDED(td) TD_CLR_INHIB((td), TDI_SUSPENDED)
#define TD_CLR_IWAIT(td) TD_CLR_INHIB((td), TDI_IWAIT)
#define TD_SET_RUNNING(td) (td)->td_state = TDS_RUNNING
#define TD_SET_RUNQ(td) (td)->td_state = TDS_RUNQ
#define TD_SET_CAN_RUN(td) (td)->td_state = TDS_CAN_RUN
#ifdef _KERNEL
#define TD_SET_STATE(td, state) atomic_store_int(&(td)->td_state, state)
#else
#define TD_SET_STATE(td, state) (td)->td_state = state
#endif
#define TD_SET_RUNNING(td) TD_SET_STATE(td, TDS_RUNNING)
#define TD_SET_RUNQ(td) TD_SET_STATE(td, TDS_RUNQ)
#define TD_SET_CAN_RUN(td) TD_SET_STATE(td, TDS_CAN_RUN)
#define TD_SBDRY_INTR(td) \
(((td)->td_flags & (TDF_SEINTR | TDF_SERESTART)) != 0)

View file

@@ -207,7 +207,7 @@ vmtotal(SYSCTL_HANDLER_ARGS)
if (p->p_state != PRS_NEW) {
FOREACH_THREAD_IN_PROC(p, td) {
thread_lock(td);
switch (td->td_state) {
switch (TD_GET_STATE(td)) {
case TDS_INHIBITED:
if (TD_IS_SWAPPED(td))
total.t_sw++;