rcu/nocb: Code-style nits in callback-offloading toggling

This commit addresses a few code-style nits in callback-offloading
toggling, including one that predates this toggling.

Cc: Frederic Weisbecker <frederic@kernel.org>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
This commit is contained in:
Paul E. McKenney 2020-12-21 11:17:16 -08:00
parent 3d0cef50f3
commit f759081e8f
4 changed files with 30 additions and 37 deletions

View file

@@ -80,17 +80,12 @@ static inline bool rcu_segcblist_is_enabled(struct rcu_segcblist *rsclp)
return rcu_segcblist_test_flags(rsclp, SEGCBLIST_ENABLED); return rcu_segcblist_test_flags(rsclp, SEGCBLIST_ENABLED);
} }
/* Is the specified rcu_segcblist offloaded? */ /* Is the specified rcu_segcblist offloaded, or is SEGCBLIST_SOFTIRQ_ONLY set? */
static inline bool rcu_segcblist_is_offloaded(struct rcu_segcblist *rsclp) static inline bool rcu_segcblist_is_offloaded(struct rcu_segcblist *rsclp)
{ {
if (IS_ENABLED(CONFIG_RCU_NOCB_CPU)) { if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
/* !rcu_segcblist_test_flags(rsclp, SEGCBLIST_SOFTIRQ_ONLY))
* Complete de-offloading happens only when SEGCBLIST_SOFTIRQ_ONLY return true;
* is set.
*/
if (!rcu_segcblist_test_flags(rsclp, SEGCBLIST_SOFTIRQ_ONLY))
return true;
}
return false; return false;
} }
@@ -99,10 +94,8 @@ static inline bool rcu_segcblist_completely_offloaded(struct rcu_segcblist *rscl
{ {
int flags = SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP | SEGCBLIST_OFFLOADED; int flags = SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP | SEGCBLIST_OFFLOADED;
if (IS_ENABLED(CONFIG_RCU_NOCB_CPU)) { if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) && (rsclp->flags & flags) == flags)
if ((rsclp->flags & flags) == flags) return true;
return true;
}
return false; return false;
} }

View file

@@ -1606,7 +1606,7 @@ rcu_torture_stats_print(void)
data_race(n_barrier_successes), data_race(n_barrier_successes),
data_race(n_barrier_attempts), data_race(n_barrier_attempts),
data_race(n_rcu_torture_barrier_error)); data_race(n_rcu_torture_barrier_error));
pr_cont("read-exits: %ld ", data_race(n_read_exits)); pr_cont("read-exits: %ld ", data_race(n_read_exits)); // Statistic.
pr_cont("nocb-toggles: %ld:%ld\n", pr_cont("nocb-toggles: %ld:%ld\n",
atomic_long_read(&n_nocb_offload), atomic_long_read(&n_nocb_deoffload)); atomic_long_read(&n_nocb_offload), atomic_long_read(&n_nocb_deoffload));

View file

@@ -1962,17 +1962,17 @@ static inline bool nocb_gp_update_state(struct rcu_data *rdp, bool *needwake_sta
*needwake_state = true; *needwake_state = true;
} }
return true; return true;
} else {
/*
* De-offloading. Clear our flag and notify the de-offload worker.
* We will ignore this rdp until it ever gets re-offloaded.
*/
WARN_ON_ONCE(!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP));
rcu_segcblist_clear_flags(cblist, SEGCBLIST_KTHREAD_GP);
if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB))
*needwake_state = true;
return false;
} }
/*
* De-offloading. Clear our flag and notify the de-offload worker.
* We will ignore this rdp until it ever gets re-offloaded.
*/
WARN_ON_ONCE(!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP));
rcu_segcblist_clear_flags(cblist, SEGCBLIST_KTHREAD_GP);
if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB))
*needwake_state = true;
return false;
} }
@@ -2005,6 +2005,7 @@ static void nocb_gp_wait(struct rcu_data *my_rdp)
WARN_ON_ONCE(my_rdp->nocb_gp_rdp != my_rdp); WARN_ON_ONCE(my_rdp->nocb_gp_rdp != my_rdp);
for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_cb_rdp) { for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_cb_rdp) {
bool needwake_state = false; bool needwake_state = false;
if (!nocb_gp_enabled_cb(rdp)) if (!nocb_gp_enabled_cb(rdp))
continue; continue;
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Check")); trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Check"));
@@ -2160,11 +2161,11 @@ static inline bool nocb_cb_wait_cond(struct rcu_data *rdp)
static void nocb_cb_wait(struct rcu_data *rdp) static void nocb_cb_wait(struct rcu_data *rdp)
{ {
struct rcu_segcblist *cblist = &rdp->cblist; struct rcu_segcblist *cblist = &rdp->cblist;
struct rcu_node *rnp = rdp->mynode;
bool needwake_state = false;
bool needwake_gp = false;
unsigned long cur_gp_seq; unsigned long cur_gp_seq;
unsigned long flags; unsigned long flags;
bool needwake_state = false;
bool needwake_gp = false;
struct rcu_node *rnp = rdp->mynode;
local_irq_save(flags); local_irq_save(flags);
rcu_momentary_dyntick_idle(); rcu_momentary_dyntick_idle();
@@ -2217,8 +2218,8 @@ static void nocb_cb_wait(struct rcu_data *rdp)
swait_event_interruptible_exclusive(rdp->nocb_cb_wq, swait_event_interruptible_exclusive(rdp->nocb_cb_wq,
nocb_cb_wait_cond(rdp)); nocb_cb_wait_cond(rdp));
/* ^^^ Ensure CB invocation follows _sleep test. */ // VVV Ensure CB invocation follows _sleep test.
if (smp_load_acquire(&rdp->nocb_cb_sleep)) { if (smp_load_acquire(&rdp->nocb_cb_sleep)) { // ^^^
WARN_ON(signal_pending(current)); WARN_ON(signal_pending(current));
trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty")); trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty"));
} }
@@ -2323,7 +2324,7 @@ static int __rcu_nocb_rdp_deoffload(struct rcu_data *rdp)
unsigned long flags; unsigned long flags;
int ret; int ret;
printk("De-offloading %d\n", rdp->cpu); pr_info("De-offloading %d\n", rdp->cpu);
rcu_nocb_lock_irqsave(rdp, flags); rcu_nocb_lock_irqsave(rdp, flags);
/* /*
@@ -2384,11 +2385,10 @@ int rcu_nocb_cpu_deoffload(int cpu)
mutex_lock(&rcu_state.barrier_mutex); mutex_lock(&rcu_state.barrier_mutex);
cpus_read_lock(); cpus_read_lock();
if (rcu_segcblist_is_offloaded(&rdp->cblist)) { if (rcu_segcblist_is_offloaded(&rdp->cblist)) {
if (cpu_online(cpu)) { if (cpu_online(cpu))
ret = work_on_cpu(cpu, rcu_nocb_rdp_deoffload, rdp); ret = work_on_cpu(cpu, rcu_nocb_rdp_deoffload, rdp);
} else { else
ret = __rcu_nocb_rdp_deoffload(rdp); ret = __rcu_nocb_rdp_deoffload(rdp);
}
if (!ret) if (!ret)
cpumask_clear_cpu(cpu, rcu_nocb_mask); cpumask_clear_cpu(cpu, rcu_nocb_mask);
} }
@@ -2412,7 +2412,7 @@ static int __rcu_nocb_rdp_offload(struct rcu_data *rdp)
if (!rdp->nocb_gp_rdp) if (!rdp->nocb_gp_rdp)
return -EINVAL; return -EINVAL;
printk("Offloading %d\n", rdp->cpu); pr_info("Offloading %d\n", rdp->cpu);
/* /*
* Can't use rcu_nocb_lock_irqsave() while we are in * Can't use rcu_nocb_lock_irqsave() while we are in
* SEGCBLIST_SOFTIRQ_ONLY mode. * SEGCBLIST_SOFTIRQ_ONLY mode.
@@ -2460,11 +2460,10 @@ int rcu_nocb_cpu_offload(int cpu)
mutex_lock(&rcu_state.barrier_mutex); mutex_lock(&rcu_state.barrier_mutex);
cpus_read_lock(); cpus_read_lock();
if (!rcu_segcblist_is_offloaded(&rdp->cblist)) { if (!rcu_segcblist_is_offloaded(&rdp->cblist)) {
if (cpu_online(cpu)) { if (cpu_online(cpu))
ret = work_on_cpu(cpu, rcu_nocb_rdp_offload, rdp); ret = work_on_cpu(cpu, rcu_nocb_rdp_offload, rdp);
} else { else
ret = __rcu_nocb_rdp_offload(rdp); ret = __rcu_nocb_rdp_offload(rdp);
}
if (!ret) if (!ret)
cpumask_set_cpu(cpu, rcu_nocb_mask); cpumask_set_cpu(cpu, rcu_nocb_mask);
} }

View file

@@ -1243,6 +1243,7 @@ bool timer_curr_running(struct timer_list *timer)
for (i = 0; i < NR_BASES; i++) { for (i = 0; i < NR_BASES; i++) {
struct timer_base *base = this_cpu_ptr(&timer_bases[i]); struct timer_base *base = this_cpu_ptr(&timer_bases[i]);
if (base->running_timer == timer) if (base->running_timer == timer)
return true; return true;
} }