rcu-tasks: Add data structures for lightweight grace periods

This commit adds fields to task_struct and to rcu_tasks_percpu that will
be used to avoid the task-list scan for RCU Tasks Trace grace periods,
and also initializes these fields.

Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Cc: Neeraj Upadhyay <quic_neeraju@quicinc.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Andrii Nakryiko <andrii@kernel.org>
Cc: Martin KaFai Lau <kafai@fb.com>
Cc: KP Singh <kpsingh@kernel.org>
Author: Paul E. McKenney <paulmck@kernel.org>
Date:   2022-05-16 17:56:16 -07:00
Commit: 434c9eefb9
Parent: f90f19da88
4 changed files, 8 insertions(+), 0 deletions(-)

--- a/include/linux/sched.h
+++ b/include/linux/sched.h

@@ -844,6 +844,8 @@ struct task_struct {
 	int trc_ipi_to_cpu;
 	union rcu_special trc_reader_special;
 	struct list_head trc_holdout_list;
+	struct list_head trc_blkd_node;
+	int trc_blkd_cpu;
 #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
 	struct sched_info sched_info;

--- a/init/init_task.c
+++ b/init/init_task.c

@@ -157,6 +157,7 @@ struct task_struct init_task
 	.trc_reader_nesting = 0,
 	.trc_reader_special.s = 0,
 	.trc_holdout_list = LIST_HEAD_INIT(init_task.trc_holdout_list),
+	.trc_blkd_node = LIST_HEAD_INIT(init_task.trc_blkd_node),
 #endif
 #ifdef CONFIG_CPUSETS
 	.mems_allowed_seq = SEQCNT_SPINLOCK_ZERO(init_task.mems_allowed_seq,

--- a/kernel/fork.c
+++ b/kernel/fork.c

@@ -1814,6 +1814,7 @@ static inline void rcu_copy_process(struct task_struct *p)
 	p->trc_reader_nesting = 0;
 	p->trc_reader_special.s = 0;
 	INIT_LIST_HEAD(&p->trc_holdout_list);
+	INIT_LIST_HEAD(&p->trc_blkd_node);
 #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
 }

--- a/kernel/rcu/tasks.h
+++ b/kernel/rcu/tasks.h

@@ -29,6 +29,7 @@ typedef void (*postgp_func_t)(struct rcu_tasks *rtp);
  * @rtp_work: Work queue for invoking callbacks.
  * @rtp_irq_work: IRQ work queue for deferred wakeups.
  * @barrier_q_head: RCU callback for barrier operation.
+ * @rtp_blkd_tasks: List of tasks blocked as readers.
  * @cpu: CPU number corresponding to this entry.
  * @rtpp: Pointer to the rcu_tasks structure.
  */
@@ -40,6 +41,7 @@ struct rcu_tasks_percpu {
 	struct work_struct rtp_work;
 	struct irq_work rtp_irq_work;
 	struct rcu_head barrier_q_head;
+	struct list_head rtp_blkd_tasks;
 	int cpu;
 	struct rcu_tasks *rtpp;
 };
@@ -256,6 +258,8 @@ static void cblist_init_generic(struct rcu_tasks *rtp)
 		INIT_WORK(&rtpcp->rtp_work, rcu_tasks_invoke_cbs_wq);
 		rtpcp->cpu = cpu;
 		rtpcp->rtpp = rtp;
+		if (!rtpcp->rtp_blkd_tasks.next)
+			INIT_LIST_HEAD(&rtpcp->rtp_blkd_tasks);
 		raw_spin_unlock_rcu_node(rtpcp); // irqs remain disabled.
 	}
 	raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
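
Editorial note: this commit only adds and initializes the fields; as a rough, illustrative sketch (not part of this commit), later patches could use them along the following lines. The helper name rcu_trc_note_blocked_reader() is hypothetical, and the rtp_lock acquisition and interrupt disabling that real code would need are omitted for brevity.

	/* Hypothetical helper, for illustration only: record that task t has
	 * blocked inside an RCU Tasks Trace read-side critical section by
	 * queuing it on this CPU's rtp_blkd_tasks list and remembering which
	 * CPU's list holds it, so it can later be dequeued (locking omitted). */
	static void rcu_trc_note_blocked_reader(struct task_struct *t,
						struct rcu_tasks_percpu *rtpcp)
	{
		t->trc_blkd_cpu = smp_processor_id();
		list_add(&t->trc_blkd_node, &rtpcp->rtp_blkd_tasks);
	}

The "if (!rtpcp->rtp_blkd_tasks.next)" guard in cblist_init_generic() appears to rely on per-CPU data being zero-initialized: a never-initialized list head has a NULL ->next pointer, so the list is set up exactly once and is not emptied should cblist_init_generic() run again.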