linux/tools/memory-model/linux-kernel.bell
Jonas Oberhauser 9ba7d3b3b8 tools: memory-model: Make plain accesses carry dependencies
As reported by Viktor, plain accesses in LKMM are weaker than
accesses to registers: the latter carry dependencies but the former
do not. This is exemplified in the following snippet:

  int r = READ_ONCE(*x);
  WRITE_ONCE(*y, r);

Here a data dependency links the READ_ONCE() to the WRITE_ONCE(),
preserving their order, because the model treats r as a register.
If r is turned into a memory location accessed by plain accesses,
however, the link is broken and the order between READ_ONCE() and
WRITE_ONCE() is no longer preserved.
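
For instance, writing the value through a plain-accessed variable *t
(t being a name used here purely for illustration), as in the variant
below, used to lose the ordering between the two marked accesses:

  *t = READ_ONCE(*x);
  WRITE_ONCE(*y, *t);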

This is too conservative, since any optimizations on plain
accesses that might break dependencies are also possible on
registers; it also contradicts the intuitive notion of "dependency"
as the data stored by the WRITE_ONCE() does depend on the data read
by the READ_ONCE(), independently of whether r is a register or a
memory location.

This is resolved by redefining all dependencies to include
dependencies carried by memory accesses; a dependency is said to be
carried by memory accesses (in the model: carry-dep) from one load
to another load if the initial load is followed by an arbitrarily
long sequence alternating between stores and loads of the same
thread, where the data of each store depends on the previous load,
and is read by the next load.
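
As a sketch (t and u naming hypothetical plain-accessed variables used
only for illustration), the dependency below is carried through two
such store/load steps, so the WRITE_ONCE() remains ordered after the
READ_ONCE():

  int r = READ_ONCE(*x);
  *t = r;             /* store's data depends on the load of *x */
  *u = *t;            /* load of *t feeds another dependent store */
  WRITE_ONCE(*y, *u); /* load of *u, with a data dependency to *y */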

Any dependency linking the final load in the sequence to another
access also links the initial load in the sequence to that access.

More details can be found in this LKML discussion:

https://lore.kernel.org/lkml/d86295788ad14a02874ab030ddb8a6f8@huawei.com/

Reported-by: Viktor Vafeiadis <viktor@mpi-sws.org>
Signed-off-by: Jonas Oberhauser <jonas.oberhauser@huawei.com>
Reviewed-by: Alan Stern <stern@rowland.harvard.edu>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
2023-01-03 20:47:04 -08:00

// SPDX-License-Identifier: GPL-2.0+
(*
 * Copyright (C) 2015 Jade Alglave <j.alglave@ucl.ac.uk>,
 *                    Luc Maranget <luc.maranget@inria.fr> for Inria
 * Copyright (C) 2016 Luc Maranget <luc.maranget@inria.fr> for Inria
 * Copyright (C) 2017 Alan Stern <stern@rowland.harvard.edu>,
 *                    Andrea Parri <parri.andrea@gmail.com>
 *
 * An earlier version of this file appeared in the companion webpage for
 * "Frightening small children and disconcerting grown-ups: Concurrency
 * in the Linux kernel" by Alglave, Maranget, McKenney, Parri, and Stern,
 * which appeared in ASPLOS 2018.
 *)
"Linux-kernel memory consistency model"
enum Accesses = 'once (*READ_ONCE,WRITE_ONCE*) ||
                'release (*smp_store_release*) ||
                'acquire (*smp_load_acquire*) ||
                'noreturn (* R of non-return RMW *)
instructions R[{'once,'acquire,'noreturn}]
instructions W[{'once,'release}]
instructions RMW[{'once,'acquire,'release}]
enum Barriers = 'wmb (*smp_wmb*) ||
                'rmb (*smp_rmb*) ||
                'mb (*smp_mb*) ||
                'barrier (*barrier*) ||
                'rcu-lock (*rcu_read_lock*) ||
                'rcu-unlock (*rcu_read_unlock*) ||
                'sync-rcu (*synchronize_rcu*) ||
                'before-atomic (*smp_mb__before_atomic*) ||
                'after-atomic (*smp_mb__after_atomic*) ||
                'after-spinlock (*smp_mb__after_spinlock*) ||
                'after-unlock-lock (*smp_mb__after_unlock_lock*)
instructions F[Barriers]
(* SRCU *)
enum SRCU = 'srcu-lock || 'srcu-unlock || 'sync-srcu
instructions SRCU[SRCU]
(* All srcu events *)
let Srcu = Srcu-lock | Srcu-unlock | Sync-srcu
(* Compute matching pairs of nested Rcu-lock and Rcu-unlock *)
let rcu-rscs = let rec
            unmatched-locks = Rcu-lock \ domain(matched)
        and unmatched-unlocks = Rcu-unlock \ range(matched)
        and unmatched = unmatched-locks | unmatched-unlocks
        and unmatched-po = [unmatched] ; po ; [unmatched]
        and unmatched-locks-to-unlocks =
                [unmatched-locks] ; po ; [unmatched-unlocks]
        and matched = matched | (unmatched-locks-to-unlocks \
                (unmatched-po ; unmatched-po))
        in matched
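(*
 * Illustrative sketch, not part of the model: given nested sections
 *
 *   rcu_read_lock();       // L1
 *      rcu_read_lock();    // L2
 *      rcu_read_unlock();  // U2
 *   rcu_read_unlock();     // U1
 *
 * (L2, U2) has no other unmatched event between its lock and unlock,
 * so it is matched on the first pass of the recursion; with it removed
 * from the unmatched sets, (L1, U1) matches on the next pass.
 *)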
(* Validate nesting *)
flag ~empty Rcu-lock \ domain(rcu-rscs) as unbalanced-rcu-locking
flag ~empty Rcu-unlock \ range(rcu-rscs) as unbalanced-rcu-locking
(* Compute matching pairs of nested Srcu-lock and Srcu-unlock *)
let srcu-rscs = let rec
            unmatched-locks = Srcu-lock \ domain(matched)
        and unmatched-unlocks = Srcu-unlock \ range(matched)
        and unmatched = unmatched-locks | unmatched-unlocks
        and unmatched-po = ([unmatched] ; po ; [unmatched]) & loc
        and unmatched-locks-to-unlocks =
                ([unmatched-locks] ; po ; [unmatched-unlocks]) & loc
        and matched = matched | (unmatched-locks-to-unlocks \
                (unmatched-po ; unmatched-po))
        in matched
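(*
 * Illustrative sketch, not part of the model: because of the "& loc"
 * restrictions, only events on the same srcu_struct can form or block
 * a match, so overlapping critical sections on distinct srcu_structs
 *
 *   idx1 = srcu_read_lock(&ss1);
 *   idx2 = srcu_read_lock(&ss2);
 *   srcu_read_unlock(&ss1, idx1);
 *   srcu_read_unlock(&ss2, idx2);
 *
 * pair up as (lock ss1, unlock ss1) and (lock ss2, unlock ss2).
 *)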
(* Validate nesting *)
flag ~empty Srcu-lock \ domain(srcu-rscs) as unbalanced-srcu-locking
flag ~empty Srcu-unlock \ range(srcu-rscs) as unbalanced-srcu-locking
(* Check for use of synchronize_srcu() inside an RCU critical section *)
flag ~empty rcu-rscs & (po ; [Sync-srcu] ; po) as invalid-sleep
(* Validate SRCU dynamic match *)
flag ~empty different-values(srcu-rscs) as srcu-bad-nesting
(* Compute marked and plain memory accesses *)
let Marked = (~M) | IW | Once | Release | Acquire | domain(rmw) | range(rmw) |
        LKR | LKW | UL | LF | RL | RU
let Plain = M \ Marked
(* Redefine dependencies to include those carried through plain accesses *)
let carry-dep = (data ; rfi)*
let addr = carry-dep ; addr
let ctrl = carry-dep ; ctrl
let data = carry-dep ; data
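(*
 * Minimal sketch of the effect, reusing the snippet from the commit
 * message (t being a plain-accessed variable):
 *
 *   *t = READ_ONCE(*x);
 *   WRITE_ONCE(*y, *t);
 *
 * The chain is READ_ONCE() ->data plain store ->rfi plain load ->data
 * WRITE_ONCE(); carry-dep covers the (data ; rfi) prefix, so the
 * redefined data relation links the READ_ONCE() to the WRITE_ONCE().
 *)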