- Avoid rwsem lockups in certain situations when handling the handoff bit

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEzv7L6UO9uDPlPSfHEsHwGGHeVUoFAmLmUPkACgkQEsHwGGHe
 VUqgow/+Oj8acqImjR1OGW0MGW5F4OBRxPlWYGRBem0PwtysKSOUEuLKFGrfUPP8
 9/o/WDK7sKm0A0Ph4++zyuxQVUdww1kWR1BaOzBBJZMhB3dYk511JW2EZc7TPQg8
 qnBWOh1WGztaIATImo1JtN7GVlz6mWEq5i7CkyYWOfqqgMMfzS5N548KtFs37k1F
 GPwR2fntThsgYlL7+5ekHVBabx3Lf5CvpUkct484LtIrvO9xvBr+R5fzxdkd/j7s
 xGVFpt0sMEGjnOatLP+Q41E6n4Vugzjk9FdxOAYLcSl8NPGj/7HUtXB0oLcU7jSn
 eFxr2vurueVxpueNieBKJNiSicFsgx+QNsEtERtzLfyosgKtDkWtl5cP6k7qzqVm
 9KGAWc5tiQJ5DcIoxf+pKBEXBnf6EKFS7PrknYFTbWPFnbun0nw4OnFLufUgeg9c
 qB6afbWUOwKLWYIcJZadmnvmE2ZhaPAv1KPvqeE7E8ln5ERbg2UKY4qV37bvyJFg
 N+gVv+acSip4KtGswGUBKFriJ/vvN1dh/PiBqqJC3AHwlz+CxYsOVgpk9tkhlaQ9
 1HsQ51hyN/pb688J9SshqZf2BH3qS6Kz4eLa1eXGPEywsRBJfg4lufncn1JbrCg8
 CzkUfVPbS31LahMDs5U3IWGSiYSUsy1JDRLZ2zns9ZEMaaZWPKQ=
 =SBw2
 -----END PGP SIGNATURE-----

Merge tag 'locking_urgent_for_v5.19' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking fix from Borislav Petkov:

 - Avoid rwsem lockups in certain situations when handling the handoff
   bit

* tag 'locking_urgent_for_v5.19' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  locking/rwsem: Allow slowpath writer to ignore handoff bit if not set by first waiter
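
As a minimal standalone sketch of the decision this fix changes in rwsem_try_write_lock() (plain C; the struct and the must_yield_*() helpers are invented for the example and are not kernel APIs): a writer that is not the first waiter now yields to the HANDOFF bit only when the first waiter actually set it, otherwise it still tries to acquire the rwsem.

/*
 * Illustrative sketch only -- not the kernel implementation.  It models
 * the writer-slowpath decision changed by this fix; struct waiter and
 * the must_yield_*() helpers exist only for this example.
 */
#include <stdbool.h>
#include <stdio.h>

struct waiter {
	bool handoff_set;	/* this waiter has requested a handoff */
};

/* Old rule: any non-first waiter backs off once the HANDOFF bit is seen. */
static bool must_yield_old(bool handoff_bit, bool is_first)
{
	return handoff_bit && !is_first;
}

/*
 * New rule: a non-first waiter yields only when the first waiter is the
 * one that set the handoff bit; otherwise it may still try for the lock.
 */
static bool must_yield_new(bool handoff_bit, bool is_first,
			   const struct waiter *first)
{
	return handoff_bit && first->handoff_set && !is_first;
}

int main(void)
{
	/* HANDOFF bit observed, but the first waiter never asked for it. */
	struct waiter first = { .handoff_set = false };

	printf("old rule: yield=%d\n", must_yield_old(true, false));
	printf("new rule: yield=%d\n", must_yield_new(true, false, &first));
	return 0;
}

In the scenario the example prints, the handoff bit is set but not by the first waiter: the old rule makes the writer back off anyway, while the new rule lets it keep trying for the lock, matching the comment added to rwsem_try_write_lock() in the diff below.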
Linus Torvalds 2022-07-31 09:21:13 -07:00
commit 5e4823e6da

--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -335,8 +335,6 @@ struct rwsem_waiter {
 	struct task_struct *task;
 	enum rwsem_waiter_type type;
 	unsigned long timeout;
-
-	/* Writer only, not initialized in reader */
 	bool handoff_set;
 };
 #define rwsem_first_waiter(sem) \
@@ -459,10 +457,12 @@ static void rwsem_mark_wake(struct rw_semaphore *sem,
 			 * to give up the lock), request a HANDOFF to
 			 * force the issue.
 			 */
-			if (!(oldcount & RWSEM_FLAG_HANDOFF) &&
-			    time_after(jiffies, waiter->timeout)) {
-				adjustment -= RWSEM_FLAG_HANDOFF;
-				lockevent_inc(rwsem_rlock_handoff);
+			if (time_after(jiffies, waiter->timeout)) {
+				if (!(oldcount & RWSEM_FLAG_HANDOFF)) {
+					adjustment -= RWSEM_FLAG_HANDOFF;
+					lockevent_inc(rwsem_rlock_handoff);
+				}
+				waiter->handoff_set = true;
 			}
 
 			atomic_long_add(-adjustment, &sem->count);
@@ -599,7 +599,7 @@ rwsem_del_wake_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter,
 static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
 					struct rwsem_waiter *waiter)
 {
-	bool first = rwsem_first_waiter(sem) == waiter;
+	struct rwsem_waiter *first = rwsem_first_waiter(sem);
 	long count, new;
 
 	lockdep_assert_held(&sem->wait_lock);
@@ -609,11 +609,20 @@ static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
 		bool has_handoff = !!(count & RWSEM_FLAG_HANDOFF);
 
 		if (has_handoff) {
-			if (!first)
+			/*
+			 * Honor handoff bit and yield only when the first
+			 * waiter is the one that set it. Otherwise, we
+			 * still try to acquire the rwsem.
+			 */
+			if (first->handoff_set && (waiter != first))
 				return false;
 
-			/* First waiter inherits a previously set handoff bit */
-			waiter->handoff_set = true;
+			/*
+			 * First waiter can inherit a previously set handoff
+			 * bit and spin on rwsem if lock acquisition fails.
+			 */
+			if (waiter == first)
+				waiter->handoff_set = true;
 		}
 
 		new = count;
@@ -1027,6 +1036,7 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, long count, unsigned int stat
 	waiter.task = current;
 	waiter.type = RWSEM_WAITING_FOR_READ;
 	waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;
+	waiter.handoff_set = false;
 
 	raw_spin_lock_irq(&sem->wait_lock);
 	if (list_empty(&sem->wait_list)) {