dlm: fix sleep in atomic context

This patch changes the orphans mutex to a spinlock. Since commit
c288745f1d ("dlm: avoid blocking receive at the end of recovery") the
DLM message receive path is locked by a rwlock_t, and do_purge() can be
called while that lock is held, where sleeping is forbidden.

We need to use spin_lock_bh() because do_purge() can also be called
from user context via dlm_user_purge(), and since commit 92d59adfaf
("dlm: do message processing in softirq context") the DLM message
receive path runs in softirq context. See the sketch below.

Fixes: c288745f1d ("dlm: avoid blocking receive at the end of recovery")
Reported-by: Dan Carpenter <dan.carpenter@linaro.org>
Closes: https://lore.kernel.org/gfs2/9ad928eb-2ece-4ad9-a79c-d2bce228e4bc@moroto.mountain/
Signed-off-by: Alexander Aring <aahringo@redhat.com>
Signed-off-by: David Teigland <teigland@redhat.com>
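
To illustrate the pattern being fixed, here is a minimal sketch with
made-up names (orphans, orphans_lock, purge_orphans), not the actual
fs/dlm code: a list shared between a softirq receive path and process
context must be protected by a non-sleeping lock, and bottom halves
must be disabled while it is held.

/*
 * Minimal sketch, not fs/dlm code: a shared list touched from both
 * softirq context (message receive) and process context (user purge).
 */
#include <linux/list.h>
#include <linux/spinlock.h>

static LIST_HEAD(orphans);
static DEFINE_SPINLOCK(orphans_lock);	/* was a mutex, which can sleep */

static void purge_orphans(void)
{
	/*
	 * spin_lock_bh() never sleeps, so it is safe to take while the
	 * receive path's rwlock is held or in softirq context. Disabling
	 * bottom halves also keeps the softirq receive path from
	 * preempting a process-context holder on the same CPU, which
	 * would deadlock on the spinlock.
	 */
	spin_lock_bh(&orphans_lock);
	/* walk the list and drop matching entries ... */
	spin_unlock_bh(&orphans_lock);
}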
commit 7b012732d0
parent 15fd7e5517
Alexander Aring, 2024-04-17 15:13:22 -04:00; committed by David Teigland
3 changed files with 8 additions and 8 deletions

fs/dlm/dlm_internal.h

@@ -602,7 +602,7 @@ struct dlm_ls {
 	spinlock_t		ls_waiters_lock;
 	struct list_head	ls_waiters;	/* lkbs needing a reply */
-	struct mutex		ls_orphans_mutex;
+	spinlock_t		ls_orphans_lock;
 	struct list_head	ls_orphans;
 
 	spinlock_t		ls_new_rsb_spin;

fs/dlm/lock.c

@@ -5880,7 +5880,7 @@ int dlm_user_adopt_orphan(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
 	int found_other_mode = 0;
 	int rv = 0;
 
-	mutex_lock(&ls->ls_orphans_mutex);
+	spin_lock_bh(&ls->ls_orphans_lock);
 	list_for_each_entry(iter, &ls->ls_orphans, lkb_ownqueue) {
 		if (iter->lkb_resource->res_length != namelen)
 			continue;
@@ -5897,7 +5897,7 @@ int dlm_user_adopt_orphan(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
 		*lkid = iter->lkb_id;
 		break;
 	}
-	mutex_unlock(&ls->ls_orphans_mutex);
+	spin_unlock_bh(&ls->ls_orphans_lock);
 
 	if (!lkb && found_other_mode) {
 		rv = -EAGAIN;
@@ -6089,9 +6089,9 @@ static int orphan_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
 	int error;
 
 	hold_lkb(lkb); /* reference for the ls_orphans list */
-	mutex_lock(&ls->ls_orphans_mutex);
+	spin_lock_bh(&ls->ls_orphans_lock);
 	list_add_tail(&lkb->lkb_ownqueue, &ls->ls_orphans);
-	mutex_unlock(&ls->ls_orphans_mutex);
+	spin_unlock_bh(&ls->ls_orphans_lock);
 
 	set_unlock_args(0, lkb->lkb_ua, &args);
@@ -6241,7 +6241,7 @@ static void do_purge(struct dlm_ls *ls, int nodeid, int pid)
 {
 	struct dlm_lkb *lkb, *safe;
 
-	mutex_lock(&ls->ls_orphans_mutex);
+	spin_lock_bh(&ls->ls_orphans_lock);
 	list_for_each_entry_safe(lkb, safe, &ls->ls_orphans, lkb_ownqueue) {
 		if (pid && lkb->lkb_ownpid != pid)
 			continue;
@@ -6249,7 +6249,7 @@ static void do_purge(struct dlm_ls *ls, int nodeid, int pid)
 		list_del_init(&lkb->lkb_ownqueue);
 		dlm_put_lkb(lkb);
 	}
-	mutex_unlock(&ls->ls_orphans_mutex);
+	spin_unlock_bh(&ls->ls_orphans_lock);
 }
 
 static int send_purge(struct dlm_ls *ls, int nodeid, int pid)

fs/dlm/lockspace.c

@@ -436,7 +436,7 @@ static int new_lockspace(const char *name, const char *cluster,
 	INIT_LIST_HEAD(&ls->ls_waiters);
 	spin_lock_init(&ls->ls_waiters_lock);
 	INIT_LIST_HEAD(&ls->ls_orphans);
-	mutex_init(&ls->ls_orphans_mutex);
+	spin_lock_init(&ls->ls_orphans_lock);
 
 	INIT_LIST_HEAD(&ls->ls_new_rsb);
 	spin_lock_init(&ls->ls_new_rsb_spin);