Merge patch series "can: j1939: avoid possible use-after-free when j1939_can_rx_register fails"

Fedor Pchelkin <pchelkin@ispras.ru> says:

The patch series fixes a possible racy use-after-free scenario
described in patch 2/2: if j1939_can_rx_register() fails, a concurrent
thread may already have looked up the priv structure that is about to
be freed.
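
For readers who want the shape of the race without digging through the
J1939 code, here is a minimal userspace sketch of the same pattern.
struct priv, shared, starter(), reader() and register_rx() are made-up
stand-ins for j1939_priv, the per-netdev priv pointer,
j1939_netdev_start(), j1939_priv_get_by_ndev() and
j1939_can_rx_register(); this is an analogy, not the kernel code:

	/* Sketch only: publish, drop the lock, then roll back on failure.
	 * Thread A publishes the object, drops the lock, and frees the object
	 * when the "registration" step fails.  Thread B can look the object up
	 * and take a reference in that window, so its later access hits freed
	 * memory.
	 */
	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	struct priv {
		int refcnt;
	};

	static struct priv *shared;	/* plays the role of the per-ndev priv pointer */

	static int register_rx(void)
	{
		return -1;		/* pretend j1939_can_rx_register() failed */
	}

	static void *starter(void *arg)	/* like the buggy j1939_netdev_start() */
	{
		struct priv *priv = malloc(sizeof(*priv));

		priv->refcnt = 1;

		pthread_mutex_lock(&lock);
		shared = priv;			/* publish, like j1939_priv_set(ndev, priv) */
		pthread_mutex_unlock(&lock);	/* lock dropped before registration */

		if (register_rx() < 0) {
			pthread_mutex_lock(&lock);
			shared = NULL;		/* roll back ... */
			pthread_mutex_unlock(&lock);
			free(priv);		/* ... but a reader may already hold the pointer */
		}
		return NULL;
	}

	static void *reader(void *arg)	/* like j1939_priv_get_by_ndev() + kref_get() */
	{
		struct priv *priv;

		pthread_mutex_lock(&lock);
		priv = shared;
		if (priv)
			priv->refcnt++;	/* reference taken, pointer kept */
		pthread_mutex_unlock(&lock);

		if (priv)
			printf("refcnt=%d\n", priv->refcnt);	/* possible use-after-free */
		return NULL;
	}

	int main(void)
	{
		pthread_t a, b;

		pthread_create(&a, NULL, starter, NULL);
		pthread_create(&b, NULL, reader, NULL);
		pthread_join(a, NULL);
		pthread_join(b, NULL);
		return 0;
	}

Patch 2/2 closes that window by keeping j1939_netdev_lock held from the
publication in j1939_priv_set() across j1939_can_rx_register() until
either success or the rollback, which is what the large hunk in the diff
below does.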

Patch 1/2 turns j1939_netdev_lock into a mutex so that the call to
j1939_can_rx_register() can be serialized without having to change
GFP_KERNEL to GFP_ATOMIC inside can_rx_register(). This seems to be safe.
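
The reason a mutex is needed rather than keeping the spinlock: a
spinlock holder must not sleep, and can_rx_register() can sleep because
of its GFP_KERNEL allocation, so the old lock could not stay held across
the registration. The following fragment is condensed from the hunks
below (error handling shortened, not buildable on its own) to show the
before/after shape of j1939_netdev_start():

	/* before: the spinlock has to be dropped before the sleeping call */
	spin_lock(&j1939_netdev_lock);
	j1939_priv_set(ndev, priv);
	spin_unlock(&j1939_netdev_lock);
	ret = j1939_can_rx_register(priv);	/* may sleep: GFP_KERNEL allocation */

	/* after: the mutex stays held across registration and the rollback */
	mutex_lock(&j1939_netdev_lock);
	j1939_priv_set(ndev, priv);
	ret = j1939_can_rx_register(priv);	/* still GFP_KERNEL, now serialized */
	if (ret < 0)
		j1939_priv_set(ndev, NULL);	/* rollback before anyone can see priv */
	mutex_unlock(&j1939_netdev_lock);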

Note that the patch series has been tested only via Syzkaller and not
with a real device.

Link: https://lore.kernel.org/r/20230526171910.227615-1-pchelkin@ispras.ru
Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
commit 628f725d3b
Author: Marc Kleine-Budde <mkl@pengutronix.de>
Date:   2023-06-05 08:27:23 +02:00

diff --git a/net/can/j1939/main.c b/net/can/j1939/main.c
--- a/net/can/j1939/main.c
+++ b/net/can/j1939/main.c

@@ -126,7 +126,7 @@ static void j1939_can_recv(struct sk_buff *iskb, void *data)
 #define J1939_CAN_ID CAN_EFF_FLAG
 #define J1939_CAN_MASK (CAN_EFF_FLAG | CAN_RTR_FLAG)
 
-static DEFINE_SPINLOCK(j1939_netdev_lock);
+static DEFINE_MUTEX(j1939_netdev_lock);
 
 static struct j1939_priv *j1939_priv_create(struct net_device *ndev)
 {
@@ -220,7 +220,7 @@ static void __j1939_rx_release(struct kref *kref)
 	j1939_can_rx_unregister(priv);
 	j1939_ecu_unmap_all(priv);
 	j1939_priv_set(priv->ndev, NULL);
-	spin_unlock(&j1939_netdev_lock);
+	mutex_unlock(&j1939_netdev_lock);
 }
 
 /* get pointer to priv without increasing ref counter */
@@ -248,9 +248,9 @@ static struct j1939_priv *j1939_priv_get_by_ndev(struct net_device *ndev)
 {
 	struct j1939_priv *priv;
 
-	spin_lock(&j1939_netdev_lock);
+	mutex_lock(&j1939_netdev_lock);
 	priv = j1939_priv_get_by_ndev_locked(ndev);
-	spin_unlock(&j1939_netdev_lock);
+	mutex_unlock(&j1939_netdev_lock);
 
 	return priv;
 }
@@ -260,14 +260,14 @@ struct j1939_priv *j1939_netdev_start(struct net_device *ndev)
 	struct j1939_priv *priv, *priv_new;
 	int ret;
 
-	spin_lock(&j1939_netdev_lock);
+	mutex_lock(&j1939_netdev_lock);
 	priv = j1939_priv_get_by_ndev_locked(ndev);
 	if (priv) {
 		kref_get(&priv->rx_kref);
-		spin_unlock(&j1939_netdev_lock);
+		mutex_unlock(&j1939_netdev_lock);
 		return priv;
 	}
-	spin_unlock(&j1939_netdev_lock);
+	mutex_unlock(&j1939_netdev_lock);
 
 	priv = j1939_priv_create(ndev);
 	if (!priv)
@@ -277,29 +277,31 @@ struct j1939_priv *j1939_netdev_start(struct net_device *ndev)
 	spin_lock_init(&priv->j1939_socks_lock);
 	INIT_LIST_HEAD(&priv->j1939_socks);
 
-	spin_lock(&j1939_netdev_lock);
+	mutex_lock(&j1939_netdev_lock);
 	priv_new = j1939_priv_get_by_ndev_locked(ndev);
 	if (priv_new) {
 		/* Someone was faster than us, use their priv and roll
 		 * back our's.
 		 */
 		kref_get(&priv_new->rx_kref);
-		spin_unlock(&j1939_netdev_lock);
+		mutex_unlock(&j1939_netdev_lock);
 		dev_put(ndev);
 		kfree(priv);
 		return priv_new;
 	}
 	j1939_priv_set(ndev, priv);
-	spin_unlock(&j1939_netdev_lock);
 
 	ret = j1939_can_rx_register(priv);
 	if (ret < 0)
 		goto out_priv_put;
 
+	mutex_unlock(&j1939_netdev_lock);
 	return priv;
 
  out_priv_put:
 	j1939_priv_set(ndev, NULL);
+	mutex_unlock(&j1939_netdev_lock);
+
 	dev_put(ndev);
 	kfree(priv);
 
@@ -308,7 +310,7 @@ struct j1939_priv *j1939_netdev_start(struct net_device *ndev)
 
 void j1939_netdev_stop(struct j1939_priv *priv)
 {
-	kref_put_lock(&priv->rx_kref, __j1939_rx_release, &j1939_netdev_lock);
+	kref_put_mutex(&priv->rx_kref, __j1939_rx_release, &j1939_netdev_lock);
 	j1939_priv_put(priv);
 }
 
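
One detail worth noting about the last two hunks taken together:
kref_put_mutex() only takes the mutex when the reference count actually
drops to zero, and it then invokes the release callback with the mutex
held; the callback is expected to drop it. That is why
__j1939_rx_release() (second hunk) ends with
mutex_unlock(&j1939_netdev_lock). Roughly, as a sketch of that contract
rather than the kref implementation:

	/* sketch: what kref_put_mutex() does for the final reference */
	if (refcount_dec_and_mutex_lock(&priv->rx_kref.refcount, &j1939_netdev_lock))
		__j1939_rx_release(&priv->rx_kref);	/* runs locked, unlocks before returning */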