pvcalls-front: Avoid get_free_pages(GFP_KERNEL) under spinlock

The problem is that __get_free_pages(GFP_KERNEL) is called while a
spinlock is held. The call tree is:
pvcalls_front_accept() holds bedata->socket_lock.
    -> create_active()
        -> __get_free_pages() uses GFP_KERNEL

create_active() is called from pvcalls_front_accept() (and likewise
from pvcalls_front_connect()) with bedata->socket_lock held. The
allocation is not allowed to sleep in that context, so GFP_KERNEL
cannot be used there.

This issue was detected by Coccinelle.
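
For illustration, a minimal sketch of the flagged pattern (the names
below are made up and are not the driver code): a GFP_KERNEL
allocation, which may sleep, performed while a spinlock is held,
i.e. in atomic context.

#include <linux/gfp.h>
#include <linux/spinlock.h>

/* Anti-pattern sketch only; example_lock and alloc_under_lock are
 * illustrative names, not part of pvcalls-front.
 */
static DEFINE_SPINLOCK(example_lock);

static void *alloc_under_lock(void)
{
	void *page;

	spin_lock(&example_lock);
	/* BAD: __get_free_pages(GFP_KERNEL, ...) may sleep, but sleeping
	 * is not allowed while example_lock is held (atomic context).
	 */
	page = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 0);
	spin_unlock(&example_lock);

	return page;
}

Switching such a call to GFP_ATOMIC would also silence the warning,
but it dips into the atomic reserves; this patch instead moves the
allocations out of the locked region altogether.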

v2: Add a helper that performs the allocations, call it outside the
    lock, and pass the allocated data to create_active()
    (see the sketch after this changelog).

v3: Use the matching deallocators, i.e. free_page() and free_pages(),
    respectively.

v4: Pre-populate map (struct sock_mapping) rather than introducing
    another new struct.

v5: Since the data is allocated outside of create_active(), it must
    also be freed outside of it when create_active() fails.
    Move kzalloc(sizeof(*map2), GFP_ATOMIC) outside the spinlock and
    use GFP_KERNEL instead.

v6: Drop the superfluous calls.
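
For reference, a hedged sketch of the resulting shape of the fix
(again with made-up names, not the actual pvcalls code): do the
sleeping allocation before taking the lock, do only non-sleeping work
under the lock, and free the pre-allocated memory outside the lock if
a later step fails.

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/spinlock.h>

/* Hypothetical state and helpers, for illustration only. */
struct example_state {
	spinlock_t lock;
	unsigned long ring;
};

static int example_alloc_ring(struct example_state *st)
{
	/* GFP_KERNEL may sleep; that is fine here, no lock is held. */
	st->ring = get_zeroed_page(GFP_KERNEL);
	return st->ring ? 0 : -ENOMEM;
}

static void example_free_ring(struct example_state *st)
{
	free_page(st->ring);
}

/* Placeholder for the work that really has to run under the lock
 * (e.g. grabbing a request slot); it must not sleep.
 */
static int example_register_locked(struct example_state *st);

static int example_setup(struct example_state *st)
{
	int ret;

	/* 1. Sleeping allocation up front, with no lock held. */
	ret = example_alloc_ring(st);
	if (ret < 0)
		return ret;

	/* 2. Only non-sleeping work under the lock. */
	spin_lock(&st->lock);
	ret = example_register_locked(st);
	spin_unlock(&st->lock);

	/* 3. On failure, free the pre-allocated memory outside the
	 *    lock, mirroring where it was allocated.
	 */
	if (ret < 0)
		example_free_ring(st);

	return ret;
}

This mirrors how alloc_active_ring()/free_active_ring() are used
around create_active() in the patch below.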

Suggested-by: Juergen Gross <jgross@suse.com>
Suggested-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Suggested-by: Stefano Stabellini <sstabellini@kernel.org>
Signed-off-by: Wen Yang <wen.yang99@zte.com.cn>
Acked-by: Stefano Stabellini <sstabellini@kernel.org>
CC: Julia Lawall <julia.lawall@lip6.fr>
CC: Boris Ostrovsky <boris.ostrovsky@oracle.com>
CC: Juergen Gross <jgross@suse.com>
CC: Stefano Stabellini <sstabellini@kernel.org>
CC: xen-devel@lists.xenproject.org
CC: linux-kernel@vger.kernel.org
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>

commit 9f51c05dc4 (parent 1f8ce09b36)
Author:    Wen Yang <wen.yang99@zte.com.cn>
Date:      2018-12-05 10:35:50 +08:00
Committer: Boris Ostrovsky <boris.ostrovsky@oracle.com>

@@ -341,6 +341,39 @@ int pvcalls_front_socket(struct socket *sock)
 	return ret;
 }
 
+static void free_active_ring(struct sock_mapping *map)
+{
+	free_pages((unsigned long)map->active.data.in,
+			map->active.ring->ring_order);
+	free_page((unsigned long)map->active.ring);
+}
+
+static int alloc_active_ring(struct sock_mapping *map)
+{
+	void *bytes;
+
+	map->active.ring = (struct pvcalls_data_intf *)
+		get_zeroed_page(GFP_KERNEL);
+	if (!map->active.ring)
+		goto out;
+
+	map->active.ring->ring_order = PVCALLS_RING_ORDER;
+	bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+					 PVCALLS_RING_ORDER);
+	if (!bytes)
+		goto out;
+
+	map->active.data.in = bytes;
+	map->active.data.out = bytes +
+		XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
+
+	return 0;
+
+out:
+	free_active_ring(map);
+	return -ENOMEM;
+}
+
 static int create_active(struct sock_mapping *map, int *evtchn)
 {
 	void *bytes;
@@ -349,15 +382,7 @@ static int create_active(struct sock_mapping *map, int *evtchn)
 	*evtchn = -1;
 	init_waitqueue_head(&map->active.inflight_conn_req);
 
-	map->active.ring = (struct pvcalls_data_intf *)
-		__get_free_page(GFP_KERNEL | __GFP_ZERO);
-	if (map->active.ring == NULL)
-		goto out_error;
-	map->active.ring->ring_order = PVCALLS_RING_ORDER;
-	bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
-					 PVCALLS_RING_ORDER);
-	if (bytes == NULL)
-		goto out_error;
+	bytes = map->active.data.in;
 	for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++)
 		map->active.ring->ref[i] = gnttab_grant_foreign_access(
 			pvcalls_front_dev->otherend_id,
@@ -367,10 +392,6 @@ static int create_active(struct sock_mapping *map, int *evtchn)
 		pvcalls_front_dev->otherend_id,
 		pfn_to_gfn(virt_to_pfn((void *)map->active.ring)), 0);
 
-	map->active.data.in = bytes;
-	map->active.data.out = bytes +
-		XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
-
 	ret = xenbus_alloc_evtchn(pvcalls_front_dev, evtchn);
 	if (ret)
 		goto out_error;
@@ -391,8 +412,6 @@ static int create_active(struct sock_mapping *map, int *evtchn)
 out_error:
 	if (*evtchn >= 0)
 		xenbus_free_evtchn(pvcalls_front_dev, *evtchn);
-	free_pages((unsigned long)map->active.data.in, PVCALLS_RING_ORDER);
-	free_page((unsigned long)map->active.ring);
 	return ret;
 }
 
@@ -412,17 +431,24 @@ int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr,
 		return PTR_ERR(map);
 
 	bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
+	ret = alloc_active_ring(map);
+	if (ret < 0) {
+		pvcalls_exit_sock(sock);
+		return ret;
+	}
 
 	spin_lock(&bedata->socket_lock);
 	ret = get_request(bedata, &req_id);
 	if (ret < 0) {
 		spin_unlock(&bedata->socket_lock);
+		free_active_ring(map);
 		pvcalls_exit_sock(sock);
 		return ret;
 	}
 	ret = create_active(map, &evtchn);
 	if (ret < 0) {
 		spin_unlock(&bedata->socket_lock);
+		free_active_ring(map);
 		pvcalls_exit_sock(sock);
 		return ret;
 	}
@@ -786,25 +812,36 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
 		}
 	}
 
+	map2 = kzalloc(sizeof(*map2), GFP_KERNEL);
+	if (map2 == NULL) {
+		clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
+			  (void *)&map->passive.flags);
+		pvcalls_exit_sock(sock);
+		return -ENOMEM;
+	}
+	ret = alloc_active_ring(map2);
+	if (ret < 0) {
+		clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
+			  (void *)&map->passive.flags);
+		kfree(map2);
+		pvcalls_exit_sock(sock);
+		return ret;
+	}
+
 	spin_lock(&bedata->socket_lock);
 	ret = get_request(bedata, &req_id);
 	if (ret < 0) {
 		clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
 			  (void *)&map->passive.flags);
 		spin_unlock(&bedata->socket_lock);
+		free_active_ring(map2);
+		kfree(map2);
 		pvcalls_exit_sock(sock);
 		return ret;
 	}
-	map2 = kzalloc(sizeof(*map2), GFP_ATOMIC);
-	if (map2 == NULL) {
-		clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
-			  (void *)&map->passive.flags);
-		spin_unlock(&bedata->socket_lock);
-		pvcalls_exit_sock(sock);
-		return -ENOMEM;
-	}
 	ret = create_active(map2, &evtchn);
 	if (ret < 0) {
+		free_active_ring(map2);
 		kfree(map2);
 		clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
 			  (void *)&map->passive.flags);