io_uring-6.4-2023-06-21

-----BEGIN PGP SIGNATURE-----
 
 iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmSTrvkQHGF4Ym9lQGtl
 cm5lbC5kawAKCRD301j7KXHgpr4GEAC3XOr0HAzX5W+3i/ikRCw8knIHgw4yfYEO
 xlpV141PnUNvaqmyDBHuXrLMULwdiYCM30hSyfqen+Uxl7im9gzlkO94cMhFvp/u
 x3XH58iYKAivc8fYXmvl8mSaxK8j30p8NQNBl10tAlU6wki/B8Kd87Am0m3AlZb/
 7yNdCB5VFXaX0LeYs6CSxDP8m6cClR+bdo39UI703T3JQXOVi9MDhqUoILKLYTob
 4cST/eukXMOmbR8qo1Ii//gxwKIqP7TSusF8OZIX0xx3od03tQ9qW2r63gadD3vP
 Mp67myG3ed3P1dg6tK/uvK6SdcmgnLxkJXtVi+l0gl4DGsxpWvwQ8yeV0IegTBqp
 AMbybhgfQsb6RLba3FB2xwyuWVI8DVlqV/Qt8QvqXZWv5tZuPotkg6F7nwoAY1XK
 DI58wkxIAks9b+MPVQycJfrXWz82iuRR7ZyvuqHPgvHvSmNz9KPFWAjHvtvWgrKz
 eMtGhRRoBAtKl/1pDiZ13gS3mvS+0BA2UkeUEx1yaZBDpT2L5D19RQrsq9fE3FBU
 3aSktHRPDFi/mxNztAgrZKHIfUxdXLPB/0q+1ZLki2DaaUlnbOaF99+hPV3Uyz4M
 HkCGUFAH5B9+WuAD8Nv7GTwiNbf1gyqa+T5MR30R9x8RXhE7v4zDg986qW9JT7Sd
 0L87aplBgQ==
 =EQSw
 -----END PGP SIGNATURE-----

Merge tag 'io_uring-6.4-2023-06-21' of git://git.kernel.dk/linux

Pull io_uring fixes from Jens Axboe:
 "A fix for a race condition with poll removal and linked timeouts, and
  then a few followup fixes/tweaks for the msg_control patch from last
  week.

  Not super important, particularly the sparse fixup, as it was broken
  before that recent commit. But let's get it sorted for real for this
  release, rather than just have it broken a bit differently"

* tag 'io_uring-6.4-2023-06-21' of git://git.kernel.dk/linux:
  io_uring/net: use the correct msghdr union member in io_sendmsg_copy_hdr
  io_uring/net: disable partial retries for recvmsg with cmsg
  io_uring/net: clear msg_controllen on partial sendmsg retry
  io_uring/poll: serialize poll linked timer start with poll removal
Committed by Linus Torvalds, 2023-06-22 17:32:34 -07:00 (commit c213de632f)
2 files changed, 15 insertions(+), 11 deletions(-)

--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -203,7 +203,7 @@ static int io_sendmsg_copy_hdr(struct io_kiocb *req,
 	ret = sendmsg_copy_msghdr(&iomsg->msg, sr->umsg, sr->msg_flags,
					&iomsg->free_iov);
 	/* save msg_control as sys_sendmsg() overwrites it */
-	sr->msg_control = iomsg->msg.msg_control;
+	sr->msg_control = iomsg->msg.msg_control_user;
 	return ret;
 }
 
@@ -302,7 +302,7 @@ int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
 
 	if (req_has_async_data(req)) {
 		kmsg = req->async_data;
-		kmsg->msg.msg_control = sr->msg_control;
+		kmsg->msg.msg_control_user = sr->msg_control;
 	} else {
 		ret = io_sendmsg_copy_hdr(req, &iomsg);
 		if (ret)
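
For context on the two hunks above: struct msghdr keeps the control-buffer pointer in a union so that kernel and user pointers stay distinct, and only the user-space member carries the __user annotation that sparse checks. Mixing the two members is what produced the sparse warning mentioned in the pull message. An abridged sketch of the relevant fields (field order and unrelated members are elided, not a complete definition):

/* Abridged from include/linux/socket.h; members unrelated to the
 * control buffer are elided. Only msg_control_user has the __user
 * annotation, so assigning through the wrong member trips sparse. */
struct msghdr {
	void		*msg_name;
	int		msg_namelen;
	/* ... */
	union {
		void		*msg_control;		/* kernel-space pointer */
		void __user	*msg_control_user;	/* user-space pointer */
	};
	bool		msg_control_is_user : 1;	/* which member is live */
	/* ... */
	__kernel_size_t	msg_controllen;			/* ancillary data length */
};
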
@@ -326,6 +326,8 @@ int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
 		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
 			return io_setup_async_msg(req, kmsg, issue_flags);
 		if (ret > 0 && io_net_retry(sock, flags)) {
+			kmsg->msg.msg_controllen = 0;
+			kmsg->msg.msg_control = NULL;
 			sr->done_io += ret;
 			req->flags |= REQ_F_PARTIAL_IO;
 			return io_setup_async_msg(req, kmsg, issue_flags);
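
The two added lines matter because the ancillary payload is consumed on the first pass through sendmsg: a partial retry that kept the same msg_control/msg_controllen would try to send the cmsg data again. For a concrete picture of what lives in those fields, here is an illustrative userspace sketch, not part of the patch: it assumes liburing and a connected AF_UNIX socket sock_fd, and the helper name send_fd is invented for the example. It passes a file descriptor as SCM_RIGHTS ancillary data through IORING_OP_SENDMSG:

/* Illustrative only: shows the msg_control/msg_controllen contents that
 * the fix above scrubs before a partial-send retry. */
#include <liburing.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

static int send_fd(struct io_uring *ring, int sock_fd, int fd_to_send)
{
	char data = 'x';
	struct iovec iov = { .iov_base = &data, .iov_len = 1 };
	union {				/* properly aligned cmsg buffer */
		char buf[CMSG_SPACE(sizeof(int))];
		struct cmsghdr align;
	} u;
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = u.buf,
		.msg_controllen = sizeof(u.buf),
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int ret;

	/* Attach the fd as SCM_RIGHTS ancillary data. */
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &fd_to_send, sizeof(int));

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_sendmsg(sqe, sock_fd, &msg, 0);
	io_uring_submit(ring);

	ret = io_uring_wait_cqe(ring, &cqe);
	if (!ret) {
		ret = cqe->res;		/* bytes sent, or -errno */
		io_uring_cqe_seen(ring, cqe);
	}
	return ret;
}
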
@@ -787,16 +789,19 @@ int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
 	flags = sr->msg_flags;
 	if (force_nonblock)
 		flags |= MSG_DONTWAIT;
-	if (flags & MSG_WAITALL)
-		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
 
 	kmsg->msg.msg_get_inq = 1;
-	if (req->flags & REQ_F_APOLL_MULTISHOT)
+	if (req->flags & REQ_F_APOLL_MULTISHOT) {
 		ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
					   &mshot_finished);
-	else
+	} else {
+		/* disable partial retry for recvmsg with cmsg attached */
+		if (flags & MSG_WAITALL && !kmsg->msg.msg_controllen)
+			min_ret = iov_iter_count(&kmsg->msg.msg_iter);
+
 		ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
					 kmsg->uaddr, flags);
+	}
 
 	if (ret < min_ret) {
 		if (ret == -EAGAIN && force_nonblock) {
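
On the receive side the same concern inverts: cmsg data is delivered into msg_control on the first pass, and a partial retry would risk losing or overwriting it, so the fix only applies the full-length MSG_WAITALL minimum (and with it the partial-retry path) when no cmsg buffer is attached. An illustrative userspace counterpart using plain recvmsg(2); the helper name recv_with_cmsg is invented for the sketch:

/* Illustrative only: a recvmsg with a cmsg buffer attached. After this
 * fix, io_uring completes such a request with a short read rather than
 * retrying and disturbing already-delivered ancillary data. */
#include <sys/socket.h>
#include <sys/uio.h>

static ssize_t recv_with_cmsg(int sock_fd, void *buf, size_t len)
{
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	union {				/* properly aligned cmsg buffer */
		char buf[CMSG_SPACE(sizeof(int))];
		struct cmsghdr align;
	} u;
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = u.buf,
		.msg_controllen = sizeof(u.buf),	/* cmsg attached */
	};

	/* May legitimately return fewer than 'len' bytes even with
	 * MSG_WAITALL once ancillary data is involved. */
	return recvmsg(sock_fd, &msg, MSG_WAITALL);
}
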

--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -977,8 +977,9 @@ int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
 	struct io_hash_bucket *bucket;
 	struct io_kiocb *preq;
 	int ret2, ret = 0;
-	struct io_tw_state ts = {};
+	struct io_tw_state ts = { .locked = true };
 
+	io_ring_submit_lock(ctx, issue_flags);
 	preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table, &bucket);
 	ret2 = io_poll_disarm(preq);
 	if (bucket)
@@ -990,12 +991,10 @@ int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
 		goto out;
 	}
 
-	io_ring_submit_lock(ctx, issue_flags);
 	preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table_locked, &bucket);
 	ret2 = io_poll_disarm(preq);
 	if (bucket)
 		spin_unlock(&bucket->lock);
-	io_ring_submit_unlock(ctx, issue_flags);
 	if (ret2) {
 		ret = ret2;
 		goto out;
@@ -1019,7 +1018,7 @@ int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
 	if (poll_update->update_user_data)
 		preq->cqe.user_data = poll_update->new_user_data;
 
-	ret2 = io_poll_add(preq, issue_flags);
+	ret2 = io_poll_add(preq, issue_flags & ~IO_URING_F_UNLOCKED);
 	/* successfully updated, don't complete poll request */
 	if (!ret2 || ret2 == -EIOCBQUEUED)
 		goto out;
@@ -1027,9 +1026,9 @@ int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
 
 	req_set_fail(preq);
 	io_req_set_res(preq, -ECANCELED, 0);
-	ts.locked = !(issue_flags & IO_URING_F_UNLOCKED);
 	io_req_task_complete(preq, &ts);
 out:
+	io_ring_submit_unlock(ctx, issue_flags);
 	if (ret < 0) {
 		req_set_fail(req);
 		return ret;
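
The four poll.c hunks all serve the race in the commit subject: io_poll_remove() could find and disarm a poll request while that request's linked timeout was being started from task_work. The fix holds ctx->uring_lock across the whole find/disarm/complete sequence and tells the inner helpers the lock is already held (ts.locked = true, issue_flags & ~IO_URING_F_UNLOCKED). A hedged sketch of the userspace pattern that exercises the window, using liburing; POLL_UDATA and both helper names are invented for the illustration:

/* Illustrative only: one submitter arms a poll with a linked timeout,
 * another removes it by user_data. Racing these two paths hits the
 * window that io_poll_remove() now closes under ctx->uring_lock. */
#include <liburing.h>
#include <poll.h>

#define POLL_UDATA 0x1234ULL	/* invented tag for the poll request */

static void submit_poll_with_timeout(struct io_uring *ring, int fd)
{
	struct __kernel_timespec ts = { .tv_sec = 1 };
	struct io_uring_sqe *sqe;

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_poll_add(sqe, fd, POLLIN);
	io_uring_sqe_set_data64(sqe, POLL_UDATA);
	sqe->flags |= IOSQE_IO_LINK;	/* next SQE becomes this poll's timeout */

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_link_timeout(sqe, &ts, 0);

	io_uring_submit(ring);
}

/* Called concurrently from another thread submitting to the same ring. */
static void remove_poll(struct io_uring *ring)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	io_uring_prep_poll_remove(sqe, POLL_UDATA);
	io_uring_submit(ring);
}
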