block-6.10-20240614

-----BEGIN PGP SIGNATURE-----
 
 iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmZsY6wQHGF4Ym9lQGtl
 cm5lbC5kawAKCRD301j7KXHgpjVoD/9emDOqfWGbYQ2DQaIyXYbWaUHteBrg1d3S
 U8fRQsipHY0F3a+3eoY6Epjyt/8fq3jK4g/0/7hs+wYUtWgEXJMjl9lql7gZl4rS
 3kGlk9CmwFEm5lYQAYCIDNzKr6814naGi6yYhCnstyrsSzawjr3dYP3vr9soQL3h
 TvIv3PPRy6xg03mKEH+X2OSWMa4CB/85WrcKF2L/TrPD1/v3hbLZ2exfK1XX5nhl
 wtM9e7fUoP0bCOx/5GKd01uJJJjrC4dhsLDRTXM1ayVrOO0jREIBi27Ho95TdzfU
 MPwwWosGXgNzBCDKzZyBw1i1LFuHb+7waU/FgvpU5vXS8xaYBvm/4pWnDEGJlSAQ
 8Hwiwl0uiaok9zpMCDexiWl7LdNilcS2wMxKptdpENQ9mzC+doxWqMhQLY2eRprp
 eaBdLsHngS+GCiPePdZXuFMJ9Rz4/J9MEOBWmhicBDjwITLCdF6jGQ1/3tU/vQzp
 wvBjc2qjOQ46uRl+HgJVNe4E2G0BZdF1b17nYEpNRatmIWMCoydZtrLNuve2ZMkG
 icGP31nQe9YlgTMm/ayS2qgLD4yDFFpMeD2Ff3xXmtWZyyJ9L2UdOSUUeCZANmF8
 7OVbQrHAnHi61CINGf0JHVkx1xKy/o3Z06+FEypycxlE849k8sentSHbqWsjWzMP
 4jCICEwsHg==
 =p7jM
 -----END PGP SIGNATURE-----

Merge tag 'block-6.10-20240614' of git://git.kernel.dk/linux

Pull block fixes from Jens Axboe:

 - NVMe pull request via Keith:
     - Discard double free on error conditions (Chunguang)
    - Target fixes (Daniel)
     - Namespace detachment regression fix (Keith)

 - Fix for an issue with flush requests and queuelist reuse (Chengming)

 - nbd sparse annotation fixes (Christoph)

 - unmap and free bio mapped data via submitter (Anuj)

 - loop discard/fallocate unsupported fix (Cyril)

 - Fix for the zoned write plugging added in this release (Damien)

 - sed-opal wrong address fix (Su)

* tag 'block-6.10-20240614' of git://git.kernel.dk/linux:
  loop: Disable fallocate() zero and discard if not supported
  nvme: fix namespace removal list
  nbd: Remove __force casts
  nvmet: always initialize cqe.result
  nvmet-passthru: propagate status from id override functions
  nvme: avoid double free special payload
  block: unmap and free user mapped integrity via submitter
  block: fix request.queuelist usage in flush
  block: Optimize disk zone resource cleanup
  block: sed-opal: avoid possible wrong address reference in read_sed_opal_key()
commit c286c21ff9 (Linus Torvalds, 2024-06-14 11:41:50 -07:00)
13 changed files with 100 additions and 53 deletions

--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c

@@ -144,16 +144,38 @@ void bio_integrity_free(struct bio *bio)
 	struct bio_integrity_payload *bip = bio_integrity(bio);
 	struct bio_set *bs = bio->bi_pool;
 
+	if (bip->bip_flags & BIP_INTEGRITY_USER)
+		return;
 	if (bip->bip_flags & BIP_BLOCK_INTEGRITY)
 		kfree(bvec_virt(bip->bip_vec));
-	else if (bip->bip_flags & BIP_INTEGRITY_USER)
-		bio_integrity_unmap_user(bip);
 
 	__bio_integrity_free(bs, bip);
 	bio->bi_integrity = NULL;
 	bio->bi_opf &= ~REQ_INTEGRITY;
 }
 
+/**
+ * bio_integrity_unmap_free_user - Unmap and free bio user integrity payload
+ * @bio: bio containing bip to be unmapped and freed
+ *
+ * Description: Used to unmap and free the user mapped integrity portion of a
+ * bio. Submitter attaching the user integrity buffer is responsible for
+ * unmapping and freeing it during completion.
+ */
+void bio_integrity_unmap_free_user(struct bio *bio)
+{
+	struct bio_integrity_payload *bip = bio_integrity(bio);
+	struct bio_set *bs = bio->bi_pool;
+
+	if (WARN_ON_ONCE(!(bip->bip_flags & BIP_INTEGRITY_USER)))
+		return;
+	bio_integrity_unmap_user(bip);
+	__bio_integrity_free(bs, bip);
+	bio->bi_integrity = NULL;
+	bio->bi_opf &= ~REQ_INTEGRITY;
+}
+EXPORT_SYMBOL(bio_integrity_unmap_free_user);
+
 /**
  * bio_integrity_add_page - Attach integrity metadata
  * @bio: bio to update
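
Note on the API change above: once BIP_INTEGRITY_USER is set, the generic
bio_integrity_free() path deliberately leaves the payload alone, and the
submitter that mapped the user buffer must call bio_integrity_unmap_free_user()
at completion (the nvme ioctl hunks below do exactly that through a small
nvme_unmap_bio() helper). A minimal stand-alone sketch of that ownership
split, written as user-space C with illustrative names rather than the
kernel API:

/* User-space analogy, not kernel code: generic teardown skips payloads
 * flagged as user-owned; the submitter that created the mapping frees it
 * explicitly on completion. */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define BIP_INTEGRITY_USER (1u << 0)    /* flag name borrowed from the diff */

struct payload {
    unsigned int flags;
    void *buf;
};

/* Generic free: leaves user-owned payloads alone (cf. bio_integrity_free()). */
static void payload_generic_free(struct payload *p)
{
    if (p->flags & BIP_INTEGRITY_USER)
        return;                 /* submitter owns it; not ours to free */
    free(p->buf);
    p->buf = NULL;
}

/* Explicit submitter-side free (cf. bio_integrity_unmap_free_user()). */
static void payload_user_free(struct payload *p)
{
    assert(p->flags & BIP_INTEGRITY_USER);  /* stands in for WARN_ON_ONCE() */
    free(p->buf);
    p->buf = NULL;
}

int main(void)
{
    struct payload p = { .flags = BIP_INTEGRITY_USER, .buf = malloc(16) };

    payload_generic_free(&p);   /* no-op: payload is user-owned */
    payload_user_free(&p);      /* completion path frees exactly once */
    printf("freed once, buf=%p\n", p.buf);
    return 0;
}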

--- a/block/blk-flush.c
+++ b/block/blk-flush.c

@@ -185,7 +185,7 @@ static void blk_flush_complete_seq(struct request *rq,
 		/* queue for flush */
 		if (list_empty(pending))
 			fq->flush_pending_since = jiffies;
-		list_move_tail(&rq->queuelist, pending);
+		list_add_tail(&rq->queuelist, pending);
 		break;
 
 	case REQ_FSEQ_DATA:
@@ -263,6 +263,7 @@ static enum rq_end_io_ret flush_end_io(struct request *flush_rq,
 		unsigned int seq = blk_flush_cur_seq(rq);
 
 		BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
+		list_del_init(&rq->queuelist);
 		blk_flush_complete_seq(rq, fq, seq, error);
 	}
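
The flush fix above replaces list_move_tail() with a list_del_init() at
completion time plus a plain list_add_tail() when queueing. The difference
matters because list_move_tail() unlinks an entry through whatever next/prev
pointers it currently holds; if those are stale, the unlink step rewrites
some other list. A contrived stand-alone demonstration (minimal
reimplementation of the intrusive list, not <linux/list.h>, and not the
actual blk-mq scenario):

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
    n->prev = h->prev;
    n->next = h;
    h->prev->next = n;
    h->prev = n;
}

static void list_del_init(struct list_head *n)
{
    n->prev->next = n->next;
    n->next->prev = n->prev;
    INIT_LIST_HEAD(n);
}

static void list_move_tail(struct list_head *n, struct list_head *h)
{
    /* unlinks through n's CURRENT pointers -- correct only if they are live */
    n->prev->next = n->next;
    n->next->prev = n->prev;
    list_add_tail(n, h);
}

int main(void)
{
    struct list_head rq, other, flush_pending, victim;

    INIT_LIST_HEAD(&rq);
    INIT_LIST_HEAD(&other);
    INIT_LIST_HEAD(&flush_pending);
    INIT_LIST_HEAD(&victim);

    list_add_tail(&rq, &flush_pending);     /* rq linked on flush_pending */
    INIT_LIST_HEAD(&flush_pending);         /* list reused; rq links now stale */
    list_add_tail(&victim, &flush_pending); /* head holds a new entry */

    /* list_move_tail() trusts rq's stale links and rewrites flush_pending,
     * silently dropping 'victim' from it: */
    list_move_tail(&rq, &other);
    printf("flush_pending lost victim: %s\n",
           flush_pending.next == &flush_pending ? "yes" : "no");

    /* The fix: delete while the links are still valid, add later. */
    list_del_init(&rq);
    list_add_tail(&rq, &other);
    return 0;
}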

--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c

@@ -1552,6 +1552,9 @@ static void disk_destroy_zone_wplugs_hash_table(struct gendisk *disk)
 }
 
 void disk_free_zone_resources(struct gendisk *disk)
 {
+	if (!disk->zone_wplugs_pool)
+		return;
+
 	cancel_work_sync(&disk->zone_wplugs_work);
 	if (disk->zone_wplugs_wq) {

--- a/block/sed-opal.c
+++ b/block/sed-opal.c

@@ -314,7 +314,7 @@ static int read_sed_opal_key(const char *key_name, u_char *buffer, int buflen)
 			      &key_type_user, key_name, true);
 
 	if (IS_ERR(kref))
-		ret = PTR_ERR(kref);
+		return PTR_ERR(kref);
 
 	key = key_ref_to_ptr(kref);
 	down_read(&key->sem);

--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c

@@ -302,6 +302,21 @@ static int lo_read_simple(struct loop_device *lo, struct request *rq,
 	return 0;
 }
 
+static void loop_clear_limits(struct loop_device *lo, int mode)
+{
+	struct queue_limits lim = queue_limits_start_update(lo->lo_queue);
+
+	if (mode & FALLOC_FL_ZERO_RANGE)
+		lim.max_write_zeroes_sectors = 0;
+
+	if (mode & FALLOC_FL_PUNCH_HOLE) {
+		lim.max_hw_discard_sectors = 0;
+		lim.discard_granularity = 0;
+	}
+
+	queue_limits_commit_update(lo->lo_queue, &lim);
+}
+
 static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos,
 			int mode)
 {
@@ -320,6 +335,14 @@ static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos,
 	ret = file->f_op->fallocate(file, mode, pos, blk_rq_bytes(rq));
 	if (unlikely(ret && ret != -EINVAL && ret != -EOPNOTSUPP))
 		return -EIO;
+
+	/*
+	 * We initially configure the limits in a hope that fallocate is
+	 * supported and clear them here if that turns out not to be true.
+	 */
+	if (unlikely(ret == -EOPNOTSUPP))
+		loop_clear_limits(lo, mode);
+
 	return ret;
 }
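
The loop change advertises the capability optimistically and withdraws it on
the first -EOPNOTSUPP instead of probing up front. A user-space sketch of
the same pattern against a real file (the helper name is hypothetical; the
kernel version updates queue limits rather than a flag):

/* Probe-on-first-use sketch: clear the advertised capability the first
 * time the backing store reports EOPNOTSUPP. */
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

static int can_punch_hole = 1;          /* advertised optimistically */

static int punch_hole(int fd, off_t off, off_t len)
{
    int ret = fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                        off, len);

    if (ret < 0 && errno == EOPNOTSUPP)
        can_punch_hole = 0;             /* clear the limit, as in the diff */
    return ret;
}

int main(void)
{
    int fd = open("scratch.img", O_RDWR | O_CREAT | O_TRUNC, 0600);

    if (fd < 0 || ftruncate(fd, 1 << 20))
        return 1;
    punch_hole(fd, 0, 4096);            /* may fail on some filesystems */
    printf("punch-hole still advertised: %d\n", can_punch_hole);
    close(fd);
    unlink("scratch.img");
    return 0;
}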

--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c

@@ -589,10 +589,11 @@ static inline int was_interrupted(int result)
 }
 
 /*
- * Returns BLK_STS_RESOURCE if the caller should retry after a delay. Returns
- * -EAGAIN if the caller should requeue @cmd. Returns -EIO if sending failed.
+ * Returns BLK_STS_RESOURCE if the caller should retry after a delay.
+ * Returns BLK_STS_IOERR if sending failed.
  */
-static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
+static blk_status_t nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd,
+				 int index)
 {
 	struct request *req = blk_mq_rq_from_pdu(cmd);
 	struct nbd_config *config = nbd->config;
@@ -614,13 +615,13 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
 
 	type = req_to_nbd_cmd_type(req);
 	if (type == U32_MAX)
-		return -EIO;
+		return BLK_STS_IOERR;
 
 	if (rq_data_dir(req) == WRITE &&
 	    (config->flags & NBD_FLAG_READ_ONLY)) {
 		dev_err_ratelimited(disk_to_dev(nbd->disk),
 				    "Write on read-only\n");
-		return -EIO;
+		return BLK_STS_IOERR;
 	}
 
 	if (req->cmd_flags & REQ_FUA)
@@ -674,11 +675,11 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
 				nsock->sent = sent;
 			}
 			set_bit(NBD_CMD_REQUEUED, &cmd->flags);
-			return (__force int)BLK_STS_RESOURCE;
+			return BLK_STS_RESOURCE;
 		}
 		dev_err_ratelimited(disk_to_dev(nbd->disk),
 				    "Send control failed (result %d)\n", result);
-		return -EAGAIN;
+		goto requeue;
 	}
 send_pages:
 	if (type != NBD_CMD_WRITE)
@@ -715,12 +716,12 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
 				nsock->pending = req;
 				nsock->sent = sent;
 				set_bit(NBD_CMD_REQUEUED, &cmd->flags);
-				return (__force int)BLK_STS_RESOURCE;
+				return BLK_STS_RESOURCE;
 			}
 			dev_err(disk_to_dev(nbd->disk),
 				"Send data failed (result %d)\n",
 				result);
-			return -EAGAIN;
+			goto requeue;
 		}
 		/*
 		 * The completion might already have come in,
@@ -737,7 +738,16 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
 	trace_nbd_payload_sent(req, handle);
 	nsock->pending = NULL;
 	nsock->sent = 0;
-	return 0;
+	__set_bit(NBD_CMD_INFLIGHT, &cmd->flags);
+	return BLK_STS_OK;
+
+requeue:
+	/* retry on a different socket */
+	dev_err_ratelimited(disk_to_dev(nbd->disk),
+			    "Request send failed, requeueing\n");
+	nbd_mark_nsock_dead(nbd, nsock, 1);
+	nbd_requeue_cmd(cmd);
+	return BLK_STS_OK;
 }
 
 static int nbd_read_reply(struct nbd_device *nbd, struct socket *sock,
@@ -1018,7 +1028,7 @@ static blk_status_t nbd_handle_cmd(struct nbd_cmd *cmd, int index)
 	struct nbd_device *nbd = cmd->nbd;
 	struct nbd_config *config;
 	struct nbd_sock *nsock;
-	int ret;
+	blk_status_t ret;
 
 	lockdep_assert_held(&cmd->lock);
 
@@ -1072,28 +1082,11 @@ static blk_status_t nbd_handle_cmd(struct nbd_cmd *cmd, int index)
 		ret = BLK_STS_OK;
 		goto out;
 	}
-	/*
-	 * Some failures are related to the link going down, so anything that
-	 * returns EAGAIN can be retried on a different socket.
-	 */
 	ret = nbd_send_cmd(nbd, cmd, index);
-	/*
-	 * Access to this flag is protected by cmd->lock, thus it's safe to set
-	 * the flag after nbd_send_cmd() succeed to send request to server.
-	 */
-	if (!ret)
-		__set_bit(NBD_CMD_INFLIGHT, &cmd->flags);
-	else if (ret == -EAGAIN) {
-		dev_err_ratelimited(disk_to_dev(nbd->disk),
-				    "Request send failed, requeueing\n");
-		nbd_mark_nsock_dead(nbd, nsock, 1);
-		nbd_requeue_cmd(cmd);
-		ret = BLK_STS_OK;
-	}
 out:
 	mutex_unlock(&nsock->tx_lock);
 	nbd_config_put(nbd);
-	return ret < 0 ? BLK_STS_IOERR : (__force blk_status_t)ret;
+	return ret;
 }
 
 static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
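
The nbd rework is mostly about the return type: the old code multiplexed
negative errnos (-EIO, -EAGAIN) and blk_status_t values through one int,
which is what required the (__force) casts and the translation at the bottom
of nbd_handle_cmd(). A small sketch of the idea with an illustrative status
enum (not the kernel's blk_status_t), where the function speaks exactly one
value namespace and the requeue case is handled internally:

#include <stdio.h>

typedef enum {
    STS_OK,        /* sent, or requeued internally */
    STS_RESOURCE,  /* caller should retry after a delay */
    STS_IOERR,     /* sending failed */
} status_t;

/* Before the cleanup, a function like this returned 'int' and mixed
 * -EIO/-EAGAIN with casted status codes, so every caller had to know
 * which namespace a given value belonged to. */
static status_t send_cmd(int link_up, int busy)
{
    if (!link_up)
        return STS_IOERR;
    if (busy)
        return STS_RESOURCE;
    return STS_OK;
}

int main(void)
{
    printf("%d %d %d\n", send_cmd(1, 0), send_cmd(1, 1), send_cmd(0, 0));
    return 0;
}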

--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c

@@ -998,6 +998,7 @@ void nvme_cleanup_cmd(struct request *req)
 			clear_bit_unlock(0, &ctrl->discard_page_busy);
 		else
 			kfree(bvec_virt(&req->special_vec));
+		req->rq_flags &= ~RQF_SPECIAL_PAYLOAD;
 	}
 }
 EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);
@@ -3959,12 +3960,13 @@ static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
 
 	mutex_lock(&ctrl->namespaces_lock);
 	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
-		if (ns->head->ns_id > nsid)
-			list_splice_init_rcu(&ns->list, &rm_list,
-					     synchronize_rcu);
+		if (ns->head->ns_id > nsid) {
+			list_del_rcu(&ns->list);
+			synchronize_srcu(&ctrl->srcu);
+			list_add_tail_rcu(&ns->list, &rm_list);
+		}
 	}
 	mutex_unlock(&ctrl->namespaces_lock);
-	synchronize_srcu(&ctrl->srcu);
 
 	list_for_each_entry_safe(ns, next, &rm_list, list)
 		nvme_ns_remove(ns);
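
The namespace-removal bug above comes from passing &ns->list, a list entry,
to list_splice_init_rcu(), which expects a list head: splicing from an entry
drags everything threaded after it, including the source head itself, onto
the destination. The fix moves one entry at a time with list_del_rcu() plus
list_add_tail_rcu(). A stand-alone non-RCU sketch of the same mistake
(minimal list reimplementation; the SRCU grace-period handling is elided):

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
    n->prev = h->prev;
    n->next = h;
    h->prev->next = n;
    h->prev = n;
}

/* Kernel-style splice: moves the entries of @list (a HEAD) onto @head. */
static void list_splice_init(struct list_head *list, struct list_head *head)
{
    struct list_head *first = list->next, *last = list->prev;

    if (first == list)
        return;                 /* empty */
    first->prev = head->prev;
    head->prev->next = first;
    last->next = head;
    head->prev = last;
    INIT_LIST_HEAD(list);
}

int main(void)
{
    struct list_head namespaces, rm_list, ns1, ns2, ns3;

    INIT_LIST_HEAD(&namespaces);
    INIT_LIST_HEAD(&rm_list);
    list_add_tail(&ns1, &namespaces);
    list_add_tail(&ns2, &namespaces);
    list_add_tail(&ns3, &namespaces);

    /* Buggy pattern: passing the ENTRY ns2 where a HEAD is expected.
     * ns3, the namespaces head itself, and ns1 all end up on rm_list;
     * only ns2 is left behind. */
    list_splice_init(&ns2, &rm_list);

    printf("rm_list starts with ns3? %s\n",
           rm_list.next == &ns3 ? "yes" : "no");
    printf("namespaces head absorbed into rm_list? %s\n",
           ns3.next == &namespaces ? "yes" : "no");
    return 0;
}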

--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c

@@ -111,6 +111,13 @@ static struct request *nvme_alloc_user_request(struct request_queue *q,
 	return req;
 }
 
+static void nvme_unmap_bio(struct bio *bio)
+{
+	if (bio_integrity(bio))
+		bio_integrity_unmap_free_user(bio);
+	blk_rq_unmap_user(bio);
+}
+
 static int nvme_map_user_request(struct request *req, u64 ubuffer,
 		unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
 		u32 meta_seed, struct io_uring_cmd *ioucmd, unsigned int flags)
@@ -157,7 +164,7 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
 
 out_unmap:
 	if (bio)
-		blk_rq_unmap_user(bio);
+		nvme_unmap_bio(bio);
 out:
 	blk_mq_free_request(req);
 	return ret;
@@ -195,7 +202,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
 	if (result)
 		*result = le64_to_cpu(nvme_req(req)->result.u64);
 	if (bio)
-		blk_rq_unmap_user(bio);
+		nvme_unmap_bio(bio);
 	blk_mq_free_request(req);
 
 	if (effects)
@@ -406,7 +413,7 @@ static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd,
 	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
 
 	if (pdu->bio)
-		blk_rq_unmap_user(pdu->bio);
+		nvme_unmap_bio(pdu->bio);
 
 	io_uring_cmd_done(ioucmd, pdu->status, pdu->result, issue_flags);
 }
@@ -432,7 +439,7 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
 	 */
 	if (blk_rq_is_poll(req)) {
 		if (pdu->bio)
-			blk_rq_unmap_user(pdu->bio);
+			nvme_unmap_bio(pdu->bio);
 		io_uring_cmd_iopoll_done(ioucmd, pdu->result, pdu->status);
 	} else {
 		io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_cb);

--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c

@@ -957,6 +957,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
 	req->metadata_sg_cnt = 0;
 	req->transfer_len = 0;
 	req->metadata_len = 0;
+	req->cqe->result.u64 = 0;
 	req->cqe->status = 0;
 	req->cqe->sq_head = 0;
 	req->ns = NULL;

--- a/drivers/nvme/target/fabrics-cmd-auth.c
+++ b/drivers/nvme/target/fabrics-cmd-auth.c

@@ -333,7 +333,6 @@ void nvmet_execute_auth_send(struct nvmet_req *req)
 	pr_debug("%s: ctrl %d qid %d nvme status %x error loc %d\n",
 		 __func__, ctrl->cntlid, req->sq->qid,
 		 status, req->error_loc);
-	req->cqe->result.u64 = 0;
 	if (req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2 &&
 	    req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_FAILURE2) {
 		unsigned long auth_expire_secs = ctrl->kato ? ctrl->kato : 120;
@@ -516,8 +515,6 @@ void nvmet_execute_auth_receive(struct nvmet_req *req)
 	status = nvmet_copy_to_sgl(req, 0, d, al);
 	kfree(d);
 done:
-	req->cqe->result.u64 = 0;
-
 	if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2)
 		nvmet_auth_sq_free(req->sq);
 	else if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {

--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c

@@ -226,9 +226,6 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
 	if (status)
 		goto out;
 
-	/* zero out initial completion result, assign values as needed */
-	req->cqe->result.u32 = 0;
-
 	if (c->recfmt != 0) {
 		pr_warn("invalid connect version (%d).\n",
 			le16_to_cpu(c->recfmt));
@@ -305,9 +302,6 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
 	if (status)
 		goto out;
 
-	/* zero out initial completion result, assign values as needed */
-	req->cqe->result.u32 = 0;
-
 	if (c->recfmt != 0) {
 		pr_warn("invalid connect version (%d).\n",
 			le16_to_cpu(c->recfmt));
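
The three nvmet hunks above are one logical change: cqe.result is now zeroed
once in nvmet_req_init() before any handler runs, replacing scattered
per-handler zeroing that was easy to miss. A tiny sketch of the
centralize-the-reset idea, with illustrative names rather than the nvmet API:

#include <stdio.h>
#include <string.h>

struct cqe { unsigned long long result; unsigned short status; };
struct req { struct cqe cqe; };

static void req_init(struct req *r)
{
    memset(&r->cqe, 0, sizeof(r->cqe));  /* one authoritative reset */
}

static void handler_connect(struct req *r)
{
    r->cqe.result = 0x1234;  /* handlers only set meaningful values */
}

static void handler_noop(struct req *r)
{
    (void)r;                 /* no per-handler zeroing needed anymore */
}

int main(void)
{
    struct req r;

    req_init(&r);
    handler_connect(&r);
    printf("connect result: %#llx\n", r.cqe.result);

    req_init(&r);            /* re-init clears the previous result */
    handler_noop(&r);
    printf("noop result: %#llx\n", r.cqe.result);
    return 0;
}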

--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c

@@ -226,13 +226,13 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
 	    req->cmd->common.opcode == nvme_admin_identify) {
 		switch (req->cmd->identify.cns) {
 		case NVME_ID_CNS_CTRL:
-			nvmet_passthru_override_id_ctrl(req);
+			status = nvmet_passthru_override_id_ctrl(req);
 			break;
 		case NVME_ID_CNS_NS:
-			nvmet_passthru_override_id_ns(req);
+			status = nvmet_passthru_override_id_ns(req);
 			break;
 		case NVME_ID_CNS_NS_DESC_LIST:
-			nvmet_passthru_override_id_descs(req);
+			status = nvmet_passthru_override_id_descs(req);
 			break;
 		}
 	} else if (status < 0)

--- a/include/linux/bio.h
+++ b/include/linux/bio.h

@@ -731,6 +731,7 @@ static inline bool bioset_initialized(struct bio_set *bs)
 	bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)
 
 int bio_integrity_map_user(struct bio *bio, void __user *ubuf, ssize_t len, u32 seed);
+void bio_integrity_unmap_free_user(struct bio *bio);
 extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
 extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
 extern bool bio_integrity_prep(struct bio *);
@@ -807,6 +808,9 @@ static inline int bio_integrity_map_user(struct bio *bio, void __user *ubuf,
 {
 	return -EINVAL;
 }
 
+static inline void bio_integrity_unmap_free_user(struct bio *bio)
+{
+}
 #endif /* CONFIG_BLK_DEV_INTEGRITY */
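
The bio.h hunks follow the usual config-gated header pattern: a real
declaration under CONFIG_BLK_DEV_INTEGRITY and an empty static inline stub
otherwise, so callers such as nvme_unmap_bio() need no #ifdefs of their own.
A compilable sketch of the pattern with an illustrative feature macro:

#include <stdio.h>

#define CONFIG_FEATURE 1        /* flip to 0 to compile the stub instead */

#if CONFIG_FEATURE
static inline void feature_cleanup(int *resource)
{
    printf("releasing %d\n", *resource);
    *resource = 0;
}
#else
static inline void feature_cleanup(int *resource)
{
    (void)resource;             /* nothing to do when the feature is off */
}
#endif

int main(void)
{
    int r = 42;

    feature_cleanup(&r);        /* unconditional call; no #ifdef needed here */
    return 0;
}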