blk-integrity: improved sg segment mapping

Make the integrity mapping more like data mapping, blk_rq_map_sg. Use
the request to validate the segment count, and update the callers so
they don't have to.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Keith Busch <kbusch@kernel.org>
Link: https://lore.kernel.org/r/20240913191746.2628196-1-kbusch@meta.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Author: Keith Busch <kbusch@kernel.org>
Date: 2024-09-13 12:17:46 -07:00
Committed-by: Jens Axboe <axboe@kernel.dk>
Parent: db5197b554
Commit: 76c313f658
4 changed files with 18 additions and 18 deletions
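For context, a minimal sketch (not part of the patch) of how a caller's use of blk_rq_map_integrity_sg() changes; my_driver_map_integrity() is a hypothetical helper standing in for the real callers (nvme-rdma and scsi_lib) updated below:

#include <linux/blk-mq.h>
#include <linux/blk-integrity.h>
#include <linux/scatterlist.h>

/* Hypothetical caller; sglist is assumed sized from rq->nr_integrity_segments. */
static int my_driver_map_integrity(struct request *rq, struct scatterlist *sglist)
{
	int nents;

	/*
	 * Before this patch the caller passed the queue and bio, then
	 * sanity-checked the result itself:
	 *
	 *	nents = blk_rq_map_integrity_sg(rq->q, rq->bio, sglist);
	 *	BUG_ON(nents > rq->nr_integrity_segments);
	 *	BUG_ON(nents > queue_max_integrity_segments(rq->q));
	 */

	/* After: pass the request; the helper validates the segment count. */
	nents = blk_rq_map_integrity_sg(rq, sglist);
	return nents;
}

Taking the request gives the helper access to rq->nr_integrity_segments, so the per-caller BUG_ON() checks in scsi_lib move into block/blk-integrity.c.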

block/blk-integrity.c

@@ -62,19 +62,20 @@ int blk_rq_count_integrity_sg(struct request_queue *q, struct bio *bio)
  *
  * Description: Map the integrity vectors in request into a
  * scatterlist. The scatterlist must be big enough to hold all
- * elements. I.e. sized using blk_rq_count_integrity_sg().
+ * elements. I.e. sized using blk_rq_count_integrity_sg() or
+ * rq->nr_integrity_segments.
  */
-int blk_rq_map_integrity_sg(struct request_queue *q, struct bio *bio,
-			    struct scatterlist *sglist)
+int blk_rq_map_integrity_sg(struct request *rq, struct scatterlist *sglist)
 {
 	struct bio_vec iv, ivprv = { NULL };
+	struct request_queue *q = rq->q;
 	struct scatterlist *sg = NULL;
+	struct bio *bio = rq->bio;
 	unsigned int segments = 0;
 	struct bvec_iter iter;
 	int prev = 0;
 
 	bio_for_each_integrity_vec(iv, bio, iter) {
 		if (prev) {
 			if (!biovec_phys_mergeable(q, &ivprv, &iv))
 				goto new_segment;
@@ -102,6 +103,12 @@ int blk_rq_map_integrity_sg(struct request_queue *q, struct bio *bio,
 	if (sg)
 		sg_mark_end(sg);
 
+	/*
+	 * Something must have been wrong if the figured number of segment
+	 * is bigger than number of req's physical integrity segments
+	 */
+	BUG_ON(segments > rq->nr_integrity_segments);
+	BUG_ON(segments > queue_max_integrity_segments(q));
 	return segments;
 }
 EXPORT_SYMBOL(blk_rq_map_integrity_sg);

drivers/nvme/host/rdma.c

@@ -1504,8 +1504,8 @@ static int nvme_rdma_dma_map_req(struct ib_device *ibdev, struct request *rq,
 			goto out_unmap_sg;
 		}
 
-		req->metadata_sgl->nents = blk_rq_map_integrity_sg(rq->q,
-				rq->bio, req->metadata_sgl->sg_table.sgl);
+		req->metadata_sgl->nents = blk_rq_map_integrity_sg(rq,
+				req->metadata_sgl->sg_table.sgl);
 		*pi_count = ib_dma_map_sg(ibdev,
 					  req->metadata_sgl->sg_table.sgl,
 					  req->metadata_sgl->nents,

drivers/scsi/scsi_lib.c

@@ -1163,7 +1163,6 @@ blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd)
 	if (blk_integrity_rq(rq)) {
 		struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
-		int ivecs;
 
 		if (WARN_ON_ONCE(!prot_sdb)) {
 			/*
@@ -1175,19 +1174,15 @@ blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd)
 			goto out_free_sgtables;
 		}
 
-		ivecs = rq->nr_integrity_segments;
-		if (sg_alloc_table_chained(&prot_sdb->table, ivecs,
+		if (sg_alloc_table_chained(&prot_sdb->table,
+				rq->nr_integrity_segments,
 				prot_sdb->table.sgl,
 				SCSI_INLINE_PROT_SG_CNT)) {
 			ret = BLK_STS_RESOURCE;
 			goto out_free_sgtables;
 		}
 
-		count = blk_rq_map_integrity_sg(rq->q, rq->bio,
-				prot_sdb->table.sgl);
-		BUG_ON(count > ivecs);
-		BUG_ON(count > queue_max_integrity_segments(rq->q));
+		count = blk_rq_map_integrity_sg(rq, prot_sdb->table.sgl);
 
 		cmd->prot_sdb = prot_sdb;
 		cmd->prot_sdb->table.nents = count;
 	}

include/linux/blk-integrity.h

@@ -25,8 +25,7 @@ static inline bool queue_limits_stack_integrity_bdev(struct queue_limits *t,
 }
 
 #ifdef CONFIG_BLK_DEV_INTEGRITY
-int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
-			    struct scatterlist *);
+int blk_rq_map_integrity_sg(struct request *, struct scatterlist *);
 int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
 int blk_rq_integrity_map_user(struct request *rq, void __user *ubuf,
 			      ssize_t bytes, u32 seed);
@@ -98,8 +97,7 @@ static inline int blk_rq_count_integrity_sg(struct request_queue *q,
 {
 	return 0;
 }
-static inline int blk_rq_map_integrity_sg(struct request_queue *q,
-					  struct bio *b,
+static inline int blk_rq_map_integrity_sg(struct request *q,
 					  struct scatterlist *s)
 {
 	return 0;