for-linus-20190407
-----BEGIN PGP SIGNATURE-----

iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAlyqUMgQHGF4Ym9lQGtl
cm5lbC5kawAKCRD301j7KXHgpjOSEACCRDhrbXsfVdMFAuRzmXjVqELuJ0Sk3zfC
0OR9EcNpr4s5Wp5ztX6xxcddvnvB8LZhAzs1R9JuiYG4EIHGP7CMBZ0JJlcygJkx
nVM1p7bSl7H/zDVKF8KMj/J7rjwXfY9FMKAopiFVSkS0cA1oz+PK96cDR8m2xeuV
l0b6zgorjmNpn3TukEbFjvAjqskKhm8Xtjn5/wBGeWUnqZE9AZeI9OovuK5BOSBm
qAs7lVB+MACtpbSjv4yWGcfwtqYUt9PbnsTog95uXXQDR1BPnv/btjeGdzpVtNH1
iiueCXR3bNqnoBo6MLgzWpnvA6UHcygXOTmRy17BoNg7uqtWiFxZn0HKxMOUYD6F
RU4RP7AVwpZeziMO8I7VkdfasgiKGetDzm8vCJ4QtKly/+3iwMVVKHPnU7nV/cCm
EmoqM5BLAT6hHuSxGaNBVVNavvr/CFcqjk+29UEnK8ZQ4c/Mkgwgc6gPbq59lTLN
Kn0AeB2kDeOvpJ5LWOjVmy7vfVQ3um65ohNl9KvtZZJsX3xQoIaH+i70YE+zpOHT
czKZ9ZC7HPIJuanPoEbGz/c+Js5un4/Rn+Ry9fa/3k3IFcd9N2bOc/AIm5LiAm3I
FmSonn+SWgLGlwhiZZBHB45za0Wwq6AGGyTQpyT/ijjX9ouHBzb94iRWOH9htZuF
JZDjpyRqEw==
=8et7
-----END PGP SIGNATURE-----

Merge tag 'for-linus-20190407' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:

 - Fixups for the pf/pcd queue handling (YueHaibing)

 - Revert of the three direct issue changes as they have been proven to
   cause an issue with dm-mpath (Bart)

 - Plug rq_count reset fix (Dongli)

 - io_uring double free in fileset registration error handling (me)

 - Make null_blk handle bad numa node passed in (John)

 - BFQ ifdef fix (Konstantin)

 - Flush queue leak fix (Shenghui)

 - Plug trace fix (Yufen)

* tag 'for-linus-20190407' of git://git.kernel.dk/linux-block:
  xsysace: Fix error handling in ace_setup
  null_blk: prevent crash from bad home_node value
  block: Revert v5.0 blk_mq_request_issue_directly() changes
  paride/pcd: Fix potential NULL pointer dereference and mem leak
  blk-mq: do not reset plug->rq_count before the list is sorted
  paride/pf: Fix potential NULL pointer dereference
  io_uring: fix double free in case of fileset regitration failure
  blk-mq: add trace block plug and unplug for multiple queues
  block: use blk_free_flush_queue() to free hctx->fq in blk_mq_init_hctx
  block/bfq: fix ifdef for CONFIG_BFQ_GROUP_IOSCHED=y
commit 429fba106e

11 changed files with 110 additions and 75 deletions
block/bfq-iosched.c

@@ -674,7 +674,7 @@ static bool bfq_symmetric_scenario(struct bfq_data *bfqd)
	 * at least two nodes.
	 */
	return !(varied_queue_weights || multiple_classes_busy
-#ifdef BFQ_GROUP_IOSCHED_ENABLED
+#ifdef CONFIG_BFQ_GROUP_IOSCHED
	       || bfqd->num_groups_with_pending_reqs > 0
 #endif
	   );
block/bfq-wf2q.c

@@ -1012,7 +1012,7 @@ static void __bfq_activate_entity(struct bfq_entity *entity,
		entity->on_st = true;
	}

-#ifdef BFQ_GROUP_IOSCHED_ENABLED
+#ifdef CONFIG_BFQ_GROUP_IOSCHED
	if (!bfq_entity_to_bfqq(entity)) { /* bfq_group */
		struct bfq_group *bfqg =
			container_of(entity, struct bfq_group, entity);
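A note on the one-character nature of the two bfq hunks above: the build system emits Kconfig symbols as macros prefixed with CONFIG_, so the old guard BFQ_GROUP_IOSCHED_ENABLED was never defined and the group-aware branches were silently compiled out even with group scheduling enabled. A minimal standalone sketch of the failure mode (plain C, not kernel code; the #define stands in for what Kconfig generates):

#include <stdio.h>

/* Stand-in for the macro the kernel build system generates from Kconfig. */
#define CONFIG_BFQ_GROUP_IOSCHED 1

int main(void)
{
#ifdef BFQ_GROUP_IOSCHED_ENABLED	/* old guard: wrong name, never defined */
	puts("group check compiled in (old guard)");
#endif
#ifdef CONFIG_BFQ_GROUP_IOSCHED		/* fixed guard: matches Kconfig output */
	puts("group check compiled in (fixed guard)");
#endif
	return 0;
}

Only the second message is ever printed; the misspelled guard fails silently because an unknown macro in #ifdef is simply false.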
block/blk-core.c

@@ -1245,8 +1245,6 @@ static int blk_cloned_rq_check_limits(struct request_queue *q,
  */
 blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
 {
-	blk_qc_t unused;
-
	if (blk_cloned_rq_check_limits(q, rq))
		return BLK_STS_IOERR;

@@ -1262,7 +1260,7 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *
	 * bypass a potential scheduler on the bottom device for
	 * insert.
	 */
-	return blk_mq_try_issue_directly(rq->mq_hctx, rq, &unused, true, true);
+	return blk_mq_request_issue_directly(rq, true);
 }
 EXPORT_SYMBOL_GPL(blk_insert_cloned_request);

block/blk-mq-sched.c

@@ -423,10 +423,12 @@ void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
		 * busy in case of 'none' scheduler, and this way may save
		 * us one extra enqueue & dequeue to sw queue.
		 */
-		if (!hctx->dispatch_busy && !e && !run_queue_async)
+		if (!hctx->dispatch_busy && !e && !run_queue_async) {
			blk_mq_try_issue_list_directly(hctx, list);
-		else
-			blk_mq_insert_requests(hctx, ctx, list);
+			if (list_empty(list))
+				return;
+		}
+		blk_mq_insert_requests(hctx, ctx, list);
	}

	blk_mq_run_hw_queue(hctx, run_queue_async);
block/blk-mq.c

@@ -1711,11 +1711,12 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
	unsigned int depth;

	list_splice_init(&plug->mq_list, &list);
-	plug->rq_count = 0;

	if (plug->rq_count > 2 && plug->multiple_queues)
		list_sort(NULL, &list, plug_rq_cmp);

+	plug->rq_count = 0;
+
	this_q = NULL;
	this_hctx = NULL;
	this_ctx = NULL;
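The fix above is purely an ordering fix: plug->rq_count is read by the sort condition, so zeroing it first made `rq_count > 2` always false and the multi-queue plug list was never sorted. A minimal standalone sketch of the pattern (plain C, not kernel code):

#include <stdbool.h>
#include <stdio.h>

struct plug {
	int rq_count;
	bool multiple_queues;
};

/* Sketch of blk_mq_flush_plug_list()'s fixed ordering: the counter feeds
 * the sort condition, so it must only be reset after that check. */
static void flush_plug(struct plug *plug)
{
	/* The old code did "plug->rq_count = 0;" here, making the
	 * condition below always false and skipping the sort. */
	if (plug->rq_count > 2 && plug->multiple_queues)
		puts("sorting plug list");

	plug->rq_count = 0;	/* fixed: reset only after the check */
}

int main(void)
{
	struct plug p = { .rq_count = 3, .multiple_queues = true };
	flush_plug(&p);		/* prints "sorting plug list" */
	return 0;
}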
@@ -1800,74 +1801,76 @@ static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
	return ret;
 }

-blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
						struct request *rq,
						blk_qc_t *cookie,
-						bool bypass, bool last)
+						bool bypass_insert, bool last)
 {
	struct request_queue *q = rq->q;
	bool run_queue = true;
-	blk_status_t ret = BLK_STS_RESOURCE;
-	int srcu_idx;
-	bool force = false;

-	hctx_lock(hctx, &srcu_idx);
	/*
-	 * hctx_lock is needed before checking quiesced flag.
+	 * RCU or SRCU read lock is needed before checking quiesced flag.
	 *
-	 * When queue is stopped or quiesced, ignore 'bypass', insert
-	 * and return BLK_STS_OK to caller, and avoid driver to try to
-	 * dispatch again.
+	 * When queue is stopped or quiesced, ignore 'bypass_insert' from
+	 * blk_mq_request_issue_directly(), and return BLK_STS_OK to caller,
+	 * and avoid driver to try to dispatch again.
	 */
-	if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q))) {
+	if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) {
		run_queue = false;
-		bypass = false;
-		goto out_unlock;
+		bypass_insert = false;
+		goto insert;
	}

-	if (unlikely(q->elevator && !bypass))
-		goto out_unlock;
+	if (q->elevator && !bypass_insert)
+		goto insert;

	if (!blk_mq_get_dispatch_budget(hctx))
-		goto out_unlock;
+		goto insert;

	if (!blk_mq_get_driver_tag(rq)) {
		blk_mq_put_dispatch_budget(hctx);
-		goto out_unlock;
+		goto insert;
	}

-	/*
-	 * Always add a request that has been through
-	 *.queue_rq() to the hardware dispatch list.
-	 */
-	force = true;
-	ret = __blk_mq_issue_directly(hctx, rq, cookie, last);
-out_unlock:
+	return __blk_mq_issue_directly(hctx, rq, cookie, last);
+insert:
+	if (bypass_insert)
+		return BLK_STS_RESOURCE;
+
+	blk_mq_request_bypass_insert(rq, run_queue);
+	return BLK_STS_OK;
+}
+
+static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
+		struct request *rq, blk_qc_t *cookie)
+{
+	blk_status_t ret;
+	int srcu_idx;
+
+	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
+
+	hctx_lock(hctx, &srcu_idx);
+
+	ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true);
+	if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
+		blk_mq_request_bypass_insert(rq, true);
+	else if (ret != BLK_STS_OK)
+		blk_mq_end_request(rq, ret);
+
+	hctx_unlock(hctx, srcu_idx);
+}
+
+blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
+{
+	blk_status_t ret;
+	int srcu_idx;
+	blk_qc_t unused_cookie;
+	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
+
+	hctx_lock(hctx, &srcu_idx);
+	ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true, last);
	hctx_unlock(hctx, srcu_idx);
-	switch (ret) {
-	case BLK_STS_OK:
-		break;
-	case BLK_STS_DEV_RESOURCE:
-	case BLK_STS_RESOURCE:
-		if (force) {
-			blk_mq_request_bypass_insert(rq, run_queue);
-			/*
-			 * We have to return BLK_STS_OK for the DM
-			 * to avoid livelock. Otherwise, we return
-			 * the real result to indicate whether the
-			 * request is direct-issued successfully.
-			 */
-			ret = bypass ? BLK_STS_OK : ret;
-		} else if (!bypass) {
-			blk_mq_sched_insert_request(rq, false,
-						    run_queue, false);
-		}
-		break;
-	default:
-		if (!bypass)
-			blk_mq_end_request(rq, ret);
-		break;
-	}

	return ret;
 }
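Stripped of the locking detail, the restored __blk_mq_try_issue_directly() above has a simple contract: issue to the driver if queue state, budget and tag allow, otherwise fall back. With bypass_insert the resource failure is returned to the caller (blk_insert_cloned_request(), i.e. dm-mpath's clone path) to handle; without it the request is inserted and the caller sees BLK_STS_OK. A condensed sketch with hypothetical helper names (not kernel code):

#include <stdbool.h>

enum status { STS_OK, STS_RESOURCE };

/* Hypothetical stand-ins for the kernel's queue state, budget and tag checks. */
struct request { bool queue_ready, has_budget, has_tag; };

static enum status driver_issue(struct request *rq) { (void)rq; return STS_OK; }
static void insert_to_dispatch_list(struct request *rq) { (void)rq; }

static enum status try_issue_directly(struct request *rq, bool bypass_insert)
{
	if (!rq->queue_ready || !rq->has_budget || !rq->has_tag)
		goto insert;
	return driver_issue(rq);	/* request reached ->queue_rq() */
insert:
	if (bypass_insert)
		return STS_RESOURCE;	/* caller handles the failure itself */
	insert_to_dispatch_list(rq);	/* normal path: queue it, report OK */
	return STS_OK;
}

int main(void)
{
	struct request rq = { true, true, true };
	return try_issue_directly(&rq, true) == STS_OK ? 0 : 1;
}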
@@ -1875,20 +1878,22 @@ blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
		struct list_head *list)
 {
-	blk_qc_t unused;
-	blk_status_t ret = BLK_STS_OK;
-
	while (!list_empty(list)) {
+		blk_status_t ret;
		struct request *rq = list_first_entry(list, struct request,
				queuelist);

		list_del_init(&rq->queuelist);
-		if (ret == BLK_STS_OK)
-			ret = blk_mq_try_issue_directly(hctx, rq, &unused,
-					false,
+		ret = blk_mq_request_issue_directly(rq, list_empty(list));
+		if (ret != BLK_STS_OK) {
+			if (ret == BLK_STS_RESOURCE ||
+					ret == BLK_STS_DEV_RESOURCE) {
+				blk_mq_request_bypass_insert(rq,
					list_empty(list));
-		else
-			blk_mq_sched_insert_request(rq, false, true, false);
+				break;
+			}
+			blk_mq_end_request(rq, ret);
+		}
	}

	/*
@@ -1896,7 +1901,7 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
	 * the driver there was more coming, but that turned out to
	 * be a lie.
	 */
-	if (ret != BLK_STS_OK && hctx->queue->mq_ops->commit_rqs)
+	if (!list_empty(list) && hctx->queue->mq_ops->commit_rqs)
		hctx->queue->mq_ops->commit_rqs(hctx);
 }

@@ -2003,19 +2008,21 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
			plug->rq_count--;
		}
		blk_add_rq_to_plug(plug, rq);
+		trace_block_plug(q);

		blk_mq_put_ctx(data.ctx);

		if (same_queue_rq) {
			data.hctx = same_queue_rq->mq_hctx;
+			trace_block_unplug(q, 1, true);
			blk_mq_try_issue_directly(data.hctx, same_queue_rq,
-					&cookie, false, true);
+					&cookie);
		}
	} else if ((q->nr_hw_queues > 1 && is_sync) || (!q->elevator &&
			!data.hctx->dispatch_busy)) {
		blk_mq_put_ctx(data.ctx);
		blk_mq_bio_to_request(rq, bio);
-		blk_mq_try_issue_directly(data.hctx, rq, &cookie, false, true);
+		blk_mq_try_issue_directly(data.hctx, rq, &cookie);
	} else {
		blk_mq_put_ctx(data.ctx);
		blk_mq_bio_to_request(rq, bio);
@@ -2332,7 +2339,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
	return 0;

 free_fq:
-	kfree(hctx->fq);
+	blk_free_flush_queue(hctx->fq);
 exit_hctx:
	if (set->ops->exit_hctx)
		set->ops->exit_hctx(hctx, hctx_idx);
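The leak fixed above is the usual mismatch between a constructor that makes nested allocations and a bare kfree() of the outer object: blk_alloc_flush_queue() also allocates the flush request inside the queue, so only the matching blk_free_flush_queue() releases everything. A minimal userspace sketch of the paired alloc/free pattern (not kernel code):

#include <stdlib.h>

/* Toy version of the flush queue: the outer struct owns a sub-allocation. */
struct flush_queue {
	void *flush_rq;
};

static struct flush_queue *alloc_flush_queue(size_t rq_size)
{
	struct flush_queue *fq = calloc(1, sizeof(*fq));

	if (!fq)
		return NULL;
	fq->flush_rq = calloc(1, rq_size);
	if (!fq->flush_rq) {
		free(fq);
		return NULL;
	}
	return fq;
}

static void free_flush_queue(struct flush_queue *fq)
{
	if (!fq)
		return;
	free(fq->flush_rq);	/* a bare free(fq) would leak this member */
	free(fq);
}

int main(void)
{
	free_flush_queue(alloc_flush_queue(128));
	return 0;
}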
block/blk-mq.h

@@ -70,10 +70,8 @@ void blk_mq_request_bypass_insert(struct request *rq, bool run_queue);
 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
				struct list_head *list);

-blk_status_t blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
-						struct request *rq,
-						blk_qc_t *cookie,
-						bool bypass, bool last);
+/* Used by blk_insert_cloned_request() to issue request directly */
+blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last);
 void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
				    struct list_head *list);

drivers/block/null_blk_main.c

@@ -1748,6 +1748,11 @@ static int __init null_init(void)
		return -EINVAL;
	}

+	if (g_home_node != NUMA_NO_NODE && g_home_node >= nr_online_nodes) {
+		pr_err("null_blk: invalid home_node value\n");
+		g_home_node = NUMA_NO_NODE;
+	}
+
	if (g_queue_mode == NULL_Q_RQ) {
		pr_err("null_blk: legacy IO path no longer available\n");
		return -EINVAL;
drivers/block/paride/pcd.c

@@ -314,6 +314,7 @@ static void pcd_init_units(void)
		disk->queue = blk_mq_init_sq_queue(&cd->tag_set, &pcd_mq_ops,
						   1, BLK_MQ_F_SHOULD_MERGE);
		if (IS_ERR(disk->queue)) {
+			put_disk(disk);
			disk->queue = NULL;
			continue;
		}
@@ -750,6 +751,8 @@ static int pcd_detect(void)

	printk("%s: No CD-ROM drive found\n", name);
	for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
+		if (!cd->disk)
+			continue;
		blk_cleanup_queue(cd->disk->queue);
		cd->disk->queue = NULL;
		blk_mq_free_tag_set(&cd->tag_set);
@@ -1010,8 +1013,14 @@ static int __init pcd_init(void)
	pcd_probe_capabilities();

	if (register_blkdev(major, name)) {
-		for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++)
+		for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
+			if (!cd->disk)
+				continue;
+
+			blk_cleanup_queue(cd->disk->queue);
+			blk_mq_free_tag_set(&cd->tag_set);
			put_disk(cd->disk);
+		}
		return -EBUSY;
	}

@@ -1032,6 +1041,9 @@ static void __exit pcd_exit(void)
	int unit;

	for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
+		if (!cd->disk)
+			continue;
+
		if (cd->present) {
			del_gendisk(cd->disk);
			pi_release(cd->pi);
drivers/block/paride/pf.c

@@ -762,6 +762,8 @@ static int pf_detect(void)

	printk("%s: No ATAPI disk detected\n", name);
	for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
+		if (!pf->disk)
+			continue;
		blk_cleanup_queue(pf->disk->queue);
		pf->disk->queue = NULL;
		blk_mq_free_tag_set(&pf->tag_set);
@@ -1029,8 +1031,13 @@ static int __init pf_init(void)
	pf_busy = 0;

	if (register_blkdev(major, name)) {
-		for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++)
+		for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
+			if (!pf->disk)
+				continue;
+			blk_cleanup_queue(pf->disk->queue);
+			blk_mq_free_tag_set(&pf->tag_set);
			put_disk(pf->disk);
+		}
		return -EBUSY;
	}

@@ -1051,6 +1058,9 @@ static void __exit pf_exit(void)
	int unit;
	unregister_blkdev(major, name);
	for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
+		if (!pf->disk)
+			continue;
+
		if (pf->present)
			del_gendisk(pf->disk);

drivers/block/xsysace.c

@@ -1090,6 +1090,8 @@ static int ace_setup(struct ace_device *ace)
	return 0;

 err_read:
+	/* prevent double queue cleanup */
+	ace->gd->queue = NULL;
	put_disk(ace->gd);
 err_alloc_disk:
	blk_cleanup_queue(ace->queue);
fs/io_uring.c

@@ -2215,6 +2215,7 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
			fput(ctx->user_files[i]);

		kfree(ctx->user_files);
+		ctx->user_files = NULL;
		ctx->nr_user_files = 0;
		return ret;
	}
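The double free fixed above came from the error path leaving a stale ctx->user_files pointer behind; nulling the pointer and zeroing the count after kfree() makes any later cleanup pass over the same context a no-op. A minimal sketch of the idempotent-cleanup pattern (plain C, not kernel code):

#include <stdlib.h>

struct ctx {
	void **user_files;
	unsigned int nr_user_files;
};

/* Idempotent cleanup: after the first call the pointer is NULL and the
 * count is zero, so a second call frees nothing (free(NULL) is a no-op). */
static void free_fileset(struct ctx *ctx)
{
	free(ctx->user_files);
	ctx->user_files = NULL;
	ctx->nr_user_files = 0;
}

int main(void)
{
	struct ctx c = { calloc(4, sizeof(void *)), 4 };

	free_fileset(&c);
	free_fileset(&c);	/* safe: would double-free without the reset */
	return 0;
}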