Introduce "soft" serseq variant.

With the new ZFS prefetcher improvements it is no longer necessary to
fully serialize reads to reach a decent prediction hit rate.  The
softer variant only creates a small time window to reduce races,
instead of completely blocking the following reads while the previous
one is running.  This hurts performance much less in case of a
prediction miss.

MFC after:	1 month
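
As a rough illustration, a minimal standalone sketch of where each
serseq level releases the next consecutive read (not code from the
tree; serseq_done() and dispatch_read() are invented for the example):

#include <stdio.h>

typedef enum {
	CTL_LUN_SERSEQ_OFF,	/* never hold the next consecutive read */
	CTL_LUN_SERSEQ_SOFT,	/* hold it only until this read is issued */
	CTL_LUN_SERSEQ_READ,	/* hold it until this read completes */
	CTL_LUN_SERSEQ_ON	/* same, and also serialize writes */
} ctl_lun_serseq;

static void
serseq_done(const char *when)
{
	/* In CTL this would unblock the queued consecutive read. */
	printf("next read released: %s\n", when);
}

static void
dispatch_read(ctl_lun_serseq level)
{
	/* ...lock the backing vnode / look up the device... */
	if (level == CTL_LUN_SERSEQ_SOFT)
		serseq_done("just before issue (small window)");
	/* ...issue the read to the backing store and wait for it... */
	if (level >= CTL_LUN_SERSEQ_READ)
		serseq_done("after completion (fully serialized)");
	/* OFF never held the next read in the first place. */
}

int
main(void)
{
	dispatch_read(CTL_LUN_SERSEQ_SOFT);
	dispatch_read(CTL_LUN_SERSEQ_READ);
	return (0);
}

The enum ordering OFF < SOFT < READ < ON is what lets the backends use
the simple >= and > comparisons seen in the hunks below.
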
commit ac503c194c
parent 13b3862ee8
Author:	Alexander Motin
Date:	2021-04-06 17:27:16 -04:00

 5 files changed, 32 insertions(+), 18 deletions(-)


@@ -13272,9 +13272,6 @@ ctl_serseq_done(union ctl_io *io)
 {
 	struct ctl_lun *lun = CTL_LUN(io);
 
-	if (lun->be_lun->serseq == CTL_LUN_SERSEQ_OFF)
-		return;
-
 	/* This is racy, but should not be a problem. */
 	if (!TAILQ_EMPTY(&io->io_hdr.blocked_queue)) {
 		mtx_lock(&lun->lun_lock);


@@ -47,6 +47,7 @@
 
 typedef enum {
 	CTL_LUN_SERSEQ_OFF,
+	CTL_LUN_SERSEQ_SOFT,
 	CTL_LUN_SERSEQ_READ,
 	CTL_LUN_SERSEQ_ON
 } ctl_lun_serseq;


@@ -490,13 +490,12 @@ ctl_be_block_move_done(union ctl_io *io, bool samethr)
 static void
 ctl_be_block_biodone(struct bio *bio)
 {
-	struct ctl_be_block_io *beio;
-	struct ctl_be_block_lun *be_lun;
+	struct ctl_be_block_io *beio = bio->bio_caller1;
+	struct ctl_be_block_lun *be_lun = beio->lun;
+	struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun;
 	union ctl_io *io;
 	int error;
 
-	beio = bio->bio_caller1;
-	be_lun = beio->lun;
 	io = beio->io;
 
 	DPRINTF("entered\n");
@@ -576,7 +575,8 @@ ctl_be_block_biodone(struct bio *bio)
 		if ((ARGS(io)->flags & CTL_LLF_READ) &&
 		    beio->beio_cont == NULL) {
 			ctl_set_success(&io->scsiio);
-			ctl_serseq_done(io);
+			if (cbe_lun->serseq >= CTL_LUN_SERSEQ_SOFT)
+				ctl_serseq_done(io);
 		}
 		ctl_datamove(io);
 	}
@@ -636,6 +636,7 @@ static void
 ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun,
 			   struct ctl_be_block_io *beio)
 {
+	struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun;
 	struct ctl_be_block_filedata *file_data;
 	union ctl_io *io;
 	struct uio xuio;
@@ -679,6 +680,9 @@ ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun,
 
 	if (beio->bio_cmd == BIO_READ) {
 		vn_lock(be_lun->vn, LK_SHARED | LK_RETRY);
+		if (beio->beio_cont == NULL &&
+		    cbe_lun->serseq == CTL_LUN_SERSEQ_SOFT)
+			ctl_serseq_done(io);
 		/*
 		 * UFS pays attention to IO_DIRECT for reads. If the
 		 * DIRECTIO option is configured into the kernel, it calls
@@ -786,7 +790,8 @@ ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun,
 		if ((ARGS(io)->flags & CTL_LLF_READ) &&
 		    beio->beio_cont == NULL) {
 			ctl_set_success(&io->scsiio);
-			ctl_serseq_done(io);
+			if (cbe_lun->serseq > CTL_LUN_SERSEQ_SOFT)
+				ctl_serseq_done(io);
 		}
 		ctl_datamove(io);
 	}
@@ -863,6 +868,7 @@ static void
 ctl_be_block_dispatch_zvol(struct ctl_be_block_lun *be_lun,
 			   struct ctl_be_block_io *beio)
 {
+	struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun;
 	union ctl_io *io;
 	struct cdevsw *csw;
 	struct cdev *dev;
@@ -904,9 +910,12 @@ ctl_be_block_dispatch_zvol(struct ctl_be_block_lun *be_lun,
 
 	csw = devvn_refthread(be_lun->vn, &dev, &ref);
 	if (csw) {
-		if (beio->bio_cmd == BIO_READ)
+		if (beio->bio_cmd == BIO_READ) {
+			if (beio->beio_cont == NULL &&
+			    cbe_lun->serseq == CTL_LUN_SERSEQ_SOFT)
+				ctl_serseq_done(io);
 			error = csw->d_read(dev, &xuio, flags);
-		else
+		} else
 			error = csw->d_write(dev, &xuio, flags);
 		dev_relthread(dev, ref);
 	} else
@@ -952,7 +961,8 @@ ctl_be_block_dispatch_zvol(struct ctl_be_block_lun *be_lun,
 		if ((ARGS(io)->flags & CTL_LLF_READ) &&
 		    beio->beio_cont == NULL) {
 			ctl_set_success(&io->scsiio);
-			ctl_serseq_done(io);
+			if (cbe_lun->serseq > CTL_LUN_SERSEQ_SOFT)
+				ctl_serseq_done(io);
 		}
 		ctl_datamove(io);
 	}
@@ -2187,12 +2197,14 @@ ctl_be_block_open(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req)
 		ctl_be_block_close(be_lun);
 	cbe_lun->serseq = CTL_LUN_SERSEQ_OFF;
 	if (be_lun->dispatch != ctl_be_block_dispatch_dev)
-		cbe_lun->serseq = CTL_LUN_SERSEQ_READ;
+		cbe_lun->serseq = CTL_LUN_SERSEQ_SOFT;
 	value = dnvlist_get_string(cbe_lun->options, "serseq", NULL);
 	if (value != NULL && strcmp(value, "on") == 0)
 		cbe_lun->serseq = CTL_LUN_SERSEQ_ON;
 	else if (value != NULL && strcmp(value, "read") == 0)
 		cbe_lun->serseq = CTL_LUN_SERSEQ_READ;
+	else if (value != NULL && strcmp(value, "soft") == 0)
+		cbe_lun->serseq = CTL_LUN_SERSEQ_SOFT;
 	else if (value != NULL && strcmp(value, "off") == 0)
 		cbe_lun->serseq = CTL_LUN_SERSEQ_OFF;
 	return (0);
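
For reference, the level is selected per LUN through the existing
"serseq" option, e.g. at LUN creation time (illustrative command; the
backing path is a placeholder):

	ctladm create -b block -o file=/dev/zvol/tank/vol0 -o serseq=soft

With no explicit option, file- and zvol-backed LUNs now default to the
new "soft" level, while raw device-backed LUNs keep "off".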


@@ -507,7 +507,8 @@ ctl_backend_ramdisk_rw(union ctl_io *io)
 	if ((ARGS(io)->flags & CTL_LLF_READ) &&
 	    ARGS(io)->len <= PRIV(io)->len) {
 		ctl_set_success(&io->scsiio);
-		ctl_serseq_done(io);
+		if (cbe_lun->serseq >= CTL_LUN_SERSEQ_SOFT)
+			ctl_serseq_done(io);
 	}
 	ctl_datamove(io);
 }
@@ -1036,6 +1037,8 @@ ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
 		cbe_lun->serseq = CTL_LUN_SERSEQ_ON;
 	else if (value != NULL && strcmp(value, "read") == 0)
 		cbe_lun->serseq = CTL_LUN_SERSEQ_READ;
+	else if (value != NULL && strcmp(value, "soft") == 0)
+		cbe_lun->serseq = CTL_LUN_SERSEQ_SOFT;
 	else if (value != NULL && strcmp(value, "off") == 0)
 		cbe_lun->serseq = CTL_LUN_SERSEQ_OFF;
 


@@ -1,6 +1,6 @@
 .\"
 .\" Copyright (c) 2003 Silicon Graphics International Corp.
-.\" Copyright (c) 2015-2020 Alexander Motin <mav@FreeBSD.org>
+.\" Copyright (c) 2015-2021 Alexander Motin <mav@FreeBSD.org>
 .\" Copyright (c) 2018 Marcelo Araujo <araujo@FreeBSD.org>
 .\" All rights reserved.
 .\"
@@ -36,7 +36,7 @@
 .\" $Id: //depot/users/kenm/FreeBSD-test2/usr.sbin/ctladm/ctladm.8#3 $
 .\" $FreeBSD$
 .\"
-.Dd November 23, 2020
+.Dd March 9, 2021
 .Dt CTLADM 8
 .Os
 .Sh NAME
@@ -919,8 +919,9 @@ appropriate commands and task attributes.
 The default value is "restricted".
 It improves data integrity, but may introduce some additional delays.
 .It Va serseq
-Set to "on" to serialize consecutive reads/writes.
-Set to "read" to serialize consecutive reads.
+Set to "on" to fully serialize consecutive reads/writes.
+Set to "read" to fully serialize consecutive reads.
+Set to "soft" to slightly serialize consecutive reads.
 Set to "off" to allow them be issued in parallel.
 Parallel issue of consecutive operations may confuse logic of the
 backing file system, hurting performance; but it may improve performance