graid3: Fix teardown races

Port commit dc399583ba from g_mirror, which has an effectively
identical teardown sequence.

This fixes a race that was occasionally causing panics during GEOM test
suite runs on riscv.

MFC after:	1 month
Author:	Mark Johnston
Date:	2023-11-02 14:35:37 -04:00
Parent:	71b77a7172
Commit:	33cb9b3c3a
2 changed files with 45 additions and 36 deletions
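
The change replaces an unconditional teardown with reference counting on the softc: g_raid3_create() starts sc_refcnt at 1, launching the provider stores the softc in pp->private and takes a second reference, the new g_raid3_providergone() method releases the provider's reference once GEOM has fully retired the provider, and g_raid3_destroy_device() now only drops the creator's reference; whichever release reaches zero calls g_raid3_free_device(). The userland sketch below is only an illustration of that scheme: the names (raid3_softc, provider_gone, and so on) are invented, it is single-threaded, and the real code does this under the GEOM topology lock.

/*
 * Illustrative sketch of the refcounted teardown, not the kernel code:
 * invented names, no locking.
 */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct raid3_softc {
	unsigned refcnt;	/* number of softc references */
	/* ... queues, locks, disk array ... */
};

static void
softc_free(struct raid3_softc *sc)
{
	/* Mirrors the KASSERT in g_raid3_free_device(). */
	assert(sc->refcnt == 0);
	printf("softc freed\n");
	free(sc);
}

static void
softc_release(struct raid3_softc *sc)
{
	/* Whoever drops the last reference does the actual free. */
	if (--sc->refcnt == 0)
		softc_free(sc);
}

/* Analogue of g_raid3_providergone(): the provider is fully retired. */
static void
provider_gone(struct raid3_softc *sc)
{
	softc_release(sc);
}

/* Analogue of g_raid3_destroy_device(): drop the creator's reference only. */
static void
device_destroy(struct raid3_softc *sc)
{
	/* ... wither geoms, destroy disks ... */
	softc_release(sc);
}

int
main(void)
{
	struct raid3_softc *sc = calloc(1, sizeof(*sc));

	if (sc == NULL)
		return (1);
	sc->refcnt = 1;		/* creation holds the first reference */
	sc->refcnt++;		/* launching the provider holds a second one */

	/*
	 * Teardown order no longer matters: the softc stays alive until
	 * both the device teardown and the provider retirement have run.
	 */
	device_destroy(sc);
	provider_gone(sc);
	return (0);
}

The diff below implements this shape in the driver: sc_refcnt is the new field in struct g_raid3_softc, and the I/O and access paths switch to the provider-held reference.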

--- a/sys/geom/raid3/g_raid3.c
+++ b/sys/geom/raid3/g_raid3.c

@@ -104,6 +104,7 @@ static int g_raid3_destroy_geom(struct gctl_req *req, struct g_class *mp,
 static g_taste_t g_raid3_taste;
 static void g_raid3_init(struct g_class *mp);
 static void g_raid3_fini(struct g_class *mp);
+static void g_raid3_providergone(struct g_provider *pp);
 
 struct g_class g_raid3_class = {
 	.name = G_RAID3_CLASS_NAME,
@@ -112,7 +113,8 @@ struct g_class g_raid3_class = {
 	.taste = g_raid3_taste,
 	.destroy_geom = g_raid3_destroy_geom,
 	.init = g_raid3_init,
-	.fini = g_raid3_fini
+	.fini = g_raid3_fini,
+	.providergone = g_raid3_providergone,
 };
 
 static void g_raid3_destroy_provider(struct g_raid3_softc *sc);
@@ -605,6 +607,34 @@ g_raid3_destroy_disk(struct g_raid3_disk *disk)
 	disk->d_state = G_RAID3_DISK_STATE_NODISK;
 }
 
+static void
+g_raid3_free_device(struct g_raid3_softc *sc)
+{
+	KASSERT(sc->sc_refcnt == 0,
+	    ("%s: non-zero refcount %u", __func__, sc->sc_refcnt));
+
+	if (!g_raid3_use_malloc) {
+		uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_64K].sz_zone);
+		uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_16K].sz_zone);
+		uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_4K].sz_zone);
+	}
+	mtx_destroy(&sc->sc_queue_mtx);
+	mtx_destroy(&sc->sc_events_mtx);
+	sx_xunlock(&sc->sc_lock);
+	sx_destroy(&sc->sc_lock);
+	free(sc->sc_disks, M_RAID3);
+	free(sc, M_RAID3);
+}
+
+static void
+g_raid3_providergone(struct g_provider *pp)
+{
+	struct g_raid3_softc *sc = pp->private;
+
+	if (--sc->sc_refcnt == 0)
+		g_raid3_free_device(sc);
+}
+
 static void
 g_raid3_destroy_device(struct g_raid3_softc *sc)
 {
@@ -649,16 +679,9 @@ g_raid3_destroy_device(struct g_raid3_softc *sc)
 	g_wither_geom(sc->sc_sync.ds_geom, ENXIO);
 	G_RAID3_DEBUG(0, "Device %s destroyed.", gp->name);
 	g_wither_geom(gp, ENXIO);
+	if (--sc->sc_refcnt == 0)
+		g_raid3_free_device(sc);
 	g_topology_unlock();
-	if (!g_raid3_use_malloc) {
-		uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_64K].sz_zone);
-		uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_16K].sz_zone);
-		uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_4K].sz_zone);
-	}
-	mtx_destroy(&sc->sc_queue_mtx);
-	mtx_destroy(&sc->sc_events_mtx);
-	sx_xunlock(&sc->sc_lock);
-	sx_destroy(&sc->sc_lock);
 }
 
 static void
@@ -1049,7 +1072,7 @@ g_raid3_scatter(struct bio *pbp)
 	off_t atom, cadd, padd, left;
 	int first;
 
-	sc = pbp->bio_to->geom->softc;
+	sc = pbp->bio_to->private;
 	bp = NULL;
 	if ((pbp->bio_pflags & G_RAID3_BIO_PFLAG_NOPARITY) == 0) {
 		/*
@@ -1118,7 +1141,7 @@ g_raid3_gather(struct bio *pbp)
 	struct bio *xbp, *fbp, *cbp;
 	off_t atom, cadd, padd, left;
 
-	sc = pbp->bio_to->geom->softc;
+	sc = pbp->bio_to->private;
 	/*
 	 * Find bio for which we have to calculate data.
 	 * While going through this path, check if all requests
@@ -1284,7 +1307,7 @@ g_raid3_regular_request(struct bio *cbp)
 	g_topology_assert_not();
 
 	pbp = cbp->bio_parent;
-	sc = pbp->bio_to->geom->softc;
+	sc = pbp->bio_to->private;
 	cbp->bio_from->index--;
 	if (cbp->bio_cmd == BIO_WRITE)
 		sc->sc_writes--;
@@ -1431,7 +1454,7 @@ g_raid3_start(struct bio *bp)
 {
 	struct g_raid3_softc *sc;
 
-	sc = bp->bio_to->geom->softc;
+	sc = bp->bio_to->private;
 	/*
 	 * If sc == NULL or there are no valid disks, provider's error
 	 * should be set and g_raid3_start() should not be called at all.
@@ -1780,7 +1803,7 @@ g_raid3_register_request(struct bio *pbp)
 	int round_robin, verify;
 
 	ndisks = 0;
-	sc = pbp->bio_to->geom->softc;
+	sc = pbp->bio_to->private;
 	if ((pbp->bio_cflags & G_RAID3_BIO_CFLAG_REGSYNC) != 0 &&
 	    sc->sc_syncdisk == NULL) {
 		g_io_deliver(pbp, EIO);
@@ -2008,9 +2031,7 @@ g_raid3_try_destroy(struct g_raid3_softc *sc)
 		sc->sc_worker = NULL;
 	} else {
 		g_topology_unlock();
-		g_raid3_destroy_device(sc);
-		free(sc->sc_disks, M_RAID3);
-		free(sc, M_RAID3);
+		g_raid3_free_device(sc);
 	}
 	return (1);
 }
@@ -2344,6 +2365,8 @@ g_raid3_launch_provider(struct g_raid3_softc *sc)
 	}
 	pp->stripesize *= sc->sc_ndisks - 1;
 	pp->stripeoffset *= sc->sc_ndisks - 1;
+	pp->private = sc;
+	sc->sc_refcnt++;
 	sc->sc_provider = pp;
 	g_error_provider(pp, 0);
 	g_topology_unlock();
@@ -3089,9 +3112,7 @@ g_raid3_access(struct g_provider *pp, int acr, int acw, int ace)
 	G_RAID3_DEBUG(2, "Access request for %s: r%dw%de%d.", pp->name, acr,
 	    acw, ace);
 
-	sc = pp->geom->softc;
-	if (sc == NULL && acr <= 0 && acw <= 0 && ace <= 0)
-		return (0);
+	sc = pp->private;
 	KASSERT(sc != NULL, ("NULL softc (provider=%s).", pp->name));
 
 	dcr = pp->acr + acr;
@@ -3160,6 +3181,7 @@ g_raid3_create(struct g_class *mp, const struct g_raid3_metadata *md)
 	sc->sc_idle = 1;
 	sc->sc_last_write = time_uptime;
 	sc->sc_writes = 0;
+	sc->sc_refcnt = 1;
 	for (n = 0; n < sc->sc_ndisks; n++) {
 		sc->sc_disks[n].d_softc = sc;
 		sc->sc_disks[n].d_no = n;
@@ -3215,18 +3237,8 @@ g_raid3_create(struct g_class *mp, const struct g_raid3_metadata *md)
 	if (error != 0) {
 		G_RAID3_DEBUG(1, "Cannot create kernel thread for %s.",
 		    sc->sc_name);
-		if (!g_raid3_use_malloc) {
-			uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_64K].sz_zone);
-			uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_16K].sz_zone);
-			uma_zdestroy(sc->sc_zones[G_RAID3_ZONE_4K].sz_zone);
-		}
 		g_destroy_geom(sc->sc_sync.ds_geom);
-		mtx_destroy(&sc->sc_events_mtx);
-		mtx_destroy(&sc->sc_queue_mtx);
-		sx_destroy(&sc->sc_lock);
-		g_destroy_geom(sc->sc_geom);
-		free(sc->sc_disks, M_RAID3);
-		free(sc, M_RAID3);
+		g_raid3_free_device(sc);
 		return (NULL);
 	}
@@ -3252,8 +3264,6 @@ g_raid3_destroy(struct g_raid3_softc *sc, int how)
 	struct g_provider *pp;
 
 	g_topology_assert_not();
-	if (sc == NULL)
-		return (ENXIO);
 	sx_assert(&sc->sc_lock, SX_XLOCKED);
 
 	pp = sc->sc_provider;
@@ -3302,8 +3312,6 @@ g_raid3_destroy(struct g_raid3_softc *sc, int how)
 	G_RAID3_DEBUG(4, "%s: Woken up %p.", __func__, &sc->sc_worker);
 	sx_xlock(&sc->sc_lock);
 	g_raid3_destroy_device(sc);
-	free(sc->sc_disks, M_RAID3);
-	free(sc, M_RAID3);
 	return (0);
 }

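A side effect of the provider holding a counted reference is visible in the I/O paths above: lookups of the softc switch from pbp->bio_to->geom->softc to pbp->bio_to->private, since the geom's softc pointer is cleared early in teardown while the provider-held reference stays valid until g_raid3_providergone() runs. A small standalone sketch of that lookup, again with invented stand-ins rather than the real GEOM structures:

#include <stdio.h>

/* Invented stand-ins for the GEOM structures, for illustration only. */
struct raid3_softc {
	unsigned refcnt;
};

struct geom {
	struct raid3_softc *softc;	/* cleared early in teardown */
};

struct provider {
	struct geom *geom;
	void *private;			/* counted reference to the softc */
};

/* Old-style lookup: may observe a NULL softc while the device is dying. */
static struct raid3_softc *
softc_via_geom(struct provider *pp)
{
	return (pp->geom->softc);
}

/* New-style lookup: valid until the provider-gone callback drops the ref. */
static struct raid3_softc *
softc_via_provider(struct provider *pp)
{
	return (pp->private);
}

int
main(void)
{
	struct raid3_softc sc = { .refcnt = 2 };
	struct geom gp = { .softc = &sc };
	struct provider pp = { .geom = &gp, .private = &sc };

	gp.softc = NULL;	/* teardown clears the geom back-pointer first */
	printf("via geom: %p, via provider: %p\n",
	    (void *)softc_via_geom(&pp), (void *)softc_via_provider(&pp));
	return (0);
}
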
--- a/sys/geom/raid3/g_raid3.h
+++ b/sys/geom/raid3/g_raid3.h

@@ -219,6 +219,7 @@ struct g_raid3_softc {
 	int		sc_idle;	/* DIRTY flags removed. */
 	time_t		sc_last_write;
 	u_int		sc_writes;
+	u_int		sc_refcnt;	/* Number of softc references. */
 	TAILQ_HEAD(, g_raid3_event) sc_events;
 	struct mtx	sc_events_mtx;