/*-
 * Copyright (c) 2004 Pawel Jakub Dawidek
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include
__FBSDID("$FreeBSD$");

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

static MALLOC_DEFINE(M_MIRROR, "mirror data", "GEOM_MIRROR Data");

SYSCTL_DECL(_kern_geom);
SYSCTL_NODE(_kern_geom, OID_AUTO, mirror, CTLFLAG_RW, 0, "GEOM_MIRROR stuff");
static u_int g_mirror_debug = 4;
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, debug, CTLFLAG_RW, &g_mirror_debug, 0,
    "Debug level");
static u_int g_mirror_sync_block_size = 131072;
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, sync_block_size, CTLFLAG_RW,
    &g_mirror_sync_block_size, 0, "Synchronization block size");
static u_int g_mirror_timeout = 8;
SYSCTL_UINT(_kern_geom_mirror, OID_AUTO, timeout, CTLFLAG_RW,
    &g_mirror_timeout, 0, "Time to wait on all mirror components");

static int g_mirror_destroy_geom(struct gctl_req *req, struct g_class *mp,
    struct g_geom *gp);
static g_taste_t g_mirror_taste;

struct g_class g_mirror_class = {
	.name = G_MIRROR_CLASS_NAME,
	.ctlreq = g_mirror_config,
	.taste = g_mirror_taste,
	.destroy_geom = g_mirror_destroy_geom
};

static void g_mirror_destroy_disk(struct g_mirror_disk *disk);
static void g_mirror_destroy_provider(struct g_mirror_softc *sc);
static int g_mirror_update_disk(struct g_mirror_disk *disk, u_int state);
static void g_mirror_update_device(struct g_mirror_softc *sc, boolean_t force);
static void g_mirror_dumpconf(struct sbuf *sb, const char *indent,
    struct g_geom *gp, struct g_consumer *cp, struct g_provider *pp);
static void g_mirror_sync_stop(struct g_mirror_disk *disk);


static const char *
g_mirror_disk_state2str(int state)
{

	switch (state) {
	case G_MIRROR_DISK_STATE_NONE:
		return ("NONE");
	case G_MIRROR_DISK_STATE_NEW:
		return ("NEW");
	case G_MIRROR_DISK_STATE_ACTIVE:
		return ("ACTIVE");
	case G_MIRROR_DISK_STATE_SYNCHRONIZING:
		return ("SYNCHRONIZING");
	case G_MIRROR_DISK_STATE_DISCONNECTED:
		return ("DISCONNECTED");
	case G_MIRROR_DISK_STATE_DESTROY:
		return ("DESTROY");
	default:
		return ("INVALID");
	}
}

static const char *
g_mirror_device_state2str(int state)
{

	switch (state) {
	case G_MIRROR_DEVICE_STATE_STARTING:
		return ("STARTING");
	case G_MIRROR_DEVICE_STATE_RUNNING:
		return ("RUNNING");
	default:
		return
("INVALID"); } } static const char * g_mirror_get_diskname(struct g_mirror_disk *disk) { if (disk->d_consumer == NULL || disk->d_consumer->provider == NULL) return ("[unknown]"); return (disk->d_name); } /* * --- Events handling functions --- * Events in geom_mirror are used to maintain disks and device status * from one thread to simplify locking. */ static void g_mirror_event_free(struct g_mirror_event *ep) { free(ep, M_MIRROR); } int g_mirror_event_send(void *arg, int state, int flags) { struct g_mirror_softc *sc; struct g_mirror_disk *disk; struct g_mirror_event *ep; int error; ep = malloc(sizeof(*ep), M_MIRROR, M_WAITOK); G_MIRROR_DEBUG(4, "%s: Sending event %p.", __func__, ep); if ((flags & G_MIRROR_EVENT_DEVICE) != 0) { disk = NULL; sc = arg; } else { disk = arg; sc = disk->d_softc; } ep->e_disk = disk; ep->e_state = state; ep->e_flags = flags; ep->e_error = 0; mtx_lock(&sc->sc_events_mtx); TAILQ_INSERT_TAIL(&sc->sc_events, ep, e_next); mtx_unlock(&sc->sc_events_mtx); G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc); mtx_lock(&sc->sc_queue_mtx); wakeup(sc); mtx_unlock(&sc->sc_queue_mtx); if ((flags & G_MIRROR_EVENT_DONTWAIT) != 0) return (0); g_topology_assert(); G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, ep); g_topology_unlock(); while ((ep->e_flags & G_MIRROR_EVENT_DONE) == 0) { mtx_lock(&sc->sc_events_mtx); msleep(ep, &sc->sc_events_mtx, PRIBIO | PDROP, "mirror_event_wait", hz * 5); } g_topology_lock(); G_MIRROR_DEBUG(4, "%s: Woken up %p.", __func__, ep); error = ep->e_error; g_mirror_event_free(ep); return (error); } static struct g_mirror_event * g_mirror_event_get(struct g_mirror_softc *sc) { struct g_mirror_event *ep; mtx_lock(&sc->sc_events_mtx); ep = TAILQ_FIRST(&sc->sc_events); if (ep != NULL) TAILQ_REMOVE(&sc->sc_events, ep, e_next); mtx_unlock(&sc->sc_events_mtx); return (ep); } static void g_mirror_event_cancel(struct g_mirror_disk *disk) { struct g_mirror_softc *sc; struct g_mirror_event *ep, *tmpep; g_topology_assert(); sc = disk->d_softc; mtx_lock(&sc->sc_events_mtx); TAILQ_FOREACH_SAFE(ep, &sc->sc_events, e_next, tmpep) { if ((ep->e_flags & G_MIRROR_EVENT_DEVICE) != 0) continue; if (ep->e_disk != disk) continue; TAILQ_REMOVE(&sc->sc_events, ep, e_next); if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0) g_mirror_event_free(ep); else { ep->e_error = ECANCELED; wakeup(ep); } } mtx_unlock(&sc->sc_events_mtx); } /* * Return the number of disks in given state. * If state is equal to -1, count all connected disks. */ u_int g_mirror_ndisks(struct g_mirror_softc *sc, int state) { struct g_mirror_disk *disk; u_int n = 0; LIST_FOREACH(disk, &sc->sc_disks, d_next) { if (state == -1 || disk->d_state == state) n++; } return (n); } /* * Find a disk in mirror by its disk ID. */ static struct g_mirror_disk * g_mirror_id2disk(struct g_mirror_softc *sc, uint32_t id) { struct g_mirror_disk *disk; LIST_FOREACH(disk, &sc->sc_disks, d_next) { if (disk->d_id == id) return (disk); } return (NULL); } static int g_mirror_connect_disk(struct g_mirror_disk *disk, struct g_provider *pp) { int error; g_topology_assert(); error = g_attach(disk->d_consumer, pp); if (error != 0) return (error); G_MIRROR_DEBUG(3, "Disk %s connected.", g_mirror_get_diskname(disk)); /* * Gain full disk access right now. * XXX: We don't support read-only devices. 
	 */
	error = g_access(disk->d_consumer, 1, 1, 1);
	if (error != 0)
		return (error);
	G_MIRROR_DEBUG(3, "Disk %s opened.", g_mirror_get_diskname(disk));
	return (0);
}

static void
g_mirror_disconnect_disk(struct g_mirror_disk *disk)
{
	struct g_consumer *cp;

	g_topology_assert();

	cp = disk->d_consumer;
	if (cp->acr != 0 || cp->acw != 0 || cp->ace != 0)
		g_access(cp, -cp->acr, -cp->acw, -cp->ace);
	if (cp->provider != NULL)
		g_detach(cp);
}

/*
 * Initialize disk. This means allocate memory, create a consumer, attach it
 * to the provider and open access (r1w1e1) to it.
 */
static struct g_mirror_disk *
g_mirror_init_disk(struct g_mirror_softc *sc, struct g_provider *pp,
    struct g_mirror_metadata *md, int *errorp)
{
	struct g_mirror_disk *disk;
	int error;

	disk = malloc(sizeof(*disk), M_MIRROR, M_NOWAIT | M_ZERO);
	if (disk == NULL) {
		error = ENOMEM;
		goto fail;
	}
	disk->d_softc = sc;
	disk->d_consumer = g_new_consumer(sc->sc_geom);
	disk->d_consumer->private = disk;
	error = g_mirror_connect_disk(disk, pp);
	if (error != 0)
		goto fail;
	disk->d_id = md->md_did;
	disk->d_count = 0;
	disk->d_state = G_MIRROR_DISK_STATE_NONE;
	disk->d_flags = md->md_flags & G_MIRROR_DISK_FLAG_MASK;
	disk->d_sync.ds_consumer = NULL;
	disk->d_sync.ds_offset = md->md_sync_offset;
	disk->d_sync.ds_offset_done = md->md_sync_offset;
	disk->d_sync.ds_syncid = md->md_syncid;
	if (errorp != NULL)
		*errorp = 0;
	return (disk);
fail:
	if (errorp != NULL)
		*errorp = error;
	if (disk != NULL) {
		if (disk->d_consumer != NULL) {
			g_mirror_disconnect_disk(disk);
			g_destroy_consumer(disk->d_consumer);
		}
		free(disk, M_MIRROR);
	}
	return (NULL);
}

/*
 * Free the disk.
 */
static void
g_mirror_free_disk(struct g_mirror_disk *disk)
{

	g_topology_assert();

	g_mirror_disconnect_disk(disk);
	g_destroy_consumer(disk->d_consumer);
	free(disk, M_MIRROR);
}

static void
g_mirror_destroy_device(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;
	struct g_mirror_event *ep;
	struct g_geom *gp;

	g_topology_assert();

	gp = sc->sc_geom;
	if (sc->sc_provider != NULL)
		g_mirror_destroy_provider(sc);
	for (disk = LIST_FIRST(&sc->sc_disks); disk != NULL;
	    disk = LIST_FIRST(&sc->sc_disks)) {
		g_mirror_destroy_disk(disk);
	}
	while ((ep = g_mirror_event_get(sc)) != NULL) {
		if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0)
			g_mirror_event_free(ep);
		else {
			ep->e_error = ECANCELED;
			ep->e_flags |= G_MIRROR_EVENT_DONE;
			G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, ep);
			mtx_lock(&sc->sc_events_mtx);
			wakeup(ep);
			mtx_unlock(&sc->sc_events_mtx);
		}
	}
	callout_drain(&sc->sc_callout);
	gp->softc = NULL;
	uma_zdestroy(sc->sc_sync.ds_zone);
	g_wither_geom(sc->sc_sync.ds_geom, ENXIO);
	mtx_destroy(&sc->sc_queue_mtx);
	mtx_destroy(&sc->sc_events_mtx);
	G_MIRROR_DEBUG(0, "Device %s destroyed.", gp->name);
	g_wither_geom(gp, ENXIO);
}

static void
g_mirror_destroy_disk(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;

	g_topology_assert();

	LIST_REMOVE(disk, d_next);
	g_mirror_event_cancel(disk);
	sc = disk->d_softc;
	if (sc->sc_hint == disk)
		sc->sc_hint = NULL;
	switch (disk->d_state) {
	case G_MIRROR_DISK_STATE_SYNCHRONIZING:
		g_mirror_sync_stop(disk);
		/* FALLTHROUGH */
	case G_MIRROR_DISK_STATE_NEW:
	case G_MIRROR_DISK_STATE_ACTIVE:
		g_mirror_free_disk(disk);
		break;
	default:
		KASSERT(0 == 1, ("Wrong disk state (%s, %s).",
		    g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state)));
	}
}

static void
g_mirror_orphan(struct g_consumer *cp)
{
	struct g_mirror_disk *disk;

	disk = cp->private;
	g_mirror_event_send(disk, G_MIRROR_DISK_STATE_DISCONNECTED,
	    G_MIRROR_EVENT_DONTWAIT);
}

/*
 * Function should return the next active disk on
list. * It is possible that it will be the same disk as given. * If there are no active disks on list, NULL is returned. */ static __inline struct g_mirror_disk * g_mirror_find_next(struct g_mirror_softc *sc, struct g_mirror_disk *disk) { struct g_mirror_disk *dp; for (dp = LIST_NEXT(disk, d_next); dp != disk; dp = LIST_NEXT(dp, d_next)) { if (dp == NULL) dp = LIST_FIRST(&sc->sc_disks); if (dp->d_state == G_MIRROR_DISK_STATE_ACTIVE) break; } if (dp->d_state != G_MIRROR_DISK_STATE_ACTIVE) return (NULL); return (dp); } static struct g_mirror_disk * g_mirror_get_disk(struct g_mirror_softc *sc) { struct g_mirror_disk *disk; if (sc->sc_hint == NULL) { sc->sc_hint = LIST_FIRST(&sc->sc_disks); if (sc->sc_hint == NULL) return (NULL); } disk = sc->sc_hint; if (disk->d_state != G_MIRROR_DISK_STATE_ACTIVE) { disk = g_mirror_find_next(sc, disk); if (disk == NULL) return (NULL); } sc->sc_hint = g_mirror_find_next(sc, disk); return (disk); } static int g_mirror_clear_metadata(struct g_mirror_disk *disk) { struct g_mirror_softc *sc; off_t offset, length; u_char *sector; int error; sc = disk->d_softc; KASSERT(disk->d_consumer != NULL, ("NULL consumer (%s).", sc->sc_geom->name)); KASSERT(disk->d_consumer->provider != NULL, ("NULL provider (%s).", sc->sc_geom->name)); length = disk->d_consumer->provider->sectorsize; offset = disk->d_consumer->provider->mediasize - length; sector = malloc((size_t)length, M_MIRROR, M_NOWAIT | M_ZERO); if (sector == NULL) return (ENOMEM); error = g_write_data(disk->d_consumer, offset, sector, length); free(sector, M_MIRROR); if (error != 0) { G_MIRROR_DEBUG(0, "Cannot clear metadata on disk %s.", g_mirror_get_diskname(disk)); g_mirror_event_send(disk, G_MIRROR_DISK_STATE_DISCONNECTED, G_MIRROR_EVENT_DONTWAIT); return (error); } G_MIRROR_DEBUG(4, "Metadata on %s cleared.", g_mirror_get_diskname(disk)); return (0); } void g_mirror_fill_metadata(struct g_mirror_softc *sc, struct g_mirror_disk *disk, struct g_mirror_metadata *md) { strlcpy(md->md_magic, G_MIRROR_MAGIC, sizeof(md->md_magic)); md->md_version = G_MIRROR_VERSION; strlcpy(md->md_name, sc->sc_name, sizeof(md->md_name)); md->md_mid = sc->sc_id; md->md_all = sc->sc_ndisks; md->md_slice = sc->sc_slice; md->md_balance = sc->sc_balance; md->md_mediasize = sc->sc_mediasize; md->md_sectorsize = sc->sc_sectorsize; md->md_flags = sc->sc_flags; if (disk == NULL) { md->md_did = arc4random(); md->md_syncid = 0; md->md_sync_offset = 0; } else { md->md_did = disk->d_id; md->md_syncid = disk->d_sync.ds_syncid; md->md_flags |= disk->d_flags; if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) md->md_sync_offset = disk->d_sync.ds_offset_done; else md->md_sync_offset = 0; } } void g_mirror_update_metadata(struct g_mirror_disk *disk) { struct g_mirror_softc *sc; struct g_mirror_metadata md; off_t offset, length; u_char *sector; int error; sc = disk->d_softc; KASSERT(disk->d_consumer != NULL, ("NULL consumer (%s).", sc->sc_geom->name)); KASSERT(disk->d_consumer->provider != NULL, ("NULL provider (%s).", sc->sc_geom->name)); length = disk->d_consumer->provider->sectorsize; offset = disk->d_consumer->provider->mediasize - length; sector = malloc((size_t)length, M_MIRROR, M_NOWAIT); if (sector == NULL) error = ENOMEM; else { g_mirror_fill_metadata(sc, disk, &md); mirror_metadata_encode(&md, sector); error = g_write_data(disk->d_consumer, offset, sector, length); free(sector, M_MIRROR); } if (error != 0) { G_MIRROR_DEBUG(0, "Cannot update metadata on disk %s.", g_mirror_get_diskname(disk)); g_mirror_event_send(disk, 
		    G_MIRROR_DISK_STATE_DISCONNECTED,
		    G_MIRROR_EVENT_DONTWAIT);
		return;
	}
	G_MIRROR_DEBUG(4, "Metadata on %s updated.",
	    g_mirror_get_diskname(disk));
}

static void
g_mirror_bump_syncid(struct g_mirror_softc *sc)
{
	struct g_mirror_disk *disk;

	KASSERT(g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 0,
	    ("%s called with no active disks (device=%s).", __func__,
	    sc->sc_geom->name));

	sc->sc_syncid++;
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		if (disk->d_state == G_MIRROR_DISK_STATE_ACTIVE ||
		    disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
			disk->d_sync.ds_syncid = sc->sc_syncid;
			g_mirror_update_metadata(disk);
		}
	}
}

static void
g_mirror_done(struct bio *bp)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;
	struct bio *pbp;

	g_topology_assert_not();

	pbp = bp->bio_parent;
	sc = pbp->bio_to->geom->softc;
	pbp->bio_inbed++;
	if (bp->bio_error == 0) {
		G_MIRROR_LOGREQ(bp, "Request delivered.");
		g_destroy_bio(bp);
		if (pbp->bio_children == pbp->bio_inbed) {
			pbp->bio_completed = pbp->bio_length;
			G_MIRROR_LOGREQ(pbp, "Request delivered.");
			pbp->bio_error = 0;
			g_io_deliver(pbp, pbp->bio_error);
		}
		return;
	}
	/* An error occurred, try to salvage the situation. */
	G_MIRROR_LOGREQ(bp, "Request failed (error=%d).", bp->bio_error);
	disk = bp->bio_caller1;
	g_mirror_event_send(disk, G_MIRROR_DISK_STATE_DISCONNECTED,
	    G_MIRROR_EVENT_DONTWAIT);
	switch (pbp->bio_cmd) {
	case BIO_READ:
		g_destroy_bio(bp);
		mtx_lock(&sc->sc_queue_mtx);
		bioq_disksort(&sc->sc_queue, pbp);
		G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
		wakeup(sc);
		mtx_unlock(&sc->sc_queue_mtx);
		break;
	case BIO_DELETE:
	case BIO_WRITE:
		pbp->bio_inbed--;
		pbp->bio_children--;
		if (pbp->bio_error == 0)
			pbp->bio_error = bp->bio_error;
		g_destroy_bio(bp);
		if (pbp->bio_children == 0) {
			/*
			 * All requests failed.
			 */
		} else if (pbp->bio_children == pbp->bio_inbed) {
			/* At least one request succeeded. */
			pbp->bio_error = 0;
			pbp->bio_completed = pbp->bio_length;
		} else /* if (pbp->bio_inbed != pbp->bio_children) */ {
			KASSERT(pbp->bio_inbed < pbp->bio_children,
			    ("bio_inbed (%u) is not less than bio_children (%u).",
			    pbp->bio_inbed, pbp->bio_children));
			break;
		}
		g_io_deliver(pbp, pbp->bio_error);
		break;
	default:
		KASSERT(1 == 0, ("Invalid request: %u.", pbp->bio_cmd));
	}
}

static void
g_mirror_sync_done(struct bio *bp)
{
	struct g_mirror_softc *sc;

	G_MIRROR_LOGREQ(bp, "Synchronization request delivered.");
	sc = bp->bio_from->geom->softc;
	bp->bio_flags = BIO_FLAG1;
	mtx_lock(&sc->sc_queue_mtx);
	bioq_disksort(&sc->sc_queue, bp);
	wakeup(sc);
	mtx_unlock(&sc->sc_queue_mtx);
}

static void
g_mirror_start(struct bio *bp)
{
	struct g_mirror_softc *sc;

	sc = bp->bio_to->geom->softc;
	/*
	 * If sc == NULL or there are no valid disks, provider's error
	 * should be set and g_mirror_start() should not be called at all.
	 */
	KASSERT(sc != NULL && sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING,
	    ("Provider's error should be set (error=%d)(mirror=%s).",
	    bp->bio_to->error, bp->bio_to->name));

	G_MIRROR_LOGREQ(bp, "Request received.");

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_DELETE:
		break;
	case BIO_GETATTR:
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}
	mtx_lock(&sc->sc_queue_mtx);
	bioq_disksort(&sc->sc_queue, bp);
	G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc);
	wakeup(sc);
	mtx_unlock(&sc->sc_queue_mtx);
}

/*
 * Send one synchronization request.
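 *
 * The request is issued as a BIO_READ against the mirror provider through
 * the disk's dedicated synchronization consumer; when it completes,
 * g_mirror_sync_done() tags it with BIO_FLAG1 and queues it back to the
 * worker, which rewrites it as a BIO_WRITE to the disk being synchronized.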
 */
static void
g_mirror_sync_one(struct g_mirror_disk *disk)
{
	struct g_mirror_softc *sc;
	struct bio *bp;

	sc = disk->d_softc;
	KASSERT(disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING,
	    ("Disk %s is not marked for synchronization.",
	    g_mirror_get_diskname(disk)));

	bp = g_new_bio();
	if (bp == NULL)
		return;
	bp->bio_parent = NULL;
	bp->bio_cmd = BIO_READ;
	bp->bio_offset = disk->d_sync.ds_offset;
	bp->bio_length = MIN(sc->sc_sync.ds_block,
	    sc->sc_mediasize - bp->bio_offset);
	disk->d_sync.ds_offset += bp->bio_length;
	bp->bio_flags = 0;
	bp->bio_done = g_mirror_sync_done;
	bp->bio_data = uma_zalloc(sc->sc_sync.ds_zone, M_NOWAIT | M_ZERO);
	if (bp->bio_data == NULL) {
		g_destroy_bio(bp);
		return;
	}
	bp->bio_to = sc->sc_provider;
	bp->bio_caller1 = disk;
	G_MIRROR_LOGREQ(bp, "Sending synchronization request.");
	g_io_request(bp, disk->d_sync.ds_consumer);
}

static void
g_mirror_sync_request(struct bio *bp)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;

	disk = bp->bio_caller1;
	sc = disk->d_softc;

	/*
	 * Synchronization request.
	 */
	switch (bp->bio_cmd) {
	case BIO_READ:
		if (bp->bio_error != 0) {
			uma_zfree(sc->sc_sync.ds_zone, bp->bio_data);
			g_destroy_bio(bp);
			return;
		}
		bp->bio_cmd = BIO_WRITE;
		bp->bio_flags = 0;
		G_MIRROR_LOGREQ(bp, "READ finished.");
		g_io_request(bp, disk->d_consumer);
		return;
	case BIO_WRITE:
		uma_zfree(sc->sc_sync.ds_zone, bp->bio_data);
		if (bp->bio_error != 0) {
			g_destroy_bio(bp);
			g_mirror_event_send(disk,
			    G_MIRROR_DISK_STATE_DISCONNECTED,
			    G_MIRROR_EVENT_DONTWAIT);
			return;
		}
		G_MIRROR_LOGREQ(bp, "WRITE finished.");
		disk->d_sync.ds_offset_done = bp->bio_offset + bp->bio_length;
		g_destroy_bio(bp);
		if (disk->d_sync.ds_offset_done ==
		    sc->sc_provider->mediasize) {
			/*
			 * Disk up-to-date, activate it.
			 */
			g_mirror_event_send(disk, G_MIRROR_DISK_STATE_ACTIVE,
			    G_MIRROR_EVENT_DONTWAIT);
			return;
		} else if ((disk->d_sync.ds_offset_done %
		    (sc->sc_sync.ds_block * 100)) == 0) {
			/*
			 * Update offset_done on every 100 blocks.
			 * XXX: This should be configurable.
			 */
			g_mirror_update_metadata(disk);
		}
		return;
	default:
		KASSERT(1 == 0, ("Invalid command here: %u (device=%s)",
		    bp->bio_cmd, sc->sc_geom->name));
	}
}

static void
g_mirror_register_request(struct bio *bp)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;
	struct bio *cbp;

	sc = bp->bio_to->geom->softc;
	switch (bp->bio_cmd) {
	case BIO_READ:
		disk = g_mirror_get_disk(sc);
		if (disk == NULL) {
			if (bp->bio_error == 0)
				bp->bio_error = ENXIO;
			g_io_deliver(bp, bp->bio_error);
			return;
		}
		cbp = g_clone_bio(bp);
		if (cbp == NULL) {
			if (bp->bio_error == 0)
				bp->bio_error = ENOMEM;
			g_io_deliver(bp, bp->bio_error);
			return;
		}
		/*
		 * Fill in the component buf structure.
		 */
		cbp->bio_done = g_mirror_done;
		cbp->bio_caller1 = disk;
		cbp->bio_to = disk->d_consumer->provider;
		G_MIRROR_LOGREQ(cbp, "Sending request.");
		g_io_request(cbp, disk->d_consumer);
		return;
	case BIO_WRITE:
	case BIO_DELETE:
	    {
		struct bio_queue_head queue;

		/*
		 * Allocate all bios before sending any request, so we can
		 * return ENOMEM in a nice and clean way.
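		 * The loop below only clones the request for every eligible
		 * disk; a second, identical loop dispatches the clones, so a
		 * failed clone lets us free everything queued so far and fail
		 * the parent request without having sent anything down.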
*/ bioq_init(&queue); LIST_FOREACH(disk, &sc->sc_disks, d_next) { switch (disk->d_state) { case G_MIRROR_DISK_STATE_ACTIVE: break; case G_MIRROR_DISK_STATE_SYNCHRONIZING: if (bp->bio_offset >= disk->d_sync.ds_offset) continue; break; default: continue; } cbp = g_clone_bio(bp); if (cbp == NULL) { for (cbp = bioq_first(&queue); cbp != NULL; cbp = bioq_first(&queue)) { bioq_remove(&queue, cbp); g_destroy_bio(cbp); } if (bp->bio_error == 0) bp->bio_error = ENOMEM; g_io_deliver(bp, bp->bio_error); return; } bioq_insert_tail(&queue, cbp); } LIST_FOREACH(disk, &sc->sc_disks, d_next) { switch (disk->d_state) { case G_MIRROR_DISK_STATE_ACTIVE: break; case G_MIRROR_DISK_STATE_SYNCHRONIZING: if (bp->bio_offset >= disk->d_sync.ds_offset) continue; break; default: continue; } cbp = bioq_first(&queue); KASSERT(cbp != NULL, ("NULL cbp! (device %s).", sc->sc_geom->name)); bioq_remove(&queue, cbp); cbp->bio_done = g_mirror_done; cbp->bio_caller1 = disk; cbp->bio_to = disk->d_consumer->provider; G_MIRROR_LOGREQ(cbp, "Sending request."); g_io_request(cbp, disk->d_consumer); } /* * Bump syncid on first write. */ if (sc->sc_bump_syncid) { sc->sc_bump_syncid = 0; g_mirror_bump_syncid(sc); } return; } default: KASSERT(1 == 0, ("Invalid command here: %u (device=%s)", bp->bio_cmd, sc->sc_geom->name)); } } /* * Worker thread. */ static void g_mirror_worker(void *arg) { struct g_mirror_softc *sc; struct g_mirror_disk *disk; struct g_mirror_event *ep; struct bio *bp; sc = arg; curthread->td_base_pri = PRIBIO; for (;;) { G_MIRROR_DEBUG(5, "%s: Let's see...", __func__); ep = g_mirror_event_get(sc); if (ep != NULL) { g_topology_lock(); if ((ep->e_flags & G_MIRROR_EVENT_DEVICE) != 0) { /* Update only device status. */ G_MIRROR_DEBUG(4, "Running event for device %s.", sc->sc_geom->name); ep->e_error = 0; g_mirror_update_device(sc, 1); } else { /* Update disk status. */ G_MIRROR_DEBUG(4, "Running event for disk %s.", g_mirror_get_diskname(ep->e_disk)); ep->e_error = g_mirror_update_disk(ep->e_disk, ep->e_state); if (ep->e_error == 0) g_mirror_update_device(sc, 0); } g_topology_unlock(); if ((ep->e_flags & G_MIRROR_EVENT_DONTWAIT) != 0) { KASSERT(ep->e_error == 0, ("Error cannot be handled.")); g_mirror_event_free(ep); } else { ep->e_flags |= G_MIRROR_EVENT_DONE; G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, ep); mtx_lock(&sc->sc_events_mtx); wakeup(ep); mtx_unlock(&sc->sc_events_mtx); } if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) { end: if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_WAIT) != 0) { G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, &sc->sc_worker); wakeup(&sc->sc_worker); sc->sc_worker = NULL; } else { g_topology_lock(); g_mirror_destroy_device(sc); g_topology_unlock(); free(sc, M_MIRROR); } kthread_exit(0); } continue; } /* Get first request from the queue. */ mtx_lock(&sc->sc_queue_mtx); bp = bioq_first(&sc->sc_queue); if (bp == NULL) { mtx_unlock(&sc->sc_queue_mtx); if ((sc->sc_flags & G_MIRROR_DEVICE_FLAG_DESTROY) != 0) { goto end; } /* * No pending I/O request, so it is time for * synchronization. * XXX: It is possible that due to heavy device * load we will not be able to sync disks * at all, in that case we need to consider * forcing synchronization requests. 
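			 *
			 * Note that only one synchronization request per disk
			 * is kept in flight here: if ds_offset is already
			 * ahead of ds_offset_done, the previous request has
			 * not completed yet and the disk is skipped this
			 * round.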
*/ LIST_FOREACH(disk, &sc->sc_disks, d_next) { if (disk->d_state != G_MIRROR_DISK_STATE_SYNCHRONIZING) { continue; } if (disk->d_sync.ds_offset >= sc->sc_provider->mediasize) { continue; } if (disk->d_sync.ds_offset > disk->d_sync.ds_offset_done) { continue; } g_mirror_sync_one(disk); } G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, sc); mtx_lock(&sc->sc_queue_mtx); msleep(sc, &sc->sc_queue_mtx, PRIBIO | PDROP, "mirror:wait", 0); G_MIRROR_DEBUG(4, "%s: Woken up %p.", __func__, sc); continue; } bioq_remove(&sc->sc_queue, bp); mtx_unlock(&sc->sc_queue_mtx); if ((bp->bio_flags & BIO_FLAG1) != 0) g_mirror_sync_request(bp); else g_mirror_register_request(bp); } } static void g_mirror_sync_start(struct g_mirror_disk *disk) { struct g_mirror_softc *sc; int error; g_topology_assert(); sc = disk->d_softc; KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING, ("Device not in RUNNING state (%s, %u).", sc->sc_geom->name, sc->sc_state)); KASSERT(disk->d_sync.ds_consumer == NULL, ("Sync consumer already exists (device=%s, disk=%s).", sc->sc_geom->name, g_mirror_get_diskname(disk))); disk->d_sync.ds_consumer = g_new_consumer(sc->sc_sync.ds_geom); error = g_attach(disk->d_sync.ds_consumer, disk->d_softc->sc_provider); KASSERT(error == 0, ("Cannot attach to %s.", disk->d_softc->sc_geom->name)); error = g_access(disk->d_sync.ds_consumer, 1, 0, 0); KASSERT(error == 0, ("Cannot access to %s.", disk->d_softc->sc_geom->name)); } static void g_mirror_sync_stop(struct g_mirror_disk *disk) { g_topology_assert(); KASSERT(disk->d_sync.ds_consumer != NULL, ("No sync consumer (device=%s, disk=%s).", disk->d_softc->sc_geom->name, g_mirror_get_diskname(disk))); g_access(disk->d_sync.ds_consumer, -1, 0, 0); g_detach(disk->d_sync.ds_consumer); g_destroy_consumer(disk->d_sync.ds_consumer); disk->d_sync.ds_consumer = NULL; } static void g_mirror_launch_provider(struct g_mirror_softc *sc) { struct g_provider *pp; g_topology_assert(); pp = g_new_providerf(sc->sc_geom, sc->sc_geom->name); pp->mediasize = sc->sc_mediasize; pp->sectorsize = sc->sc_sectorsize; sc->sc_provider = pp; g_error_provider(pp, 0); } static void g_mirror_destroy_provider(struct g_mirror_softc *sc) { struct g_mirror_disk *disk; struct bio *bp; g_topology_assert(); KASSERT(sc->sc_provider != NULL, ("NULL provider (device=%s).", sc->sc_geom->name)); g_error_provider(sc->sc_provider, ENXIO); mtx_lock(&sc->sc_queue_mtx); while ((bp = bioq_first(&sc->sc_queue)) != NULL) { bioq_remove(&sc->sc_queue, bp); g_io_deliver(bp, ENXIO); } mtx_unlock(&sc->sc_queue_mtx); sc->sc_provider->flags |= G_PF_WITHER; g_orphan_provider(sc->sc_provider, ENXIO); sc->sc_provider = NULL; LIST_FOREACH(disk, &sc->sc_disks, d_next) { if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) g_mirror_sync_stop(disk); } } static void g_mirror_go(void *arg) { struct g_mirror_softc *sc; sc = arg; G_MIRROR_DEBUG(0, "Force %s start due to timeout.", sc->sc_geom->name); g_mirror_event_send(sc, 0, G_MIRROR_EVENT_DONTWAIT | G_MIRROR_EVENT_DEVICE); } static u_int g_mirror_determine_state(struct g_mirror_disk *disk) { struct g_mirror_softc *sc; u_int state; sc = disk->d_softc; if (sc->sc_syncid == disk->d_sync.ds_syncid) { if ((disk->d_flags & G_MIRROR_DISK_FLAG_SYNCHRONIZING) == 0) { /* Disk does not need synchronization. */ state = G_MIRROR_DISK_STATE_ACTIVE; } else { /* * We can start synchronization from * the stored offset. 
			 */
			state = G_MIRROR_DISK_STATE_SYNCHRONIZING;
		}
	} else if (sc->sc_syncid > disk->d_sync.ds_syncid) {
		/*
		 * Reset all synchronization data for this disk,
		 * because even if it was synchronized, it was
		 * synchronized to disks with a different syncid.
		 */
		disk->d_flags |= G_MIRROR_DISK_FLAG_SYNCHRONIZING;
		disk->d_sync.ds_offset = 0;
		disk->d_sync.ds_offset_done = 0;
		disk->d_sync.ds_syncid = sc->sc_syncid;
		state = G_MIRROR_DISK_STATE_SYNCHRONIZING;
	} else /* if (sc->sc_syncid < disk->d_sync.ds_syncid) */ {
		/*
		 * Not good, NOT GOOD!
		 * It means that the mirror was started on stale disks
		 * and a fresher disk has just arrived.
		 * If there were writes, mirror is fucked up, sorry.
		 * I think the best choice here is to not touch
		 * this disk and inform the user loudly.
		 */
		printf("Mirror %s was started before the freshest disk "
		    "(%s) arrived! It will not be connected to the "
		    "running mirror.\n", sc->sc_geom->name,
		    g_mirror_get_diskname(disk));
		g_mirror_destroy_disk(disk);
		state = G_MIRROR_DISK_STATE_NONE;
	}
	G_MIRROR_DEBUG(1, "State for %s disk: %s.",
	    g_mirror_get_diskname(disk), g_mirror_disk_state2str(state));
	return (state);
}

/*
 * Update device state.
 */
static void
g_mirror_update_device(struct g_mirror_softc *sc, boolean_t force)
{
	struct g_mirror_disk *disk;
	u_int state;

	g_topology_assert();

	switch (sc->sc_state) {
	case G_MIRROR_DEVICE_STATE_STARTING:
	    {
		uint16_t syncid;

		KASSERT(sc->sc_provider == NULL,
		    ("Non-NULL provider in STARTING state (%s).",
		    sc->sc_geom->name));

		/*
		 * Are we ready? We are, if all disks are connected or
		 * if we have any disks and 'force' is true.
		 */
		if ((force && g_mirror_ndisks(sc, -1) > 0) ||
		    sc->sc_ndisks == g_mirror_ndisks(sc, -1)) {
			;
		} else if (g_mirror_ndisks(sc, -1) == 0) {
			/*
			 * Disks went down in starting phase, so destroy
			 * device.
			 */
			callout_drain(&sc->sc_callout);
			sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY;
			return;
		} else {
			return;
		}

		/*
		 * Activate all disks with the biggest syncid.
		 */
		if (force) {
			u_int ndisks;

			/*
			 * If called with 'force' true, we're called from the
			 * timeout procedure, so don't bother canceling the
			 * timeout.
			 */
			ndisks = 0;
			LIST_FOREACH(disk, &sc->sc_disks, d_next) {
				if ((disk->d_flags &
				    G_MIRROR_DISK_FLAG_SYNCHRONIZING) == 0) {
					ndisks++;
				}
			}
			if (ndisks == 0) {
				int timeout;

				/* Still no valid disks, wait some more. */
				timeout =
				    atomic_load_acq_int(&g_mirror_timeout);
				callout_reset(&sc->sc_callout, timeout * hz,
				    g_mirror_go, sc);
				return;
			}
		} else {
			/* Cancel timeout. */
			callout_drain(&sc->sc_callout);
			/*
			 * If timeout procedure was in progress, check if
			 * device was turned on already.
			 */
			if (sc->sc_state != G_MIRROR_DEVICE_STATE_STARTING)
				return;
		}

		/* Find disk with the biggest syncid. */
		syncid = 0;
		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
			if (disk->d_sync.ds_syncid > syncid)
				syncid = disk->d_sync.ds_syncid;
		}

		/* Reset hint. */
		sc->sc_hint = NULL;
		sc->sc_syncid = syncid;
		if (force) {
			/* Remember to bump syncid on first write. */
			sc->sc_bump_syncid = 1;
		}

		/* Create and run provider. */
		g_mirror_launch_provider(sc);

		state = G_MIRROR_DEVICE_STATE_RUNNING;
		G_MIRROR_DEBUG(1, "Device %s state changed from %s to %s.",
		    sc->sc_geom->name,
		    g_mirror_device_state2str(sc->sc_state),
		    g_mirror_device_state2str(state));
		sc->sc_state = state;
		LIST_FOREACH(disk, &sc->sc_disks, d_next) {
			state = g_mirror_determine_state(disk);
			g_mirror_event_send(disk, state,
			    G_MIRROR_EVENT_DONTWAIT);
		}
		break;
	    }
	case G_MIRROR_DEVICE_STATE_RUNNING:
		KASSERT(sc->sc_provider != NULL,
		    ("NULL provider in RUNNING state (%s).",
		    sc->sc_geom->name));
		/*
		 * Basically, we should check if device may still run.
* It may run if we have at least one active disk. */ if (g_mirror_ndisks(sc, G_MIRROR_DISK_STATE_ACTIVE) > 0) break; /* * No active disks, we need to destroy device. */ sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY; break; default: KASSERT(1 == 0, ("Wrong device state (%s, %s).", sc->sc_geom->name, g_mirror_device_state2str(sc->sc_state))); } } /* * Update disk state and device state if needed. */ static int g_mirror_update_disk(struct g_mirror_disk *disk, u_int state) { struct g_mirror_softc *sc; g_topology_assert(); sc = disk->d_softc; again: G_MIRROR_DEBUG(3, "Changing disk %s state from %s to %s.", g_mirror_get_diskname(disk), g_mirror_disk_state2str(disk->d_state), g_mirror_disk_state2str(state)); switch (state) { case G_MIRROR_DISK_STATE_NEW: { struct g_mirror_disk *dp; /* * Possible scenarios: * 1. New disk arrive. */ /* Previous state should be NONE. */ KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NONE, ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk), g_mirror_disk_state2str(disk->d_state))); /* * Check for duplication. */ dp = g_mirror_id2disk(sc, disk->d_id); if (dp != NULL) { struct g_provider *pp; /* * We have a duplication. */ pp = disk->d_consumer->provider; g_mirror_free_disk(disk); G_MIRROR_DEBUG(1, "Disk %s have the same ID as disk %s!", pp->name, dp->d_consumer->provider->name); return (EEXIST); } G_MIRROR_DEBUG(1, "Disk %s state changed from %s to %s (device %s).", g_mirror_get_diskname(disk), g_mirror_disk_state2str(disk->d_state), g_mirror_disk_state2str(state), sc->sc_geom->name); disk->d_state = state; LIST_INSERT_HEAD(&sc->sc_disks, disk, d_next); if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING) break; KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING, ("Wrong device state (%s, %s, %s, %s).", sc->sc_geom->name, g_mirror_device_state2str(sc->sc_state), g_mirror_get_diskname(disk), g_mirror_disk_state2str(disk->d_state))); state = g_mirror_determine_state(disk); if (state == G_MIRROR_DISK_STATE_NONE) return (0); goto again; } case G_MIRROR_DISK_STATE_ACTIVE: /* * Possible scenarios: * 1. New disk does not need synchronization. * 2. Synchronization process finished successfully. */ KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING, ("Wrong device state (%s, %s, %s, %s).", sc->sc_geom->name, g_mirror_device_state2str(sc->sc_state), g_mirror_get_diskname(disk), g_mirror_disk_state2str(disk->d_state))); /* Previous state should be NEW or SYNCHRONIZING. */ KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW || disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING, ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk), g_mirror_disk_state2str(disk->d_state))); if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) { disk->d_flags &= ~G_MIRROR_DISK_FLAG_SYNCHRONIZING; g_mirror_sync_stop(disk); } G_MIRROR_DEBUG(1, "Disk %s state changed from %s to %s (device %s).", g_mirror_get_diskname(disk), g_mirror_disk_state2str(disk->d_state), g_mirror_disk_state2str(state), sc->sc_geom->name); disk->d_state = state; disk->d_sync.ds_offset = 0; disk->d_sync.ds_offset_done = 0; disk->d_flags &= ~G_MIRROR_DISK_FLAG_SYNCHRONIZING; g_mirror_update_metadata(disk); break; case G_MIRROR_DISK_STATE_SYNCHRONIZING: /* * Possible scenarios: * 1. Disk which needs synchronization was connected. */ /* Previous state should be NEW. 
*/ KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW, ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk), g_mirror_disk_state2str(disk->d_state))); KASSERT(sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING, ("Wrong device state (%s, %s, %s, %s).", sc->sc_geom->name, g_mirror_device_state2str(sc->sc_state), g_mirror_get_diskname(disk), g_mirror_disk_state2str(disk->d_state))); G_MIRROR_DEBUG(1, "Disk %s state changed from %s to %s (device %s).", g_mirror_get_diskname(disk), g_mirror_disk_state2str(disk->d_state), g_mirror_disk_state2str(state), sc->sc_geom->name); disk->d_state = state; g_mirror_update_metadata(disk); if (sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING) g_mirror_sync_start(disk); break; case G_MIRROR_DISK_STATE_DISCONNECTED: /* * Possible scenarios: * 1. Device wasn't running yet, but disk disappear. * 2. Disk was active and disapppear. * 3. Disk disappear during synchronization process. */ if (sc->sc_state == G_MIRROR_DEVICE_STATE_RUNNING) { /* Previous state should be ACTIVE or SYNCHRONIZING. */ KASSERT(disk->d_state == G_MIRROR_DISK_STATE_ACTIVE || disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING, ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk), g_mirror_disk_state2str(disk->d_state))); /* * Remember to bump syncid on first write, but only if * we're in RUNNING state. */ sc->sc_bump_syncid = 1; #ifdef INVARIANTS } else if (sc->sc_state == G_MIRROR_DEVICE_STATE_STARTING) { /* Previous state should be NEW. */ KASSERT(disk->d_state == G_MIRROR_DISK_STATE_NEW, ("Wrong disk state (%s, %s).", g_mirror_get_diskname(disk), g_mirror_disk_state2str(disk->d_state))); } else { KASSERT(1 == 0, ("Wrong device state (%s, %s, %s, %s).", sc->sc_geom->name, g_mirror_device_state2str(sc->sc_state), g_mirror_get_diskname(disk), g_mirror_disk_state2str(disk->d_state))); #endif } G_MIRROR_DEBUG(1, "Disk %s state changed from %s to %s (device %s).", g_mirror_get_diskname(disk), g_mirror_disk_state2str(disk->d_state), g_mirror_disk_state2str(state), sc->sc_geom->name); g_mirror_destroy_disk(disk); break; case G_MIRROR_DISK_STATE_DESTROY: { int error; error = g_mirror_clear_metadata(disk); if (error != 0) return (error); G_MIRROR_DEBUG(1, "Disk %s state changed from %s to %s (device %s).", g_mirror_get_diskname(disk), g_mirror_disk_state2str(disk->d_state), g_mirror_disk_state2str(state), sc->sc_geom->name); g_mirror_destroy_disk(disk); sc->sc_ndisks--; LIST_FOREACH(disk, &sc->sc_disks, d_next) { g_mirror_update_metadata(disk); } break; } default: KASSERT(1 == 0, ("Unknown state (%u).", state)); } return (0); } static int g_mirror_read_metadata(struct g_consumer *cp, struct g_mirror_metadata *md) { struct g_provider *pp; u_char *buf; int error; g_topology_assert(); error = g_access(cp, 1, 0, 0); if (error != 0) return (error); pp = cp->provider; g_topology_unlock(); /* Metadata are stored on last sector. */ buf = g_read_data(cp, pp->mediasize - pp->sectorsize, pp->sectorsize, &error); g_topology_lock(); if (buf == NULL) { g_access(cp, -1, 0, 0); return (error); } if (error != 0) { g_access(cp, -1, 0, 0); g_free(buf); return (error); } error = g_access(cp, -1, 0, 0); KASSERT(error == 0, ("Cannot decrease access count for %s.", pp->name)); /* Decode metadata. */ mirror_metadata_decode(buf, md); g_free(buf); return (0); } /* * Add disk to the mirror. * * Algorithm: * * If bitmap for disk exists, start synchronization process. * NOTE: If on-disk synchronization ID and mirror synchronization ID are equal, * but bitmap exists, it is bogus. 
It means, that disk was disconnected * and its synchronization ID was set manually. We could assert this, but * don't be so cruel, life is cruel already, and just ignore this and * start synchronization process depending on bitmap. * * If there is no bitmap and synchronization IDs are different, whole disk * have to be synchronized. * NOTE: If synchronization ID on connected disk is bigger that mirror's * synchronization ID, it is bogus. It means, that someone change it * for this disk manually. Again we could assert this, but again we * have to remember that world is cruel enough and people are evil. * * The easiest situation to handle: synchronization IDs are equal and there is * no bitmap. It means that there were no writes while disk was disconnected. * Quite great, because we don't need any synchronization at all here. * But don't be so happy, remember that nobody likes you and this is the most * uncommon situation. * * ...I really should assert those "NOTE:" cases someday, I see no reason to be * better than cruel world, no reason at all. */ static int g_mirror_add_disk(struct g_mirror_softc *sc, struct g_provider *pp, struct g_mirror_metadata *md) { struct g_mirror_disk *disk; int error = 0; g_topology_assert(); G_MIRROR_DEBUG(2, "Adding disk %s.", pp->name); disk = g_mirror_init_disk(sc, pp, md, &error); if (disk == NULL) return (error); error = g_mirror_event_send(disk, G_MIRROR_DISK_STATE_NEW, G_MIRROR_EVENT_WAIT); return (error); } static struct g_geom * g_mirror_create(struct g_class *mp, const struct g_mirror_metadata *md) { struct g_mirror_softc *sc; struct g_geom *gp; int error, timeout; G_MIRROR_DEBUG(1, "Creating device %s.mirror (id=%u).", md->md_name, md->md_mid); /* Two disks is minimum. */ if (md->md_all <= 1) return (NULL); /* * Action geom. */ gp = g_new_geomf(mp, "%s.mirror", md->md_name); sc = malloc(sizeof(*sc), M_MIRROR, M_NOWAIT | M_ZERO); if (sc == NULL) { G_MIRROR_DEBUG(0, "Can't allocate memory for device %s.", gp->name); g_destroy_geom(gp); return (NULL); } gp->start = g_mirror_start; gp->spoiled = g_mirror_orphan; gp->orphan = g_mirror_orphan; gp->access = g_std_access; gp->dumpconf = g_mirror_dumpconf; strlcpy(sc->sc_name, md->md_name, sizeof(sc->sc_name)); sc->sc_id = md->md_mid; sc->sc_slice = md->md_slice; sc->sc_balance = md->md_balance; sc->sc_mediasize = md->md_mediasize; sc->sc_sectorsize = md->md_sectorsize; sc->sc_ndisks = md->md_all; sc->sc_bump_syncid = 0; bioq_init(&sc->sc_queue); mtx_init(&sc->sc_queue_mtx, "gmirror:queue", NULL, MTX_DEF); LIST_INIT(&sc->sc_disks); TAILQ_INIT(&sc->sc_events); mtx_init(&sc->sc_events_mtx, "gmirror:events", NULL, MTX_DEF); callout_init(&sc->sc_callout, CALLOUT_MPSAFE); sc->sc_state = G_MIRROR_DEVICE_STATE_STARTING; gp->softc = sc; sc->sc_geom = gp; sc->sc_provider = NULL; /* * Synchronization geom. 
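	 *
	 * This geom owns the per-disk synchronization consumers;
	 * g_mirror_sync_start() attaches them to the mirror's own provider,
	 * so resynchronization reads go through the normal mirror read path
	 * while the data is rewritten only to the stale component.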
*/ gp = g_new_geomf(mp, "%s.mirror.sync", md->md_name); gp->softc = sc; gp->spoiled = g_mirror_orphan; gp->orphan = g_mirror_orphan; sc->sc_sync.ds_geom = gp; sc->sc_sync.ds_block = atomic_load_acq_int(&g_mirror_sync_block_size); sc->sc_sync.ds_zone = uma_zcreate("gmirror:sync", sc->sc_sync.ds_block, NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); error = kthread_create(g_mirror_worker, sc, &sc->sc_worker, 0, 0, "g_mirror %s", md->md_name); if (error != 0) { G_MIRROR_DEBUG(1, "Cannot create kernel thread for %s.", sc->sc_geom->name); uma_zdestroy(sc->sc_sync.ds_zone); g_destroy_geom(sc->sc_sync.ds_geom); mtx_destroy(&sc->sc_events_mtx); mtx_destroy(&sc->sc_queue_mtx); g_destroy_geom(sc->sc_geom); free(sc, M_MIRROR); return (NULL); } G_MIRROR_DEBUG(0, "Device %s created (id=%u).", sc->sc_geom->name, sc->sc_id); /* * Run timeout. */ timeout = atomic_load_acq_int(&g_mirror_timeout); callout_reset(&sc->sc_callout, timeout * hz, g_mirror_go, sc); return (sc->sc_geom); } int g_mirror_destroy(struct g_mirror_softc *sc, boolean_t force) { struct g_provider *pp; g_topology_assert(); if (sc == NULL) return (ENXIO); pp = sc->sc_provider; if (pp != NULL && (pp->acr != 0 || pp->acw != 0 || pp->ace != 0)) { if (force) { G_MIRROR_DEBUG(0, "Device %s is still open, so it " "can't be definitely removed.", pp->name); } else { G_MIRROR_DEBUG(1, "Device %s is still open (r%dw%de%d).", pp->name, pp->acr, pp->acw, pp->ace); return (EBUSY); } } sc->sc_flags |= G_MIRROR_DEVICE_FLAG_DESTROY; sc->sc_flags |= G_MIRROR_DEVICE_FLAG_WAIT; G_MIRROR_DEBUG(4, "%s: Waking up %p.", __func__, sc); mtx_lock(&sc->sc_queue_mtx); wakeup(sc); mtx_unlock(&sc->sc_queue_mtx); G_MIRROR_DEBUG(4, "%s: Sleeping %p.", __func__, &sc->sc_worker); while (sc->sc_worker != NULL) tsleep(&sc->sc_worker, PRIBIO, "mirror:destroy", hz / 5); G_MIRROR_DEBUG(4, "%s: Woken up %p.", __func__, &sc->sc_worker); g_mirror_destroy_device(sc); free(sc, M_MIRROR); return (0); } static void g_mirror_dump_metadata(struct g_mirror_metadata *md) { printf(" magic: %s\n", md->md_magic); printf(" version: %u\n", (u_int)md->md_version); printf(" name: %s\n", md->md_name); printf(" mid: %u\n", (u_int)md->md_mid); printf(" did: %u\n", (u_int)md->md_did); printf(" all: %u\n", (u_int)md->md_all); printf(" syncid: %u\n", (u_int)md->md_syncid); printf(" slice: %u\n", (u_int)md->md_slice); printf(" balance: %u\n", (u_int)md->md_balance); printf(" mediasize: %jd\n", (intmax_t)md->md_mediasize); printf("sectorsize: %u\n", (u_int)md->md_sectorsize); printf("syncoffset: %jd\n", (intmax_t)md->md_sync_offset); printf(" flags: %ju\n", (uintmax_t)md->md_flags); } static void g_mirror_taste_orphan(struct g_consumer *cp) { KASSERT(1 == 0, ("%s called while tasting %s.", __func__, cp->provider->name)); } /* XXX: Temporary. */ int g_mirror_stop = 0; static struct g_geom * g_mirror_taste(struct g_class *mp, struct g_provider *pp, int flags __unused) { struct g_mirror_metadata md; struct g_mirror_softc *sc; struct g_consumer *cp; struct g_geom *gp; int error; g_trace(G_T_TOPOLOGY, "%s(%s, %s)", __func__, mp->name, pp->name); g_topology_assert(); if (g_mirror_stop) return (NULL); /* * XXX: Hack to force mirror to use NOP providers only for now. */ if (strcmp(pp->geom->class->name, "NOP") != 0) return (NULL); G_MIRROR_DEBUG(3, "Tasting %s.", pp->name); gp = g_new_geomf(mp, "mirror:taste"); /* * This orphan function should be never called. 
	 */
	gp->orphan = g_mirror_taste_orphan;
	cp = g_new_consumer(gp);
	g_attach(cp, pp);
	error = g_mirror_read_metadata(cp, &md);
	g_wither_geom(gp, ENXIO);
	if (error != 0)
		return (NULL);
	gp = NULL;

	if (strcmp(md.md_magic, G_MIRROR_MAGIC) != 0)
		return (NULL);
	if (md.md_version > G_MIRROR_VERSION) {
		printf("geom_mirror.ko module is too old to handle %s.\n",
		    pp->name);
		return (NULL);
	}
	if ((md.md_flags & G_MIRROR_DISK_FLAG_INACTIVE) != 0) {
		G_MIRROR_DEBUG(1, "Disk %s marked as inactive, skipping.",
		    pp->name);
		return (NULL);
	}
	g_mirror_dump_metadata(&md);

	/*
	 * Let's check if device already exists.
	 */
	LIST_FOREACH(gp, &mp->geom, geom) {
		sc = gp->softc;
		if (sc == NULL)
			continue;
		if (sc->sc_sync.ds_geom == gp)
			continue;
		if (strcmp(md.md_name, sc->sc_name) != 0)
			continue;
		if (md.md_mid != sc->sc_id) {
			G_MIRROR_DEBUG(0, "Device %s already configured.",
			    gp->name);
			return (NULL);
		}
		break;
	}
	if (gp == NULL) {
		gp = g_mirror_create(mp, &md);
		if (gp == NULL) {
			G_MIRROR_DEBUG(0, "Cannot create device %s.mirror",
			    md.md_name);
			return (NULL);
		}
		sc = gp->softc;
	}
	G_MIRROR_DEBUG(1, "Adding disk %s to %s.", pp->name, gp->name);
	error = g_mirror_add_disk(sc, pp, &md);
	if (error != 0) {
		G_MIRROR_DEBUG(0, "Cannot add disk %s to %s (error=%d).",
		    pp->name, gp->name, error);
		if (LIST_EMPTY(&sc->sc_disks))
			g_mirror_destroy(sc, 1);
		return (NULL);
	}
	return (gp);
}

static int
g_mirror_destroy_geom(struct gctl_req *req __unused,
    struct g_class *mp __unused, struct g_geom *gp)
{

	return (g_mirror_destroy(gp->softc, 0));
}

static void
g_mirror_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
    struct g_consumer *cp, struct g_provider *pp)
{
	struct g_mirror_softc *sc;
	struct g_mirror_disk *disk;

	sc = gp->softc;
	if (sc == NULL || pp == NULL)
		return;
	sbuf_printf(sb, "%s%u\n", indent, (u_int)sc->sc_id);
	sbuf_printf(sb, "%s%u\n", indent, sc->sc_syncid);
	sbuf_printf(sb, "%s%u\n", indent, (u_int)sc->sc_slice);
	sbuf_printf(sb, "%s%s\n", indent, balance_name(sc->sc_balance));
	sbuf_printf(sb, "%s%ju\n", indent, (uintmax_t)sc->sc_flags);
	sbuf_printf(sb, "%s%u\n", indent, sc->sc_ndisks);
	sbuf_printf(sb, "%s", indent);
	/* XXX: The disk list can change during our operation. */
	LIST_FOREACH(disk, &sc->sc_disks, d_next) {
		sbuf_printf(sb, "%s(%s", g_mirror_get_diskname(disk),
		    g_mirror_disk_state2str(disk->d_state));
		if (disk->d_state == G_MIRROR_DISK_STATE_SYNCHRONIZING) {
			sbuf_printf(sb, " %u%%",
			    (u_int)((disk->d_sync.ds_offset_done * 100) /
			    sc->sc_provider->mediasize));
		}
		sbuf_printf(sb, ")");
		if (LIST_NEXT(disk, d_next) != NULL)
			sbuf_printf(sb, " ");
	}
	sbuf_printf(sb, "\n");
}

DECLARE_GEOM_CLASS(g_mirror_class, g_mirror);