--- uts/common/fs/zfs/arc.c
+++ uts/common/fs/zfs/arc.c
@@ -757,6 +757,7 @@
 	hdr->b_spa = spa;
 	hdr->b_state = arc.anon;
 	hdr->b_arc_access = 0;
+	mutex_init(&hdr->b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
 	buf = kmem_cache_alloc(buf_cache, KM_SLEEP);
 	buf->b_hdr = hdr;
 	buf->b_data = NULL;
@@ -906,6 +907,7 @@
 		kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
 		hdr->b_freeze_cksum = NULL;
 	}
+	mutex_destroy(&hdr->b_freeze_lock);
 
 	ASSERT(!list_link_active(&hdr->b_arc_node));
 	ASSERT3P(hdr->b_hash_next, ==, NULL);
--- uts/common/fs/zfs/spa.c
+++ uts/common/fs/zfs/spa.c
@@ -126,9 +126,7 @@
 	rw_init(&spa->spa_traverse_lock, NULL, RW_DEFAULT, NULL);
 
-	mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
-	mutex_init(&spa->spa_config_cache_lock, NULL, MUTEX_DEFAULT, NULL);
-	mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
+	mutex_init(&spa->spa_uberblock_lock, NULL, MUTEX_DEFAULT, NULL);
 	mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
 	mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
 	mutex_init(&spa->spa_config_lock.scl_lock, NULL, MUTEX_DEFAULT, NULL);
@@ -167,8 +165,6 @@
 	list_destroy(&spa->spa_dirty_list);
 
-	rw_destroy(&spa->spa_traverse_lock);
-
 	for (t = 0; t < ZIO_TYPES; t++) {
 		taskq_destroy(spa->spa_zio_issue_taskq[t]);
 		taskq_destroy(spa->spa_zio_intr_taskq[t]);
@@ -188,6 +184,14 @@
 	avl_destroy(&spa->spa_errlist_scrub);
 	avl_destroy(&spa->spa_errlist_last);
 
+	rw_destroy(&spa->spa_traverse_lock);
+	mutex_destroy(&spa->spa_uberblock_lock);
+	mutex_destroy(&spa->spa_errlog_lock);
+	mutex_destroy(&spa->spa_errlist_lock);
+	mutex_destroy(&spa->spa_config_lock.scl_lock);
+	mutex_destroy(&spa->spa_sync_bplist.bpl_lock);
+	mutex_destroy(&spa->spa_history_lock);
+
 	spa->spa_state = POOL_STATE_UNINITIALIZED;
 }
--- uts/common/fs/zfs/spa_misc.c
+++ uts/common/fs/zfs/spa_misc.c
@@ -232,6 +232,10 @@
 	spa->spa_freeze_txg = UINT64_MAX;
 	spa->spa_final_txg = UINT64_MAX;
 
+	mutex_init(&spa->spa_config_cache_lock, NULL, MUTEX_DEFAULT, NULL);
+	mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
+	mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
+
 	refcount_create(&spa->spa_refcount);
 	refcount_create(&spa->spa_config_lock.scl_count);
@@ -276,14 +280,9 @@
 	refcount_destroy(&spa->spa_refcount);
 	refcount_destroy(&spa->spa_config_lock.scl_count);
 
-	mutex_destroy(&spa->spa_sync_bplist.bpl_lock);
-	mutex_destroy(&spa->spa_config_lock.scl_lock);
-	mutex_destroy(&spa->spa_errlist_lock);
-	mutex_destroy(&spa->spa_errlog_lock);
 	mutex_destroy(&spa->spa_scrub_lock);
+	mutex_destroy(&spa->spa_async_lock);
 	mutex_destroy(&spa->spa_config_cache_lock);
-	mutex_destroy(&spa->spa_async_lock);
-	mutex_destroy(&spa->spa_history_lock);
 
 	kmem_free(spa, sizeof (spa_t));
 }
@@ -1007,6 +1006,8 @@
 	avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
 	    offsetof(spa_t, spa_avl));
 
+	mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL);
+
 	avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_spare_t),
 	    offsetof(spa_spare_t, spare_avl));
@@ -1035,4 +1036,5 @@
 	cv_destroy(&spa_namespace_cv);
 	mutex_destroy(&spa_namespace_lock);
+	mutex_destroy(&spa_spare_lock);
 }
--- uts/common/fs/zfs/zap_micro.c
+++ uts/common/fs/zfs/zap_micro.c
@@ -208,6 +208,8 @@
 	winner = dmu_buf_set_user(db, zap, &zap->zap_m.zap_phys, zap_evict);
 
 	if (winner != NULL) {
+		if (!zap->zap_ismicro)
+			mutex_destroy(&zap->zap_f.zap_num_entries_mtx);
 		kmem_free(zap, sizeof (zap_t));
 		return (winner);
 	}
@@ -465,6 +467,8 @@
 	if (zap->zap_ismicro)
 		mze_destroy(zap);
+	else
+		mutex_destroy(&zap->zap_f.zap_num_entries_mtx);
 
 	kmem_free(zap, sizeof (zap_t));
 }
--- uts/common/fs/zfs/zfs_rlock.c
+++ uts/common/fs/zfs/zfs_rlock.c
@@ -517,10 +517,14 @@
 		/* writer locks can't be shared or split */
 		avl_remove(&zp->z_range_avl, rl);
 		mutex_exit(&zp->z_range_lock);
-		if (rl->r_write_wanted)
+		if (rl->r_write_wanted) {
 			cv_broadcast(&rl->r_wr_cv);
-		if (rl->r_read_wanted)
+			cv_destroy(&rl->r_wr_cv);
+		}
+		if (rl->r_read_wanted) {
 			cv_broadcast(&rl->r_rd_cv);
+			cv_destroy(&rl->r_rd_cv);
+		}
 		kmem_free(rl, sizeof (rl_t));
 	} else {
 		/*
--- uts/common/fs/zfs/zfs_vfsops.c
+++ uts/common/fs/zfs/zfs_vfsops.c
@@ -602,6 +602,9 @@
 	if (error) {
 		if (zfsvfs->z_os)
 			dmu_objset_close(zfsvfs->z_os);
+		rw_destroy(&zfsvfs->z_um_lock);
+		mutex_destroy(&zfsvfs->z_znodes_lock);
+		mutex_destroy(&zfsvfs->z_delete_head.z_mutex);
 		kmem_free(zfsvfs, sizeof (zfsvfs_t));
 	} else {
 		atomic_add_32(&zfs_active_fs_count, 1);
@@ -1153,7 +1156,13 @@
 zfs_freevfs(vfs_t *vfsp)
 {
 	zfsvfs_t *zfsvfs = vfsp->vfs_data;
+	int i;
 
+	for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
+		mutex_destroy(&zfsvfs->z_hold_mtx[i]);
+	rw_destroy(&zfsvfs->z_um_lock);
+	mutex_destroy(&zfsvfs->z_znodes_lock);
+	mutex_destroy(&zfsvfs->z_delete_head.z_mutex);
 	kmem_free(zfsvfs, sizeof (zfsvfs_t));
 
 	atomic_add_32(&zfs_active_fs_count, -1);
--- uts/common/fs/zfs/zfs_znode.c
+++ uts/common/fs/zfs/zfs_znode.c
@@ -108,6 +108,7 @@
 	rw_destroy(&zp->z_map_lock);
 	rw_destroy(&zp->z_parent_lock);
 	mutex_destroy(&zp->z_acl_lock);
+	mutex_destroy(&zp->z_range_lock);
 	avl_destroy(&zp->z_range_avl);
 
 	ASSERT(zp->z_dbuf_held == 0);
@@ -320,7 +321,6 @@
 	 */
 	list_create(&zfsvfs->z_delete_head.z_znodes,
 	    sizeof (znode_t), offsetof(znode_t, z_list_node));
-	/* Mutex never destroyed. */
 	mutex_init(&zfsvfs->z_delete_head.z_mutex, NULL, MUTEX_DEFAULT, NULL);
 
 	return (0);
--- uts/common/fs/zfs/zvol.c
+++ uts/common/fs/zfs/zvol.c
@@ -495,6 +495,7 @@
 	zv->zv_zilog = NULL;
 	dmu_objset_close(zv->zv_objset);
 	zv->zv_objset = NULL;
+	rw_destroy(&zv->zv_dslock);
 
 	ddi_soft_state_free(zvol_state, zv->zv_minor);
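
Every hunk above applies the same fix: each lock or condition variable that gets initialized (mutex_init(), rw_init(), or the range-lock cvs) now has a matching mutex_destroy()/rw_destroy()/cv_destroy() on every teardown path, including error exits and the *_fini() routines. The sketch below is illustrative only and not part of the patch: it shows the same init/destroy pairing in self-contained userland C using POSIX threads instead of the kernel synchronization routines; the widget type and function names are hypothetical.

/*
 * Minimal sketch of the init/destroy pairing enforced by the patch above,
 * using POSIX threads in userland rather than the kernel
 * mutex_init()/mutex_destroy() and rw_init()/rw_destroy() interfaces.
 */
#include <pthread.h>
#include <stdlib.h>

typedef struct widget {
	pthread_mutex_t		w_lock;		/* cf. z_znodes_lock */
	pthread_rwlock_t	w_rwlock;	/* cf. z_um_lock */
	void			*w_data;
} widget_t;

widget_t *
widget_create(size_t datasize)
{
	widget_t *w = calloc(1, sizeof (*w));

	if (w == NULL)
		return (NULL);

	(void) pthread_mutex_init(&w->w_lock, NULL);
	(void) pthread_rwlock_init(&w->w_rwlock, NULL);

	w->w_data = malloc(datasize);
	if (w->w_data == NULL) {
		/*
		 * Error path: undo the inits above before freeing the
		 * object, just as the zfs_domount() error-path hunk now
		 * destroys z_um_lock, z_znodes_lock and
		 * z_delete_head.z_mutex before kmem_free().
		 */
		(void) pthread_rwlock_destroy(&w->w_rwlock);
		(void) pthread_mutex_destroy(&w->w_lock);
		free(w);
		return (NULL);
	}
	return (w);
}

void
widget_destroy(widget_t *w)
{
	/* Normal teardown: destroy everything widget_create() set up. */
	free(w->w_data);
	(void) pthread_rwlock_destroy(&w->w_rwlock);
	(void) pthread_mutex_destroy(&w->w_lock);
	free(w);
}

int
main(void)
{
	widget_t *w = widget_create(64);

	if (w != NULL)
		widget_destroy(w);
	return (0);
}

The point mirrored from the patch is that both the error path in widget_create() and the normal path in widget_destroy() release every lock that was initialized, so no code path leaks a lock object.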