FreeBSD ZFS
The Zettabyte File System

spa_misc.c

Go to the documentation of this file.
00001 /*
00002  * CDDL HEADER START
00003  *
00004  * The contents of this file are subject to the terms of the
00005  * Common Development and Distribution License (the "License").
00006  * You may not use this file except in compliance with the License.
00007  *
00008  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
00009  * or http://www.opensolaris.org/os/licensing.
00010  * See the License for the specific language governing permissions
00011  * and limitations under the License.
00012  *
00013  * When distributing Covered Code, include this CDDL HEADER in each
00014  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
00015  * If applicable, add the following below this CDDL HEADER, with the
00016  * fields enclosed by brackets "[]" replaced with your own identifying
00017  * information: Portions Copyright [yyyy] [name of copyright owner]
00018  *
00019  * CDDL HEADER END
00020  */
00021 /*
00022  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
00023  * Copyright (c) 2012 by Delphix. All rights reserved.
00024  * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
00025  */
00026 
00027 #include <sys/zfs_context.h>
00028 #include <sys/spa_impl.h>
00029 #include <sys/zio.h>
00030 #include <sys/zio_checksum.h>
00031 #include <sys/zio_compress.h>
00032 #include <sys/dmu.h>
00033 #include <sys/dmu_tx.h>
00034 #include <sys/zap.h>
00035 #include <sys/zil.h>
00036 #include <sys/vdev_impl.h>
00037 #include <sys/metaslab.h>
00038 #include <sys/uberblock_impl.h>
00039 #include <sys/txg.h>
00040 #include <sys/avl.h>
00041 #include <sys/unique.h>
00042 #include <sys/dsl_pool.h>
00043 #include <sys/dsl_dir.h>
00044 #include <sys/dsl_prop.h>
00045 #include <sys/dsl_scan.h>
00046 #include <sys/fs/zfs.h>
00047 #include <sys/metaslab_impl.h>
00048 #include <sys/arc.h>
00049 #include <sys/ddt.h>
00050 #include "zfs_prop.h"
00051 #include "zfeature_common.h"
00052 
/*
 * spa_namespace_avl holds every spa_t in the system, keyed by pool name
 * (spa_lookup() searches it via search.spa_name).  All namespace
 * manipulation is serialized by spa_namespace_lock; spa_namespace_cv is
 * broadcast in spa_remove() to wake anyone waiting for a pool to go away.
 */
static avl_tree_t spa_namespace_avl;
kmutex_t spa_namespace_lock;
static kcondvar_t spa_namespace_cv;
/* Number of pools with an alternate root set (see spa_add()/spa_remove()) */
static int spa_active_count;
int spa_max_replication_override = SPA_DVAS_PER_BP;

/* Global AVL trees (and their locks) tracking hot spares and L2ARC devices */
static kmutex_t spa_spare_lock;
static avl_tree_t spa_spare_avl;
static kmutex_t spa_l2cache_lock;
static avl_tree_t spa_l2cache_avl;

kmem_cache_t *spa_buffer_pool;
int spa_mode_global;

#ifdef ZFS_DEBUG
/* Everything except dprintf is on by default in debug builds */
int zfs_flags = ~ZFS_DEBUG_DPRINTF;
#else
int zfs_flags = 0;
#endif

/*
 * zfs_recover: when nonzero, attempt to continue past otherwise-fatal
 * errors.  Exposed as the FreeBSD loader tunable / read-only sysctl
 * vfs.zfs.recover below.
 */
int zfs_recover = 0;
SYSCTL_DECL(_vfs_zfs);
TUNABLE_INT("vfs.zfs.recover", &zfs_recover);
SYSCTL_INT(_vfs_zfs, OID_AUTO, recover, CTLFLAG_RDTUN, &zfs_recover, 0,
    "Try to recover from otherwise-fatal errors.");
00239 
00240 
00241 /*
00242  * ==========================================================================
00243  * SPA config locking
00244  * ==========================================================================
00245  */
00246 static void
00247 spa_config_lock_init(spa_t *spa)
00248 {
00249         for (int i = 0; i < SCL_LOCKS; i++) {
00250                 spa_config_lock_t *scl = &spa->spa_config_lock[i];
00251                 mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
00252                 cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
00253                 refcount_create(&scl->scl_count);
00254                 scl->scl_writer = NULL;
00255                 scl->scl_write_wanted = 0;
00256         }
00257 }
00258 
00259 static void
00260 spa_config_lock_destroy(spa_t *spa)
00261 {
00262         for (int i = 0; i < SCL_LOCKS; i++) {
00263                 spa_config_lock_t *scl = &spa->spa_config_lock[i];
00264                 mutex_destroy(&scl->scl_lock);
00265                 cv_destroy(&scl->scl_cv);
00266                 refcount_destroy(&scl->scl_count);
00267                 ASSERT(scl->scl_writer == NULL);
00268                 ASSERT(scl->scl_write_wanted == 0);
00269         }
00270 }
00271 
00272 int
00273 spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
00274 {
00275         for (int i = 0; i < SCL_LOCKS; i++) {
00276                 spa_config_lock_t *scl = &spa->spa_config_lock[i];
00277                 if (!(locks & (1 << i)))
00278                         continue;
00279                 mutex_enter(&scl->scl_lock);
00280                 if (rw == RW_READER) {
00281                         if (scl->scl_writer || scl->scl_write_wanted) {
00282                                 mutex_exit(&scl->scl_lock);
00283                                 spa_config_exit(spa, locks ^ (1 << i), tag);
00284                                 return (0);
00285                         }
00286                 } else {
00287                         ASSERT(scl->scl_writer != curthread);
00288                         if (!refcount_is_zero(&scl->scl_count)) {
00289                                 mutex_exit(&scl->scl_lock);
00290                                 spa_config_exit(spa, locks ^ (1 << i), tag);
00291                                 return (0);
00292                         }
00293                         scl->scl_writer = curthread;
00294                 }
00295                 (void) refcount_add(&scl->scl_count, tag);
00296                 mutex_exit(&scl->scl_lock);
00297         }
00298         return (1);
00299 }
00300 
/*
 * Acquire the config locks named by the 'locks' bitmask in mode 'rw' on
 * behalf of 'tag', blocking until all are available.  Readers wait while
 * a writer holds or wants a lock (writer preference); a writer waits for
 * the hold count to reach zero.  Recursive write acquisition is not
 * allowed (asserted per lock).
 */
void
spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw)
{
        int wlocks_held = 0;

        for (int i = 0; i < SCL_LOCKS; i++) {
                spa_config_lock_t *scl = &spa->spa_config_lock[i];
                /* Record locks this thread already holds as writer. */
                if (scl->scl_writer == curthread)
                        wlocks_held |= (1 << i);
                if (!(locks & (1 << i)))
                        continue;
                mutex_enter(&scl->scl_lock);
                if (rw == RW_READER) {
                        while (scl->scl_writer || scl->scl_write_wanted) {
                                cv_wait(&scl->scl_cv, &scl->scl_lock);
                        }
                } else {
                        ASSERT(scl->scl_writer != curthread);
                        while (!refcount_is_zero(&scl->scl_count)) {
                                /* Block new readers while we wait our turn. */
                                scl->scl_write_wanted++;
                                cv_wait(&scl->scl_cv, &scl->scl_lock);
                                scl->scl_write_wanted--;
                        }
                        scl->scl_writer = curthread;
                }
                (void) refcount_add(&scl->scl_count, tag);
                mutex_exit(&scl->scl_lock);
        }
        /*
         * Sanity check that locks already write-held stay within the
         * requested set (numeric <= approximates the subset relation).
         */
        ASSERT(wlocks_held <= locks);
}
00331 
/*
 * Release the config locks named by 'locks' on behalf of 'tag'.  Locks
 * are dropped in reverse order of acquisition.  When the last hold on a
 * lock is removed, any writer designation is cleared and all waiters
 * are woken.
 */
void
spa_config_exit(spa_t *spa, int locks, void *tag)
{
        for (int i = SCL_LOCKS - 1; i >= 0; i--) {
                spa_config_lock_t *scl = &spa->spa_config_lock[i];
                if (!(locks & (1 << i)))
                        continue;
                mutex_enter(&scl->scl_lock);
                ASSERT(!refcount_is_zero(&scl->scl_count));
                if (refcount_remove(&scl->scl_count, tag) == 0) {
                        ASSERT(scl->scl_writer == NULL ||
                            scl->scl_writer == curthread);
                        scl->scl_writer = NULL; /* OK in either case */
                        cv_broadcast(&scl->scl_cv);
                }
                mutex_exit(&scl->scl_lock);
        }
}
00350 
00351 int
00352 spa_config_held(spa_t *spa, int locks, krw_t rw)
00353 {
00354         int locks_held = 0;
00355 
00356         for (int i = 0; i < SCL_LOCKS; i++) {
00357                 spa_config_lock_t *scl = &spa->spa_config_lock[i];
00358                 if (!(locks & (1 << i)))
00359                         continue;
00360                 if ((rw == RW_READER && !refcount_is_zero(&scl->scl_count)) ||
00361                     (rw == RW_WRITER && scl->scl_writer == curthread))
00362                         locks_held |= 1 << i;
00363         }
00364 
00365         return (locks_held);
00366 }
00367 
00368 /*
00369  * ==========================================================================
00370  * SPA namespace functions
00371  * ==========================================================================
00372  */
00373 
00378 spa_t *
00379 spa_lookup(const char *name)
00380 {
00381         static spa_t search;    /* spa_t is large; don't allocate on stack */
00382         spa_t *spa;
00383         avl_index_t where;
00384         char c;
00385         char *cp;
00386 
00387         ASSERT(MUTEX_HELD(&spa_namespace_lock));
00388 
00389         /*
00390          * If it's a full dataset name, figure out the pool name and
00391          * just use that.
00392          */
00393         cp = strpbrk(name, "/@");
00394         if (cp) {
00395                 c = *cp;
00396                 *cp = '\0';
00397         }
00398 
00399         (void) strlcpy(search.spa_name, name, sizeof (search.spa_name));
00400         spa = avl_find(&spa_namespace_avl, &search, &where);
00401 
00402         if (cp)
00403                 *cp = c;
00404 
00405         return (spa);
00406 }
00407 
/*
 * Create a new spa_t for the pool 'name', insert it into the namespace
 * AVL tree, and return it.  The caller must hold spa_namespace_lock.
 *
 * 'config' (may be NULL) is duplicated into spa_config; any
 * ZPOOL_CONFIG_FEATURES_FOR_READ nvlist inside it is duplicated into
 * spa_label_features (otherwise an empty nvlist is allocated).
 * 'altroot' (may be NULL) becomes spa_root.  The returned pool is in
 * POOL_STATE_UNINITIALIZED.
 */
spa_t *
spa_add(const char *name, nvlist_t *config, const char *altroot)
{
        spa_t *spa;
        spa_config_dirent_t *dp;

        ASSERT(MUTEX_HELD(&spa_namespace_lock));

        spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);

        mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);

        cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
        cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL);
        cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
        cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);

        /* One deferred-free bplist per txg slot. */
        for (int t = 0; t < TXG_SIZE; t++)
                bplist_create(&spa->spa_free_bplist[t]);

        (void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
        spa->spa_state = POOL_STATE_UNINITIALIZED;
        spa->spa_freeze_txg = UINT64_MAX;
        spa->spa_final_txg = UINT64_MAX;
        spa->spa_load_max_txg = UINT64_MAX;
        spa->spa_proc = &p0;
        spa->spa_proc_state = SPA_PROC_NONE;

        refcount_create(&spa->spa_refcount);
        spa_config_lock_init(spa);

        avl_add(&spa_namespace_avl, spa);

        /*
         * Set the alternate root, if there is one.
         */
        if (altroot) {
                spa->spa_root = spa_strdup(altroot);
                spa_active_count++;
        }

        /*
         * Every pool starts with the default cachefile
         */
        list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
            offsetof(spa_config_dirent_t, scd_link));

        dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
        /* Pools with an altroot get a NULL path (presumably: no cachefile
         * entry is written for them) — TODO confirm in spa_config_sync(). */
        dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
        list_insert_head(&spa->spa_config_list, dp);

        VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
            KM_SLEEP) == 0);

        if (config != NULL) {
                nvlist_t *features;

                if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
                    &features) == 0) {
                        VERIFY(nvlist_dup(features, &spa->spa_label_features,
                            0) == 0);
                }

                VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
        }

        /* Ensure spa_label_features always exists, even without a config. */
        if (spa->spa_label_features == NULL) {
                VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
                    KM_SLEEP) == 0);
        }

        return (spa);
}
00494 
/*
 * Remove a spa_t from the namespace and free it.  The pool must be
 * uninitialized and the caller must hold spa_namespace_lock.  Undoes
 * everything set up by spa_add(): namespace entry, altroot string,
 * cachefile dirent list, nvlists, refcount, config locks, per-txg
 * bplists, CVs, and mutexes.  Waiters on spa_namespace_cv are woken.
 */
void
spa_remove(spa_t *spa)
{
        spa_config_dirent_t *dp;

        ASSERT(MUTEX_HELD(&spa_namespace_lock));
        ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);

        nvlist_free(spa->spa_config_splitting);

        avl_remove(&spa_namespace_avl, spa);
        /* Wake anyone waiting for this pool name to become available. */
        cv_broadcast(&spa_namespace_cv);

        if (spa->spa_root) {
                spa_strfree(spa->spa_root);
                spa_active_count--;
        }

        /* Drain and free the cachefile dirent list built in spa_add(). */
        while ((dp = list_head(&spa->spa_config_list)) != NULL) {
                list_remove(&spa->spa_config_list, dp);
                if (dp->scd_path != NULL)
                        spa_strfree(dp->scd_path);
                kmem_free(dp, sizeof (spa_config_dirent_t));
        }

        list_destroy(&spa->spa_config_list);

        nvlist_free(spa->spa_label_features);
        nvlist_free(spa->spa_load_info);
        /* NOTE(review): presumably releases spa_config — confirm in
         * spa_config_set(). */
        spa_config_set(spa, NULL);

        refcount_destroy(&spa->spa_refcount);

        spa_config_lock_destroy(spa);

        for (int t = 0; t < TXG_SIZE; t++)
                bplist_destroy(&spa->spa_free_bplist[t]);

        cv_destroy(&spa->spa_async_cv);
        cv_destroy(&spa->spa_proc_cv);
        cv_destroy(&spa->spa_scrub_io_cv);
        cv_destroy(&spa->spa_suspend_cv);

        mutex_destroy(&spa->spa_async_lock);
        mutex_destroy(&spa->spa_errlist_lock);
        mutex_destroy(&spa->spa_errlog_lock);
        mutex_destroy(&spa->spa_history_lock);
        mutex_destroy(&spa->spa_proc_lock);
        mutex_destroy(&spa->spa_props_lock);
        mutex_destroy(&spa->spa_scrub_lock);
        mutex_destroy(&spa->spa_suspend_lock);
        mutex_destroy(&spa->spa_vdev_top_lock);

        kmem_free(spa, sizeof (spa_t));
}
00555 
00562 spa_t *
00563 spa_next(spa_t *prev)
00564 {
00565         ASSERT(MUTEX_HELD(&spa_namespace_lock));
00566 
00567         if (prev)
00568                 return (AVL_NEXT(&spa_namespace_avl, prev));
00569         else
00570                 return (avl_first(&spa_namespace_avl));
00571 }
00572 
00573 /*
00574  * ==========================================================================
00575  * SPA refcount functions
00576  * ==========================================================================
00577  */
00578 
/*
 * Add a reference to the given spa_t on behalf of 'tag'.  The caller
 * must either already hold at least spa_minref references, or hold
 * spa_namespace_lock.
 */
void
spa_open_ref(spa_t *spa, void *tag)
{
        ASSERT(refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
            MUTEX_HELD(&spa_namespace_lock));
        (void) refcount_add(&spa->spa_refcount, tag);
}
00590 
/*
 * Drop a reference on the given spa_t taken by spa_open_ref() with the
 * same 'tag'.  The caller must hold more than spa_minref references, or
 * hold spa_namespace_lock.
 */
void
spa_close(spa_t *spa, void *tag)
{
        ASSERT(refcount_count(&spa->spa_refcount) > spa->spa_minref ||
            MUTEX_HELD(&spa_namespace_lock));
        (void) refcount_remove(&spa->spa_refcount, tag);
}
00602 
/*
 * Return B_TRUE when the pool's reference count has dropped to its
 * floor (spa_minref), i.e. no outside holders remain.  The caller must
 * hold spa_namespace_lock.
 */
boolean_t
spa_refcount_zero(spa_t *spa)
{
        ASSERT(MUTEX_HELD(&spa_namespace_lock));

        return (refcount_count(&spa->spa_refcount) == spa->spa_minref);
}
00615 
00616 /*
00617  * ==========================================================================
00618  * SPA spare and l2cache tracking
00619  * ==========================================================================
00620  */
00621 
00622 /*
00623  * Hot spares and cache devices are tracked using the same code below,
00624  * for 'auxiliary' devices.
00625  */
00626 
typedef struct spa_aux {
        uint64_t        aux_guid;       /* vdev guid of the aux device */
        uint64_t        aux_pool;       /* pool guid it's active in (0 = none) */
        avl_node_t      aux_avl;        /* linkage in spare/l2cache AVL tree */
        int             aux_count;      /* number of pools referencing guid */
} spa_aux_t;
00633 
00634 static int
00635 spa_aux_compare(const void *a, const void *b)
00636 {
00637         const spa_aux_t *sa = a;
00638         const spa_aux_t *sb = b;
00639 
00640         if (sa->aux_guid < sb->aux_guid)
00641                 return (-1);
00642         else if (sa->aux_guid > sb->aux_guid)
00643                 return (1);
00644         else
00645                 return (0);
00646 }
00647 
00648 void
00649 spa_aux_add(vdev_t *vd, avl_tree_t *avl)
00650 {
00651         avl_index_t where;
00652         spa_aux_t search;
00653         spa_aux_t *aux;
00654 
00655         search.aux_guid = vd->vdev_guid;
00656         if ((aux = avl_find(avl, &search, &where)) != NULL) {
00657                 aux->aux_count++;
00658         } else {
00659                 aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
00660                 aux->aux_guid = vd->vdev_guid;
00661                 aux->aux_count = 1;
00662                 avl_insert(avl, aux, where);
00663         }
00664 }
00665 
/*
 * Drop one reference on the aux entry for vd's guid in 'avl'.  The entry
 * must exist.  When the last reference is dropped the node is freed;
 * otherwise, if the entry was active in this vdev's pool, that
 * association is cleared.
 */
void
spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
{
        spa_aux_t search;
        spa_aux_t *aux;
        avl_index_t where;

        search.aux_guid = vd->vdev_guid;
        aux = avl_find(avl, &search, &where);

        ASSERT(aux != NULL);

        if (--aux->aux_count == 0) {
                avl_remove(avl, aux);
                kmem_free(aux, sizeof (spa_aux_t));
        } else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
                /* Other pools still reference this guid; just deactivate. */
                aux->aux_pool = 0ULL;
        }
}
00685 
00686 boolean_t
00687 spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl)
00688 {
00689         spa_aux_t search, *found;
00690 
00691         search.aux_guid = guid;
00692         found = avl_find(avl, &search, NULL);
00693 
00694         if (pool) {
00695                 if (found)
00696                         *pool = found->aux_pool;
00697                 else
00698                         *pool = 0ULL;
00699         }
00700 
00701         if (refcnt) {
00702                 if (found)
00703                         *refcnt = found->aux_count;
00704                 else
00705                         *refcnt = 0;
00706         }
00707 
00708         return (found != NULL);
00709 }
00710 
00711 void
00712 spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
00713 {
00714         spa_aux_t search, *found;
00715         avl_index_t where;
00716 
00717         search.aux_guid = vd->vdev_guid;
00718         found = avl_find(avl, &search, &where);
00719         ASSERT(found != NULL);
00720         ASSERT(found->aux_pool == 0ULL);
00721 
00722         found->aux_pool = spa_guid(vd->vdev_spa);
00723 }
00724 
/* AVL comparator for the hot-spare tree; delegates to spa_aux_compare(). */
static int
spa_spare_compare(const void *a, const void *b)
{
        return (spa_aux_compare(a, b));
}
00755 
00756 void
00757 spa_spare_add(vdev_t *vd)
00758 {
00759         mutex_enter(&spa_spare_lock);
00760         ASSERT(!vd->vdev_isspare);
00761         spa_aux_add(vd, &spa_spare_avl);
00762         vd->vdev_isspare = B_TRUE;
00763         mutex_exit(&spa_spare_lock);
00764 }
00765 
00766 void
00767 spa_spare_remove(vdev_t *vd)
00768 {
00769         mutex_enter(&spa_spare_lock);
00770         ASSERT(vd->vdev_isspare);
00771         spa_aux_remove(vd, &spa_spare_avl);
00772         vd->vdev_isspare = B_FALSE;
00773         mutex_exit(&spa_spare_lock);
00774 }
00775 
00776 boolean_t
00777 spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt)
00778 {
00779         boolean_t found;
00780 
00781         mutex_enter(&spa_spare_lock);
00782         found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl);
00783         mutex_exit(&spa_spare_lock);
00784 
00785         return (found);
00786 }
00787 
00788 void
00789 spa_spare_activate(vdev_t *vd)
00790 {
00791         mutex_enter(&spa_spare_lock);
00792         ASSERT(vd->vdev_isspare);
00793         spa_aux_activate(vd, &spa_spare_avl);
00794         mutex_exit(&spa_spare_lock);
00795 }
00796 
/* AVL comparator for the L2ARC device tree; delegates to spa_aux_compare(). */
static int
spa_l2cache_compare(const void *a, const void *b)
{
        return (spa_aux_compare(a, b));
}
00811 
00812 void
00813 spa_l2cache_add(vdev_t *vd)
00814 {
00815         mutex_enter(&spa_l2cache_lock);
00816         ASSERT(!vd->vdev_isl2cache);
00817         spa_aux_add(vd, &spa_l2cache_avl);
00818         vd->vdev_isl2cache = B_TRUE;
00819         mutex_exit(&spa_l2cache_lock);
00820 }
00821 
00822 void
00823 spa_l2cache_remove(vdev_t *vd)
00824 {
00825         mutex_enter(&spa_l2cache_lock);
00826         ASSERT(vd->vdev_isl2cache);
00827         spa_aux_remove(vd, &spa_l2cache_avl);
00828         vd->vdev_isl2cache = B_FALSE;
00829         mutex_exit(&spa_l2cache_lock);
00830 }
00831 
00832 boolean_t
00833 spa_l2cache_exists(uint64_t guid, uint64_t *pool)
00834 {
00835         boolean_t found;
00836 
00837         mutex_enter(&spa_l2cache_lock);
00838         found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl);
00839         mutex_exit(&spa_l2cache_lock);
00840 
00841         return (found);
00842 }
00843 
00844 void
00845 spa_l2cache_activate(vdev_t *vd)
00846 {
00847         mutex_enter(&spa_l2cache_lock);
00848         ASSERT(vd->vdev_isl2cache);
00849         spa_aux_activate(vd, &spa_l2cache_avl);
00850         mutex_exit(&spa_l2cache_lock);
00851 }
00852 
00853 /*
00854  * ==========================================================================
00855  * SPA vdev locking
00856  * ==========================================================================
00857  */
00858 
/*
 * Lock the pool for adding or removing a vdev.  Takes the pool's
 * spa_vdev_top_lock first, then the global spa_namespace_lock, then all
 * config locks as writer (via spa_vdev_config_enter()).  Returns the txg
 * in which the configuration change will take effect.  Paired with
 * spa_vdev_exit().
 */
uint64_t
spa_vdev_enter(spa_t *spa)
{
        mutex_enter(&spa->spa_vdev_top_lock);
        mutex_enter(&spa_namespace_lock);
        return (spa_vdev_config_enter(spa));
}
00872 
/*
 * Variant of spa_vdev_enter() for callers that already hold
 * spa_namespace_lock: takes SCL_ALL as writer and returns the next txg
 * (one past the last synced txg), in which the config change will occur.
 */
uint64_t
spa_vdev_config_enter(spa_t *spa)
{
        ASSERT(MUTEX_HELD(&spa_namespace_lock));

        spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);

        return (spa_last_synced_txg(spa) + 1);
}
00887 
/*
 * Finish a vdev configuration change begun with spa_vdev_config_enter().
 * Reassesses DTLs, bumps the config generation when the change succeeded
 * and the config dirty list is non-empty, drops SCL_ALL, waits for 'txg'
 * to sync on success, frees 'vd' (if non-NULL) under a fresh SCL_ALL
 * hold, and finally syncs the config cache if the config changed.
 * 'tag' is used only for panic injection testing.
 */
void
spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, char *tag)
{
        ASSERT(MUTEX_HELD(&spa_namespace_lock));

        int config_changed = B_FALSE;

        ASSERT(txg > spa_last_synced_txg(spa));

        spa->spa_pending_vdev = NULL;

        /*
         * Reassess the DTLs.
         */
        vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE);

        if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
                config_changed = B_TRUE;
                spa->spa_config_generation++;
        }

        /*
         * Verify the metaslab classes.
         */
        ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0);
        ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0);

        spa_config_exit(spa, SCL_ALL, spa);

        /*
         * Panic the system if the specified tag requires it.  This
         * is useful for ensuring that configurations are updated
         * transactionally.
         */
        if (zio_injection_enabled)
                zio_handle_panic_injection(spa, tag, 0);

        /*
         * Note: this txg_wait_synced() is important because it ensures
         * that there won't be more than one config change per txg.
         * This allows us to use the txg as the generation number.
         */
        if (error == 0)
                txg_wait_synced(spa->spa_dsl_pool, txg);

        if (vd != NULL) {
                ASSERT(!vd->vdev_detached || vd->vdev_dtl_smo.smo_object == 0);
                /* vdev_free() requires the config lock; re-take it briefly. */
                spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
                vdev_free(vd);
                spa_config_exit(spa, SCL_ALL, spa);
        }

        /*
         * If the config changed, update the config cache.
         */
        if (config_changed)
                spa_config_sync(spa, B_FALSE, B_TRUE);
}
00950 
/*
 * Unlock the pool after a vdev add/remove started by spa_vdev_enter().
 * Runs spa_vdev_config_exit() (freeing 'vd' if non-NULL), then drops the
 * namespace and top-level vdev locks.  Returns 'error' unchanged so
 * callers can write 'return (spa_vdev_exit(...))'.
 */
int
spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
{
        spa_vdev_config_exit(spa, vd, txg, error, FTAG);
        mutex_exit(&spa_namespace_lock);
        mutex_exit(&spa->spa_vdev_top_lock);

        return (error);
}
00966 
/*
 * Lock the pool for a vdev state change: takes SCL_STATE_ALL plus any
 * extra locks in 'oplocks' as writer, and remembers the full set in
 * spa_vdev_locks for spa_vdev_state_exit() to release.  For root pools
 * the acquisition is split around a vdev_hold() (see comment below).
 */
void
spa_vdev_state_enter(spa_t *spa, int oplocks)
{
        int locks = SCL_STATE_ALL | oplocks;

        /*
         * Root pools may need to read of the underlying devfs filesystem
         * when opening up a vdev.  Unfortunately if we're holding the
         * SCL_ZIO lock it will result in a deadlock when we try to issue
         * the read from the root filesystem.  Instead we "prefetch"
         * the associated vnodes that we need prior to opening the
         * underlying devices and cache them so that we can prevent
         * any I/O when we are doing the actual open.
         */
        if (spa_is_root(spa)) {
                /* Split the set at SCL_ZIO: high bits first, hold, then low. */
                int low = locks & ~(SCL_ZIO - 1);
                int high = locks & ~low;

                spa_config_enter(spa, high, spa, RW_WRITER);
                vdev_hold(spa->spa_root_vdev);
                spa_config_enter(spa, low, spa, RW_WRITER);
        } else {
                spa_config_enter(spa, locks, spa, RW_WRITER);
        }
        spa->spa_vdev_locks = locks;
}
00996 
/*
 * Finish a vdev state change started by spa_vdev_state_enter().  If 'vd'
 * is non-NULL its top-level vdev is marked state-dirty, the config
 * generation is bumped, the change is synced out, and the config cache
 * is updated.  Releases the locks recorded in spa_vdev_locks and returns
 * 'error' unchanged for caller convenience.
 */
int
spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
{
        boolean_t config_changed = B_FALSE;

        /* Reassess DTLs for the affected top-level vdev (or the whole tree). */
        if (vd != NULL || error == 0)
                vdev_dtl_reassess(vd ? vd->vdev_top : spa->spa_root_vdev,
                    0, 0, B_FALSE);

        if (vd != NULL) {
                vdev_state_dirty(vd->vdev_top);
                config_changed = B_TRUE;
                spa->spa_config_generation++;
        }

        /* Matches the vdev_hold() taken in spa_vdev_state_enter(). */
        if (spa_is_root(spa))
                vdev_rele(spa->spa_root_vdev);

        ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL);
        spa_config_exit(spa, spa->spa_vdev_locks, spa);

        /*
         * If anything changed, wait for it to sync.  This ensures that,
         * from the system administrator's perspective, zpool(1M) commands
         * are synchronous.  This is important for things like zpool offline:
         * when the command completes, you expect no further I/O from ZFS.
         */
        if (vd != NULL)
                txg_wait_synced(spa->spa_dsl_pool, 0);

        /*
         * If the config changed, update the config cache.
         */
        if (config_changed) {
                mutex_enter(&spa_namespace_lock);
                spa_config_sync(spa, B_FALSE, B_TRUE);
                mutex_exit(&spa_namespace_lock);
        }

        return (error);
}
01038 
01039 /*
01040  * ==========================================================================
01041  * Miscellaneous functions
01042  * ==========================================================================
01043  */
01044 
/*
 * Record 'feature' in the pool's label feature nvlist and dirty the root
 * vdev config so the updated labels get written out.
 */
void
spa_activate_mos_feature(spa_t *spa, const char *feature)
{
        (void) nvlist_add_boolean(spa->spa_label_features, feature);
        vdev_config_dirty(spa->spa_root_vdev);
}
01051 
/*
 * Remove 'feature' from the pool's label feature nvlist and dirty the
 * root vdev config so the updated labels get written out.
 */
void
spa_deactivate_mos_feature(spa_t *spa, const char *feature)
{
        (void) nvlist_remove_all(spa->spa_label_features, feature);
        vdev_config_dirty(spa->spa_root_vdev);
}
01058 
/*
 * Rename the pool 'name' to 'newname': re-key the namespace AVL entry,
 * rewrite all vdev labels with the new name, and sync the config cache.
 * Returns 0 on success or the error from spa_open().
 */
int
spa_rename(const char *name, const char *newname)
{
        spa_t *spa;
        int err;

        /*
         * Lookup the spa_t and grab the config lock for writing.  We need to
         * actually open the pool so that we can sync out the necessary labels.
         * It's OK to call spa_open() with the namespace lock held because we
         * allow recursive calls for other reasons.
         */
        mutex_enter(&spa_namespace_lock);
        if ((err = spa_open(name, &spa, FTAG)) != 0) {
                mutex_exit(&spa_namespace_lock);
                return (err);
        }

        spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);

        /* The AVL tree is keyed by name, so remove/re-add around the change. */
        avl_remove(&spa_namespace_avl, spa);
        (void) strlcpy(spa->spa_name, newname, sizeof (spa->spa_name));
        avl_add(&spa_namespace_avl, spa);

        /*
         * Sync all labels to disk with the new names by marking the root vdev
         * dirty and waiting for it to sync.  It will pick up the new pool name
         * during the sync.
         */
        vdev_config_dirty(spa->spa_root_vdev);

        spa_config_exit(spa, SCL_ALL, FTAG);

        txg_wait_synced(spa->spa_dsl_pool, 0);

        /*
         * Sync the updated config cache.
         */
        spa_config_sync(spa, B_FALSE, B_TRUE);

        spa_close(spa, FTAG);

        mutex_exit(&spa_namespace_lock);

        return (0);
}
01108 
/*
 * Return the spa_t whose pool guid matches 'pool_guid' and which contains
 * a device with guid 'device_guid' — or any device, if device_guid is 0.
 * Uninitialized pools and pools without a root vdev are skipped; devices
 * pending addition (spa_pending_vdev) are also searched.  Returns NULL if
 * no pool matches.  Caller must hold spa_namespace_lock.
 */
spa_t *
spa_by_guid(uint64_t pool_guid, uint64_t device_guid)
{
        spa_t *spa;
        avl_tree_t *t = &spa_namespace_avl;

        ASSERT(MUTEX_HELD(&spa_namespace_lock));

        /* On a match we 'break' with spa set; exhausting the loop gives NULL. */
        for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
                if (spa->spa_state == POOL_STATE_UNINITIALIZED)
                        continue;
                if (spa->spa_root_vdev == NULL)
                        continue;
                if (spa_guid(spa) == pool_guid) {
                        if (device_guid == 0)
                                break;

                        if (vdev_lookup_by_guid(spa->spa_root_vdev,
                            device_guid) != NULL)
                                break;

                        /*
                         * Check any devices we may be in the process of adding.
                         */
                        if (spa->spa_pending_vdev) {
                                if (vdev_lookup_by_guid(spa->spa_pending_vdev,
                                    device_guid) != NULL)
                                        break;
                        }
                }
        }

        return (spa);
}
01148 
01152 boolean_t
01153 spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
01154 {
01155         return (spa_by_guid(pool_guid, device_guid) != NULL);
01156 }
01157 
01158 char *
01159 spa_strdup(const char *s)
01160 {
01161         size_t len;
01162         char *new;
01163 
01164         len = strlen(s);
01165         new = kmem_alloc(len + 1, KM_SLEEP);
01166         bcopy(s, new, len);
01167         new[len] = '\0';
01168 
01169         return (new);
01170 }
01171 
01172 void
01173 spa_strfree(char *s)
01174 {
01175         kmem_free(s, strlen(s) + 1);
01176 }
01177 
01178 uint64_t
01179 spa_get_random(uint64_t range)
01180 {
01181         uint64_t r;
01182 
01183         ASSERT(range != 0);
01184 
01185         (void) random_get_pseudo_bytes((void *)&r, sizeof (uint64_t));
01186 
01187         return (r % range);
01188 }
01189 
01190 uint64_t
01191 spa_generate_guid(spa_t *spa)
01192 {
01193         uint64_t guid = spa_get_random(-1ULL);
01194 
01195         if (spa != NULL) {
01196                 while (guid == 0 || spa_guid_exists(spa_guid(spa), guid))
01197                         guid = spa_get_random(-1ULL);
01198         } else {
01199                 while (guid == 0 || spa_guid_exists(guid, 0))
01200                         guid = spa_get_random(-1ULL);
01201         }
01202 
01203         return (guid);
01204 }
01205 
/*
 * Render a block pointer as a human-readable string into 'buf' via the
 * SPRINTF_BLKPTR() macro, resolving the type, checksum, and compression
 * names from their respective tables.  A NULL bp is passed through to the
 * macro with NULL name strings.
 */
void
sprintf_blkptr(char *buf, const blkptr_t *bp)
{
	char type[256];
	char *checksum = NULL;
	char *compress = NULL;

	if (bp != NULL) {
		if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) {
			/*
			 * "New"-style types encode metadata-vs-data plus a
			 * byteswap function instead of a legacy type index.
			 */
			dmu_object_byteswap_t bswap =
			    DMU_OT_BYTESWAP(BP_GET_TYPE(bp));
			(void) snprintf(type, sizeof (type), "bswap %s %s",
			    DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) ?
			    "metadata" : "data",
			    dmu_ot_byteswap[bswap].ob_name);
		} else {
			/* Legacy types index directly into dmu_ot[]. */
			(void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name,
			    sizeof (type));
		}
		checksum = zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name;
		compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name;
	}

	SPRINTF_BLKPTR(snprintf, ' ', buf, bp, type, checksum, compress);
}
01231 
01232 void
01233 spa_freeze(spa_t *spa)
01234 {
01235         uint64_t freeze_txg = 0;
01236 
01237         spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
01238         if (spa->spa_freeze_txg == UINT64_MAX) {
01239                 freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE;
01240                 spa->spa_freeze_txg = freeze_txg;
01241         }
01242         spa_config_exit(spa, SCL_ALL, FTAG);
01243         if (freeze_txg != 0)
01244                 txg_wait_synced(spa_get_dsl(spa), freeze_txg);
01245 }
01246 
/*
 * Report a fatal inconsistency.  If the 'zfs_recover' tunable is set, emit
 * a warning and attempt to continue; otherwise panic the system.
 */
void
zfs_panic_recover(const char *fmt, ...)
{
	va_list adx;

	va_start(adx, fmt);
	vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx);
	va_end(adx);
}
01256 
01261 uint64_t
01262 zfs_strtonum(const char *str, char **nptr)
01263 {
01264         uint64_t val = 0;
01265         char c;
01266         int digit;
01267 
01268         while ((c = *str) != '\0') {
01269                 if (c >= '0' && c <= '9')
01270                         digit = c - '0';
01271                 else if (c >= 'a' && c <= 'f')
01272                         digit = 10 + c - 'a';
01273                 else
01274                         break;
01275 
01276                 val *= 16;
01277                 val += digit;
01278 
01279                 str++;
01280         }
01281 
01282         if (nptr)
01283                 *nptr = (char *)str;
01284 
01285         return (val);
01286 }
01287 
01288 /*
01289  * ==========================================================================
01290  * Accessor functions
01291  * ==========================================================================
01292  */
01293 
/* Return B_TRUE if async tasks on this pool have been suspended. */
boolean_t
spa_shutting_down(spa_t *spa)
{
	return (spa->spa_async_suspended);
}

/* Return the pool's DSL pool. */
dsl_pool_t *
spa_get_dsl(spa_t *spa)
{
	return (spa->spa_dsl_pool);
}

/* Return B_TRUE while the pool is still being initialized. */
boolean_t
spa_is_initializing(spa_t *spa)
{
	return (spa->spa_is_initializing);
}

/* Return the root block pointer of the last-synced uberblock. */
blkptr_t *
spa_get_rootblkptr(spa_t *spa)
{
	return (&spa->spa_ubsync.ub_rootbp);
}

/* Set the root block pointer in the in-core (syncing) uberblock. */
void
spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp)
{
	spa->spa_uberblock.ub_rootbp = *bp;
}
01323 
01324 void
01325 spa_altroot(spa_t *spa, char *buf, size_t buflen)
01326 {
01327         if (spa->spa_root == NULL)
01328                 buf[0] = '\0';
01329         else
01330                 (void) strncpy(buf, spa->spa_root, buflen);
01331 }
01332 
/* Return the current sync pass number within the syncing txg. */
int
spa_sync_pass(spa_t *spa)
{
	return (spa->spa_sync_pass);
}

/* Return the pool's name. */
char *
spa_name(spa_t *spa)
{
	return (spa->spa_name);
}
01344 
/*
 * Return the pool's GUID.  Outside of syncing context this is the most
 * recently synced GUID (falling back to the root vdev's GUID when none
 * has been synced yet); inside syncing context the root vdev's current
 * GUID is returned so that an in-flight GUID change is visible there.
 */
uint64_t
spa_guid(spa_t *spa)
{
	dsl_pool_t *dp = spa_get_dsl(spa);
	uint64_t guid;

	/*
	 * If we fail to parse the config during spa_load(), we can go through
	 * the error path (which posts an ereport) and end up here with no root
	 * vdev.  We stash the original pool guid in 'spa_config_guid' to handle
	 * this case.
	 */
	if (spa->spa_root_vdev == NULL)
		return (spa->spa_config_guid);

	guid = spa->spa_last_synced_guid != 0 ?
	    spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid;

	/*
	 * Return the most recently synced out guid unless we're
	 * in syncing context.
	 */
	if (dp && dsl_pool_sync_context(dp))
		return (spa->spa_root_vdev->vdev_guid);
	else
		return (guid);
}
01372 
/* Return the transient, load-time GUID used only as an ARC reference. */
uint64_t
spa_load_guid(spa_t *spa)
{
	/*
	 * This is a GUID that exists solely as a reference for the
	 * purposes of the arc.  It is generated at load time, and
	 * is never written to persistent storage.
	 */
	return (spa->spa_load_guid);
}

/* Return the txg of the last uberblock synced to disk. */
uint64_t
spa_last_synced_txg(spa_t *spa)
{
	return (spa->spa_ubsync.ub_txg);
}

/* Return the first txg of the pool's current open/import. */
uint64_t
spa_first_txg(spa_t *spa)
{
	return (spa->spa_first_txg);
}

/* Return the txg currently being synced. */
uint64_t
spa_syncing_txg(spa_t *spa)
{
	return (spa->spa_syncing_txg);
}

/* Return the pool's state. */
pool_state_t
spa_state(spa_t *spa)
{
	return (spa->spa_state);
}

/* Return the pool's load state. */
spa_load_state_t
spa_load_state(spa_t *spa)
{
	return (spa->spa_load_state);
}

/* Return the txg at which the pool freezes, or UINT64_MAX if not frozen. */
uint64_t
spa_freeze_txg(spa_t *spa)
{
	return (spa->spa_freeze_txg);
}
01419 
/*
 * Return a worst-case upper bound on the allocated (on-disk) size needed
 * to store 'lsize' logical bytes.
 */
/* ARGSUSED */
uint64_t
spa_get_asize(spa_t *spa, uint64_t lsize)
{
	/*
	 * The worst case is single-sector max-parity RAID-Z blocks, in which
	 * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
	 * times the size; so just assume that.  Add to this the fact that
	 * we can have up to 3 DVAs per bp, and one more factor of 2 because
	 * the block may be dittoed with up to 3 DVAs by ddt_sync().
	 */
	return (lsize * (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2);
}
01433 
/* Return the cached deflated-space total for the pool. */
uint64_t
spa_get_dspace(spa_t *spa)
{
	return (spa->spa_dspace);
}

/*
 * Recompute the cached dspace value: the normal class's dspace plus the
 * dedup adjustment from ddt_get_dedup_dspace().
 */
void
spa_update_dspace(spa_t *spa)
{
	spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) +
	    ddt_get_dedup_dspace(spa);
}
01446 
/* Return the pool's configured failure-mode policy. */
uint8_t
spa_get_failmode(spa_t *spa)
{
	return (spa->spa_failmode);
}

/* Return B_TRUE if I/O on the pool is currently suspended. */
boolean_t
spa_suspended(spa_t *spa)
{
	return (spa->spa_suspended);
}

/* Return the on-disk SPA version from the last-synced uberblock. */
uint64_t
spa_version(spa_t *spa)
{
	return (spa->spa_ubsync.ub_version);
}

/* Return B_TRUE if the pool uses deflated space accounting. */
boolean_t
spa_deflate(spa_t *spa)
{
	return (spa->spa_deflate);
}

/* Return the metaslab class used for normal data allocations. */
metaslab_class_t *
spa_normal_class(spa_t *spa)
{
	return (spa->spa_normal_class);
}

/* Return the metaslab class used for intent-log allocations. */
metaslab_class_t *
spa_log_class(spa_t *spa)
{
	return (spa->spa_log_class);
}

/*
 * Return the maximum number of DVAs (copies) a single block pointer may
 * have in this pool.
 */
int
spa_max_replication(spa_t *spa)
{
	/*
	 * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to
	 * handle BPs with more than one DVA allocated.  Set our max
	 * replication level accordingly.
	 */
	if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS)
		return (1);
	return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override));
}

/* Return the previous software version recorded for this pool. */
int
spa_prev_software_version(spa_t *spa)
{
	return (spa->spa_prev_software_version);
}
01505 
/*
 * Convert a DVA's allocated size to its deflated size.  The caller must
 * hold at least one spa config lock as reader, since vdev_lookup_top()
 * walks the vdev tree.
 */
uint64_t
dva_get_dsize_sync(spa_t *spa, const dva_t *dva)
{
	uint64_t asize = DVA_GET_ASIZE(dva);
	uint64_t dsize = asize;

	ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);

	if (asize != 0 && spa->spa_deflate) {
		/* Scale by the owning top-level vdev's deflate ratio. */
		vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
		dsize = (asize >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio;
	}

	return (dsize);
}
01521 
01522 uint64_t
01523 bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp)
01524 {
01525         uint64_t dsize = 0;
01526 
01527         for (int d = 0; d < SPA_DVAS_PER_BP; d++)
01528                 dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);
01529 
01530         return (dsize);
01531 }
01532 
01533 uint64_t
01534 bp_get_dsize(spa_t *spa, const blkptr_t *bp)
01535 {
01536         uint64_t dsize = 0;
01537 
01538         spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
01539 
01540         for (int d = 0; d < SPA_DVAS_PER_BP; d++)
01541                 dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);
01542 
01543         spa_config_exit(spa, SCL_VDEV, FTAG);
01544 
01545         return (dsize);
01546 }
01547 
01548 /*
01549  * ==========================================================================
01550  * Initialization and Termination
01551  * ==========================================================================
01552  */
01553 
01554 static int
01555 spa_name_compare(const void *a1, const void *a2)
01556 {
01557         const spa_t *s1 = a1;
01558         const spa_t *s2 = a2;
01559         int s;
01560 
01561         s = strcmp(s1->spa_name, s2->spa_name);
01562         if (s > 0)
01563                 return (1);
01564         if (s < 0)
01565                 return (-1);
01566         return (0);
01567 }
01568 
/* Return the number of pools currently counted active (spa_active_count). */
int
spa_busy(void)
{
	return (spa_active_count);
}
01574 
/*
 * Early boot-time initialization: load the cached pool configuration.
 * (Declared with an explicit (void) parameter list; the original empty
 * parentheses declared a function with unspecified parameters in C.)
 */
void
spa_boot_init(void)
{
	spa_config_load();
}
01580 
/*
 * One-time module initialization: create the global namespace, spare, and
 * l2cache locks and AVL trees, record the global open mode, then bring up
 * the subsystems (refcounts, unique, zio, dmu, zil, vdev cache stats, and
 * the property/feature tables) in order.  Finishes by loading the cached
 * pool configuration and starting the l2arc feed thread.
 */
void
spa_init(int mode)
{
	mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL);

	avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
	    offsetof(spa_t, spa_avl));

	avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t),
	    offsetof(spa_aux_t, aux_avl));

	spa_mode_global = mode;

#ifdef illumos
#ifndef _KERNEL
	/*
	 * In userland on illumos, optionally enable ARC buffer watchpoints
	 * (dprintf string "watch") by opening the /proc self-control file.
	 */
	if (spa_mode_global != FREAD && dprintf_find_string("watch")) {
		arc_procfd = open("/proc/self/ctl", O_WRONLY);
		if (arc_procfd == -1) {
			perror("could not enable watchpoints: "
			    "opening /proc/self/ctl failed: ");
		} else {
			arc_watch = B_TRUE;
		}
	}
#endif
#endif /* illumos */
	refcount_sysinit();
	unique_init();
	zio_init();
	dmu_init();
	zil_init();
	vdev_cache_stat_init();
	zfs_prop_init();
	zpool_prop_init();
	zpool_feature_init();
	spa_config_load();
	l2arc_start();
}
01625 
/*
 * Module teardown: stop the l2arc thread, evict all pools, shut the
 * subsystems down in the reverse order of spa_init(), and finally destroy
 * the global AVL trees, condition variable, and locks.
 */
void
spa_fini(void)
{
	l2arc_stop();

	spa_evict_all();

	vdev_cache_stat_fini();
	zil_fini();
	dmu_fini();
	zio_fini();
	unique_fini();
	refcount_fini();

	avl_destroy(&spa_namespace_avl);
	avl_destroy(&spa_spare_avl);
	avl_destroy(&spa_l2cache_avl);

	cv_destroy(&spa_namespace_cv);
	mutex_destroy(&spa_namespace_lock);
	mutex_destroy(&spa_spare_lock);
	mutex_destroy(&spa_l2cache_lock);
}
01649 
/*
 * Return B_TRUE if the pool has dedicated log devices (the log metaslab
 * class has at least one vdev in its rotor).
 */
boolean_t
spa_has_slogs(spa_t *spa)
{
	return (spa->spa_log_class->mc_rotor != NULL);
}

/* Return the pool's current intent-log state. */
spa_log_state_t
spa_get_log_state(spa_t *spa)
{
	return (spa->spa_log_state);
}

/* Set the pool's intent-log state. */
void
spa_set_log_state(spa_t *spa, spa_log_state_t state)
{
	spa->spa_log_state = state;
}

/* Return B_TRUE if this is the root pool. */
boolean_t
spa_is_root(spa_t *spa)
{
	return (spa->spa_is_root);
}

/* Return B_TRUE if the pool was opened writable (FWRITE set). */
boolean_t
spa_writeable(spa_t *spa)
{
	return (!!(spa->spa_mode & FWRITE));
}

/* Return the FREAD/FWRITE mode the pool was opened with. */
int
spa_mode(spa_t *spa)
{
	return (spa->spa_mode);
}

/* Return the pool's bootfs value. */
uint64_t
spa_bootfs(spa_t *spa)
{
	return (spa->spa_bootfs);
}

/* Return the pool's delegation property value. */
uint64_t
spa_delegation(spa_t *spa)
{
	return (spa->spa_delegation);
}

/* Return the pool's meta-objset (MOS). */
objset_t *
spa_meta_objset(spa_t *spa)
{
	return (spa->spa_meta_objset);
}

/* Return the checksum algorithm used for dedup. */
enum zio_checksum
spa_dedup_checksum(spa_t *spa)
{
	return (spa->spa_dedup_checksum);
}
01714 
/*
 * Reset the per-pass scan statistics.  These are kept in core only and
 * are never written to disk.
 */
void
spa_scan_stat_init(spa_t *spa)
{
	/* data not stored on disk */
	spa->spa_scan_pass_start = gethrestime_sec();
	spa->spa_scan_pass_exam = 0;
	vdev_scan_stat_init(spa->spa_root_vdev);
}
01726 
/*
 * Fill in 'ps' with the pool's current scan statistics.  Returns ENOENT
 * if no scan has been configured on this pool, 0 otherwise.
 */
int
spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps)
{
	dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL;

	if (scn == NULL || scn->scn_phys.scn_func == POOL_SCAN_NONE)
		return (ENOENT);
	bzero(ps, sizeof (pool_scan_stat_t));

	/* data stored on disk */
	ps->pss_func = scn->scn_phys.scn_func;
	ps->pss_start_time = scn->scn_phys.scn_start_time;
	ps->pss_end_time = scn->scn_phys.scn_end_time;
	ps->pss_to_examine = scn->scn_phys.scn_to_examine;
	ps->pss_examined = scn->scn_phys.scn_examined;
	ps->pss_to_process = scn->scn_phys.scn_to_process;
	ps->pss_processed = scn->scn_phys.scn_processed;
	ps->pss_errors = scn->scn_phys.scn_errors;
	ps->pss_state = scn->scn_phys.scn_state;

	/* data not stored on disk */
	ps->pss_pass_start = spa->spa_scan_pass_start;
	ps->pss_pass_exam = spa->spa_scan_pass_exam;

	return (0);
}
01756 
/* Return B_TRUE if debugging is enabled for this pool. */
boolean_t
spa_debug_enabled(spa_t *spa)
{
	return (spa->spa_debug);
}
 All Data Structures Files Functions Variables Typedefs Enumerations Enumerator Defines