FreeBSD ZFS
The Zettabyte File System

dnode.c

Go to the documentation of this file.
00001 /*
00002  * CDDL HEADER START
00003  *
00004  * The contents of this file are subject to the terms of the
00005  * Common Development and Distribution License (the "License").
00006  * You may not use this file except in compliance with the License.
00007  *
00008  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
00009  * or http://www.opensolaris.org/os/licensing.
00010  * See the License for the specific language governing permissions
00011  * and limitations under the License.
00012  *
00013  * When distributing Covered Code, include this CDDL HEADER in each
00014  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
00015  * If applicable, add the following below this CDDL HEADER, with the
00016  * fields enclosed by brackets "[]" replaced with your own identifying
00017  * information: Portions Copyright [yyyy] [name of copyright owner]
00018  *
00019  * CDDL HEADER END
00020  */
00021 /*
00022  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
00023  * Copyright (c) 2012 by Delphix. All rights reserved.
00024  */
00025 
00026 #include <sys/zfs_context.h>
00027 #include <sys/dbuf.h>
00028 #include <sys/dnode.h>
00029 #include <sys/dmu.h>
00030 #include <sys/dmu_impl.h>
00031 #include <sys/dmu_tx.h>
00032 #include <sys/dmu_objset.h>
00033 #include <sys/dsl_dir.h>
00034 #include <sys/dsl_dataset.h>
00035 #include <sys/spa.h>
00036 #include <sys/zio.h>
00037 #include <sys/dmu_zfetch.h>
00038 
/* AVL comparator for the per-txg free-range trees (defined below). */
static int free_range_compar(const void *node1, const void *node2);

/* kmem cache from which all dnode_t structures are allocated. */
static kmem_cache_t *dnode_cache;
/* Collect dnode_move() statistics on DEBUG builds only. */
#ifdef	DEBUG
#define	DNODE_STATS
#endif	/* DEBUG */

#ifdef	DNODE_STATS
#define	DNODE_STAT_ADD(stat)			((stat)++)
#else
#define	DNODE_STAT_ADD(stat)			/* nothing */
#endif	/* DNODE_STATS */

/* All-zero dnode_phys_t used to assert that a dnode is still unallocated. */
static dnode_phys_t dnode_phys_zero;

/* Default data block shift and indirect block shift for new dnodes. */
int zfs_default_bs = SPA_MINBLOCKSHIFT;
int zfs_default_ibs = DN_MAX_INDBLKSHIFT;

/* kmem move callback; only provided on Solaris-derived kernels. */
#ifdef sun
static kmem_cbrc_t dnode_move(void *, void *, size_t, void *);
#endif
00064 
00065 /* ARGSUSED */
/*
 * kmem cache constructor for dnode_t.  Initializes the embedded locks,
 * lists and refcounts and zeroes all per-txg bookkeeping so that a
 * freshly allocated dnode is in exactly the state dnode_dest() asserts
 * on free.  Always succeeds (returns 0); kmflag is unused.
 */
/* ARGSUSED */
static int
dnode_cons(void *arg, void *unused, int kmflag)
{
        dnode_t *dn = arg;
        int i;

        rw_init(&dn->dn_struct_rwlock, NULL, RW_DEFAULT, NULL);
        mutex_init(&dn->dn_mtx, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&dn->dn_dbufs_mtx, NULL, MUTEX_DEFAULT, NULL);
        cv_init(&dn->dn_notxholds, NULL, CV_DEFAULT, NULL);

        refcount_create(&dn->dn_holds);
        refcount_create(&dn->dn_tx_holds);
        list_link_init(&dn->dn_link);

        /* Zero the per-txg "pending change" arrays (one slot per open txg). */
        bzero(&dn->dn_next_nblkptr[0], sizeof (dn->dn_next_nblkptr));
        bzero(&dn->dn_next_nlevels[0], sizeof (dn->dn_next_nlevels));
        bzero(&dn->dn_next_indblkshift[0], sizeof (dn->dn_next_indblkshift));
        bzero(&dn->dn_next_bonustype[0], sizeof (dn->dn_next_bonustype));
        bzero(&dn->dn_rm_spillblk[0], sizeof (dn->dn_rm_spillblk));
        bzero(&dn->dn_next_bonuslen[0], sizeof (dn->dn_next_bonuslen));
        bzero(&dn->dn_next_blksz[0], sizeof (dn->dn_next_blksz));

        /* Per-txg dirty links, free-range trees and dirty record lists. */
        for (i = 0; i < TXG_SIZE; i++) {
                list_link_init(&dn->dn_dirty_link[i]);
                avl_create(&dn->dn_ranges[i], free_range_compar,
                    sizeof (free_range_t),
                    offsetof(struct free_range, fr_node));
                list_create(&dn->dn_dirty_records[i],
                    sizeof (dbuf_dirty_record_t),
                    offsetof(dbuf_dirty_record_t, dr_dirty_node));
        }

        dn->dn_allocated_txg = 0;
        dn->dn_free_txg = 0;
        dn->dn_assigned_txg = 0;
        dn->dn_dirtyctx = 0;
        dn->dn_dirtyctx_firstset = NULL;
        dn->dn_bonus = NULL;
        dn->dn_have_spill = B_FALSE;
        dn->dn_zio = NULL;
        dn->dn_oldused = 0;
        dn->dn_oldflags = 0;
        dn->dn_olduid = 0;
        dn->dn_oldgid = 0;
        dn->dn_newuid = 0;
        dn->dn_newgid = 0;
        dn->dn_id_flags = 0;

        dn->dn_dbufs_count = 0;
        list_create(&dn->dn_dbufs, sizeof (dmu_buf_impl_t),
            offsetof(dmu_buf_impl_t, db_link));

        dn->dn_moved = 0;
        /* Invalid objset pointer marks the dnode unknown to dnode_move(). */
        POINTER_INVALIDATE(&dn->dn_objset);
        return (0);
}
00123 
00124 /* ARGSUSED */
00125 static void
00126 dnode_dest(void *arg, void *unused)
00127 {
00128         int i;
00129         dnode_t *dn = arg;
00130 
00131         rw_destroy(&dn->dn_struct_rwlock);
00132         mutex_destroy(&dn->dn_mtx);
00133         mutex_destroy(&dn->dn_dbufs_mtx);
00134         cv_destroy(&dn->dn_notxholds);
00135         refcount_destroy(&dn->dn_holds);
00136         refcount_destroy(&dn->dn_tx_holds);
00137         ASSERT(!list_link_active(&dn->dn_link));
00138 
00139         for (i = 0; i < TXG_SIZE; i++) {
00140                 ASSERT(!list_link_active(&dn->dn_dirty_link[i]));
00141                 avl_destroy(&dn->dn_ranges[i]);
00142                 list_destroy(&dn->dn_dirty_records[i]);
00143                 ASSERT0(dn->dn_next_nblkptr[i]);
00144                 ASSERT0(dn->dn_next_nlevels[i]);
00145                 ASSERT0(dn->dn_next_indblkshift[i]);
00146                 ASSERT0(dn->dn_next_bonustype[i]);
00147                 ASSERT0(dn->dn_rm_spillblk[i]);
00148                 ASSERT0(dn->dn_next_bonuslen[i]);
00149                 ASSERT0(dn->dn_next_blksz[i]);
00150         }
00151 
00152         ASSERT0(dn->dn_allocated_txg);
00153         ASSERT0(dn->dn_free_txg);
00154         ASSERT0(dn->dn_assigned_txg);
00155         ASSERT0(dn->dn_dirtyctx);
00156         ASSERT3P(dn->dn_dirtyctx_firstset, ==, NULL);
00157         ASSERT3P(dn->dn_bonus, ==, NULL);
00158         ASSERT(!dn->dn_have_spill);
00159         ASSERT3P(dn->dn_zio, ==, NULL);
00160         ASSERT0(dn->dn_oldused);
00161         ASSERT0(dn->dn_oldflags);
00162         ASSERT0(dn->dn_olduid);
00163         ASSERT0(dn->dn_oldgid);
00164         ASSERT0(dn->dn_newuid);
00165         ASSERT0(dn->dn_newgid);
00166         ASSERT0(dn->dn_id_flags);
00167 
00168         ASSERT0(dn->dn_dbufs_count);
00169         list_destroy(&dn->dn_dbufs);
00170 }
00171 
00172 void
00173 dnode_init(void)
00174 {
00175         ASSERT(dnode_cache == NULL);
00176         dnode_cache = kmem_cache_create("dnode_t",
00177             sizeof (dnode_t),
00178             0, dnode_cons, dnode_dest, NULL, NULL, NULL, 0);
00179         kmem_cache_set_move(dnode_cache, dnode_move);
00180 }
00181 
00182 void
00183 dnode_fini(void)
00184 {
00185         kmem_cache_destroy(dnode_cache);
00186         dnode_cache = NULL;
00187 }
00188 
00189 
#ifdef ZFS_DEBUG
/*
 * Debug-only consistency check of an in-core dnode against its on-disk
 * dnode_phys_t.  The cheap assertions always run; the expensive block
 * is gated on ZFS_DEBUG_DNODE_VERIFY in zfs_flags.  Takes
 * dn_struct_rwlock as reader if the caller does not already hold it
 * as writer.
 */
void
dnode_verify(dnode_t *dn)
{
        int drop_struct_lock = FALSE;

        ASSERT(dn->dn_phys);
        ASSERT(dn->dn_objset);
        ASSERT(dn->dn_handle->dnh_dnode == dn);

        ASSERT(DMU_OT_IS_VALID(dn->dn_phys->dn_type));

        if (!(zfs_flags & ZFS_DEBUG_DNODE_VERIFY))
                return;

        if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
                rw_enter(&dn->dn_struct_rwlock, RW_READER);
                drop_struct_lock = TRUE;
        }
        /* Only allocated (or being-allocated) dnodes carry valid geometry. */
        if (dn->dn_phys->dn_type != DMU_OT_NONE || dn->dn_allocated_txg != 0) {
                int i;
                ASSERT3U(dn->dn_indblkshift, >=, 0);
                ASSERT3U(dn->dn_indblkshift, <=, SPA_MAXBLOCKSHIFT);
                /* dn_datablkshift is nonzero only for power-of-two sizes. */
                if (dn->dn_datablkshift) {
                        ASSERT3U(dn->dn_datablkshift, >=, SPA_MINBLOCKSHIFT);
                        ASSERT3U(dn->dn_datablkshift, <=, SPA_MAXBLOCKSHIFT);
                        ASSERT3U(1<<dn->dn_datablkshift, ==, dn->dn_datablksz);
                }
                ASSERT3U(dn->dn_nlevels, <=, 30);
                ASSERT(DMU_OT_IS_VALID(dn->dn_type));
                ASSERT3U(dn->dn_nblkptr, >=, 1);
                ASSERT3U(dn->dn_nblkptr, <=, DN_MAX_NBLKPTR);
                ASSERT3U(dn->dn_bonuslen, <=, DN_MAX_BONUSLEN);
                ASSERT3U(dn->dn_datablksz, ==,
                    dn->dn_datablkszsec << SPA_MINBLOCKSHIFT);
                ASSERT3U(ISP2(dn->dn_datablksz), ==, dn->dn_datablkshift != 0);
                /* Block pointers and bonus buffer share the same space. */
                ASSERT3U((dn->dn_nblkptr - 1) * sizeof (blkptr_t) +
                    dn->dn_bonuslen, <=, DN_MAX_BONUSLEN);
                for (i = 0; i < TXG_SIZE; i++) {
                        ASSERT3U(dn->dn_next_nlevels[i], <=, dn->dn_nlevels);
                }
        }
        if (dn->dn_phys->dn_type != DMU_OT_NONE)
                ASSERT3U(dn->dn_phys->dn_nlevels, <=, dn->dn_nlevels);
        ASSERT(DMU_OBJECT_IS_SPECIAL(dn->dn_object) || dn->dn_dbuf != NULL);
        if (dn->dn_dbuf != NULL) {
                /* dn_phys must point at this object's slot in its dbuf. */
                ASSERT3P(dn->dn_phys, ==,
                    (dnode_phys_t *)dn->dn_dbuf->db.db_data +
                    (dn->dn_object % (dn->dn_dbuf->db.db_size >> DNODE_SHIFT)));
        }
        if (drop_struct_lock)
                rw_exit(&dn->dn_struct_rwlock);
}
#endif
00244 
/*
 * Byteswap a single on-disk dnode in place (data read in the opposite
 * byte order).  A DMU_OT_NONE dnode is simply zeroed.
 */
void
dnode_byteswap(dnode_phys_t *dnp)
{
        uint64_t *buf64 = (void*)&dnp->dn_blkptr;
        int i;

        if (dnp->dn_type == DMU_OT_NONE) {
                bzero(dnp, sizeof (dnode_phys_t));
                return;
        }

        dnp->dn_datablkszsec = BSWAP_16(dnp->dn_datablkszsec);
        dnp->dn_bonuslen = BSWAP_16(dnp->dn_bonuslen);
        dnp->dn_maxblkid = BSWAP_64(dnp->dn_maxblkid);
        dnp->dn_used = BSWAP_64(dnp->dn_used);

        /*
         * dn_nblkptr is only one byte, so it's OK to read it in either
         * byte order.  We can't read dn_bonuslen.
         */
        ASSERT(dnp->dn_indblkshift <= SPA_MAXBLOCKSHIFT);
        ASSERT(dnp->dn_nblkptr <= DN_MAX_NBLKPTR);
        /* Swap the block pointers as an array of 64-bit words. */
        for (i = 0; i < dnp->dn_nblkptr * sizeof (blkptr_t)/8; i++)
                buf64[i] = BSWAP_64(buf64[i]);

        /*
         * OK to check dn_bonuslen for zero, because it won't matter if
         * we have the wrong byte order.  This is necessary because the
         * dnode dnode is smaller than a regular dnode.
         */
        if (dnp->dn_bonuslen != 0) {
                /*
                 * Note that the bonus length calculated here may be
                 * longer than the actual bonus buffer.  This is because
                 * we always put the bonus buffer after the last block
                 * pointer (instead of packing it against the end of the
                 * dnode buffer).
                 */
                int off = (dnp->dn_nblkptr-1) * sizeof (blkptr_t);
                size_t len = DN_MAX_BONUSLEN - off;
                ASSERT(DMU_OT_IS_VALID(dnp->dn_bonustype));
                /* Each bonus type supplies its own byteswap routine. */
                dmu_object_byteswap_t byteswap =
                    DMU_OT_BYTESWAP(dnp->dn_bonustype);
                dmu_ot_byteswap[byteswap].ob_func(dnp->dn_bonus + off, len);
        }

        /* Swap SPILL block if we have one */
        if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)
                byteswap_uint64_array(&dnp->dn_spill, sizeof (blkptr_t));

}
00296 
00297 void
00298 dnode_buf_byteswap(void *vbuf, size_t size)
00299 {
00300         dnode_phys_t *buf = vbuf;
00301         int i;
00302 
00303         ASSERT3U(sizeof (dnode_phys_t), ==, (1<<DNODE_SHIFT));
00304         ASSERT((size & (sizeof (dnode_phys_t)-1)) == 0);
00305 
00306         size >>= DNODE_SHIFT;
00307         for (i = 0; i < size; i++) {
00308                 dnode_byteswap(buf);
00309                 buf++;
00310         }
00311 }
00312 
00313 static int
00314 free_range_compar(const void *node1, const void *node2)
00315 {
00316         const free_range_t *rp1 = node1;
00317         const free_range_t *rp2 = node2;
00318 
00319         if (rp1->fr_blkid < rp2->fr_blkid)
00320                 return (-1);
00321         else if (rp1->fr_blkid > rp2->fr_blkid)
00322                 return (1);
00323         else return (0);
00324 }
00325 
/*
 * Change the bonus buffer length of a held dnode to 'newsize' bytes and
 * record the change in this txg's dn_next_bonuslen slot.  A new length
 * of zero is recorded as the DN_ZERO_BONUSLEN sentinel — presumably so
 * the sync path can tell "set to zero" apart from "unchanged" (a slot
 * value of 0); confirm against the sync code.
 */
void
dnode_setbonuslen(dnode_t *dn, int newsize, dmu_tx_t *tx)
{
        ASSERT3U(refcount_count(&dn->dn_holds), >=, 1);

        dnode_setdirty(dn, tx);
        rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
        /* The bonus buffer shares space with the trailing block pointers. */
        ASSERT3U(newsize, <=, DN_MAX_BONUSLEN -
            (dn->dn_nblkptr-1) * sizeof (blkptr_t));
        dn->dn_bonuslen = newsize;
        if (newsize == 0)
                dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = DN_ZERO_BONUSLEN;
        else
                dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = dn->dn_bonuslen;
        rw_exit(&dn->dn_struct_rwlock);
}
00342 
/*
 * Change the bonus buffer type of a held dnode and record the new type
 * in this txg's dn_next_bonustype slot, under dn_struct_rwlock.
 */
void
dnode_setbonus_type(dnode_t *dn, dmu_object_type_t newtype, dmu_tx_t *tx)
{
        ASSERT3U(refcount_count(&dn->dn_holds), >=, 1);
        dnode_setdirty(dn, tx);
        rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
        dn->dn_bonustype = newtype;
        dn->dn_next_bonustype[tx->tx_txg & TXG_MASK] = dn->dn_bonustype;
        rw_exit(&dn->dn_struct_rwlock);
}
00353 
/*
 * Schedule removal of the dnode's spill block: mark this txg's
 * dn_rm_spillblk slot with DN_KILL_SPILLBLK and clear dn_have_spill.
 * Caller must hold dn_struct_rwlock as writer (asserted).
 */
void
dnode_rm_spill(dnode_t *dn, dmu_tx_t *tx)
{
        ASSERT3U(refcount_count(&dn->dn_holds), >=, 1);
        ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
        dnode_setdirty(dn, tx);
        dn->dn_rm_spillblk[tx->tx_txg&TXG_MASK] = DN_KILL_SPILLBLK;
        dn->dn_have_spill = B_FALSE;
}
00363 
/*
 * Set the dnode's data block size (in bytes).  'size' must be a
 * SPA_MINBLOCKSIZE multiple in [SPA_MINBLOCKSIZE, SPA_MAXBLOCKSIZE]
 * whose sector count fits in dn_datablkszsec.  dn_datablkshift is
 * meaningful only for power-of-two sizes (highbit(size - 1) == log2(size)
 * then); it is 0 otherwise.
 */
static void
dnode_setdblksz(dnode_t *dn, int size)
{
        ASSERT0(P2PHASE(size, SPA_MINBLOCKSIZE));
        ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
        ASSERT3U(size, >=, SPA_MINBLOCKSIZE);
        ASSERT3U(size >> SPA_MINBLOCKSHIFT, <,
            1<<(sizeof (dn->dn_phys->dn_datablkszsec) * 8));
        dn->dn_datablksz = size;
        dn->dn_datablkszsec = size >> SPA_MINBLOCKSHIFT;
        dn->dn_datablkshift = ISP2(size) ? highbit(size - 1) : 0;
}
00376 
/*
 * Allocate and initialize an in-core dnode for the on-disk dnode 'dnp'
 * of object 'object' in objset 'os'.  'db' is the dbuf holding the
 * dnode block (NULL for special objects) and 'dnh' the handle that will
 * own the dnode.  Returns the new dnode, linked onto os_dnodes.
 */
static dnode_t *
dnode_create(objset_t *os, dnode_phys_t *dnp, dmu_buf_impl_t *db,
    uint64_t object, dnode_handle_t *dnh)
{
        dnode_t *dn = kmem_cache_alloc(dnode_cache, KM_SLEEP);

        ASSERT(!POINTER_IS_VALID(dn->dn_objset));
        dn->dn_moved = 0;

        /*
         * Defer setting dn_objset until the dnode is ready to be a candidate
         * for the dnode_move() callback.
         */
        dn->dn_object = object;
        dn->dn_dbuf = db;
        dn->dn_handle = dnh;
        dn->dn_phys = dnp;

        /* Mirror the on-disk geometry into the in-core dnode. */
        if (dnp->dn_datablkszsec) {
                dnode_setdblksz(dn, dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
        } else {
                dn->dn_datablksz = 0;
                dn->dn_datablkszsec = 0;
                dn->dn_datablkshift = 0;
        }
        dn->dn_indblkshift = dnp->dn_indblkshift;
        dn->dn_nlevels = dnp->dn_nlevels;
        dn->dn_type = dnp->dn_type;
        dn->dn_nblkptr = dnp->dn_nblkptr;
        dn->dn_checksum = dnp->dn_checksum;
        dn->dn_compress = dnp->dn_compress;
        dn->dn_bonustype = dnp->dn_bonustype;
        dn->dn_bonuslen = dnp->dn_bonuslen;
        dn->dn_maxblkid = dnp->dn_maxblkid;
        dn->dn_have_spill = ((dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) != 0);
        dn->dn_id_flags = 0;

        dmu_zfetch_init(&dn->dn_zfetch, dn);

        ASSERT(DMU_OT_IS_VALID(dn->dn_phys->dn_type));

        mutex_enter(&os->os_lock);
        list_insert_head(&os->os_dnodes, dn);
        /* Publish all prior stores before dn_objset becomes visible. */
        membar_producer();
        /*
         * Everything else must be valid before assigning dn_objset makes the
         * dnode eligible for dnode_move().
         */
        dn->dn_objset = os;
        mutex_exit(&os->os_lock);

        arc_space_consume(sizeof (dnode_t), ARC_SPACE_OTHER);
        return (dn);
}
00431 
/*
 * Undo dnode_create(): unlink the dnode from its objset, release its
 * handle, evict the bonus dbuf if present, reset all identity state and
 * return the dnode to the kmem cache.
 */
static void
dnode_destroy(dnode_t *dn)
{
        objset_t *os = dn->dn_objset;

        ASSERT((dn->dn_id_flags & DN_ID_NEW_EXIST) == 0);

        mutex_enter(&os->os_lock);
        /* Invalidating dn_objset makes dnode_move() ignore this dnode. */
        POINTER_INVALIDATE(&dn->dn_objset);
        list_remove(&os->os_dnodes, dn);
        mutex_exit(&os->os_lock);

        /* the dnode can no longer move, so we can release the handle */
        zrl_remove(&dn->dn_handle->dnh_zrlock);

        dn->dn_allocated_txg = 0;
        dn->dn_free_txg = 0;
        dn->dn_assigned_txg = 0;

        dn->dn_dirtyctx = 0;
        if (dn->dn_dirtyctx_firstset != NULL) {
                kmem_free(dn->dn_dirtyctx_firstset, 1);
                dn->dn_dirtyctx_firstset = NULL;
        }
        if (dn->dn_bonus != NULL) {
                /* NOTE(review): assumes dbuf_evict() drops db_mtx — confirm. */
                mutex_enter(&dn->dn_bonus->db_mtx);
                dbuf_evict(dn->dn_bonus);
                dn->dn_bonus = NULL;
        }
        dn->dn_zio = NULL;

        /* Reset remaining state so dnode_dest()'s assertions hold. */
        dn->dn_have_spill = B_FALSE;
        dn->dn_oldused = 0;
        dn->dn_oldflags = 0;
        dn->dn_olduid = 0;
        dn->dn_oldgid = 0;
        dn->dn_newuid = 0;
        dn->dn_newgid = 0;
        dn->dn_id_flags = 0;

        dmu_zfetch_rele(&dn->dn_zfetch);
        kmem_cache_free(dnode_cache, dn);
        arc_space_return(sizeof (dnode_t), ARC_SPACE_OTHER);
}
00479 
/*
 * Allocate (bring into existence) a previously free dnode as an object
 * of type 'ot'.  blocksize == 0 and ibs == 0 select the defaults;
 * blocksize is clamped to [SPA_MINBLOCKSIZE multiple, SPA_MAXBLOCKSIZE]
 * and ibs to [DN_MIN_INDBLKSHIFT, DN_MAX_INDBLKSHIFT].  A bonustype of
 * DMU_OT_SA requires bonuslen 0; any other non-NONE bonus type requires
 * a nonzero bonuslen (<= DN_MAX_BONUSLEN).  Dirties the dnode and
 * records the initial geometry in this txg's per-txg slots.
 */
void
dnode_allocate(dnode_t *dn, dmu_object_type_t ot, int blocksize, int ibs,
    dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
        int i;

        if (blocksize == 0)
                blocksize = 1 << zfs_default_bs;
        else if (blocksize > SPA_MAXBLOCKSIZE)
                blocksize = SPA_MAXBLOCKSIZE;
        else
                blocksize = P2ROUNDUP(blocksize, SPA_MINBLOCKSIZE);

        if (ibs == 0)
                ibs = zfs_default_ibs;

        ibs = MIN(MAX(ibs, DN_MIN_INDBLKSHIFT), DN_MAX_INDBLKSHIFT);

        dprintf("os=%p obj=%llu txg=%llu blocksize=%d ibs=%d\n", dn->dn_objset,
            dn->dn_object, tx->tx_txg, blocksize, ibs);

        /* The dnode must currently be free, on disk and in core. */
        ASSERT(dn->dn_type == DMU_OT_NONE);
        ASSERT(bcmp(dn->dn_phys, &dnode_phys_zero, sizeof (dnode_phys_t)) == 0);
        ASSERT(dn->dn_phys->dn_type == DMU_OT_NONE);
        ASSERT(ot != DMU_OT_NONE);
        ASSERT(DMU_OT_IS_VALID(ot));
        ASSERT((bonustype == DMU_OT_NONE && bonuslen == 0) ||
            (bonustype == DMU_OT_SA && bonuslen == 0) ||
            (bonustype != DMU_OT_NONE && bonuslen != 0));
        ASSERT(DMU_OT_IS_VALID(bonustype));
        ASSERT3U(bonuslen, <=, DN_MAX_BONUSLEN);
        ASSERT(dn->dn_type == DMU_OT_NONE);
        ASSERT0(dn->dn_maxblkid);
        ASSERT0(dn->dn_allocated_txg);
        ASSERT0(dn->dn_assigned_txg);
        ASSERT(refcount_is_zero(&dn->dn_tx_holds));
        ASSERT3U(refcount_count(&dn->dn_holds), <=, 1);
        ASSERT3P(list_head(&dn->dn_dbufs), ==, NULL);

        /* No per-txg changes or dirty state may be pending on a free dnode. */
        for (i = 0; i < TXG_SIZE; i++) {
                ASSERT0(dn->dn_next_nblkptr[i]);
                ASSERT0(dn->dn_next_nlevels[i]);
                ASSERT0(dn->dn_next_indblkshift[i]);
                ASSERT0(dn->dn_next_bonuslen[i]);
                ASSERT0(dn->dn_next_bonustype[i]);
                ASSERT0(dn->dn_rm_spillblk[i]);
                ASSERT0(dn->dn_next_blksz[i]);
                ASSERT(!list_link_active(&dn->dn_dirty_link[i]));
                ASSERT3P(list_head(&dn->dn_dirty_records[i]), ==, NULL);
                ASSERT0(avl_numnodes(&dn->dn_ranges[i]));
        }

        dn->dn_type = ot;
        dnode_setdblksz(dn, blocksize);
        dn->dn_indblkshift = ibs;
        dn->dn_nlevels = 1;
        if (bonustype == DMU_OT_SA) /* Maximize bonus space for SA */
                dn->dn_nblkptr = 1;
        else
                dn->dn_nblkptr = 1 +
                    ((DN_MAX_BONUSLEN - bonuslen) >> SPA_BLKPTRSHIFT);
        dn->dn_bonustype = bonustype;
        dn->dn_bonuslen = bonuslen;
        dn->dn_checksum = ZIO_CHECKSUM_INHERIT;
        dn->dn_compress = ZIO_COMPRESS_INHERIT;
        dn->dn_dirtyctx = 0;

        dn->dn_free_txg = 0;
        if (dn->dn_dirtyctx_firstset) {
                kmem_free(dn->dn_dirtyctx_firstset, 1);
                dn->dn_dirtyctx_firstset = NULL;
        }

        dn->dn_allocated_txg = tx->tx_txg;
        dn->dn_id_flags = 0;

        /* Record the new geometry for the syncing context. */
        dnode_setdirty(dn, tx);
        dn->dn_next_indblkshift[tx->tx_txg & TXG_MASK] = ibs;
        dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = dn->dn_bonuslen;
        dn->dn_next_bonustype[tx->tx_txg & TXG_MASK] = dn->dn_bonustype;
        dn->dn_next_blksz[tx->tx_txg & TXG_MASK] = dn->dn_datablksz;
}
00562 
/*
 * Re-allocate an existing object in place with new type, block size and
 * bonus type/length, evicting unreferenced dbufs and removing any spill
 * block first.  blocksize must be an exact legal value here (unlike
 * dnode_allocate(), no clamping is done — asserted).  A blocksize
 * change is only legal while the object has no data past block 0.
 */
void
dnode_reallocate(dnode_t *dn, dmu_object_type_t ot, int blocksize,
    dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
        int nblkptr;

        ASSERT3U(blocksize, >=, SPA_MINBLOCKSIZE);
        ASSERT3U(blocksize, <=, SPA_MAXBLOCKSIZE);
        ASSERT0(blocksize % SPA_MINBLOCKSIZE);
        ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT || dmu_tx_private_ok(tx));
        ASSERT(tx->tx_txg != 0);
        ASSERT((bonustype == DMU_OT_NONE && bonuslen == 0) ||
            (bonustype != DMU_OT_NONE && bonuslen != 0) ||
            (bonustype == DMU_OT_SA && bonuslen == 0));
        ASSERT(DMU_OT_IS_VALID(bonustype));
        ASSERT3U(bonuslen, <=, DN_MAX_BONUSLEN);

        /* clean up any unreferenced dbufs */
        dnode_evict_dbufs(dn);

        dn->dn_id_flags = 0;

        rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
        dnode_setdirty(dn, tx);
        if (dn->dn_datablksz != blocksize) {
                /* change blocksize */
                ASSERT(dn->dn_maxblkid == 0 &&
                    (BP_IS_HOLE(&dn->dn_phys->dn_blkptr[0]) ||
                    dnode_block_freed(dn, 0)));
                dnode_setdblksz(dn, blocksize);
                dn->dn_next_blksz[tx->tx_txg&TXG_MASK] = blocksize;
        }
        if (dn->dn_bonuslen != bonuslen)
                dn->dn_next_bonuslen[tx->tx_txg&TXG_MASK] = bonuslen;

        if (bonustype == DMU_OT_SA) /* Maximize bonus space for SA */
                nblkptr = 1;
        else
                nblkptr = 1 + ((DN_MAX_BONUSLEN - bonuslen) >> SPA_BLKPTRSHIFT);
        if (dn->dn_bonustype != bonustype)
                dn->dn_next_bonustype[tx->tx_txg&TXG_MASK] = bonustype;
        if (dn->dn_nblkptr != nblkptr)
                dn->dn_next_nblkptr[tx->tx_txg&TXG_MASK] = nblkptr;
        if (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
                dbuf_rm_spill(dn, tx);
                dnode_rm_spill(dn, tx);
        }
        rw_exit(&dn->dn_struct_rwlock);

        /* change type */
        dn->dn_type = ot;

        /* change bonus size and type */
        mutex_enter(&dn->dn_mtx);
        dn->dn_bonustype = bonustype;
        dn->dn_bonuslen = bonuslen;
        dn->dn_nblkptr = nblkptr;
        dn->dn_checksum = ZIO_CHECKSUM_INHERIT;
        dn->dn_compress = ZIO_COMPRESS_INHERIT;
        ASSERT3U(dn->dn_nblkptr, <=, DN_MAX_NBLKPTR);

        /* fix up the bonus db_size */
        if (dn->dn_bonus) {
                dn->dn_bonus->db.db_size =
                    DN_MAX_BONUSLEN - (dn->dn_nblkptr-1) * sizeof (blkptr_t);
                ASSERT(dn->dn_bonuslen <= dn->dn_bonus->db.db_size);
        }

        dn->dn_allocated_txg = tx->tx_txg;
        mutex_exit(&dn->dn_mtx);
}
00634 
#ifdef	DNODE_STATS
/*
 * Counters for the reasons dnode_move() declined or failed to move a
 * dnode; bumped via DNODE_STAT_ADD on DEBUG builds.
 */
static struct {
        uint64_t dms_dnode_invalid;
        uint64_t dms_dnode_recheck1;
        uint64_t dms_dnode_recheck2;
        uint64_t dms_dnode_special;
        uint64_t dms_dnode_handle;
        uint64_t dms_dnode_rwlock;
        uint64_t dms_dnode_active;
} dnode_move_stats;
#endif	/* DNODE_STATS */
00646 
00647 static void
00648 dnode_move_impl(dnode_t *odn, dnode_t *ndn)
00649 {
00650         int i;
00651 
00652         ASSERT(!RW_LOCK_HELD(&odn->dn_struct_rwlock));
00653         ASSERT(MUTEX_NOT_HELD(&odn->dn_mtx));
00654         ASSERT(MUTEX_NOT_HELD(&odn->dn_dbufs_mtx));
00655         ASSERT(!RW_LOCK_HELD(&odn->dn_zfetch.zf_rwlock));
00656 
00657         /* Copy fields. */
00658         ndn->dn_objset = odn->dn_objset;
00659         ndn->dn_object = odn->dn_object;
00660         ndn->dn_dbuf = odn->dn_dbuf;
00661         ndn->dn_handle = odn->dn_handle;
00662         ndn->dn_phys = odn->dn_phys;
00663         ndn->dn_type = odn->dn_type;
00664         ndn->dn_bonuslen = odn->dn_bonuslen;
00665         ndn->dn_bonustype = odn->dn_bonustype;
00666         ndn->dn_nblkptr = odn->dn_nblkptr;
00667         ndn->dn_checksum = odn->dn_checksum;
00668         ndn->dn_compress = odn->dn_compress;
00669         ndn->dn_nlevels = odn->dn_nlevels;
00670         ndn->dn_indblkshift = odn->dn_indblkshift;
00671         ndn->dn_datablkshift = odn->dn_datablkshift;
00672         ndn->dn_datablkszsec = odn->dn_datablkszsec;
00673         ndn->dn_datablksz = odn->dn_datablksz;
00674         ndn->dn_maxblkid = odn->dn_maxblkid;
00675         bcopy(&odn->dn_next_nblkptr[0], &ndn->dn_next_nblkptr[0],
00676             sizeof (odn->dn_next_nblkptr));
00677         bcopy(&odn->dn_next_nlevels[0], &ndn->dn_next_nlevels[0],
00678             sizeof (odn->dn_next_nlevels));
00679         bcopy(&odn->dn_next_indblkshift[0], &ndn->dn_next_indblkshift[0],
00680             sizeof (odn->dn_next_indblkshift));
00681         bcopy(&odn->dn_next_bonustype[0], &ndn->dn_next_bonustype[0],
00682             sizeof (odn->dn_next_bonustype));
00683         bcopy(&odn->dn_rm_spillblk[0], &ndn->dn_rm_spillblk[0],
00684             sizeof (odn->dn_rm_spillblk));
00685         bcopy(&odn->dn_next_bonuslen[0], &ndn->dn_next_bonuslen[0],
00686             sizeof (odn->dn_next_bonuslen));
00687         bcopy(&odn->dn_next_blksz[0], &ndn->dn_next_blksz[0],
00688             sizeof (odn->dn_next_blksz));
00689         for (i = 0; i < TXG_SIZE; i++) {
00690                 list_move_tail(&ndn->dn_dirty_records[i],
00691                     &odn->dn_dirty_records[i]);
00692         }
00693         bcopy(&odn->dn_ranges[0], &ndn->dn_ranges[0], sizeof (odn->dn_ranges));
00694         ndn->dn_allocated_txg = odn->dn_allocated_txg;
00695         ndn->dn_free_txg = odn->dn_free_txg;
00696         ndn->dn_assigned_txg = odn->dn_assigned_txg;
00697         ndn->dn_dirtyctx = odn->dn_dirtyctx;
00698         ndn->dn_dirtyctx_firstset = odn->dn_dirtyctx_firstset;
00699         ASSERT(refcount_count(&odn->dn_tx_holds) == 0);
00700         refcount_transfer(&ndn->dn_holds, &odn->dn_holds);
00701         ASSERT(list_is_empty(&ndn->dn_dbufs));
00702         list_move_tail(&ndn->dn_dbufs, &odn->dn_dbufs);
00703         ndn->dn_dbufs_count = odn->dn_dbufs_count;
00704         ndn->dn_bonus = odn->dn_bonus;
00705         ndn->dn_have_spill = odn->dn_have_spill;
00706         ndn->dn_zio = odn->dn_zio;
00707         ndn->dn_oldused = odn->dn_oldused;
00708         ndn->dn_oldflags = odn->dn_oldflags;
00709         ndn->dn_olduid = odn->dn_olduid;
00710         ndn->dn_oldgid = odn->dn_oldgid;
00711         ndn->dn_newuid = odn->dn_newuid;
00712         ndn->dn_newgid = odn->dn_newgid;
00713         ndn->dn_id_flags = odn->dn_id_flags;
00714         dmu_zfetch_init(&ndn->dn_zfetch, NULL);
00715         list_move_tail(&ndn->dn_zfetch.zf_stream, &odn->dn_zfetch.zf_stream);
00716         ndn->dn_zfetch.zf_dnode = odn->dn_zfetch.zf_dnode;
00717         ndn->dn_zfetch.zf_stream_cnt = odn->dn_zfetch.zf_stream_cnt;
00718         ndn->dn_zfetch.zf_alloc_fail = odn->dn_zfetch.zf_alloc_fail;
00719 
00720         /*
00721          * Update back pointers. Updating the handle fixes the back pointer of
00722          * every descendant dbuf as well as the bonus dbuf.
00723          */
00724         ASSERT(ndn->dn_handle->dnh_dnode == odn);
00725         ndn->dn_handle->dnh_dnode = ndn;
00726         if (ndn->dn_zfetch.zf_dnode == odn) {
00727                 ndn->dn_zfetch.zf_dnode = ndn;
00728         }
00729 
00730         /*
00731          * Invalidate the original dnode by clearing all of its back pointers.
00732          */
00733         odn->dn_dbuf = NULL;
00734         odn->dn_handle = NULL;
00735         list_create(&odn->dn_dbufs, sizeof (dmu_buf_impl_t),
00736             offsetof(dmu_buf_impl_t, db_link));
00737         odn->dn_dbufs_count = 0;
00738         odn->dn_bonus = NULL;
00739         odn->dn_zfetch.zf_dnode = NULL;
00740 
00741         /*
00742          * Set the low bit of the objset pointer to ensure that dnode_move()
00743          * recognizes the dnode as invalid in any subsequent callback.
00744          */
00745         POINTER_INVALIDATE(&odn->dn_objset);
00746 
00747         /*
00748          * Satisfy the destructor.
00749          */
00750         for (i = 0; i < TXG_SIZE; i++) {
00751                 list_create(&odn->dn_dirty_records[i],
00752                     sizeof (dbuf_dirty_record_t),
00753                     offsetof(dbuf_dirty_record_t, dr_dirty_node));
00754                 odn->dn_ranges[i].avl_root = NULL;
00755                 odn->dn_ranges[i].avl_numnodes = 0;
00756                 odn->dn_next_nlevels[i] = 0;
00757                 odn->dn_next_indblkshift[i] = 0;
00758                 odn->dn_next_bonustype[i] = 0;
00759                 odn->dn_rm_spillblk[i] = 0;
00760                 odn->dn_next_bonuslen[i] = 0;
00761                 odn->dn_next_blksz[i] = 0;
00762         }
00763         odn->dn_allocated_txg = 0;
00764         odn->dn_free_txg = 0;
00765         odn->dn_assigned_txg = 0;
00766         odn->dn_dirtyctx = 0;
00767         odn->dn_dirtyctx_firstset = NULL;
00768         odn->dn_have_spill = B_FALSE;
00769         odn->dn_zio = NULL;
00770         odn->dn_oldused = 0;
00771         odn->dn_oldflags = 0;
00772         odn->dn_olduid = 0;
00773         odn->dn_oldgid = 0;
00774         odn->dn_newuid = 0;
00775         odn->dn_newgid = 0;
00776         odn->dn_id_flags = 0;
00777 
00778         /*
00779          * Mark the dnode.
00780          */
00781         ndn->dn_moved = 1;
00782         odn->dn_moved = (uint8_t)-1;
00783 }
00784 
00785 #ifdef sun
00786 #ifdef  _KERNEL
00787 /*ARGSUSED*/
/*
 * Kmem movable-object callback: attempt to relocate the dnode at 'buf'
 * to 'newbuf' so the allocator can defragment the dnode cache.  Returns
 * KMEM_CBRC_YES after a successful move, KMEM_CBRC_NO for dnodes that
 * must never move (special objects), KMEM_CBRC_LATER when the dnode is
 * transiently busy, and KMEM_CBRC_DONT_KNOW when 'buf' cannot be
 * identified as a live dnode.  'size' and 'arg' are unused (ARGSUSED).
 */
static kmem_cbrc_t
dnode_move(void *buf, void *newbuf, size_t size, void *arg)
{
	dnode_t *odn = buf, *ndn = newbuf;
	objset_t *os;
	int64_t refcount;
	uint32_t dbufs;

	/*
	 * The dnode is on the objset's list of known dnodes if the objset
	 * pointer is valid. We set the low bit of the objset pointer when
	 * freeing the dnode to invalidate it, and the memory patterns written
	 * by kmem (baddcafe and deadbeef) set at least one of the two low bits.
	 * A newly created dnode sets the objset pointer last of all to indicate
	 * that the dnode is known and in a valid state to be moved by this
	 * function.
	 */
	os = odn->dn_objset;
	if (!POINTER_IS_VALID(os)) {
		DNODE_STAT_ADD(dnode_move_stats.dms_dnode_invalid);
		return (KMEM_CBRC_DONT_KNOW);
	}

	/*
	 * Ensure that the objset does not go away during the move.
	 */
	rw_enter(&os_lock, RW_WRITER);
	if (os != odn->dn_objset) {
		rw_exit(&os_lock);
		DNODE_STAT_ADD(dnode_move_stats.dms_dnode_recheck1);
		return (KMEM_CBRC_DONT_KNOW);
	}

	/*
	 * If the dnode is still valid, then so is the objset. We know that no
	 * valid objset can be freed while we hold os_lock, so we can safely
	 * ensure that the objset remains in use.
	 */
	mutex_enter(&os->os_lock);

	/*
	 * Recheck the objset pointer in case the dnode was removed just before
	 * acquiring the lock.
	 */
	if (os != odn->dn_objset) {
		mutex_exit(&os->os_lock);
		rw_exit(&os_lock);
		DNODE_STAT_ADD(dnode_move_stats.dms_dnode_recheck2);
		return (KMEM_CBRC_DONT_KNOW);
	}

	/*
	 * At this point we know that as long as we hold os->os_lock, the dnode
	 * cannot be freed and fields within the dnode can be safely accessed.
	 * The objset listing this dnode cannot go away as long as this dnode is
	 * on its list.
	 */
	rw_exit(&os_lock);
	if (DMU_OBJECT_IS_SPECIAL(odn->dn_object)) {
		mutex_exit(&os->os_lock);
		DNODE_STAT_ADD(dnode_move_stats.dms_dnode_special);
		return (KMEM_CBRC_NO);
	}
	ASSERT(odn->dn_dbuf != NULL); /* only "special" dnodes have no parent */

	/*
	 * Lock the dnode handle to prevent the dnode from obtaining any new
	 * holds. This also prevents the descendant dbufs and the bonus dbuf
	 * from accessing the dnode, so that we can discount their holds. The
	 * handle is safe to access because we know that while the dnode cannot
	 * go away, neither can its handle. Once we hold dnh_zrlock, we can
	 * safely move any dnode referenced only by dbufs.
	 */
	if (!zrl_tryenter(&odn->dn_handle->dnh_zrlock)) {
		mutex_exit(&os->os_lock);
		DNODE_STAT_ADD(dnode_move_stats.dms_dnode_handle);
		return (KMEM_CBRC_LATER);
	}

	/*
	 * Ensure a consistent view of the dnode's holds and the dnode's dbufs.
	 * We need to guarantee that there is a hold for every dbuf in order to
	 * determine whether the dnode is actively referenced. Falsely matching
	 * a dbuf to an active hold would lead to an unsafe move. It's possible
	 * that a thread already having an active dnode hold is about to add a
	 * dbuf, and we can't compare hold and dbuf counts while the add is in
	 * progress.
	 */
	if (!rw_tryenter(&odn->dn_struct_rwlock, RW_WRITER)) {
		zrl_exit(&odn->dn_handle->dnh_zrlock);
		mutex_exit(&os->os_lock);
		DNODE_STAT_ADD(dnode_move_stats.dms_dnode_rwlock);
		return (KMEM_CBRC_LATER);
	}

	/*
	 * A dbuf may be removed (evicted) without an active dnode hold. In that
	 * case, the dbuf count is decremented under the handle lock before the
	 * dbuf's hold is released. This order ensures that if we count the hold
	 * after the dbuf is removed but before its hold is released, we will
	 * treat the unmatched hold as active and exit safely. If we count the
	 * hold before the dbuf is removed, the hold is discounted, and the
	 * removal is blocked until the move completes.
	 */
	refcount = refcount_count(&odn->dn_holds);
	ASSERT(refcount >= 0);
	dbufs = odn->dn_dbufs_count;

	/* We can't have more dbufs than dnode holds. */
	ASSERT3U(dbufs, <=, refcount);
	DTRACE_PROBE3(dnode__move, dnode_t *, odn, int64_t, refcount,
	    uint32_t, dbufs);

	/* Any hold beyond the per-dbuf holds means the dnode is in use. */
	if (refcount > dbufs) {
		rw_exit(&odn->dn_struct_rwlock);
		zrl_exit(&odn->dn_handle->dnh_zrlock);
		mutex_exit(&os->os_lock);
		DNODE_STAT_ADD(dnode_move_stats.dms_dnode_active);
		return (KMEM_CBRC_LATER);
	}

	rw_exit(&odn->dn_struct_rwlock);

	/*
	 * At this point we know that anyone with a hold on the dnode is not
	 * actively referencing it. The dnode is known and in a valid state to
	 * move. We're holding the locks needed to execute the critical section.
	 */
	dnode_move_impl(odn, ndn);

	/* Swap the new dnode into the objset's list in place of the old one. */
	list_link_replace(&odn->dn_link, &ndn->dn_link);
	/* If the dnode was safe to move, the refcount cannot have changed. */
	ASSERT(refcount == refcount_count(&ndn->dn_holds));
	ASSERT(dbufs == ndn->dn_dbufs_count);
	zrl_exit(&ndn->dn_handle->dnh_zrlock); /* handle has moved */
	mutex_exit(&os->os_lock);

	return (KMEM_CBRC_YES);
}
00927 #endif  /* _KERNEL */
00928 #endif  /* sun */
00929 
/*
 * Tear down a "special" dnode (one embedded in its objset rather than
 * backed by a parent dbuf, e.g. the meta-dnode).  Waits for all holds
 * to drain, then destroys the dnode and its handle lock and clears the
 * handle's dnode pointer.
 */
void
dnode_special_close(dnode_handle_t *dnh)
{
	dnode_t *dn = dnh->dnh_dnode;

	/*
	 * Wait for final references to the dnode to clear.  This can
	 * only happen if the arc is asynchronously evicting state that
	 * has a hold on this dnode while we are trying to evict this
	 * dnode.
	 */
	while (refcount_count(&dn->dn_holds) > 0)
		delay(1);
	zrl_add(&dnh->dnh_zrlock);
	dnode_destroy(dn); /* implicit zrl_remove() */
	zrl_destroy(&dnh->dnh_zrlock);
	dnh->dnh_dnode = NULL;
}
00948 
/*
 * Instantiate a "special" dnode directly from its on-disk dnode_phys_t
 * with no parent dbuf, record it in the caller-supplied handle, and
 * initialize the handle's zrlock.  Returns the new dnode.
 */
dnode_t *
dnode_special_open(objset_t *os, dnode_phys_t *dnp, uint64_t object,
    dnode_handle_t *dnh)
{
	dnode_t *dn = dnode_create(os, dnp, NULL, object, dnh);
	dnh->dnh_dnode = dn;
	zrl_init(&dnh->dnh_zrlock);
	DNODE_VERIFY(dn);
	return (dn);
}
00959 
/*
 * Dbuf user-eviction callback for a meta-dnode block: called when the
 * dbuf containing this array of on-disk dnodes is evicted.  Destroys
 * every instantiated child dnode (all of which must be hold-free, or
 * the containing dbuf could not have been evicted) and then frees the
 * dnode_children_t bookkeeping structure passed as 'arg'.
 */
static void
dnode_buf_pageout(dmu_buf_t *db, void *arg)
{
	dnode_children_t *children_dnodes = arg;
	int i;
	int epb = db->db_size >> DNODE_SHIFT;	/* dnodes per block */

	ASSERT(epb == children_dnodes->dnc_count);

	for (i = 0; i < epb; i++) {
		dnode_handle_t *dnh = &children_dnodes->dnc_children[i];
		dnode_t *dn;

		/*
		 * The dnode handle lock guards against the dnode moving to
		 * another valid address, so there is no need here to guard
		 * against changes to or from NULL.
		 */
		if (dnh->dnh_dnode == NULL) {
			zrl_destroy(&dnh->dnh_zrlock);
			continue;
		}

		zrl_add(&dnh->dnh_zrlock);
		dn = dnh->dnh_dnode;
		/*
		 * If there are holds on this dnode, then there should
		 * be holds on the dnode's containing dbuf as well; thus
		 * it wouldn't be eligible for eviction and this function
		 * would not have been called.
		 */
		ASSERT(refcount_is_zero(&dn->dn_holds));
		ASSERT(refcount_is_zero(&dn->dn_tx_holds));

		dnode_destroy(dn); /* implicit zrl_remove() */
		zrl_destroy(&dnh->dnh_zrlock);
		dnh->dnh_dnode = NULL;
	}
	/* Free the flexible-length children array (epb handles total). */
	kmem_free(children_dnodes, sizeof (dnode_children_t) +
	    (epb - 1) * sizeof (dnode_handle_t));
}
01001 
01010 int
01011 dnode_hold_impl(objset_t *os, uint64_t object, int flag,
01012     void *tag, dnode_t **dnp)
01013 {
01014         int epb, idx, err;
01015         int drop_struct_lock = FALSE;
01016         int type;
01017         uint64_t blk;
01018         dnode_t *mdn, *dn;
01019         dmu_buf_impl_t *db;
01020         dnode_children_t *children_dnodes;
01021         dnode_handle_t *dnh;
01022 
01023         /*
01024          * If you are holding the spa config lock as writer, you shouldn't
01025          * be asking the DMU to do *anything* unless it's the root pool
01026          * which may require us to read from the root filesystem while
01027          * holding some (not all) of the locks as writer.
01028          */
01029         ASSERT(spa_config_held(os->os_spa, SCL_ALL, RW_WRITER) == 0 ||
01030             (spa_is_root(os->os_spa) &&
01031             spa_config_held(os->os_spa, SCL_STATE, RW_WRITER)));
01032 
01033         if (object == DMU_USERUSED_OBJECT || object == DMU_GROUPUSED_OBJECT) {
01034                 dn = (object == DMU_USERUSED_OBJECT) ?
01035                     DMU_USERUSED_DNODE(os) : DMU_GROUPUSED_DNODE(os);
01036                 if (dn == NULL)
01037                         return (ENOENT);
01038                 type = dn->dn_type;
01039                 if ((flag & DNODE_MUST_BE_ALLOCATED) && type == DMU_OT_NONE)
01040                         return (ENOENT);
01041                 if ((flag & DNODE_MUST_BE_FREE) && type != DMU_OT_NONE)
01042                         return (EEXIST);
01043                 DNODE_VERIFY(dn);
01044                 (void) refcount_add(&dn->dn_holds, tag);
01045                 *dnp = dn;
01046                 return (0);
01047         }
01048 
01049         if (object == 0 || object >= DN_MAX_OBJECT)
01050                 return (EINVAL);
01051 
01052         mdn = DMU_META_DNODE(os);
01053         ASSERT(mdn->dn_object == DMU_META_DNODE_OBJECT);
01054 
01055         DNODE_VERIFY(mdn);
01056 
01057         if (!RW_WRITE_HELD(&mdn->dn_struct_rwlock)) {
01058                 rw_enter(&mdn->dn_struct_rwlock, RW_READER);
01059                 drop_struct_lock = TRUE;
01060         }
01061 
01062         blk = dbuf_whichblock(mdn, object * sizeof (dnode_phys_t));
01063 
01064         db = dbuf_hold(mdn, blk, FTAG);
01065         if (drop_struct_lock)
01066                 rw_exit(&mdn->dn_struct_rwlock);
01067         if (db == NULL)
01068                 return (EIO);
01069         err = dbuf_read(db, NULL, DB_RF_CANFAIL);
01070         if (err) {
01071                 dbuf_rele(db, FTAG);
01072                 return (err);
01073         }
01074 
01075         ASSERT3U(db->db.db_size, >=, 1<<DNODE_SHIFT);
01076         epb = db->db.db_size >> DNODE_SHIFT;
01077 
01078         idx = object & (epb-1);
01079 
01080         ASSERT(DB_DNODE(db)->dn_type == DMU_OT_DNODE);
01081         children_dnodes = dmu_buf_get_user(&db->db);
01082         if (children_dnodes == NULL) {
01083                 int i;
01084                 dnode_children_t *winner;
01085                 children_dnodes = kmem_zalloc(sizeof (dnode_children_t) +
01086                     (epb - 1) * sizeof (dnode_handle_t), KM_SLEEP);
01087                 children_dnodes->dnc_count = epb;
01088                 dnh = &children_dnodes->dnc_children[0];
01089                 for (i = 0; i < epb; i++) {
01090                         zrl_init(&dnh[i].dnh_zrlock);
01091                         dnh[i].dnh_dnode = NULL;
01092                 }
01093                 if (winner = dmu_buf_set_user(&db->db, children_dnodes, NULL,
01094                     dnode_buf_pageout)) {
01095                         kmem_free(children_dnodes, sizeof (dnode_children_t) +
01096                             (epb - 1) * sizeof (dnode_handle_t));
01097                         children_dnodes = winner;
01098                 }
01099         }
01100         ASSERT(children_dnodes->dnc_count == epb);
01101 
01102         dnh = &children_dnodes->dnc_children[idx];
01103         zrl_add(&dnh->dnh_zrlock);
01104         if ((dn = dnh->dnh_dnode) == NULL) {
01105                 dnode_phys_t *phys = (dnode_phys_t *)db->db.db_data+idx;
01106                 dnode_t *winner;
01107 
01108                 dn = dnode_create(os, phys, db, object, dnh);
01109                 winner = atomic_cas_ptr(&dnh->dnh_dnode, NULL, dn);
01110                 if (winner != NULL) {
01111                         zrl_add(&dnh->dnh_zrlock);
01112                         dnode_destroy(dn); /* implicit zrl_remove() */
01113                         dn = winner;
01114                 }
01115         }
01116 
01117         mutex_enter(&dn->dn_mtx);
01118         type = dn->dn_type;
01119         if (dn->dn_free_txg ||
01120             ((flag & DNODE_MUST_BE_ALLOCATED) && type == DMU_OT_NONE) ||
01121             ((flag & DNODE_MUST_BE_FREE) &&
01122             (type != DMU_OT_NONE || !refcount_is_zero(&dn->dn_holds)))) {
01123                 mutex_exit(&dn->dn_mtx);
01124                 zrl_remove(&dnh->dnh_zrlock);
01125                 dbuf_rele(db, FTAG);
01126                 return (type == DMU_OT_NONE ? ENOENT : EEXIST);
01127         }
01128         mutex_exit(&dn->dn_mtx);
01129 
01130         if (refcount_add(&dn->dn_holds, tag) == 1)
01131                 dbuf_add_ref(db, dnh);
01132         /* Now we can rely on the hold to prevent the dnode from moving. */
01133         zrl_remove(&dnh->dnh_zrlock);
01134 
01135         DNODE_VERIFY(dn);
01136         ASSERT3P(dn->dn_dbuf, ==, db);
01137         ASSERT3U(dn->dn_object, ==, object);
01138         dbuf_rele(db, FTAG);
01139 
01140         *dnp = dn;
01141         return (0);
01142 }
01143 
/*
 * Convenience wrapper around dnode_hold_impl() that requires the object
 * to be allocated.  On success the hold must be released with
 * dnode_rele(dn, tag).
 */
int
dnode_hold(objset_t *os, uint64_t object, void *tag, dnode_t **dnp)
{
	return (dnode_hold_impl(os, object, DNODE_MUST_BE_ALLOCATED, tag, dnp));
}
01152 
/*
 * Add a hold to an already-held dnode.  Refuses (returns FALSE) if the
 * hold count is currently zero, since such a dnode may be in the middle
 * of being evicted/destroyed and must not be resurrected.  Returns TRUE
 * once the hold has been added.
 */
boolean_t
dnode_add_ref(dnode_t *dn, void *tag)
{
	mutex_enter(&dn->dn_mtx);
	if (refcount_is_zero(&dn->dn_holds)) {
		mutex_exit(&dn->dn_mtx);
		return (FALSE);
	}
	/* We saw a nonzero count above, so the new count must exceed 1. */
	VERIFY(1 < refcount_add(&dn->dn_holds, tag));
	mutex_exit(&dn->dn_mtx);
	return (TRUE);
}
01171 
/*
 * Release a hold on the dnode acquired with dnode_hold()/dnode_add_ref().
 * Dropping the last hold also drops the dnode's hold on its containing
 * dbuf, which may trigger eviction of the dnode itself.
 */
void
dnode_rele(dnode_t *dn, void *tag)
{
	uint64_t refs;
	/* Get while the hold prevents the dnode from moving. */
	dmu_buf_impl_t *db = dn->dn_dbuf;
	dnode_handle_t *dnh = dn->dn_handle;

	mutex_enter(&dn->dn_mtx);
	refs = refcount_remove(&dn->dn_holds, tag);
	mutex_exit(&dn->dn_mtx);

	/*
	 * It's unsafe to release the last hold on a dnode by dnode_rele() or
	 * indirectly by dbuf_rele() while relying on the dnode handle to
	 * prevent the dnode from moving, since releasing the last hold could
	 * result in the dnode's parent dbuf evicting its dnode handles. For
	 * that reason anyone calling dnode_rele() or dbuf_rele() without some
	 * other direct or indirect hold on the dnode must first drop the dnode
	 * handle.
	 */
	ASSERT(refs > 0 || dnh->dnh_zrlock.zr_owner != curthread);

	/* NOTE: the DNODE_DNODE does not have a dn_dbuf */
	if (refs == 0 && db != NULL) {
		/*
		 * Another thread could add a hold to the dnode handle in
		 * dnode_hold_impl() while holding the parent dbuf. Since the
		 * hold on the parent dbuf prevents the handle from being
		 * destroyed, the hold on the handle is OK. We can't yet assert
		 * that the handle has zero references, but that will be
		 * asserted anyway when the handle gets destroyed.
		 */
		dbuf_rele(db, dnh);
	}
}
01208 
/*
 * Mark the dnode dirty in the given transaction: link it onto the
 * objset's per-txg dirty (or free) dnode list, take a "dirty hold" on
 * it, and dirty its containing dbuf and dataset.  Idempotent within a
 * txg — a dnode already on a dirty list returns early.
 */
void
dnode_setdirty(dnode_t *dn, dmu_tx_t *tx)
{
	objset_t *os = dn->dn_objset;
	uint64_t txg = tx->tx_txg;

	/* Special (objset-embedded) dnodes just dirty the dataset. */
	if (DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
		dsl_dataset_dirty(os->os_dsl_dataset, tx);
		return;
	}

	DNODE_VERIFY(dn);

#ifdef ZFS_DEBUG
	mutex_enter(&dn->dn_mtx);
	ASSERT(dn->dn_phys->dn_type || dn->dn_allocated_txg);
	ASSERT(dn->dn_free_txg == 0 || dn->dn_free_txg >= txg);
	mutex_exit(&dn->dn_mtx);
#endif

	/*
	 * Determine old uid/gid when necessary
	 */
	dmu_objset_userquota_get_ids(dn, B_TRUE, tx);

	mutex_enter(&os->os_lock);

	/*
	 * If we are already marked dirty, we're done.
	 */
	if (list_link_active(&dn->dn_dirty_link[txg & TXG_MASK])) {
		mutex_exit(&os->os_lock);
		return;
	}

	ASSERT(!refcount_is_zero(&dn->dn_holds) || list_head(&dn->dn_dbufs));
	ASSERT(dn->dn_datablksz != 0);
	ASSERT0(dn->dn_next_bonuslen[txg&TXG_MASK]);
	ASSERT0(dn->dn_next_blksz[txg&TXG_MASK]);
	ASSERT0(dn->dn_next_bonustype[txg&TXG_MASK]);

	dprintf_ds(os->os_dsl_dataset, "obj=%llu txg=%llu\n",
	    dn->dn_object, txg);

	/* Dnodes freed in (or before) this txg go on the free list instead. */
	if (dn->dn_free_txg > 0 && dn->dn_free_txg <= txg) {
		list_insert_tail(&os->os_free_dnodes[txg&TXG_MASK], dn);
	} else {
		list_insert_tail(&os->os_dirty_dnodes[txg&TXG_MASK], dn);
	}

	mutex_exit(&os->os_lock);

	/*
	 * The dnode maintains a hold on its containing dbuf as
	 * long as there are holds on it.  Each instantiated child
	 * dbuf maintains a hold on the dnode.  When the last child
	 * drops its hold, the dnode will drop its hold on the
	 * containing dbuf. We add a "dirty hold" here so that the
	 * dnode will hang around after we finish processing its
	 * children.
	 */
	VERIFY(dnode_add_ref(dn, (void *)(uintptr_t)tx->tx_txg));

	(void) dbuf_dirty(dn->dn_dbuf, tx);

	dsl_dataset_dirty(os->os_dsl_dataset, tx);
}
01276 
/*
 * Schedule the dnode for freeing in the given transaction by recording
 * its free txg and moving it to (or dirtying it onto) the objset's
 * per-txg free-dnode list.  A no-op if the dnode is unallocated or
 * already scheduled for freeing.
 */
void
dnode_free(dnode_t *dn, dmu_tx_t *tx)
{
	int txgoff = tx->tx_txg & TXG_MASK;

	dprintf("dn=%p txg=%llu\n", dn, tx->tx_txg);

	/* we should be the only holder... hopefully */
	/* ASSERT3U(refcount_count(&dn->dn_holds), ==, 1); */

	mutex_enter(&dn->dn_mtx);
	if (dn->dn_type == DMU_OT_NONE || dn->dn_free_txg) {
		mutex_exit(&dn->dn_mtx);
		return;
	}
	dn->dn_free_txg = tx->tx_txg;
	mutex_exit(&dn->dn_mtx);

	/*
	 * If the dnode is already dirty, it needs to be moved from
	 * the dirty list to the free list.
	 */
	mutex_enter(&dn->dn_objset->os_lock);
	if (list_link_active(&dn->dn_dirty_link[txgoff])) {
		list_remove(&dn->dn_objset->os_dirty_dnodes[txgoff], dn);
		list_insert_tail(&dn->dn_objset->os_free_dnodes[txgoff], dn);
		mutex_exit(&dn->dn_objset->os_lock);
	} else {
		mutex_exit(&dn->dn_objset->os_lock);
		/* Not yet dirty: dnode_setdirty() sees dn_free_txg set
		 * above and places it on the free list. */
		dnode_setdirty(dn, tx);
	}
}
01309 
/*
 * Change the data block size (and optionally the indirect block shift)
 * of the dnode.  Only permitted while the object still has at most one
 * data block and, for an indirect-shift change, a single level.
 * 'size' is clamped to [SPA_MINBLOCKSIZE, SPA_MAXBLOCKSIZE] and rounded
 * up to a multiple of SPA_MINBLOCKSIZE; 'ibs' == current shift or 0
 * means "leave the indirect shift alone".  Returns 0 on success,
 * ENOTSUP if the object's layout does not allow the change.
 */
int
dnode_set_blksz(dnode_t *dn, uint64_t size, int ibs, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db, *db_next;
	int err;

	if (size == 0)
		size = SPA_MINBLOCKSIZE;
	if (size > SPA_MAXBLOCKSIZE)
		size = SPA_MAXBLOCKSIZE;
	else
		size = P2ROUNDUP(size, SPA_MINBLOCKSIZE);

	if (ibs == dn->dn_indblkshift)
		ibs = 0;

	/* Nothing to change? */
	if (size >> SPA_MINBLOCKSHIFT == dn->dn_datablkszsec && ibs == 0)
		return (0);

	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);

	/* Check for any allocated blocks beyond the first */
	if (dn->dn_phys->dn_maxblkid != 0)
		goto fail;

	/*
	 * Also refuse if any in-memory dbuf other than block 0 (or the
	 * bonus/spill pseudo-blocks) exists.
	 */
	mutex_enter(&dn->dn_dbufs_mtx);
	for (db = list_head(&dn->dn_dbufs); db; db = db_next) {
		db_next = list_next(&dn->dn_dbufs, db);

		if (db->db_blkid != 0 && db->db_blkid != DMU_BONUS_BLKID &&
		    db->db_blkid != DMU_SPILL_BLKID) {
			mutex_exit(&dn->dn_dbufs_mtx);
			goto fail;
		}
	}
	mutex_exit(&dn->dn_dbufs_mtx);

	if (ibs && dn->dn_nlevels != 1)
		goto fail;

	/* resize the old block */
	err = dbuf_hold_impl(dn, 0, 0, TRUE, FTAG, &db);
	if (err == 0)
		dbuf_new_size(db, size, tx);
	else if (err != ENOENT)
		goto fail;

	dnode_setdblksz(dn, size);
	dnode_setdirty(dn, tx);
	dn->dn_next_blksz[tx->tx_txg&TXG_MASK] = size;
	if (ibs) {
		dn->dn_indblkshift = ibs;
		dn->dn_next_indblkshift[tx->tx_txg&TXG_MASK] = ibs;
	}
	/* rele after we have fixed the blocksize in the dnode */
	/* NOTE(review): relies on dbuf_hold_impl() setting db to NULL
	 * on ENOENT — confirm against dbuf.c. */
	if (db)
		dbuf_rele(db, FTAG);

	rw_exit(&dn->dn_struct_rwlock);
	return (0);

fail:
	rw_exit(&dn->dn_struct_rwlock);
	return (ENOTSUP);
}
01379 
/*
 * Grow the dnode's notion of its highest data block to cover 'blkid',
 * increasing dn_nlevels (and re-parenting existing dirty records under
 * the new top-level indirect) if the new maxblkid no longer fits in the
 * current tree depth.  The caller holds dn_struct_rwlock as reader
 * (have_read == B_TRUE, temporarily upgraded here) or as writer.
 */
void
dnode_new_blkid(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx, boolean_t have_read)
{
	uint64_t txgoff = tx->tx_txg & TXG_MASK;
	int epbs, new_nlevels;
	uint64_t sz;

	ASSERT(blkid != DMU_BONUS_BLKID);

	ASSERT(have_read ?
	    RW_READ_HELD(&dn->dn_struct_rwlock) :
	    RW_WRITE_HELD(&dn->dn_struct_rwlock));

	/*
	 * if we have a read-lock, check to see if we need to do any work
	 * before upgrading to a write-lock.
	 */
	if (have_read) {
		if (blkid <= dn->dn_maxblkid)
			return;

		if (!rw_tryupgrade(&dn->dn_struct_rwlock)) {
			rw_exit(&dn->dn_struct_rwlock);
			rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
		}
	}

	/* Recheck: another thread may have grown maxblkid while we
	 * dropped the lock to re-acquire it as writer. */
	if (blkid <= dn->dn_maxblkid)
		goto out;

	dn->dn_maxblkid = blkid;

	/*
	 * Compute the number of levels necessary to support the new maxblkid.
	 */
	new_nlevels = 1;
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	for (sz = dn->dn_nblkptr;
	    sz <= blkid && sz >= dn->dn_nblkptr; sz <<= epbs)
		new_nlevels++;

	if (new_nlevels > dn->dn_nlevels) {
		int old_nlevels = dn->dn_nlevels;
		dmu_buf_impl_t *db;
		list_t *list;
		dbuf_dirty_record_t *new, *dr, *dr_next;

		dn->dn_nlevels = new_nlevels;

		ASSERT3U(new_nlevels, >, dn->dn_next_nlevels[txgoff]);
		dn->dn_next_nlevels[txgoff] = new_nlevels;

		/* dirty the left indirects */
		db = dbuf_hold_level(dn, old_nlevels, 0, FTAG);
		ASSERT(db != NULL);
		new = dbuf_dirty(db, tx);
		dbuf_rele(db, FTAG);

		/* transfer the dirty records to the new indirect */
		mutex_enter(&dn->dn_mtx);
		mutex_enter(&new->dt.di.dr_mtx);
		list = &dn->dn_dirty_records[txgoff];
		for (dr = list_head(list); dr; dr = dr_next) {
			dr_next = list_next(&dn->dn_dirty_records[txgoff], dr);
			if (dr->dr_dbuf->db_level != new_nlevels-1 &&
			    dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID &&
			    dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) {
				ASSERT(dr->dr_dbuf->db_level == old_nlevels-1);
				list_remove(&dn->dn_dirty_records[txgoff], dr);
				list_insert_tail(&new->dt.di.dr_children, dr);
				dr->dr_parent = new;
			}
		}
		mutex_exit(&new->dt.di.dr_mtx);
		mutex_exit(&dn->dn_mtx);
	}

out:
	if (have_read)
		rw_downgrade(&dn->dn_struct_rwlock);
}
01462 
01463 void
01464 dnode_clear_range(dnode_t *dn, uint64_t blkid, uint64_t nblks, dmu_tx_t *tx)
01465 {
01466         avl_tree_t *tree = &dn->dn_ranges[tx->tx_txg&TXG_MASK];
01467         avl_index_t where;
01468         free_range_t *rp;
01469         free_range_t rp_tofind;
01470         uint64_t endblk = blkid + nblks;
01471 
01472         ASSERT(MUTEX_HELD(&dn->dn_mtx));
01473         ASSERT(nblks <= UINT64_MAX - blkid); /* no overflow */
01474 
01475         dprintf_dnode(dn, "blkid=%llu nblks=%llu txg=%llu\n",
01476             blkid, nblks, tx->tx_txg);
01477         rp_tofind.fr_blkid = blkid;
01478         rp = avl_find(tree, &rp_tofind, &where);
01479         if (rp == NULL)
01480                 rp = avl_nearest(tree, where, AVL_BEFORE);
01481         if (rp == NULL)
01482                 rp = avl_nearest(tree, where, AVL_AFTER);
01483 
01484         while (rp && (rp->fr_blkid <= blkid + nblks)) {
01485                 uint64_t fr_endblk = rp->fr_blkid + rp->fr_nblks;
01486                 free_range_t *nrp = AVL_NEXT(tree, rp);
01487 
01488                 if (blkid <= rp->fr_blkid && endblk >= fr_endblk) {
01489                         /* clear this entire range */
01490                         avl_remove(tree, rp);
01491                         kmem_free(rp, sizeof (free_range_t));
01492                 } else if (blkid <= rp->fr_blkid &&
01493                     endblk > rp->fr_blkid && endblk < fr_endblk) {
01494                         /* clear the beginning of this range */
01495                         rp->fr_blkid = endblk;
01496                         rp->fr_nblks = fr_endblk - endblk;
01497                 } else if (blkid > rp->fr_blkid && blkid < fr_endblk &&
01498                     endblk >= fr_endblk) {
01499                         /* clear the end of this range */
01500                         rp->fr_nblks = blkid - rp->fr_blkid;
01501                 } else if (blkid > rp->fr_blkid && endblk < fr_endblk) {
01502                         /* clear a chunk out of this range */
01503                         free_range_t *new_rp =
01504                             kmem_alloc(sizeof (free_range_t), KM_SLEEP);
01505 
01506                         new_rp->fr_blkid = endblk;
01507                         new_rp->fr_nblks = fr_endblk - endblk;
01508                         avl_insert_here(tree, new_rp, rp, AVL_AFTER);
01509                         rp->fr_nblks = blkid - rp->fr_blkid;
01510                 }
01511                 /* there may be no overlap */
01512                 rp = nrp;
01513         }
01514 }
01515 
/*
 * Free the file range [off, off+len) of this dnode in open context.
 * A len of -1ULL means "truncate": free everything from off to the end
 * of the object.  Partial blocks at the head/tail of the range are
 * zeroed in place; whole blocks are recorded on the per-txg dn_ranges
 * list and actually freed during the syncing phase.
 */
void
dnode_free_range(dnode_t *dn, uint64_t off, uint64_t len, dmu_tx_t *tx)
{
	dmu_buf_impl_t *db;
	uint64_t blkoff, blkid, nblks;
	int blksz, blkshift, head, tail;
	int trunc = FALSE;
	int epbs;

	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	blksz = dn->dn_datablksz;
	blkshift = dn->dn_datablkshift;
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;

	/* -1ULL is the "free to end of object" sentinel */
	if (len == -1ULL) {
		len = UINT64_MAX - off;
		trunc = TRUE;
	}

	/*
	 * First, block align the region to free:
	 */
	if (ISP2(blksz)) {
		head = P2NPHASE(off, blksz);
		blkoff = P2PHASE(off, blksz);
		if ((off >> blkshift) > dn->dn_maxblkid)
			goto out;
	} else {
		/* a non-power-of-2 blocksize implies a single-block object */
		ASSERT(dn->dn_maxblkid == 0);
		if (off == 0 && len >= blksz) {
			/* Freeing the whole block; fast-track this request */
			blkid = 0;
			nblks = 1;
			goto done;
		} else if (off >= blksz) {
			/* Freeing past end-of-data */
			goto out;
		} else {
			/* Freeing part of the block. */
			head = blksz - off;
			ASSERT3U(head, >, 0);
		}
		blkoff = off;
	}
	/* zero out any partial block data at the start of the range */
	if (head) {
		ASSERT3U(blkoff + head, ==, blksz);
		if (len < head)
			head = len;
		if (dbuf_hold_impl(dn, 0, dbuf_whichblock(dn, off), TRUE,
		    FTAG, &db) == 0) {
			caddr_t data;

			/* don't dirty if it isn't on disk and isn't dirty */
			if (db->db_last_dirty ||
			    (db->db_blkptr && !BP_IS_HOLE(db->db_blkptr))) {
				/*
				 * Drop the struct lock while dirtying: the
				 * dbuf code may need to take it itself.
				 */
				rw_exit(&dn->dn_struct_rwlock);
				dbuf_will_dirty(db, tx);
				rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
				data = db->db.db_data;
				bzero(data + blkoff, head);
			}
			dbuf_rele(db, FTAG);
		}
		off += head;
		len -= head;
	}

	/* If the range was less than one block, we're done */
	if (len == 0)
		goto out;

	/* If the remaining range is past end of file, we're done */
	if ((off >> blkshift) > dn->dn_maxblkid)
		goto out;

	ASSERT(ISP2(blksz));
	if (trunc)
		tail = 0;
	else
		tail = P2PHASE(len, blksz);

	ASSERT0(P2PHASE(off, blksz));
	/* zero out any partial block data at the end of the range */
	if (tail) {
		if (len < tail)
			tail = len;
		if (dbuf_hold_impl(dn, 0, dbuf_whichblock(dn, off+len),
		    TRUE, FTAG, &db) == 0) {
			/* don't dirty if not on disk and not dirty */
			if (db->db_last_dirty ||
			    (db->db_blkptr && !BP_IS_HOLE(db->db_blkptr))) {
				rw_exit(&dn->dn_struct_rwlock);
				dbuf_will_dirty(db, tx);
				rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
				bzero(db->db.db_data, tail);
			}
			dbuf_rele(db, FTAG);
		}
		len -= tail;
	}

	/* If the range did not include a full block, we are done */
	if (len == 0)
		goto out;

	/* off and len are now block-aligned; compute the whole-block span */
	ASSERT(IS_P2ALIGNED(off, blksz));
	ASSERT(trunc || IS_P2ALIGNED(len, blksz));
	blkid = off >> blkshift;
	nblks = len >> blkshift;
	if (trunc)
		nblks += 1;

	/*
	 * Read in and mark all the level-1 indirects dirty,
	 * so that they will stay in memory until syncing phase.
	 * Always dirty the first and last indirect to make sure
	 * we dirty all the partial indirects.
	 */
	if (dn->dn_nlevels > 1) {
		uint64_t i, first, last;
		int shift = epbs + dn->dn_datablkshift;

		first = blkid >> epbs;
		if (db = dbuf_hold_level(dn, 1, first, FTAG)) {
			dbuf_will_dirty(db, tx);
			dbuf_rele(db, FTAG);
		}
		if (trunc)
			last = dn->dn_maxblkid >> epbs;
		else
			last = (blkid + nblks - 1) >> epbs;
		if (last > first && (db = dbuf_hold_level(dn, 1, last, FTAG))) {
			dbuf_will_dirty(db, tx);
			dbuf_rele(db, FTAG);
		}
		/*
		 * Walk the interior indirects via dnode_next_offset() so
		 * we only touch L1 blocks that actually exist (holes are
		 * skipped; ESRCH means no more data before 'last').
		 */
		for (i = first + 1; i < last; i++) {
			uint64_t ibyte = i << shift;
			int err;

			err = dnode_next_offset(dn,
			    DNODE_FIND_HAVELOCK, &ibyte, 1, 1, 0);
			i = ibyte >> shift;
			if (err == ESRCH || i >= last)
				break;
			ASSERT(err == 0);
			db = dbuf_hold_level(dn, 1, i, FTAG);
			if (db) {
				dbuf_will_dirty(db, tx);
				dbuf_rele(db, FTAG);
			}
		}
	}
done:
	/*
	 * Add this range to the dnode range list.
	 * We will finish up this free operation in the syncing phase.
	 */
	mutex_enter(&dn->dn_mtx);
	dnode_clear_range(dn, blkid, nblks, tx);
	{
		free_range_t *rp, *found;
		avl_index_t where;
		avl_tree_t *tree = &dn->dn_ranges[tx->tx_txg&TXG_MASK];

		/* Add new range to dn_ranges */
		rp = kmem_alloc(sizeof (free_range_t), KM_SLEEP);
		rp->fr_blkid = blkid;
		rp->fr_nblks = nblks;
		found = avl_find(tree, rp, &where);
		/* dnode_clear_range() just removed any overlap */
		ASSERT(found == NULL);
		avl_insert(tree, rp, where);
		dprintf_dnode(dn, "blkid=%llu nblks=%llu txg=%llu\n",
		    blkid, nblks, tx->tx_txg);
	}
	mutex_exit(&dn->dn_mtx);

	dbuf_free_range(dn, blkid, blkid + nblks - 1, tx);
	dnode_setdirty(dn, tx);
out:
	/* on truncate, pull dn_maxblkid back to the last surviving block */
	if (trunc && dn->dn_maxblkid >= (off >> blkshift))
		dn->dn_maxblkid = (off >> blkshift ? (off >> blkshift) - 1 : 0);

	rw_exit(&dn->dn_struct_rwlock);
}
01701 
01702 static boolean_t
01703 dnode_spill_freed(dnode_t *dn)
01704 {
01705         int i;
01706 
01707         mutex_enter(&dn->dn_mtx);
01708         for (i = 0; i < TXG_SIZE; i++) {
01709                 if (dn->dn_rm_spillblk[i] == DN_KILL_SPILLBLK)
01710                         break;
01711         }
01712         mutex_exit(&dn->dn_mtx);
01713         return (i < TXG_SIZE);
01714 }
01715 
01720 uint64_t
01721 dnode_block_freed(dnode_t *dn, uint64_t blkid)
01722 {
01723         free_range_t range_tofind;
01724         void *dp = spa_get_dsl(dn->dn_objset->os_spa);
01725         int i;
01726 
01727         if (blkid == DMU_BONUS_BLKID)
01728                 return (FALSE);
01729 
01730         /*
01731          * If we're in the process of opening the pool, dp will not be
01732          * set yet, but there shouldn't be anything dirty.
01733          */
01734         if (dp == NULL)
01735                 return (FALSE);
01736 
01737         if (dn->dn_free_txg)
01738                 return (TRUE);
01739 
01740         if (blkid == DMU_SPILL_BLKID)
01741                 return (dnode_spill_freed(dn));
01742 
01743         range_tofind.fr_blkid = blkid;
01744         mutex_enter(&dn->dn_mtx);
01745         for (i = 0; i < TXG_SIZE; i++) {
01746                 free_range_t *range_found;
01747                 avl_index_t idx;
01748 
01749                 range_found = avl_find(&dn->dn_ranges[i], &range_tofind, &idx);
01750                 if (range_found) {
01751                         ASSERT(range_found->fr_nblks > 0);
01752                         break;
01753                 }
01754                 range_found = avl_nearest(&dn->dn_ranges[i], idx, AVL_BEFORE);
01755                 if (range_found &&
01756                     range_found->fr_blkid + range_found->fr_nblks > blkid)
01757                         break;
01758         }
01759         mutex_exit(&dn->dn_mtx);
01760         return (i < TXG_SIZE);
01761 }
01762 
01766 void
01767 dnode_diduse_space(dnode_t *dn, int64_t delta)
01768 {
01769         uint64_t space;
01770         dprintf_dnode(dn, "dn=%p dnp=%p used=%llu delta=%lld\n",
01771             dn, dn->dn_phys,
01772             (u_longlong_t)dn->dn_phys->dn_used,
01773             (longlong_t)delta);
01774 
01775         mutex_enter(&dn->dn_mtx);
01776         space = DN_USED_BYTES(dn->dn_phys);
01777         if (delta > 0) {
01778                 ASSERT3U(space + delta, >=, space); /* no overflow */
01779         } else {
01780                 ASSERT3U(space, >=, -delta); /* no underflow */
01781         }
01782         space += delta;
01783         if (spa_version(dn->dn_objset->os_spa) < SPA_VERSION_DNODE_BYTES) {
01784                 ASSERT((dn->dn_phys->dn_flags & DNODE_FLAG_USED_BYTES) == 0);
01785                 ASSERT0(P2PHASE(space, 1<<DEV_BSHIFT));
01786                 dn->dn_phys->dn_used = space >> DEV_BSHIFT;
01787         } else {
01788                 dn->dn_phys->dn_used = space;
01789                 dn->dn_phys->dn_flags |= DNODE_FLAG_USED_BYTES;
01790         }
01791         mutex_exit(&dn->dn_mtx);
01792 }
01793 
01799 void
01800 dnode_willuse_space(dnode_t *dn, int64_t space, dmu_tx_t *tx)
01801 {
01802         objset_t *os = dn->dn_objset;
01803         dsl_dataset_t *ds = os->os_dsl_dataset;
01804 
01805         if (space > 0)
01806                 space = spa_get_asize(os->os_spa, space);
01807 
01808         if (ds)
01809                 dsl_dir_willuse_space(ds->ds_dir, space, tx);
01810 
01811         dmu_tx_willuse_space(tx, space);
01812 }
01813 
/*
 * Scan one level of the block tree for the next data/hole (per 'flags')
 * at or after/before *offset, advancing *offset as it goes.
 *
 * Returns 0 if a match was found at this level, ESRCH if this level is
 * exhausted (caller should climb to lvl+1), or another errno on I/O
 * failure.  With a nonzero 'txg', only blocks born after 'txg' match
 * (data search only; ASSERTed below).
 */
static int
dnode_next_offset_level(dnode_t *dn, int flags, uint64_t *offset,
	int lvl, uint64_t blkfill, uint64_t txg)
{
	dmu_buf_impl_t *db = NULL;
	void *data = NULL;
	uint64_t epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
	uint64_t epb = 1ULL << epbs;
	uint64_t minfill, maxfill;
	boolean_t hole;
	int i, inc, error, span;

	dprintf("probing object %llu offset %llx level %d of %u\n",
	    dn->dn_object, *offset, lvl, dn->dn_phys->dn_nlevels);

	hole = ((flags & DNODE_FIND_HOLE) != 0);
	inc = (flags & DNODE_FIND_BACKWARDS) ? -1 : 1;
	ASSERT(txg == 0 || !hole);

	if (lvl == dn->dn_phys->dn_nlevels) {
		/* topmost level: scan the blkptrs embedded in the dnode */
		error = 0;
		epb = dn->dn_phys->dn_nblkptr;
		data = dn->dn_phys->dn_blkptr;
	} else {
		uint64_t blkid = dbuf_whichblock(dn, *offset) >> (epbs * lvl);
		error = dbuf_hold_impl(dn, lvl, blkid, TRUE, FTAG, &db);
		if (error) {
			if (error != ENOENT)
				return (error);
			/* ENOENT means an unallocated block: that IS a hole */
			if (hole)
				return (0);
			/*
			 * This can only happen when we are searching up
			 * the block tree for data.  We don't really need to
			 * adjust the offset, as we will just end up looking
			 * at the pointer to this block in its parent, and its
			 * going to be unallocated, so we will skip over it.
			 */
			return (ESRCH);
		}
		error = dbuf_read(db, NULL, DB_RF_CANFAIL | DB_RF_HAVESTRUCT);
		if (error) {
			dbuf_rele(db, FTAG);
			return (error);
		}
		data = db->db.db_data;
	}

	if (db && txg &&
	    (db->db_blkptr == NULL || db->db_blkptr->blk_birth <= txg)) {
		/*
		 * This can only happen when we are searching up the tree
		 * and these conditions mean that we need to keep climbing.
		 */
		error = ESRCH;
	} else if (lvl == 0) {
		/* level 0 of the meta-dnode: scan dnode_phys_t slots */
		dnode_phys_t *dnp = data;
		span = DNODE_SHIFT;
		ASSERT(dn->dn_type == DMU_OT_DNODE);

		for (i = (*offset >> span) & (blkfill - 1);
		    i >= 0 && i < blkfill; i += inc) {
			if ((dnp[i].dn_type == DMU_OT_NONE) == hole)
				break;
			*offset += (1ULL << span) * inc;
		}
		if (i < 0 || i == blkfill)
			error = ESRCH;
	} else {
		/* indirect level: scan block pointers by fill count */
		blkptr_t *bp = data;
		uint64_t start = *offset;
		span = (lvl - 1) * epbs + dn->dn_datablkshift;
		minfill = 0;
		maxfill = blkfill << ((lvl - 1) * epbs);

		/* a hole is "not completely full"; data is "not empty" */
		if (hole)
			maxfill--;
		else
			minfill++;

		/* work in units of 'span'-sized blocks */
		*offset = *offset >> span;
		for (i = BF64_GET(*offset, 0, epbs);
		    i >= 0 && i < epb; i += inc) {
			if (bp[i].blk_fill >= minfill &&
			    bp[i].blk_fill <= maxfill &&
			    (hole || bp[i].blk_birth > txg))
				break;
			/* don't let a backwards scan wrap below zero */
			if (inc > 0 || *offset > 0)
				*offset += inc;
		}
		*offset = *offset << span;
		if (inc < 0) {
			/* traversing backwards; position offset at the end */
			ASSERT3U(*offset, <=, start);
			*offset = MIN(*offset + (1ULL << span) - 1, start);
		} else if (*offset < start) {
			*offset = start;
		}
		if (i < 0 || i >= epb)
			error = ESRCH;
	}

	if (db)
		dbuf_rele(db, FTAG);

	return (error);
}
01938 
01962 int
01963 dnode_next_offset(dnode_t *dn, int flags, uint64_t *offset,
01964     int minlvl, uint64_t blkfill, uint64_t txg)
01965 {
01966         uint64_t initial_offset = *offset;
01967         int lvl, maxlvl;
01968         int error = 0;
01969 
01970         if (!(flags & DNODE_FIND_HAVELOCK))
01971                 rw_enter(&dn->dn_struct_rwlock, RW_READER);
01972 
01973         if (dn->dn_phys->dn_nlevels == 0) {
01974                 error = ESRCH;
01975                 goto out;
01976         }
01977 
01978         if (dn->dn_datablkshift == 0) {
01979                 if (*offset < dn->dn_datablksz) {
01980                         if (flags & DNODE_FIND_HOLE)
01981                                 *offset = dn->dn_datablksz;
01982                 } else {
01983                         error = ESRCH;
01984                 }
01985                 goto out;
01986         }
01987 
01988         maxlvl = dn->dn_phys->dn_nlevels;
01989 
01990         for (lvl = minlvl; lvl <= maxlvl; lvl++) {
01991                 error = dnode_next_offset_level(dn,
01992                     flags, offset, lvl, blkfill, txg);
01993                 if (error != ESRCH)
01994                         break;
01995         }
01996 
01997         while (error == 0 && --lvl >= minlvl) {
01998                 error = dnode_next_offset_level(dn,
01999                     flags, offset, lvl, blkfill, txg);
02000         }
02001 
02002         if (error == 0 && (flags & DNODE_FIND_BACKWARDS ?
02003             initial_offset < *offset : initial_offset > *offset))
02004                 error = ESRCH;
02005 out:
02006         if (!(flags & DNODE_FIND_HAVELOCK))
02007                 rw_exit(&dn->dn_struct_rwlock);
02008 
02009         return (error);
02010 }
 All Data Structures Files Functions Variables Typedefs Enumerations Enumerator Defines