diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/Doxyfile SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/Doxyfile --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/Doxyfile 1969-12-31 17:00:00.000000000 -0700 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/Doxyfile 2012-11-15 00:35:17.577449714 -0700 @@ -0,0 +1,1679 @@ +# Doxyfile 1.7.3 + +# This file describes the settings to be used by the documentation system +# doxygen (www.doxygen.org) for a project. +# +# All text after a hash (#) is considered a comment and will be ignored. +# The format is: +# TAG = value [value, ...] +# For lists items can also be appended using: +# TAG += value [value, ...] +# Values that contain spaces should be placed between quotes (" "). + +#--------------------------------------------------------------------------- +# Project related configuration options +#--------------------------------------------------------------------------- + +# This tag specifies the encoding used for all characters in the config file +# that follow. The default is UTF-8 which is also the encoding used for all +# text before the first occurrence of this tag. Doxygen uses libiconv (or the +# iconv built into libc) for the transcoding. See +# http://www.gnu.org/software/libiconv for the list of possible encodings. + +DOXYFILE_ENCODING = UTF-8 + +# The PROJECT_NAME tag is a single word (or a sequence of words surrounded +# by quotes) that should identify the project. + +PROJECT_NAME = "FreeBSD ZFS" + +# The PROJECT_NUMBER tag can be used to enter a project or revision number. +# This could be handy for archiving the generated documentation or +# if some version control system is used. + +PROJECT_NUMBER = + +# Using the PROJECT_BRIEF tag one can provide an optional one line description for a project that appears at the top of each page and should give viewer a quick idea about the purpose of the project. 
Keep the description short. + +PROJECT_BRIEF = "The Zettabyte File System" + +# With the PROJECT_LOGO tag one can specify an logo or icon that is +# included in the documentation. The maximum height of the logo should not +# exceed 55 pixels and the maximum width should not exceed 200 pixels. +# Doxygen will copy the logo to the output directory. + +PROJECT_LOGO = + +# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) +# base path where the generated documentation will be put. +# If a relative path is entered, it will be relative to the location +# where doxygen was started. If left blank the current directory will be used. + +OUTPUT_DIRECTORY = + +# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create +# 4096 sub-directories (in 2 levels) under the output directory of each output +# format and will distribute the generated files over these directories. +# Enabling this option can be useful when feeding doxygen a huge amount of +# source files, where putting all generated files in the same directory would +# otherwise cause performance problems for the file system. + +CREATE_SUBDIRS = YES + +# The OUTPUT_LANGUAGE tag is used to specify the language in which all +# documentation generated by doxygen is written. Doxygen will use this +# information to generate all constant output in the proper language. +# The default language is English, other supported languages are: +# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional, +# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German, +# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English +# messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian, +# Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic, Slovak, +# Slovene, Spanish, Swedish, Ukrainian, and Vietnamese. 
+ +OUTPUT_LANGUAGE = English + +# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will +# include brief member descriptions after the members that are listed in +# the file and class documentation (similar to JavaDoc). +# Set to NO to disable this. + +BRIEF_MEMBER_DESC = YES + +# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend +# the brief description of a member or function before the detailed description. +# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the +# brief descriptions will be completely suppressed. + +REPEAT_BRIEF = YES + +# This tag implements a quasi-intelligent brief description abbreviator +# that is used to form the text in various listings. Each string +# in this list, if found as the leading text of the brief description, will be +# stripped from the text and the result after processing the whole list, is +# used as the annotated text. Otherwise, the brief description is used as-is. +# If left blank, the following values are used ("$name" is automatically +# replaced with the name of the entity): "The $name class" "The $name widget" +# "The $name file" "is" "provides" "specifies" "contains" +# "represents" "a" "an" "the" + +ABBREVIATE_BRIEF = + +# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then +# Doxygen will generate a detailed section even if there is only a brief +# description. + +ALWAYS_DETAILED_SEC = NO + +# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all +# inherited members of a class in the documentation of that class as if those +# members were ordinary class members. Constructors, destructors and assignment +# operators of the base classes will not be shown. + +INLINE_INHERITED_MEMB = NO + +# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full +# path before files name in the file list and in the header files. If set +# to NO the shortest path that makes the file name unique will be used. 
+ +FULL_PATH_NAMES = YES + +# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag +# can be used to strip a user-defined part of the path. Stripping is +# only done if one of the specified strings matches the left-hand part of +# the path. The tag can be used to show relative paths in the file list. +# If left blank the directory from which doxygen is run is used as the +# path to strip. + +STRIP_FROM_PATH = + +# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of +# the path mentioned in the documentation of a class, which tells +# the reader which header file to include in order to use a class. +# If left blank only the name of the header file containing the class +# definition is used. Otherwise one should specify the include paths that +# are normally passed to the compiler using the -I flag. + +STRIP_FROM_INC_PATH = + +# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter +# (but less readable) file names. This can be useful if your file system +# doesn't support long names like on DOS, Mac, or CD-ROM. + +SHORT_NAMES = NO + +# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen +# will interpret the first line (until the first dot) of a JavaDoc-style +# comment as the brief description. If set to NO, the JavaDoc +# comments will behave just like regular Qt-style comments +# (thus requiring an explicit @brief command for a brief description.) + +JAVADOC_AUTOBRIEF = YES + +# If the QT_AUTOBRIEF tag is set to YES then Doxygen will +# interpret the first line (until the first dot) of a Qt-style +# comment as the brief description. If set to NO, the comments +# will behave just like regular Qt-style comments (thus requiring +# an explicit \brief command for a brief description.) + +QT_AUTOBRIEF = NO + +# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen +# treat a multi-line C++ special comment block (i.e. a block of //! or /// +# comments) as a brief description. 
This used to be the default behaviour. +# The new default is to treat a multi-line C++ comment block as a detailed +# description. Set this tag to YES if you prefer the old behaviour instead. + +MULTILINE_CPP_IS_BRIEF = NO + +# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented +# member inherits the documentation from any documented member that it +# re-implements. + +INHERIT_DOCS = YES + +# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce +# a new page for each member. If set to NO, the documentation of a member will +# be part of the file/class/namespace that contains it. + +SEPARATE_MEMBER_PAGES = NO + +# The TAB_SIZE tag can be used to set the number of spaces in a tab. +# Doxygen uses this value to replace tabs by spaces in code fragments. + +TAB_SIZE = 8 + +# This tag can be used to specify a number of aliases that acts +# as commands in the documentation. An alias has the form "name=value". +# For example adding "sideeffect=\par Side Effects:\n" will allow you to +# put the command \sideeffect (or @sideeffect) in the documentation, which +# will result in a user-defined paragraph with heading "Side Effects:". +# You can put \n's in the value part of an alias to insert newlines. + +ALIASES = + +# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C +# sources only. Doxygen will then generate output that is more tailored for C. +# For instance, some of the names that are used will be different. The list +# of all members will be omitted, etc. + +OPTIMIZE_OUTPUT_FOR_C = YES + +# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java +# sources only. Doxygen will then generate output that is more tailored for +# Java. For instance, namespaces will be presented as packages, qualified +# scopes will look different, etc. + +OPTIMIZE_OUTPUT_JAVA = NO + +# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran +# sources only. 
Doxygen will then generate output that is more tailored for +# Fortran. + +OPTIMIZE_FOR_FORTRAN = NO + +# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL +# sources. Doxygen will then generate output that is tailored for +# VHDL. + +OPTIMIZE_OUTPUT_VHDL = NO + +# Doxygen selects the parser to use depending on the extension of the files it +# parses. With this tag you can assign which parser to use for a given extension. +# Doxygen has a built-in mapping, but you can override or extend it using this +# tag. The format is ext=language, where ext is a file extension, and language +# is one of the parsers supported by doxygen: IDL, Java, Javascript, CSharp, C, +# C++, D, PHP, Objective-C, Python, Fortran, VHDL, C, C++. For instance to make +# doxygen treat .inc files as Fortran files (default is PHP), and .f files as C +# (default is Fortran), use: inc=Fortran f=C. Note that for custom extensions +# you also need to set FILE_PATTERNS otherwise the files are not read by doxygen. + +EXTENSION_MAPPING = + +# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want +# to include (a tag file for) the STL sources as input, then you should +# set this tag to YES in order to let doxygen match functions declarations and +# definitions whose arguments contain STL classes (e.g. func(std::string); v.s. +# func(std::string) {}). This also makes the inheritance and collaboration +# diagrams that involve STL classes more complete and accurate. + +BUILTIN_STL_SUPPORT = NO + +# If you use Microsoft's C++/CLI language, you should set this option to YES to +# enable parsing support. + +CPP_CLI_SUPPORT = NO + +# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only. +# Doxygen will parse them like normal C++ but will assume all classes use public +# instead of private inheritance when no explicit protection keyword is present. 
+ +SIP_SUPPORT = NO + +# For Microsoft's IDL there are propget and propput attributes to indicate getter +# and setter methods for a property. Setting this option to YES (the default) +# will make doxygen replace the get and set methods by a property in the +# documentation. This will only work if the methods are indeed getting or +# setting a simple type. If this is not the case, or you want to show the +# methods anyway, you should set this option to NO. + +IDL_PROPERTY_SUPPORT = NO + +# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC +# tag is set to YES, then doxygen will reuse the documentation of the first +# member in the group (if any) for the other members of the group. By default +# all members of a group must be documented explicitly. + +DISTRIBUTE_GROUP_DOC = NO + +# Set the SUBGROUPING tag to YES (the default) to allow class member groups of +# the same type (for instance a group of public functions) to be put as a +# subgroup of that type (e.g. under the Public Functions section). Set it to +# NO to prevent subgrouping. Alternatively, this can be done per class using +# the \nosubgrouping command. + +SUBGROUPING = YES + +# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum +# is documented as struct, union, or enum with the name of the typedef. So +# typedef struct TypeS {} TypeT, will appear in the documentation as a struct +# with name TypeT. When disabled the typedef will appear as a member of a file, +# namespace, or class. And the struct will be named TypeS. This can typically +# be useful for C code in case the coding convention dictates that all compound +# types are typedef'ed and only the typedef is referenced, never the tag name. + +TYPEDEF_HIDES_STRUCT = NO + +# The SYMBOL_CACHE_SIZE determines the size of the internal cache use to +# determine which symbols to keep in memory and which to flush to disk. +# When the cache is full, less often used symbols will be written to disk. 
+# For small to medium size projects (<1000 input files) the default value is +# probably good enough. For larger projects a too small cache size can cause +# doxygen to be busy swapping symbols to and from disk most of the time +# causing a significant performance penalty. +# If the system has enough physical memory increasing the cache will improve the +# performance by keeping more symbols in memory. Note that the value works on +# a logarithmic scale so increasing the size by one will roughly double the +# memory usage. The cache size is given by this formula: +# 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0, +# corresponding to a cache size of 2^16 = 65536 symbols + +SYMBOL_CACHE_SIZE = 0 + +#--------------------------------------------------------------------------- +# Build related configuration options +#--------------------------------------------------------------------------- + +# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in +# documentation are documented, even if no documentation was available. +# Private class members and static file members will be hidden unless +# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES + +EXTRACT_ALL = YES + +# If the EXTRACT_PRIVATE tag is set to YES all private members of a class +# will be included in the documentation. + +EXTRACT_PRIVATE = NO + +# If the EXTRACT_STATIC tag is set to YES all static members of a file +# will be included in the documentation. + +EXTRACT_STATIC = YES + +# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) +# defined locally in source files will be included in the documentation. +# If set to NO only classes defined in header files are included. + +EXTRACT_LOCAL_CLASSES = YES + +# This flag is only useful for Objective-C code. When set to YES local +# methods, which are defined in the implementation section but not in +# the interface are included in the documentation. 
+# If set to NO (the default) only methods in the interface are included. + +EXTRACT_LOCAL_METHODS = NO + +# If this flag is set to YES, the members of anonymous namespaces will be +# extracted and appear in the documentation as a namespace called +# 'anonymous_namespace{file}', where file will be replaced with the base +# name of the file that contains the anonymous namespace. By default +# anonymous namespaces are hidden. + +EXTRACT_ANON_NSPACES = NO + +# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all +# undocumented members of documented classes, files or namespaces. +# If set to NO (the default) these members will be included in the +# various overviews, but no documentation section is generated. +# This option has no effect if EXTRACT_ALL is enabled. + +HIDE_UNDOC_MEMBERS = NO + +# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all +# undocumented classes that are normally visible in the class hierarchy. +# If set to NO (the default) these classes will be included in the various +# overviews. This option has no effect if EXTRACT_ALL is enabled. + +HIDE_UNDOC_CLASSES = NO + +# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all +# friend (class|struct|union) declarations. +# If set to NO (the default) these declarations will be included in the +# documentation. + +HIDE_FRIEND_COMPOUNDS = NO + +# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any +# documentation blocks found inside the body of a function. +# If set to NO (the default) these blocks will be appended to the +# function's detailed documentation block. + +HIDE_IN_BODY_DOCS = NO + +# The INTERNAL_DOCS tag determines if documentation +# that is typed after a \internal command is included. If the tag is set +# to NO (the default) then the documentation will be excluded. +# Set it to YES to include the internal documentation. 
+ +INTERNAL_DOCS = NO + +# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate +# file names in lower-case letters. If set to YES upper-case letters are also +# allowed. This is useful if you have classes or files whose names only differ +# in case and if your file system supports case sensitive file names. Windows +# and Mac users are advised to set this option to NO. + +CASE_SENSE_NAMES = YES + +# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen +# will show members with their full class and namespace scopes in the +# documentation. If set to YES the scope will be hidden. + +HIDE_SCOPE_NAMES = NO + +# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen +# will put a list of the files that are included by a file in the documentation +# of that file. + +SHOW_INCLUDE_FILES = YES + +# If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen +# will list include files with double quotes in the documentation +# rather than with sharp brackets. + +FORCE_LOCAL_INCLUDES = NO + +# If the INLINE_INFO tag is set to YES (the default) then a tag [inline] +# is inserted in the documentation for inline members. + +INLINE_INFO = YES + +# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen +# will sort the (detailed) documentation of file and class members +# alphabetically by member name. If set to NO the members will appear in +# declaration order. + +SORT_MEMBER_DOCS = YES + +# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the +# brief documentation of file, namespace and class members alphabetically +# by member name. If set to NO (the default) the members will appear in +# declaration order. + +SORT_BRIEF_DOCS = NO + +# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen +# will sort the (brief and detailed) documentation of class members so that +# constructors and destructors are listed first. 
If set to NO (the default) +# the constructors will appear in the respective orders defined by +# SORT_MEMBER_DOCS and SORT_BRIEF_DOCS. +# This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO +# and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO. + +SORT_MEMBERS_CTORS_1ST = NO + +# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the +# hierarchy of group names into alphabetical order. If set to NO (the default) +# the group names will appear in their defined order. + +SORT_GROUP_NAMES = NO + +# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be +# sorted by fully-qualified names, including namespaces. If set to +# NO (the default), the class list will be sorted only by class name, +# not including the namespace part. +# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. +# Note: This option applies only to the class list, not to the +# alphabetical list. + +SORT_BY_SCOPE_NAME = NO + +# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper type resolution of all parameters of a function it will reject a +# match between the prototype and the implementation of a member function even if there is only one candidate or it is obvious which candidate to choose by doing a simple string match. By disabling STRICT_PROTO_MATCHING doxygen +# will still accept a match between prototype and implementation in such cases. + +STRICT_PROTO_MATCHING = NO + +# The GENERATE_TODOLIST tag can be used to enable (YES) or +# disable (NO) the todo list. This list is created by putting \todo +# commands in the documentation. + +GENERATE_TODOLIST = YES + +# The GENERATE_TESTLIST tag can be used to enable (YES) or +# disable (NO) the test list. This list is created by putting \test +# commands in the documentation. + +GENERATE_TESTLIST = YES + +# The GENERATE_BUGLIST tag can be used to enable (YES) or +# disable (NO) the bug list. 
This list is created by putting \bug +# commands in the documentation. + +GENERATE_BUGLIST = YES + +# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or +# disable (NO) the deprecated list. This list is created by putting +# \deprecated commands in the documentation. + +GENERATE_DEPRECATEDLIST= YES + +# The ENABLED_SECTIONS tag can be used to enable conditional +# documentation sections, marked by \if sectionname ... \endif. + +ENABLED_SECTIONS = + +# The MAX_INITIALIZER_LINES tag determines the maximum number of lines +# the initial value of a variable or macro consists of for it to appear in +# the documentation. If the initializer consists of more lines than specified +# here it will be hidden. Use a value of 0 to hide initializers completely. +# The appearance of the initializer of individual variables and macros in the +# documentation can be controlled using \showinitializer or \hideinitializer +# command in the documentation regardless of this setting. + +MAX_INITIALIZER_LINES = 30 + +# Set the SHOW_USED_FILES tag to NO to disable the list of files generated +# at the bottom of the documentation of classes and structs. If set to YES the +# list will mention the files that were used to generate the documentation. + +SHOW_USED_FILES = YES + +# If the sources in your project are distributed over multiple directories +# then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy +# in the documentation. The default is NO. + +SHOW_DIRECTORIES = NO + +# Set the SHOW_FILES tag to NO to disable the generation of the Files page. +# This will remove the Files entry from the Quick Index and from the +# Folder Tree View (if specified). The default is YES. + +SHOW_FILES = YES + +# Set the SHOW_NAMESPACES tag to NO to disable the generation of the +# Namespaces page. +# This will remove the Namespaces entry from the Quick Index +# and from the Folder Tree View (if specified). The default is YES. 
+ +SHOW_NAMESPACES = YES + +# The FILE_VERSION_FILTER tag can be used to specify a program or script that +# doxygen should invoke to get the current version for each file (typically from +# the version control system). Doxygen will invoke the program by executing (via +# popen()) the command <program> <input-file>, where <program> is the value of +# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file +# provided by doxygen. Whatever the program writes to standard output +# is used as the file version. See the manual for examples. + +FILE_VERSION_FILTER = + +# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed +# by doxygen. The layout file controls the global structure of the generated +# output files in an output format independent way. To create the layout file +# that represents doxygen's defaults, run doxygen with the -l option. +# You can optionally specify a file name after the option, if omitted +# DoxygenLayout.xml will be used as the name of the layout file. + +LAYOUT_FILE = + +#--------------------------------------------------------------------------- +# configuration options related to warning and progress messages +#--------------------------------------------------------------------------- + +# The QUIET tag can be used to turn on/off the messages that are generated +# by doxygen. Possible values are YES and NO. If left blank NO is used. + +QUIET = NO + +# The WARNINGS tag can be used to turn on/off the warning messages that are +# generated by doxygen. Possible values are YES and NO. If left blank +# NO is used. + +WARNINGS = YES + +# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings +# for undocumented members. If EXTRACT_ALL is set to YES then this flag will +# automatically be disabled. 
+ +WARN_IF_UNDOCUMENTED = YES + +# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for +# potential errors in the documentation, such as not documenting some +# parameters in a documented function, or documenting parameters that +# don't exist or using markup commands wrongly. + +WARN_IF_DOC_ERROR = YES + +# The WARN_NO_PARAMDOC option can be enabled to get warnings for +# functions that are documented, but have no documentation for their parameters +# or return value. If set to NO (the default) doxygen will only warn about +# wrong or incomplete parameter documentation, but not about the absence of +# documentation. + +WARN_NO_PARAMDOC = NO + +# The WARN_FORMAT tag determines the format of the warning messages that +# doxygen can produce. The string should contain the $file, $line, and $text +# tags, which will be replaced by the file and line number from which the +# warning originated and the warning text. Optionally the format may contain +# $version, which will be replaced by the version of the file (if it could +# be obtained via FILE_VERSION_FILTER) + +WARN_FORMAT = "$file:$line: $text" + +# The WARN_LOGFILE tag can be used to specify a file to which warning +# and error messages should be written. If left blank the output is written +# to stderr. + +WARN_LOGFILE = + +#--------------------------------------------------------------------------- +# configuration options related to the input files +#--------------------------------------------------------------------------- + +# The INPUT tag can be used to specify the files and/or directories that contain +# documented source files. You may enter file names like "myfile.cpp" or +# directories like "/usr/src/myproject". Separate the files or directories +# with spaces. + +INPUT = ./ sys/ + +# This tag can be used to specify the character encoding of the source files +# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is +# also the default input encoding. 
Doxygen uses libiconv (or the iconv built +# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for +# the list of possible encodings. + +INPUT_ENCODING = UTF-8 + +# If the value of the INPUT tag contains directories, you can use the +# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp +# and *.h) to filter out the source-files in the directories. If left +# blank the following patterns are tested: +# *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh +# *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py +# *.f90 *.f *.for *.vhd *.vhdl + +FILE_PATTERNS = + +# The RECURSIVE tag can be used to turn specify whether or not subdirectories +# should be searched for input files as well. Possible values are YES and NO. +# If left blank NO is used. + +RECURSIVE = NO + +# The EXCLUDE tag can be used to specify files and/or directories that should +# excluded from the INPUT source files. This way you can easily exclude a +# subdirectory from a directory tree whose root is specified with the INPUT tag. + +EXCLUDE = + +# The EXCLUDE_SYMLINKS tag can be used select whether or not files or +# directories that are symbolic links (a Unix file system feature) are excluded +# from the input. + +EXCLUDE_SYMLINKS = NO + +# If the value of the INPUT tag contains directories, you can use the +# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude +# certain files from those directories. Note that the wildcards are matched +# against the file with absolute path, so to exclude all test directories +# for example use the pattern */test/* + +EXCLUDE_PATTERNS = + +# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names +# (namespaces, classes, functions, etc.) that should be excluded from the +# output. The symbol name can be a fully qualified name, a word, or if the +# wildcard * is used, a substring. 
Examples: ANamespace, AClass, +# AClass::ANamespace, ANamespace::*Test + +EXCLUDE_SYMBOLS = + +# The EXAMPLE_PATH tag can be used to specify one or more files or +# directories that contain example code fragments that are included (see +# the \include command). + +EXAMPLE_PATH = + +# If the value of the EXAMPLE_PATH tag contains directories, you can use the +# EXAMPLE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp +# and *.h) to filter out the source-files in the directories. If left +# blank all files are included. + +EXAMPLE_PATTERNS = + +# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be +# searched for input files to be used with the \include or \dontinclude +# commands irrespective of the value of the RECURSIVE tag. +# Possible values are YES and NO. If left blank NO is used. + +EXAMPLE_RECURSIVE = NO + +# The IMAGE_PATH tag can be used to specify one or more files or +# directories that contain images that are included in the documentation (see +# the \image command). + +IMAGE_PATH = + +# The INPUT_FILTER tag can be used to specify a program that doxygen should +# invoke to filter for each input file. Doxygen will invoke the filter program +# by executing (via popen()) the command <filter> <input-file>, where <filter> +# is the value of the INPUT_FILTER tag, and <input-file> is the name of an +# input file. Doxygen will then use the output that the filter program writes +# to standard output. +# If FILTER_PATTERNS is specified, this tag will be +# ignored. + +INPUT_FILTER = + +# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern +# basis. +# Doxygen will compare the file name with each pattern and apply the +# filter if there is a match. +# The filters are a list of the form: +# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further +# info on how filters are used. If FILTER_PATTERNS is empty or if +# none of the patterns match the file name, INPUT_FILTER is applied. 
+ +FILTER_PATTERNS = + +# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using +# INPUT_FILTER) will be used to filter the input files when producing source +# files to browse (i.e. when SOURCE_BROWSER is set to YES). + +FILTER_SOURCE_FILES = NO + +# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file +# pattern. A pattern will override the setting for FILTER_PATTERN (if any) +# and it is also possible to disable source filtering for a specific pattern +# using *.ext= (so without naming a filter). This option only has effect when +# FILTER_SOURCE_FILES is enabled. + +FILTER_SOURCE_PATTERNS = + +#--------------------------------------------------------------------------- +# configuration options related to source browsing +#--------------------------------------------------------------------------- + +# If the SOURCE_BROWSER tag is set to YES then a list of source files will +# be generated. Documented entities will be cross-referenced with these sources. +# Note: To get rid of all source code in the generated output, make sure also +# VERBATIM_HEADERS is set to NO. + +SOURCE_BROWSER = YES + +# Setting the INLINE_SOURCES tag to YES will include the body +# of functions and classes directly in the documentation. + +INLINE_SOURCES = NO + +# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct +# doxygen to hide any special comment blocks from generated source code +# fragments. Normal C and C++ comments will always remain visible. + +STRIP_CODE_COMMENTS = YES + +# If the REFERENCED_BY_RELATION tag is set to YES +# then for each documented function all documented +# functions referencing it will be listed. + +REFERENCED_BY_RELATION = NO + +# If the REFERENCES_RELATION tag is set to YES +# then for each documented function all documented entities +# called/used by that function will be listed. 
+ +REFERENCES_RELATION = NO + +# If the REFERENCES_LINK_SOURCE tag is set to YES (the default) +# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from +# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will +# link to the source code. +# Otherwise they will link to the documentation. + +REFERENCES_LINK_SOURCE = YES + +# If the USE_HTAGS tag is set to YES then the references to source code +# will point to the HTML generated by the htags(1) tool instead of doxygen +# built-in source browser. The htags tool is part of GNU's global source +# tagging system (see http://www.gnu.org/software/global/global.html). You +# will need version 4.8.6 or higher. + +USE_HTAGS = NO + +# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen +# will generate a verbatim copy of the header file for each class for +# which an include is specified. Set to NO to disable this. + +VERBATIM_HEADERS = YES + +#--------------------------------------------------------------------------- +# configuration options related to the alphabetical class index +#--------------------------------------------------------------------------- + +# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index +# of all compounds will be generated. Enable this if the project +# contains a lot of classes, structs, unions or interfaces. + +ALPHABETICAL_INDEX = YES + +# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then +# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns +# in which this list will be split (can be a number in the range [1..20]) + +COLS_IN_ALPHA_INDEX = 5 + +# In case all classes in a project start with a common prefix, all +# classes will be put under the same header in the alphabetical index. +# The IGNORE_PREFIX tag can be used to specify one or more prefixes that +# should be ignored while generating the index headers. 
+ +IGNORE_PREFIX = + +#--------------------------------------------------------------------------- +# configuration options related to the HTML output +#--------------------------------------------------------------------------- + +# If the GENERATE_HTML tag is set to YES (the default) Doxygen will +# generate HTML output. + +GENERATE_HTML = YES + +# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `html' will be used as the default path. + +HTML_OUTPUT = html + +# The HTML_FILE_EXTENSION tag can be used to specify the file extension for +# each generated HTML page (for example: .htm,.php,.asp). If it is left blank +# doxygen will generate files with .html extension. + +HTML_FILE_EXTENSION = .html + +# The HTML_HEADER tag can be used to specify a personal HTML header for +# each generated HTML page. If it is left blank doxygen will generate a +# standard header. + +HTML_HEADER = + +# The HTML_FOOTER tag can be used to specify a personal HTML footer for +# each generated HTML page. If it is left blank doxygen will generate a +# standard footer. + +HTML_FOOTER = + +# The HTML_STYLESHEET tag can be used to specify a user-defined cascading +# style sheet that is used by each HTML page. It can be used to +# fine-tune the look of the HTML output. If the tag is left blank doxygen +# will generate a default style sheet. Note that doxygen will try to copy +# the style sheet file to the HTML output directory, so don't put your own +# stylesheet in the HTML output directory as well, or it will be erased! + +HTML_STYLESHEET = + +# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. +# Doxygen will adjust the colors in the stylesheet and background images +# according to this color. Hue is specified as an angle on a colorwheel, +# see http://en.wikipedia.org/wiki/Hue for more information. 
+# For instance the value 0 represents red, 60 is yellow, 120 is green, +# 180 is cyan, 240 is blue, 300 purple, and 360 is red again. +# The allowed range is 0 to 359. + +HTML_COLORSTYLE_HUE = 220 + +# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of +# the colors in the HTML output. For a value of 0 the output will use +# grayscales only. A value of 255 will produce the most vivid colors. + +HTML_COLORSTYLE_SAT = 100 + +# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to +# the luminance component of the colors in the HTML output. Values below +# 100 gradually make the output lighter, whereas values above 100 make +# the output darker. The value divided by 100 is the actual gamma applied, +# so 80 represents a gamma of 0.8, The value 220 represents a gamma of 2.2, +# and 100 does not change the gamma. + +HTML_COLORSTYLE_GAMMA = 80 + +# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML +# page will contain the date and time when the page was generated. Setting +# this to NO can help when comparing the output of multiple runs. + +HTML_TIMESTAMP = YES + +# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes, +# files or namespaces will be aligned in HTML using tables. If set to +# NO a bullet list will be used. + +HTML_ALIGN_MEMBERS = YES + +# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML +# documentation will contain sections that can be hidden and shown after the +# page has loaded. For this to work a browser that supports +# JavaScript and DHTML is required (for instance Mozilla 1.0+, Firefox +# Netscape 6.0+, Internet explorer 5.0+, Konqueror, or Safari). + +HTML_DYNAMIC_SECTIONS = NO + +# If the GENERATE_DOCSET tag is set to YES, additional index files +# will be generated that can be used as input for Apple's Xcode 3 +# integrated development environment, introduced with OSX 10.5 (Leopard). 
+# To create a documentation set, doxygen will generate a Makefile in the +# HTML output directory. Running make will produce the docset in that +# directory and running "make install" will install the docset in +# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find +# it at startup. +# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html +# for more information. + +GENERATE_DOCSET = NO + +# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the +# feed. A documentation feed provides an umbrella under which multiple +# documentation sets from a single provider (such as a company or product suite) +# can be grouped. + +DOCSET_FEEDNAME = "Doxygen generated docs" + +# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that +# should uniquely identify the documentation set bundle. This should be a +# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen +# will append .docset to the name. + +DOCSET_BUNDLE_ID = org.doxygen.Project + +# When GENERATE_PUBLISHER_ID tag specifies a string that should uniquely identify +# the documentation publisher. This should be a reverse domain-name style +# string, e.g. com.mycompany.MyDocSet.documentation. + +DOCSET_PUBLISHER_ID = org.doxygen.Publisher + +# The GENERATE_PUBLISHER_NAME tag identifies the documentation publisher. + +DOCSET_PUBLISHER_NAME = Publisher + +# If the GENERATE_HTMLHELP tag is set to YES, additional index files +# will be generated that can be used as input for tools like the +# Microsoft HTML help workshop to generate a compiled HTML help file (.chm) +# of the generated HTML documentation. + +GENERATE_HTMLHELP = NO + +# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can +# be used to specify the file name of the resulting .chm file. You +# can add a path in front of the file if the result should not be +# written to the html output directory. 
+ +CHM_FILE = + +# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can +# be used to specify the location (absolute path including file name) of +# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run +# the HTML help compiler on the generated index.hhp. + +HHC_LOCATION = + +# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag +# controls if a separate .chi index file is generated (YES) or that +# it should be included in the master .chm file (NO). + +GENERATE_CHI = NO + +# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING +# is used to encode HtmlHelp index (hhk), content (hhc) and project file +# content. + +CHM_INDEX_ENCODING = + +# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag +# controls whether a binary table of contents is generated (YES) or a +# normal table of contents (NO) in the .chm file. + +BINARY_TOC = NO + +# The TOC_EXPAND flag can be set to YES to add extra items for group members +# to the contents of the HTML help documentation and to the tree view. + +TOC_EXPAND = NO + +# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and +# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated +# that can be used as input for Qt's qhelpgenerator to generate a +# Qt Compressed Help (.qch) of the generated HTML documentation. + +GENERATE_QHP = NO + +# If the QHG_LOCATION tag is specified, the QCH_FILE tag can +# be used to specify the file name of the resulting .qch file. +# The path specified is relative to the HTML output folder. + +QCH_FILE = + +# The QHP_NAMESPACE tag specifies the namespace to use when generating +# Qt Help Project output. For more information please see +# http://doc.trolltech.com/qthelpproject.html#namespace + +QHP_NAMESPACE = org.doxygen.Project + +# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating +# Qt Help Project output. 
For more information please see
+# http://doc.trolltech.com/qthelpproject.html#virtual-folders
+
+QHP_VIRTUAL_FOLDER = doc
+
+# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to
+# add. For more information please see
+# http://doc.trolltech.com/qthelpproject.html#custom-filters
+
+QHP_CUST_FILTER_NAME =
+
+# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
+# custom filter to add. For more information please see
+#
+# Qt Help Project / Custom Filters.
+
+QHP_CUST_FILTER_ATTRS =
+
+# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
+# project's
+# filter section matches.
+#
+# Qt Help Project / Filter Attributes.
+
+QHP_SECT_FILTER_ATTRS =
+
+# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can
+# be used to specify the location of Qt's qhelpgenerator.
+# If non-empty doxygen will try to run qhelpgenerator on the generated
+# .qhp file.
+
+QHG_LOCATION =
+
+# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files
+# will be generated, which together with the HTML files, form an Eclipse help
+# plugin. To install this plugin and make it available under the help contents
+# menu in Eclipse, the contents of the directory containing the HTML and XML
+# files needs to be copied into the plugins directory of eclipse. The name of
+# the directory within the plugins directory should be the same as
+# the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before
+# the help appears.
+
+GENERATE_ECLIPSEHELP = NO
+
+# A unique identifier for the eclipse help plugin. When installing the plugin
+# the directory name containing the HTML and XML files should also have
+# this name.
+
+ECLIPSE_DOC_ID = org.doxygen.Project
+
+# The DISABLE_INDEX tag can be used to turn on/off the condensed index at
+# top of each HTML page. The value NO (the default) enables the index and
+# the value YES disables it. 
+ +DISABLE_INDEX = NO + +# This tag can be used to set the number of enum values (range [0,1..20]) +# that doxygen will group on one line in the generated HTML documentation. +# Note that a value of 0 will completely suppress the enum values from appearing in the overview section. + +ENUM_VALUES_PER_LINE = 4 + +# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index +# structure should be generated to display hierarchical information. +# If the tag value is set to YES, a side panel will be generated +# containing a tree-like index structure (just like the one that +# is generated for HTML Help). For this to work a browser that supports +# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser). +# Windows users are probably better off using the HTML help feature. + +GENERATE_TREEVIEW = NO + +# By enabling USE_INLINE_TREES, doxygen will generate the Groups, Directories, +# and Class Hierarchy pages using a tree view instead of an ordered list. + +USE_INLINE_TREES = NO + +# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be +# used to set the initial width (in pixels) of the frame in which the tree +# is shown. + +TREEVIEW_WIDTH = 250 + +# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open +# links to external symbols imported via tag files in a separate window. + +EXT_LINKS_IN_WINDOW = NO + +# Use this tag to change the font size of Latex formulas included +# as images in the HTML documentation. The default is 10. Note that +# when you change the font size after a successful doxygen run you need +# to manually remove any form_*.png images from the HTML output directory +# to force them to be regenerated. + +FORMULA_FONTSIZE = 10 + +# Use the FORMULA_TRANPARENT tag to determine whether or not the images +# generated for formulas are transparent PNGs. Transparent PNGs are +# not supported properly for IE 6.0, but are supported on all modern browsers. 
+# Note that when changing this option you need to delete any form_*.png files +# in the HTML output before the changes have effect. + +FORMULA_TRANSPARENT = YES + +# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax +# (see http://www.mathjax.org) which uses client side Javascript for the +# rendering instead of using prerendered bitmaps. Use this if you do not +# have LaTeX installed or if you want to formulas look prettier in the HTML +# output. When enabled you also need to install MathJax separately and +# configure the path to it using the MATHJAX_RELPATH option. + +USE_MATHJAX = NO + +# When MathJax is enabled you need to specify the location relative to the +# HTML output directory using the MATHJAX_RELPATH option. The destination +# directory should contain the MathJax.js script. For instance, if the mathjax +# directory is located at the same level as the HTML output directory, then +# MATHJAX_RELPATH should be ../mathjax. The default value points to the mathjax.org site, so you can quickly see the result without installing +# MathJax, but it is strongly recommended to install a local copy of MathJax +# before deployment. + +MATHJAX_RELPATH = http://www.mathjax.org/mathjax + +# When the SEARCHENGINE tag is enabled doxygen will generate a search box +# for the HTML output. The underlying search engine uses javascript +# and DHTML and should work on any modern browser. Note that when using +# HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets +# (GENERATE_DOCSET) there is already a search function so this one should +# typically be disabled. For large projects the javascript based search engine +# can be slow, then enabling SERVER_BASED_SEARCH may provide a better solution. + +SEARCHENGINE = YES + +# When the SERVER_BASED_SEARCH tag is enabled the search engine will be +# implemented using a PHP enabled web server instead of at the web client +# using Javascript. 
Doxygen will generate the search PHP script and index +# file to put on the web server. The advantage of the server +# based approach is that it scales better to large projects and allows +# full text search. The disadvantages are that it is more difficult to setup +# and does not have live searching capabilities. + +SERVER_BASED_SEARCH = NO + +#--------------------------------------------------------------------------- +# configuration options related to the LaTeX output +#--------------------------------------------------------------------------- + +# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will +# generate Latex output. + +GENERATE_LATEX = NO + +# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `latex' will be used as the default path. + +LATEX_OUTPUT = latex + +# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be +# invoked. If left blank `latex' will be used as the default command name. +# Note that when enabling USE_PDFLATEX this option is only used for +# generating bitmaps for formulas in the HTML output, but not in the +# Makefile that is written to the output directory. + +LATEX_CMD_NAME = latex + +# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to +# generate index for LaTeX. If left blank `makeindex' will be used as the +# default command name. + +MAKEINDEX_CMD_NAME = makeindex + +# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact +# LaTeX documents. This may be useful for small projects and may help to +# save some trees in general. + +COMPACT_LATEX = NO + +# The PAPER_TYPE tag can be used to set the paper type that is used +# by the printer. Possible values are: a4, letter, legal and +# executive. If left blank a4wide will be used. 
+
+PAPER_TYPE = a4
+
+# The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX
+# packages that should be included in the LaTeX output.
+
+EXTRA_PACKAGES =
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
+# the generated latex document. The header should contain everything until
+# the first chapter. If it is left blank doxygen will generate a
+# standard header. Notice: only use this tag if you know what you are doing!
+
+LATEX_HEADER =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
+# is prepared for conversion to pdf (using ps2pdf). The pdf file will
+# contain links (just like the HTML output) instead of page references.
+# This makes the output suitable for online browsing using a pdf viewer.
+
+PDF_HYPERLINKS = YES
+
+# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
+# plain latex in the generated Makefile. Set this option to YES to get a
+# higher quality PDF documentation.
+
+USE_PDFLATEX = YES
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode
+# command to the generated LaTeX files. This will instruct LaTeX to keep
+# running if errors occur, instead of asking the user for help.
+# This option is also used when generating formulas in HTML.
+
+LATEX_BATCHMODE = NO
+
+# If LATEX_HIDE_INDICES is set to YES then doxygen will not
+# include the index chapters (such as File Index, Compound Index, etc.)
+# in the output.
+
+LATEX_HIDE_INDICES = NO
+
+# If LATEX_SOURCE_CODE is set to YES then doxygen will include
+# source code with syntax highlighting in the LaTeX output.
+# Note that which sources are shown also depends on other settings
+# such as SOURCE_BROWSER. 
+ +LATEX_SOURCE_CODE = NO + +#--------------------------------------------------------------------------- +# configuration options related to the RTF output +#--------------------------------------------------------------------------- + +# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output +# The RTF output is optimized for Word 97 and may not look very pretty with +# other RTF readers or editors. + +GENERATE_RTF = NO + +# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `rtf' will be used as the default path. + +RTF_OUTPUT = rtf + +# If the COMPACT_RTF tag is set to YES Doxygen generates more compact +# RTF documents. This may be useful for small projects and may help to +# save some trees in general. + +COMPACT_RTF = NO + +# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated +# will contain hyperlink fields. The RTF file will +# contain links (just like the HTML output) instead of page references. +# This makes the output suitable for online browsing using WORD or other +# programs which support those fields. +# Note: wordpad (write) and others do not support links. + +RTF_HYPERLINKS = NO + +# Load stylesheet definitions from file. Syntax is similar to doxygen's +# config file, i.e. a series of assignments. You only have to provide +# replacements, missing definitions are set to their default value. + +RTF_STYLESHEET_FILE = + +# Set optional variables used in the generation of an rtf document. +# Syntax is similar to doxygen's config file. 
+ +RTF_EXTENSIONS_FILE = + +#--------------------------------------------------------------------------- +# configuration options related to the man page output +#--------------------------------------------------------------------------- + +# If the GENERATE_MAN tag is set to YES (the default) Doxygen will +# generate man pages + +GENERATE_MAN = NO + +# The MAN_OUTPUT tag is used to specify where the man pages will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `man' will be used as the default path. + +MAN_OUTPUT = man + +# The MAN_EXTENSION tag determines the extension that is added to +# the generated man pages (default is the subroutine's section .3) + +MAN_EXTENSION = .3 + +# If the MAN_LINKS tag is set to YES and Doxygen generates man output, +# then it will generate one additional man file for each entity +# documented in the real man page(s). These additional files +# only source the real man page, but without them the man command +# would be unable to find the correct page. The default is NO. + +MAN_LINKS = NO + +#--------------------------------------------------------------------------- +# configuration options related to the XML output +#--------------------------------------------------------------------------- + +# If the GENERATE_XML tag is set to YES Doxygen will +# generate an XML file that captures the structure of +# the code including all documentation. + +GENERATE_XML = NO + +# The XML_OUTPUT tag is used to specify where the XML pages will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `xml' will be used as the default path. + +XML_OUTPUT = xml + +# The XML_SCHEMA tag can be used to specify an XML schema, +# which can be used by a validating XML parser to check the +# syntax of the XML files. 
+ +XML_SCHEMA = + +# The XML_DTD tag can be used to specify an XML DTD, +# which can be used by a validating XML parser to check the +# syntax of the XML files. + +XML_DTD = + +# If the XML_PROGRAMLISTING tag is set to YES Doxygen will +# dump the program listings (including syntax highlighting +# and cross-referencing information) to the XML output. Note that +# enabling this will significantly increase the size of the XML output. + +XML_PROGRAMLISTING = YES + +#--------------------------------------------------------------------------- +# configuration options for the AutoGen Definitions output +#--------------------------------------------------------------------------- + +# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will +# generate an AutoGen Definitions (see autogen.sf.net) file +# that captures the structure of the code including all +# documentation. Note that this feature is still experimental +# and incomplete at the moment. + +GENERATE_AUTOGEN_DEF = NO + +#--------------------------------------------------------------------------- +# configuration options related to the Perl module output +#--------------------------------------------------------------------------- + +# If the GENERATE_PERLMOD tag is set to YES Doxygen will +# generate a Perl module file that captures the structure of +# the code including all documentation. Note that this +# feature is still experimental and incomplete at the +# moment. + +GENERATE_PERLMOD = NO + +# If the PERLMOD_LATEX tag is set to YES Doxygen will generate +# the necessary Makefile rules, Perl scripts and LaTeX code to be able +# to generate PDF and DVI output from the Perl module output. + +PERLMOD_LATEX = NO + +# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be +# nicely formatted so it can be parsed by a human reader. +# This is useful +# if you want to understand what is going on. 
+# On the other hand, if this
+# tag is set to NO the size of the Perl module output will be much smaller
+# and Perl will parse it just the same.
+
+PERLMOD_PRETTY = YES
+
+# The names of the make variables in the generated doxyrules.make file
+# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX.
+# This is useful so different doxyrules.make files included by the same
+# Makefile don't overwrite each other's variables.
+
+PERLMOD_MAKEVAR_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
+# evaluate all C-preprocessor directives found in the sources and include
+# files.
+
+ENABLE_PREPROCESSING = YES
+
+# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
+# names in the source code. If set to NO (the default) only conditional
+# compilation will be performed. Macro expansion can be done in a controlled
+# way by setting EXPAND_ONLY_PREDEF to YES.
+
+MACRO_EXPANSION = NO
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
+# then the macro expansion is limited to the macros specified with the
+# PREDEFINED and EXPAND_AS_DEFINED tags.
+
+EXPAND_ONLY_PREDEF = NO
+
+# If the SEARCH_INCLUDES tag is set to YES (the default) the include files
+# in the INCLUDE_PATH (see below) will be searched if a #include is found.
+
+SEARCH_INCLUDES = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by
+# the preprocessor.
+
+INCLUDE_PATH = ../../../../../../../geom ../../../../../../../sys
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
+# patterns (like *.h and *.hpp) to filter out the header-files in the
+# directories. 
If left blank, the patterns specified with FILE_PATTERNS will +# be used. + +INCLUDE_FILE_PATTERNS = + +# The PREDEFINED tag can be used to specify one or more macro names that +# are defined before the preprocessor is started (similar to the -D option of +# gcc). The argument of the tag is a list of macros of the form: name +# or name=definition (no spaces). If the definition and the = are +# omitted =1 is assumed. To prevent a macro definition from being +# undefined via #undef or recursively expanded use the := operator +# instead of the = operator. + +PREDEFINED = DOXYGEN _KERNEL + +# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then +# this tag can be used to specify a list of macro names that should be expanded. +# The macro definition that is found in the sources will be used. +# Use the PREDEFINED tag if you want to use a different macro definition that overrules the definition found in the source code. + +EXPAND_AS_DEFINED = + +# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then +# doxygen's preprocessor will remove all references to function-like macros +# that are alone on a line, have an all uppercase name, and do not end with a +# semicolon, because these will confuse the parser if not removed. + +SKIP_FUNCTION_MACROS = YES + +#--------------------------------------------------------------------------- +# Configuration::additions related to external references +#--------------------------------------------------------------------------- + +# The TAGFILES option can be used to specify one or more tagfiles. +# Optionally an initial location of the external documentation +# can be added for each tagfile. The format of a tag file without +# this location is as follows: +# +# TAGFILES = file1 file2 ... +# Adding location for the tag files is done as follows: +# +# TAGFILES = file1=loc1 "file2 = loc2" ... +# where "loc1" and "loc2" can be relative or absolute paths or +# URLs. 
If a location is present for each tag, the installdox tool +# does not have to be run to correct the links. +# Note that each tag file must have a unique name +# (where the name does NOT include the path) +# If a tag file is not located in the directory in which doxygen +# is run, you must also specify the path to the tagfile here. + +TAGFILES = + +# When a file name is specified after GENERATE_TAGFILE, doxygen will create +# a tag file that is based on the input files it reads. + +GENERATE_TAGFILE = + +# If the ALLEXTERNALS tag is set to YES all external classes will be listed +# in the class index. If set to NO only the inherited external classes +# will be listed. + +ALLEXTERNALS = NO + +# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed +# in the modules index. If set to NO, only the current project's groups will +# be listed. + +EXTERNAL_GROUPS = YES + +# The PERL_PATH should be the absolute path and name of the perl script +# interpreter (i.e. the result of `which perl'). + +PERL_PATH = /usr/bin/perl + +#--------------------------------------------------------------------------- +# Configuration options related to the dot tool +#--------------------------------------------------------------------------- + +# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will +# generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base +# or super classes. Setting the tag to NO turns the diagrams off. Note that +# this option also works with HAVE_DOT disabled, but it is recommended to +# install and use dot, since it yields more powerful graphs. + +CLASS_DIAGRAMS = YES + +# You can define message sequence charts within doxygen comments using the \msc +# command. Doxygen will then run the mscgen tool (see +# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the +# documentation. The MSCGEN_PATH tag allows you to specify the directory where +# the mscgen tool resides. 
If left empty the tool is assumed to be found in the +# default search path. + +MSCGEN_PATH = + +# If set to YES, the inheritance and collaboration graphs will hide +# inheritance and usage relations if the target is undocumented +# or is not a class. + +HIDE_UNDOC_RELATIONS = YES + +# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is +# available from the path. This tool is part of Graphviz, a graph visualization +# toolkit from AT&T and Lucent Bell Labs. The other options in this section +# have no effect if this option is set to NO (the default) + +HAVE_DOT = YES + +# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is +# allowed to run in parallel. When set to 0 (the default) doxygen will +# base this on the number of processors available in the system. You can set it +# explicitly to a value larger than 0 to get control over the balance +# between CPU load and processing speed. + +DOT_NUM_THREADS = 0 + +# By default doxygen will write a font called Helvetica to the output +# directory and reference it in all dot files that doxygen generates. +# When you want a differently looking font you can specify the font name +# using DOT_FONTNAME. You need to make sure dot is able to find the font, +# which can be done by putting it in a standard location or by setting the +# DOTFONTPATH environment variable or by setting DOT_FONTPATH to the directory +# containing the font. + +DOT_FONTNAME = Helvetica + +# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs. +# The default size is 10pt. + +DOT_FONTSIZE = 10 + +# By default doxygen will tell dot to use the output directory to look for the +# FreeSans.ttf font (which doxygen will put there itself). If you specify a +# different font using DOT_FONTNAME you can set the path where dot +# can find it using this tag. 
+ +DOT_FONTPATH = + +# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen +# will generate a graph for each documented class showing the direct and +# indirect inheritance relations. Setting this tag to YES will force +# the CLASS_DIAGRAMS tag to NO. + +CLASS_GRAPH = YES + +# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen +# will generate a graph for each documented class showing the direct and +# indirect implementation dependencies (inheritance, containment, and +# class references variables) of the class with other documented classes. + +COLLABORATION_GRAPH = YES + +# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen +# will generate a graph for groups, showing the direct groups dependencies + +GROUP_GRAPHS = YES + +# If the UML_LOOK tag is set to YES doxygen will generate inheritance and +# collaboration diagrams in a style similar to the OMG's Unified Modeling +# Language. + +UML_LOOK = NO + +# If set to YES, the inheritance and collaboration graphs will show the +# relations between templates and their instances. + +TEMPLATE_RELATIONS = NO + +# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT +# tags are set to YES then doxygen will generate a graph for each documented +# file showing the direct and indirect include dependencies of the file with +# other documented files. + +INCLUDE_GRAPH = YES + +# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and +# HAVE_DOT tags are set to YES then doxygen will generate a graph for each +# documented header file showing the documented files that directly or +# indirectly include this file. + +INCLUDED_BY_GRAPH = YES + +# If the CALL_GRAPH and HAVE_DOT options are set to YES then +# doxygen will generate a call dependency graph for every global function +# or class method. Note that enabling this option will significantly increase +# the time of a run. 
So in most cases it will be better to enable call graphs +# for selected functions only using the \callgraph command. + +CALL_GRAPH = NO + +# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then +# doxygen will generate a caller dependency graph for every global function +# or class method. Note that enabling this option will significantly increase +# the time of a run. So in most cases it will be better to enable caller +# graphs for selected functions only using the \callergraph command. + +CALLER_GRAPH = NO + +# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen +# will generate a graphical hierarchy of all classes instead of a textual one. + +GRAPHICAL_HIERARCHY = YES + +# If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES +# then doxygen will show the dependencies a directory has on other directories +# in a graphical way. The dependency relations are determined by the #include +# relations between the files in the directories. + +DIRECTORY_GRAPH = YES + +# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images +# generated by dot. Possible values are png, jpg, gif or svg. +# If left blank png will be used. + +DOT_IMAGE_FORMAT = png + +# The tag DOT_PATH can be used to specify the path where the dot tool can be +# found. If left blank, it is assumed the dot tool can be found in the path. + +DOT_PATH = + +# The DOTFILE_DIRS tag can be used to specify one or more directories that +# contain dot files that are included in the documentation (see the +# \dotfile command). + +DOTFILE_DIRS = + +# The MSCFILE_DIRS tag can be used to specify one or more directories that +# contain msc files that are included in the documentation (see the +# \mscfile command). + +MSCFILE_DIRS = + +# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of +# nodes that will be shown in the graph. 
If the number of nodes in a graph +# becomes larger than this value, doxygen will truncate the graph, which is +# visualized by representing a node as a red box. Note that if the +# number of direct children of the root node in a graph is already larger than +# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note +# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH. + +DOT_GRAPH_MAX_NODES = 50 + +# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the +# graphs generated by dot. A depth value of 3 means that only nodes reachable +# from the root by following a path via at most 3 edges will be shown. Nodes +# that lie further from the root node will be omitted. Note that setting this +# option to 1 or 2 may greatly reduce the computation time needed for large +# code bases. Also note that the size of a graph can be further restricted by +# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction. + +MAX_DOT_GRAPH_DEPTH = 0 + +# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent +# background. This is disabled by default, because dot on Windows does not +# seem to support this out of the box. Warning: Depending on the platform used, +# enabling this option may lead to badly anti-aliased labels on the edges of +# a graph (i.e. they become hard to read). + +DOT_TRANSPARENT = NO + +# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output +# files in one run (i.e. multiple -o and -T options on the command line). This +# makes dot run faster, but since only newer versions of dot (>1.8.10) +# support this, this feature is disabled by default. + +DOT_MULTI_TARGETS = YES + +# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will +# generate a legend page explaining the meaning of the various boxes and +# arrows in the dot generated graphs. 
+ +GENERATE_LEGEND = YES + +# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will +# remove the intermediate dot files that are used to generate +# the various graphs. + +DOT_CLEANUP = YES diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/arc.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/arc.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/arc.c 2012-11-16 11:07:22.129456337 -0700 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/arc.c 2012-11-16 17:41:05.588458447 -0700 @@ -24,65 +24,69 @@ * Copyright (c) 2011 by Delphix. All rights reserved. */ -/* - * DVA-based Adjustable Replacement Cache +/** + * \file arc.c + * DVA-based Adaptive Replacement Cache + * + *

Megiddo and Modha's Adaptive Replacement Cache

* * While much of the theory of operation used here is * based on the self-tuning, low overhead replacement cache * presented by Megiddo and Modha at FAST 2003, there are some * significant differences: * - * 1. The Megiddo and Modha model assumes any page is evictable. - * Pages in its cache cannot be "locked" into memory. This makes - * the eviction algorithm simple: evict the last page in the list. - * This also make the performance characteristics easy to reason - * about. Our cache is not so simple. At any given moment, some - * subset of the blocks in the cache are un-evictable because we - * have handed out a reference to them. Blocks are only evictable - * when there are no external references active. This makes - * eviction far more problematic: we choose to evict the evictable - * blocks that are the "lowest" in the list. + * -# The Megiddo and Modha model assumes any page is evictable. + * Pages in its cache cannot be "locked" into memory. This makes + * the eviction algorithm simple: evict the last page in the list. + * This also make the performance characteristics easy to reason + * about. Our cache is not so simple. At any given moment, some + * subset of the blocks in the cache are un-evictable because we + * have handed out a reference to them. Blocks are only evictable + * when there are no external references active. This makes + * eviction far more problematic: we choose to evict the evictable + * blocks that are the "lowest" in the list. + *

+ * There are times when it is not possible to evict the requested + * space. In these circumstances we are unable to adjust the cache + * size. To prevent the cache growing unbounded at these times we + * implement a "cache throttle" that slows the flow of new data + * into the cache until we can make space available. + * -# The Megiddo and Modha model assumes a fixed cache size. + * Pages are evicted when the cache is full and there is a cache + * miss. Our model has a variable sized cache. It grows with + * high use, but also tries to react to memory pressure from the + * operating system: decreasing its size when system memory is + * tight. + * -# The Megiddo and Modha model assumes a fixed page size. All + * elements of the cache are therefore exactly the same size. So + * when adjusting the cache size following a cache miss, it's simply + * a matter of choosing a single page to evict. In our model, we + * have variable sized cache blocks (ranging from 512 bytes to + * 128K bytes). We therefore choose a set of blocks to evict to make + * space for a cache miss that approximates as closely as possible + * the space used by the new block. * - * There are times when it is not possible to evict the requested - * space. In these circumstances we are unable to adjust the cache - * size. To prevent the cache growing unbounded at these times we - * implement a "cache throttle" that slows the flow of new data - * into the cache until we can make space available. + * See also: "ARC: A Self-Tuning, Low Overhead Replacement Cache" + * by N. Megiddo & D. Modha, FAST 2003 * - * 2. The Megiddo and Modha model assumes a fixed cache size. - * Pages are evicted when the cache is full and there is a cache - * miss. Our model has a variable sized cache. It grows with - * high use, but also tries to react to memory pressure from the - * operating system: decreasing its size when system memory is - * tight. * - * 3. The Megiddo and Modha model assumes a fixed page size. 
All - * elements of the cache are therefor exactly the same size. So - * when adjusting the cache size following a cache miss, its simply - * a matter of choosing a single page to evict. In our model, we - * have variable sized cache blocks (rangeing from 512 bytes to - * 128K bytes). We therefor choose a set of blocks to evict to make - * space for a cache miss that approximates as closely as possible - * the space used by the new block. + *

Locking Model

* - * See also: "ARC: A Self-Tuning, Low Overhead Replacement Cache" - * by N. Megiddo & D. Modha, FAST 2003 - */ - -/* - * The locking model: + * A new reference to a cache buffer can be obtained in two ways: + * + * -# via a hash table lookup using the DVA as a key + * -# via one of the ARC lists * - * A new reference to a cache buffer can be obtained in two - * ways: 1) via a hash table lookup using the DVA as a key, - * or 2) via one of the ARC lists. The arc_read() interface - * uses method 1, while the internal arc algorithms for - * adjusting the cache use method 2. We therefor provide two - * types of locks: 1) the hash table lock array, and 2) the - * arc list locks. + * The arc_read() interface uses method 1, while the internal arc + * algorithms for adjusting the cache use method 2. We therefore + * provide two types of locks: * - * Buffers do not have their own mutexs, rather they rely on the - * hash table mutexs for the bulk of their protection (i.e. most - * fields in the arc_buf_hdr_t are protected by these mutexs). + * -# the hash table lock array + * -# the arc list locks + * + * Buffers do not have their own mutexes, rather they rely on the + * hash table mutexes for the bulk of their protection (i.e. most + * fields in the arc_buf_hdr_t are protected by these mutexes). * * buf_hash_find() returns the appropriate mutex (held) when it * locates the requested buffer in the hash table. 
It returns @@ -137,14 +141,14 @@ #ifdef illumos #ifndef _KERNEL -/* set with ZFS_DEBUG=watch, to enable watchpoints on frozen buffers */ +/** set with ZFS_DEBUG=watch, to enable watchpoints on frozen buffers */ boolean_t arc_watch = B_FALSE; int arc_procfd; #endif #endif /* illumos */ static kmutex_t arc_reclaim_thr_lock; -static kcondvar_t arc_reclaim_thr_cv; /* used to signal reclaim thr */ +static kcondvar_t arc_reclaim_thr_cv; /**< used to signal reclaim thr */ static uint8_t arc_thread_exit; extern int zfs_write_limit_shift; @@ -155,20 +159,20 @@ extern kmutex_t zfs_write_limit_lock; uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT; typedef enum arc_reclaim_strategy { - ARC_RECLAIM_AGGR, /* Aggressive reclaim strategy */ - ARC_RECLAIM_CONS /* Conservative reclaim strategy */ + ARC_RECLAIM_AGGR, /**< Aggressive reclaim strategy */ + ARC_RECLAIM_CONS /**< Conservative reclaim strategy */ } arc_reclaim_strategy_t; -/* number of seconds before growing cache again */ +/** number of seconds before growing cache again */ static int arc_grow_retry = 60; -/* shift of arc_c for calculating both min and max arc_p */ +/** shift of arc_c for calculating both min and max arc_p */ static int arc_p_min_shift = 4; -/* log2(fraction of arc to reclaim) */ +/** log2(fraction of arc to reclaim) */ static int arc_shrink_shift = 5; -/* +/** * minimum lifespan of a prefetch block in clock ticks * (initialized in arc_init()) */ @@ -177,7 +181,7 @@ static int arc_min_prefetch_lifespan; static int arc_dead; extern int zfs_prefetch_disable; -/* +/** * The arc has filled available memory and has now warmed up. */ static boolean_t arc_warm; @@ -185,9 +189,14 @@ static boolean_t arc_warm; /* * These tunables are for performance analysis. 
*/ +/** + * \addtogroup tunables + * \{ + */ uint64_t zfs_arc_max; uint64_t zfs_arc_min; uint64_t zfs_arc_meta_limit = 0; +/** \} */ int zfs_arc_grow_retry = 0; int zfs_arc_shrink_shift = 0; int zfs_arc_p_min_shift = 0; @@ -202,14 +211,18 @@ SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_max SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_min, CTLFLAG_RDTUN, &zfs_arc_min, 0, "Minimum ARC size"); -/* - * Note that buffers can be in one of 6 states: - * ARC_anon - anonymous (discussed below) - * ARC_mru - recently used, currently cached - * ARC_mru_ghost - recentely used, no longer in cache - * ARC_mfu - frequently used, currently cached - * ARC_mfu_ghost - frequently used, no longer in cache - * ARC_l2c_only - exists in L2ARC but not other states +/** + * \file arc.c + * + *

Arc Buffer States

+ * + * Buffers can be in one of 6 states: + * - ARC_anon - anonymous (discussed below) + * - ARC_mru - recently used, currently cached + * - ARC_mru_ghost - recentely used, no longer in cache + * - ARC_mfu - frequently used, currently cached + * - ARC_mfu_ghost - frequently used, no longer in cache + * - ARC_l2c_only - exists in L2ARC but not other states * When there are no active references to the buffer, they are * are linked onto a list in one of these arc states. These are * the only buffers that can be evicted or deleted. Within each @@ -242,18 +255,17 @@ struct arcs_lock { #endif }; -/* +/** * must be power of two for mask use to work - * */ #define ARC_BUFC_NUMDATALISTS 16 #define ARC_BUFC_NUMMETADATALISTS 16 #define ARC_BUFC_NUMLISTS (ARC_BUFC_NUMMETADATALISTS + ARC_BUFC_NUMDATALISTS) typedef struct arc_state { - uint64_t arcs_lsize[ARC_BUFC_NUMTYPES]; /* amount of evictable data */ - uint64_t arcs_size; /* total amount of data in this state */ - list_t arcs_lists[ARC_BUFC_NUMLISTS]; /* list of evictable buffers */ + uint64_t arcs_lsize[ARC_BUFC_NUMTYPES]; /**< amount of evictable data */ + uint64_t arcs_size; /**< total amount of data in this state */ + list_t arcs_lists[ARC_BUFC_NUMLISTS]; /**< list of evictable buffers */ struct arcs_lock arcs_locks[ARC_BUFC_NUMLISTS] __aligned(CACHE_LINE_SIZE); } arc_state_t; @@ -431,7 +443,7 @@ static arc_stats_t arc_stats = { #define ARCSTAT_MAXSTAT(stat) \ ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64) -/* +/** * We define a macro to allow ARC hits/misses to be easily broken down by * two separate conditions, giving a total of four different subtypes for * each of hits and misses (so eight statistics total). @@ -467,13 +479,13 @@ static arc_state_t *arc_l2c_only; * the possibility of inconsistency by having shadow copies of the variables, * while still allowing the code to be readable. 
*/ -#define arc_size ARCSTAT(arcstat_size) /* actual total arc size */ -#define arc_p ARCSTAT(arcstat_p) /* target size of MRU */ -#define arc_c ARCSTAT(arcstat_c) /* target size of cache */ -#define arc_c_min ARCSTAT(arcstat_c_min) /* min target cache size */ -#define arc_c_max ARCSTAT(arcstat_c_max) /* max target cache size */ +#define arc_size ARCSTAT(arcstat_size) /**< actual total arc size */ +#define arc_p ARCSTAT(arcstat_p) /**< target size of MRU */ +#define arc_c ARCSTAT(arcstat_c) /**< target size of cache */ +#define arc_c_min ARCSTAT(arcstat_c_min) /**< min target cache size */ +#define arc_c_max ARCSTAT(arcstat_c_max) /**< max target cache size */ -static int arc_no_grow; /* Don't try to grow cache size */ +static int arc_no_grow; /**< Don't try to grow cache size */ static uint64_t arc_tempreserve; static uint64_t arc_loaned_bytes; static uint64_t arc_meta_used; @@ -567,16 +579,16 @@ static boolean_t l2arc_write_eligible(ui * public flags, make sure not to smash the private ones. 
*/ -#define ARC_IN_HASH_TABLE (1 << 9) /* this buffer is hashed */ -#define ARC_IO_IN_PROGRESS (1 << 10) /* I/O in progress for buf */ -#define ARC_IO_ERROR (1 << 11) /* I/O failed for buf */ -#define ARC_FREED_IN_READ (1 << 12) /* buf freed while in read */ -#define ARC_BUF_AVAILABLE (1 << 13) /* block not in active use */ -#define ARC_INDIRECT (1 << 14) /* this is an indirect block */ -#define ARC_FREE_IN_PROGRESS (1 << 15) /* hdr about to be freed */ -#define ARC_L2_WRITING (1 << 16) /* L2ARC write in progress */ -#define ARC_L2_EVICTED (1 << 17) /* evicted during I/O */ -#define ARC_L2_WRITE_HEAD (1 << 18) /* head of write list */ +#define ARC_IN_HASH_TABLE (1 << 9) /**< this buffer is hashed */ +#define ARC_IO_IN_PROGRESS (1 << 10) /**< I/O in progress for buf */ +#define ARC_IO_ERROR (1 << 11) /**< I/O failed for buf */ +#define ARC_FREED_IN_READ (1 << 12) /**< buf freed while in read */ +#define ARC_BUF_AVAILABLE (1 << 13) /**< block not in active use */ +#define ARC_INDIRECT (1 << 14) /**< this is an indirect block*/ +#define ARC_FREE_IN_PROGRESS (1 << 15) /**< hdr about to be freed */ +#define ARC_L2_WRITING (1 << 16) /**< L2ARC write in progress */ +#define ARC_L2_EVICTED (1 << 17) /**< evicted during I/O */ +#define ARC_L2_WRITE_HEAD (1 << 18) /**< head of write list */ #define HDR_IN_HASH_TABLE(hdr) ((hdr)->b_flags & ARC_IN_HASH_TABLE) #define HDR_IO_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_IO_IN_PROGRESS) @@ -634,25 +646,28 @@ uint64_t zfs_crc64_table[256]; * Level 2 ARC */ -#define L2ARC_WRITE_SIZE (8 * 1024 * 1024) /* initial write max */ -#define L2ARC_HEADROOM 2 /* num of writes */ -#define L2ARC_FEED_SECS 1 /* caching interval secs */ -#define L2ARC_FEED_MIN_MS 200 /* min caching interval ms */ +#define L2ARC_WRITE_SIZE (8 * 1024 * 1024) /**< initial write max*/ +#define L2ARC_HEADROOM 2 /**< num of writes */ +#define L2ARC_FEED_SECS 1 /**< caching interval secs */ +#define L2ARC_FEED_MIN_MS 200 /**< min caching interval ms */ #define 
l2arc_writes_sent ARCSTAT(arcstat_l2_writes_sent) #define l2arc_writes_done ARCSTAT(arcstat_l2_writes_done) -/* - * L2ARC Performance Tunables - */ -uint64_t l2arc_write_max = L2ARC_WRITE_SIZE; /* default max write size */ -uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE; /* extra write during warmup */ -uint64_t l2arc_headroom = L2ARC_HEADROOM; /* number of dev writes */ -uint64_t l2arc_feed_secs = L2ARC_FEED_SECS; /* interval seconds */ -uint64_t l2arc_feed_min_ms = L2ARC_FEED_MIN_MS; /* min interval milliseconds */ -boolean_t l2arc_noprefetch = B_TRUE; /* don't cache prefetch bufs */ -boolean_t l2arc_feed_again = B_TRUE; /* turbo warmup */ -boolean_t l2arc_norw = B_TRUE; /* no reads during writes */ +/* L2ARC Performance Tunables */ +/** + * \addtogroup tunables + * \{ + */ +uint64_t l2arc_write_max = L2ARC_WRITE_SIZE; /**< default max write size */ +uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE; /**< extra write during warmup*/ +uint64_t l2arc_headroom = L2ARC_HEADROOM; /**< number of dev writes */ +uint64_t l2arc_feed_secs = L2ARC_FEED_SECS; /**< interval seconds */ +uint64_t l2arc_feed_min_ms = L2ARC_FEED_MIN_MS; /**< min interval milliseconds*/ +boolean_t l2arc_noprefetch = B_TRUE; /**< don't cache prefetch bufs*/ +boolean_t l2arc_feed_again = B_TRUE; /**< turbo warmup */ +boolean_t l2arc_norw = B_TRUE; /**< no reads during writes */ +/** \} */ SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_write_max, CTLFLAG_RW, &l2arc_write_max, 0, "max write size"); @@ -718,47 +733,47 @@ SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2c_onl * L2ARC Internals */ typedef struct l2arc_dev { - vdev_t *l2ad_vdev; /* vdev */ - spa_t *l2ad_spa; /* spa */ - uint64_t l2ad_hand; /* next write location */ - uint64_t l2ad_write; /* desired write size, bytes */ - uint64_t l2ad_boost; /* warmup write boost, bytes */ - uint64_t l2ad_start; /* first addr on device */ - uint64_t l2ad_end; /* last addr on device */ - uint64_t l2ad_evict; /* last addr eviction reached */ - boolean_t l2ad_first; /* first sweep 
through */ - boolean_t l2ad_writing; /* currently writing */ - list_t *l2ad_buflist; /* buffer list */ - list_node_t l2ad_node; /* device list node */ + vdev_t *l2ad_vdev; /**< vdev */ + spa_t *l2ad_spa; /**< spa */ + uint64_t l2ad_hand; /**< next write location */ + uint64_t l2ad_write; /**< desired write size, bytes*/ + uint64_t l2ad_boost; /**< warmup write boost, bytes*/ + uint64_t l2ad_start; /**< first addr on device */ + uint64_t l2ad_end; /**< last addr on device */ + uint64_t l2ad_evict; /**b_size); } -/* Detach an arc_buf from a dbuf (tag) */ +/** + * Detach an arc_buf from a dbuf (tag) + */ void arc_loan_inuse_buf(arc_buf_t *buf, void *tag) { @@ -1573,7 +1590,7 @@ arc_buf_add_ref(arc_buf_t *buf, void* ta data, metadata, hits); } -/* +/** * Free the arc data buffer. If it is an l2arc write in progress, * the buffer is placed on l2arc_free_on_write to be freed later. */ @@ -1829,7 +1846,7 @@ arc_buf_size(arc_buf_t *buf) return (buf->b_hdr->b_size); } -/* +/** * Called from the DMU to determine if the current buffer should be * evicted. In order to ensure proper locking, the eviction must be initiated * from the DMU. Return true if the buffer is associated with user data and @@ -1871,7 +1888,7 @@ arc_buf_eviction_needed(arc_buf_t *buf) return (evict_needed); } -/* +/** * Evict buffers from list until we've removed the specified number of * bytes. Move the removed buffers to the appropriate evict state. * If the recycle flag is set, then attempt to "recycle" a buffer: @@ -2077,7 +2094,7 @@ evict_start: return (stolen); } -/* +/** * Remove buffers from list until we've removed the specified number of * bytes. Destroy the buffers that are removed. */ @@ -2283,9 +2300,10 @@ restart: goto restart; } -/* +/** * Flush all *evictable* data from the cache for the given spa. - * NOTE: this will not touch "active" (i.e. referenced) data. + * + * \note This will not touch "active" (i.e. referenced) data. 
*/ void arc_flush(spa_t *spa) @@ -2542,7 +2560,7 @@ arc_reclaim_thread(void *dummy __unused) thread_exit(); } -/* +/** * Adapt arc info given the number of bytes we are trying to add and * the state that we are comming from. This function is only called * when we are adding new content to the cache. @@ -2610,7 +2628,7 @@ arc_adapt(int bytes, arc_state_t *state) ASSERT((int64_t)arc_p >= 0); } -/* +/** * Check if the cache has reached its limits and eviction is required * prior to insert. */ @@ -2640,28 +2658,23 @@ arc_evict_needed(arc_buf_contents_t type return (arc_size > arc_c); } -/* +/** * The buffer, supplied as the first argument, needs a data block. * So, if we are at cache max, determine which cache should be victimized. * We have the following cases: - * - * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) -> - * In this situation if we're out of space, but the resident size of the MFU is - * under the limit, victimize the MFU cache to satisfy this insertion request. - * - * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) -> - * Here, we've used up all of the available space for the MRU, so we need to - * evict from our own cache instead. Evict from the set of resident MRU - * entries. - * - * 3. Insert for MFU (c - p) > sizeof(arc_mfu) -> - * c minus p represents the MFU space in the cache, since p is the size of the - * cache that is dedicated to the MRU. In this situation there's still space on - * the MFU side, so the MRU side needs to be victimized. - * - * 4. Insert for MFU (c - p) < sizeof(arc_mfu) -> - * MFU's resident set is consuming more space than it has been allotted. In - * this situation, we must victimize our own cache, the MFU, for this insertion. + * -# Insert for MRU, p > sizeof(arc_anon + arc_mru) -> In this situation if + * we're out of space, but the resident size of the MFU is under the limit, + * victimize the MFU cache to satisfy this insertion request. 
+ * -# Insert for MRU, p <= sizeof(arc_anon + arc_mru) -> Here, we've used up all + * of the available space for the MRU, so we need to evict from our own cache + * instead. Evict from the set of resident MRU entries. + * -# Insert for MFU (c - p) > sizeof(arc_mfu) -> c minus p represents the MFU + * space in the cache, since p is the size of the cache that is dedicated to + * the MRU. In this situation there's still space on the MFU side, so the + * MRU side needs to be victimized. + * -# Insert for MFU (c - p) < sizeof(arc_mfu) -> MFU's resident set is + * consuming more space than it has been allotted. In this situation, we + * must victimize our own cache, the MFU, for this insertion. */ static void arc_get_data_buf(arc_buf_t *buf) @@ -2745,9 +2758,10 @@ out: ARCSTAT_BUMP(arcstat_allocated); } -/* +/** * This routine is called whenever a buffer is accessed. - * NOTE: the hash lock is dropped in this function. + * + * \note The hash lock is dropped in this function. */ static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock) @@ -2879,7 +2893,9 @@ arc_access(arc_buf_hdr_t *buf, kmutex_t } } -/* a generic arc_done_func_t which you can use */ +/** + * a generic arc_done_func_t which you can use + */ /* ARGSUSED */ void arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg) @@ -2889,7 +2905,9 @@ arc_bcopy_func(zio_t *zio, arc_buf_t *bu VERIFY(arc_buf_remove_ref(buf, arg) == 1); } -/* a generic arc_done_func_t */ +/** + * a generic arc_done_func_t + */ void arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg) { @@ -3032,7 +3050,7 @@ arc_read_done(zio_t *zio) arc_hdr_destroy(hdr); } -/* +/** * "Read" the block block at the specified DVA (in bp) via the * cache. If the block is found in the cache, invoke the provided * callback immediately and return. 
Note that the `zio' parameter @@ -3358,7 +3376,7 @@ arc_set_callback(arc_buf_t *buf, arc_evi buf->b_private = private; } -/* +/** * This is used by the DMU to let the ARC know that a buffer is * being evicted, so the ARC should clean up. If this arc buf * is not yet in the evicted state, it will be put there. @@ -3446,7 +3464,7 @@ arc_buf_evict(arc_buf_t *buf) return (1); } -/* +/** * Release this buffer from the cache. This must be done * after a read and prior to modifying the buffer contents. * If the buffer has more than one reference, we must make @@ -3577,7 +3595,7 @@ arc_release(arc_buf_t *buf, void *tag) } } -/* +/** * Release this buffer. If it does not match the provided BP, fill it * with that block's contents. */ @@ -3846,7 +3864,7 @@ arc_tempreserve_space(uint64_t reserve, /* * Writes will, almost always, require additional memory allocations - * in order to compress/encrypt/etc the data. We therefor need to + * in order to compress/encrypt/etc the data. We therefore need to * make sure that there is sufficient available memory for this. */ if (error = arc_memory_throttle(reserve, anon_size, txg)) @@ -4136,8 +4154,10 @@ arc_fini(void) #endif } -/* - * Level 2 ARC +/** + * \file arc.c + * + *

Level 2 ARC

* * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk. * It uses dedicated storage devices to hold cached data, which are populated @@ -4145,128 +4165,121 @@ arc_fini(void) * the performance of random read workloads. The intended L2ARC devices * include short-stroked disks, solid state disks, and other media with * substantially faster read latency than disk. - * - * +-----------------------+ - * | ARC | - * +-----------------------+ - * | ^ ^ - * | | | - * l2arc_feed_thread() arc_read() - * | | | - * | l2arc read | - * V | | - * +---------------+ | - * | L2ARC | | - * +---------------+ | - * | ^ | - * l2arc_write() | | - * | | | - * V | | - * +-------+ +-------+ - * | vdev | | vdev | - * | cache | | cache | - * +-------+ +-------+ - * +=========+ .-----. - * : L2ARC : |-_____-| - * : devices : | Disks | - * +=========+ `-_____-' - * + \verbatim + +-----------------------+ + | ARC | + +-----------------------+ + | ^ ^ + | | | + l2arc_feed_thread() arc_read() + | | | + | l2arc read | + V | | + +---------------+ | + | L2ARC | | + +---------------+ | + | ^ | + l2arc_write() | | + | | | + V | | + +-------+ +-------+ + | vdev | | vdev | + | cache | | cache | + +-------+ +-------+ + +=========+ .-----. + : L2ARC : |-_____-| + : devices : | Disks | + +=========+ `-_____-' + \endverbatim * Read requests are satisfied from the following sources, in order: - * - * 1) ARC - * 2) vdev cache of L2ARC devices - * 3) L2ARC devices - * 4) vdev cache of disks - * 5) disks + * -# ARC + * -# vdev cache of L2ARC devices + * -# L2ARC devices + * -# vdev cache of disks + * -# disks * * Some L2ARC device types exhibit extremely slow write performance. * To accommodate for this there are some significant differences between * the L2ARC and traditional cache design: * - * 1. There is no eviction path from the ARC to the L2ARC. Evictions from - * the ARC behave as usual, freeing buffers and placing headers on ghost - * lists. 
The ARC does not send buffers to the L2ARC during eviction as - * this would add inflated write latencies for all ARC memory pressure. - * - * 2. The L2ARC attempts to cache data from the ARC before it is evicted. - * It does this by periodically scanning buffers from the eviction-end of - * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are - * not already there. It scans until a headroom of buffers is satisfied, - * which itself is a buffer for ARC eviction. The thread that does this is - * l2arc_feed_thread(), illustrated below; example sizes are included to - * provide a better sense of ratio than this diagram: - * - * head --> tail - * +---------------------+----------+ - * ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->. # already on L2ARC - * +---------------------+----------+ | o L2ARC eligible - * ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->| : ARC buffer - * +---------------------+----------+ | - * 15.9 Gbytes ^ 32 Mbytes | - * headroom | - * l2arc_feed_thread() - * | - * l2arc write hand <--[oooo]--' - * | 8 Mbyte - * | write max - * V - * +==============================+ - * L2ARC dev |####|#|###|###| |####| ... | - * +==============================+ - * 32 Gbytes - * - * 3. If an ARC buffer is copied to the L2ARC but then hit instead of - * evicted, then the L2ARC has cached a buffer much sooner than it probably - * needed to, potentially wasting L2ARC device bandwidth and storage. It is - * safe to say that this is an uncommon case, since buffers at the end of - * the ARC lists have moved there due to inactivity. - * - * 4. If the ARC evicts faster than the L2ARC can maintain a headroom, - * then the L2ARC simply misses copying some buffers. This serves as a - * pressure valve to prevent heavy read workloads from both stalling the ARC - * with waits and clogging the L2ARC with writes. 
This also helps prevent - * the potential for the L2ARC to churn if it attempts to cache content too - * quickly, such as during backups of the entire pool. - * - * 5. After system boot and before the ARC has filled main memory, there are - * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru - * lists can remain mostly static. Instead of searching from tail of these - * lists as pictured, the l2arc_feed_thread() will search from the list heads - * for eligible buffers, greatly increasing its chance of finding them. - * - * The L2ARC device write speed is also boosted during this time so that - * the L2ARC warms up faster. Since there have been no ARC evictions yet, - * there are no L2ARC reads, and no fear of degrading read performance - * through increased writes. - * - * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that - * the vdev queue can aggregate them into larger and fewer writes. Each - * device is written to in a rotor fashion, sweeping writes through - * available space then repeating. - * - * 7. The L2ARC does not store dirty content. It never needs to flush - * write buffers back to disk based storage. - * - * 8. If an ARC buffer is written (and dirtied) which also exists in the - * L2ARC, the now stale L2ARC buffer is immediately dropped. + * -# There is no eviction path from the ARC to the L2ARC. Evictions from + * the ARC behave as usual, freeing buffers and placing headers on ghost + * lists. The ARC does not send buffers to the L2ARC during eviction as + * this would add inflated write latencies for all ARC memory pressure. + * -# The L2ARC attempts to cache data from the ARC before it is evicted. + * It does this by periodically scanning buffers from the eviction-end of + * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are + * not already there. It scans until a headroom of buffers is satisfied, + * which itself is a buffer for ARC eviction. 
The thread that does this is + * l2arc_feed_thread(), illustrated below; example sizes are included to + * provide a better sense of ratio than this diagram: + \verbatim + head --> tail + +---------------------+----------+ + ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->. # already on L2ARC + +---------------------+----------+ | o L2ARC eligible + ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->| : ARC buffer + +---------------------+----------+ | + 15.9 Gbytes ^ 32 Mbytes | + headroom | + l2arc_feed_thread() + | + l2arc write hand <--[oooo]--' + | 8 Mbyte + | write max + V + +==============================+ + L2ARC dev |####|#|###|###| |####| ... | + +==============================+ + 32 Gbytes + \endverbatim + * -# If an ARC buffer is copied to the L2ARC but then hit instead of + * evicted, then the L2ARC has cached a buffer much sooner than it probably + * needed to, potentially wasting L2ARC device bandwidth and storage. It is + * safe to say that this is an uncommon case, since buffers at the end of + * the ARC lists have moved there due to inactivity. + * -# If the ARC evicts faster than the L2ARC can maintain a headroom, + * then the L2ARC simply misses copying some buffers. This serves as a + * pressure valve to prevent heavy read workloads from both stalling the ARC + * with waits and clogging the L2ARC with writes. This also helps prevent + * the potential for the L2ARC to churn if it attempts to cache content too + * quickly, such as during backups of the entire pool. + * -# After system boot and before the ARC has filled main memory, there are + * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru + * lists can remain mostly static. Instead of searching from tail of these + * lists as pictured, the l2arc_feed_thread() will search from the list heads + * for eligible buffers, greatly increasing its chance of finding them. + *

+ * The L2ARC device write speed is also boosted during this time so that + * the L2ARC warms up faster. Since there have been no ARC evictions yet, + * there are no L2ARC reads, and no fear of degrading read performance + * through increased writes. + * -# Writes to the L2ARC devices are grouped and sent in-sequence, so that + * the vdev queue can aggregate them into larger and fewer writes. Each + * device is written to in a rotor fashion, sweeping writes through + * available space then repeating. + * -# The L2ARC does not store dirty content. It never needs to flush + * write buffers back to disk based storage. + * -# If an ARC buffer is written (and dirtied) which also exists in the + * L2ARC, the now stale L2ARC buffer is immediately dropped. * * The performance of the L2ARC can be tweaked by a number of tunables, which * may be necessary for different workloads: * - * l2arc_write_max max write bytes per interval - * l2arc_write_boost extra write bytes during device warmup - * l2arc_noprefetch skip caching prefetched buffers - * l2arc_headroom number of max device writes to precache - * l2arc_feed_secs seconds between L2ARC writing + * - l2arc_write_max max write bytes per interval + * - l2arc_write_boost extra write bytes during device warmup + * - l2arc_noprefetch skip caching prefetched buffers + * - l2arc_headroom number of max device writes to precache + * - l2arc_feed_secs seconds between L2ARC writing * * Tunables may be removed or added as future performance improvements are * integrated, and also may become zpool properties. 
* * There are three key functions that control how the L2ARC warms up: * - * l2arc_write_eligible() check if a buffer is eligible to cache - * l2arc_write_size() calculate how much to write - * l2arc_write_interval() calculate sleep delay between writes + * - l2arc_write_eligible() check if a buffer is eligible to cache + * - l2arc_write_size() calculate how much to write + * - l2arc_write_interval() calculate sleep delay between writes * * These three functions determine what to write, how much, and how quickly * to send writes. @@ -4352,7 +4365,7 @@ l2arc_hdr_stat_remove(void) ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE); } -/* +/** * Cycle through L2ARC devices. This is how L2ARC load balances. * If a device is returned, this also returns holding the spa config lock. */ @@ -4413,7 +4426,7 @@ out: return (next); } -/* +/** * Free buffers that were tagged for destruction. */ static void @@ -4437,7 +4450,7 @@ l2arc_do_free_on_write() mutex_exit(&l2arc_free_on_write_mtx); } -/* +/** * A write to a cache device has completed. Update all headers to allow * reads from these buffers to begin. */ @@ -4513,7 +4526,7 @@ l2arc_write_done(zio_t *zio) kmem_free(cb, sizeof (l2arc_write_callback_t)); } -/* +/** * A read to a cache device completed. Validate buffer contents before * handing over to the regular ARC routines. */ @@ -4584,7 +4597,7 @@ l2arc_read_done(zio_t *zio) kmem_free(cb, sizeof (l2arc_read_callback_t)); } -/* +/** * This is the list priority from which the L2ARC will search for pages to * cache. This is used within loops (0..3) to cycle through lists in the * desired order. This order can have a significant effect on cache @@ -4626,7 +4639,7 @@ l2arc_list_locked(int list_num, kmutex_t return (list); } -/* +/** * Evict buffers from the device write hand to the distance specified in * bytes. This distance may span populated buffers, it may span nothing. * This is clearing a region on the L2ARC device ready for writing. 
@@ -4757,7 +4770,7 @@ top: dev->l2ad_evict = taddr; } -/* +/** * Find and write ARC buffers to the L2ARC device. * * An ARC_L2_WRITING flag is set so that the L2ARC buffers are not valid @@ -4944,9 +4957,9 @@ l2arc_write_buffers(spa_t *spa, l2arc_de return (write_sz); } -/* - * This thread feeds the L2ARC at regular intervals. This is the beating - * heart of the L2ARC. +/** + * Feed the L2ARC with buffers from the ARC at regular intervals. + * This thread is the beating heart of the L2ARC. */ static void l2arc_feed_thread(void *dummy __unused) @@ -5057,7 +5070,7 @@ l2arc_vdev_present(vdev_t *vd) return (dev != NULL); } -/* +/** * Add a vdev for use by the L2ARC. By this point the spa has already * validated the vdev and opened it. */ @@ -5103,7 +5116,7 @@ l2arc_add_vdev(spa_t *spa, vdev_t *vd) mutex_exit(&l2arc_dev_mtx); } -/* +/** * Remove a vdev from the L2ARC. */ void diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/bpobj.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/bpobj.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/bpobj.c 2012-11-16 11:07:22.134455298 -0700 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/bpobj.c 2012-11-16 17:42:48.492456382 -0700 @@ -30,7 +30,7 @@ #include #include -/* +/** * Return an empty bpobj, preferably the empty dummy one (dp_empty_bpobj). */ uint64_t @@ -342,7 +342,7 @@ out: return (err); } -/* +/** * Iterate and remove the entries. If func returns nonzero, iteration * will stop and that entry will not be removed. */ @@ -352,7 +352,7 @@ bpobj_iterate(bpobj_t *bpo, bpobj_itor_t return (bpobj_iterate_impl(bpo, func, arg, tx, B_TRUE)); } -/* +/** * Iterate the entries. If func returns nonzero, iteration will stop. 
*/ int diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/bptree.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/bptree.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/bptree.c 2012-11-16 11:07:22.134455298 -0700 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/bptree.c 2012-11-16 17:35:21.581082296 -0700 @@ -36,25 +36,27 @@ #include #include -/* +/** + * \file bptree.c + * * A bptree is a queue of root block pointers from destroyed datasets. When a * dataset is destroyed its root block pointer is put on the end of the pool's * bptree queue so the dataset's blocks can be freed asynchronously by * dsl_scan_sync. This allows the delete operation to finish without traversing * all the dataset's blocks. * - * Note that while bt_begin and bt_end are only ever incremented in this code - * they are effectively reset to 0 every time the entire bptree is freed because - * the bptree's object is destroyed and re-created. + * \note While bt_begin and bt_end are only ever incremented in this code + * they are effectively reset to 0 every time the entire bptree is + * freed because the bptree's object is destroyed and re-created. 
*/ struct bptree_args { - bptree_phys_t *ba_phys; /* data in bonus buffer, dirtied if freeing */ - boolean_t ba_free; /* true if freeing during traversal */ + bptree_phys_t *ba_phys; /**< data in bonus buffer, dirtied if freeing */ + boolean_t ba_free; /**< true if freeing during traversal */ - bptree_itor_t *ba_func; /* function to call for each blockpointer */ - void *ba_arg; /* caller supplied argument to ba_func */ - dmu_tx_t *ba_tx; /* caller supplied tx, NULL if not freeing */ + bptree_itor_t *ba_func; /**< function to call for each blockpointer */ + void *ba_arg; /**< caller supplied argument to ba_func */ + dmu_tx_t *ba_tx; /**< caller supplied tx, NULL if not freeing */ } bptree_args_t; uint64_t diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dbuf.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dbuf.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dbuf.c 2012-11-16 11:07:22.136455907 -0700 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dbuf.c 2012-11-15 18:17:56.847456937 -0700 @@ -42,7 +42,7 @@ static void dbuf_destroy(dmu_buf_impl_t static int dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx); static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx); -/* +/** * Global data structures and functions for the dbuf cache. */ static kmem_cache_t *dbuf_cache; @@ -70,7 +70,7 @@ dbuf_dest(void *vdb, void *unused) refcount_destroy(&db->db_holds); } -/* +/** * dbuf hash table routines */ static dbuf_hash_table_t dbuf_hash_table; @@ -129,7 +129,7 @@ dbuf_find(dnode_t *dn, uint8_t level, ui return (NULL); } -/* +/** * Insert an entry into the hash table. If there is already an element * equal to elem in the hash table, then the already existing element * will be returned and the new element will not be inserted. @@ -168,7 +168,7 @@ dbuf_hash_insert(dmu_buf_impl_t *db) return (NULL); } -/* +/** * Remove an entry from the hash table. 
This operation will * fail if there are any existing holds on the db. */ @@ -415,8 +415,10 @@ dbuf_update_data(dmu_buf_impl_t *db) } } -/* Set the dbuf's buffer to the ARC buffer, including any associated state, - * such as db_data. */ +/** + * Set the dbuf's buffer to the ARC buffer, including any associated state, + * such as db_data. + */ static void dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf) { @@ -437,8 +439,10 @@ dbuf_set_data(dmu_buf_impl_t *db, arc_bu } } -/* - * Loan out an arc_buf for read. Return the loaned arc_buf. +/** + * Loan out an arc_buf for read. + * + * \return the loaned arc_buf. */ arc_buf_t * dbuf_loan_arcbuf(dmu_buf_impl_t *db) @@ -552,7 +556,7 @@ dbuf_read_impl(dmu_buf_impl_t *db, zio_t (db->db_level == 0 && (dnode_block_freed(dn, db->db_blkid) || BP_IS_HOLE(db->db_blkptr)))) { arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db); - + dbuf_set_data(db, arc_buf_alloc(dn->dn_objset->os_spa, db->db.db_size, db, type)); DB_DNODE_EXIT(db); @@ -647,6 +651,14 @@ dbuf_read(dmu_buf_impl_t *db, zio_t *zio if (!havepzio) err = zio_wait(zio); } else { + /* + * Another reader came in while the dbuf was in flight + * between UNCACHED and CACHED. Either a writer will finish + * writing the buffer (sending the dbuf to CACHED) or the + * first reader's request will reach the read_done callback + * and send the dbuf to CACHED. Otherwise, a failure + * occurred and the dbuf went to UNCACHED. + */ mutex_exit(&db->db_mtx); if (prefetch) dmu_zfetch(&dn->dn_zfetch, db->db.db_offset, @@ -655,6 +667,7 @@ dbuf_read(dmu_buf_impl_t *db, zio_t *zio rw_exit(&dn->dn_struct_rwlock); DB_DNODE_EXIT(db); + /* Skip the wait per the caller's request. */ mutex_enter(&db->db_mtx); if ((flags & DB_RF_NEVERWAIT) == 0) { while (db->db_state == DB_READ || @@ -698,9 +711,9 @@ dbuf_noread(dmu_buf_impl_t *db) mutex_exit(&db->db_mtx); } -/* +/** * This is our just-in-time copy function. 
It makes a copy of - * buffers, that have been modified in a previous transaction + * buffers that have been modified in a previous transaction * group, before we modify them in the current active group. * * This function is used in two places: when we are dirtying a @@ -752,6 +765,12 @@ dbuf_fix_old_data(dmu_buf_impl_t *db, ui } } +/** + * Signal that the dirty record is about to be re-dirtied after sync. + * + * This function is used to notify, if necessary, that a dbuf is about + * to be dirtied again after having an immediate write via dmu_sync(). + */ void dbuf_unoverride(dbuf_dirty_record_t *dr) { @@ -788,7 +807,7 @@ dbuf_unoverride(dbuf_dirty_record_t *dr) arc_release(dr->dt.dl.dr_data, db); } -/* +/** * Evict (if its unreferenced) or clear (if its referenced) any level-0 * data blocks in the free range, so that any future readers will find * empty blocks. Also, if we happen accross any level-1 dbufs in the @@ -999,7 +1018,9 @@ dbuf_release_bp(dmu_buf_impl_t *db) db->db_blkptr, os->os_spa, &zb); } -/* Mark a dbuf as dirty. */ +/** + * Mark a dbuf as dirty. + */ dbuf_dirty_record_t * dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx) { @@ -1274,6 +1295,9 @@ dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t return (dr); } +/** + * Undirty a buffer, clearing dirty records. + */ static int dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx) { @@ -1449,7 +1473,7 @@ dbuf_fill_done(dmu_buf_impl_t *db, dmu_t mutex_exit(&db->db_mtx); } -/* +/** * Directly assign a provided arc buf to a given dbuf if it's not referenced * by anybody except our caller. Otherwise copy arcbuf's contents to dbuf. */ @@ -1474,7 +1498,8 @@ dbuf_assign_arcbuf(dmu_buf_impl_t *db, a ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED); - /* If the dbuf is cached and the number of holds exceeds the number + /* + * If the dbuf is cached and the number of holds exceeds the number * of dirty calls on it, then dirty it again and remove the buffer * reference, before copying the ARC buffer to the dbuf. 
*/ @@ -1518,18 +1543,22 @@ dbuf_assign_arcbuf(dmu_buf_impl_t *db, a dbuf_fill_done(db, tx); } -/* +/** * "Clear" the contents of this dbuf. This will mark the dbuf - * EVICTING and clear *most* of its references. Unfortunetely, + * EVICTING and clear *most* of its references. Unfortunately, * when we are not holding the dn_dbufs_mtx, we can't clear the * entry in the dn_dbufs list. We have to wait until dbuf_destroy() - * in this case. For callers from the DMU we will usually see: - * dbuf_clear()->arc_buf_evict()->dbuf_do_evict()->dbuf_destroy() + * in this case. + * + * For callers from the DMU we will usually see: + * - dbuf_clear()->arc_buf_evict()->dbuf_do_evict()->dbuf_destroy() + * * For the arc callback, we will usually see: - * dbuf_do_evict()->dbuf_clear();dbuf_destroy() + * - dbuf_do_evict()->dbuf_clear();dbuf_destroy() + * * Sometimes, though, we will get a mix of these two: - * DMU: dbuf_clear()->arc_buf_evict() - * ARC: dbuf_do_evict()->dbuf_destroy() + * - DMU: dbuf_clear()->arc_buf_evict() + * - ARC: dbuf_do_evict()->dbuf_destroy() */ void dbuf_clear(dmu_buf_impl_t *db) @@ -1862,9 +1891,10 @@ dbuf_prefetch(dnode_t *dn, uint64_t blki } } -/* +/** * Returns with db_holds incremented, and db_mtx not held. - * Note: dn_struct_rwlock must be held. + * + * \note dn_struct_rwlock must be held. */ int dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid, int fail_sparse, @@ -2018,12 +2048,14 @@ dbuf_add_ref(dmu_buf_impl_t *db, void *t ASSERT(holds > 1); } -/* - * If you call dbuf_rele() you had better not be referencing the dnode handle - * unless you have some other direct or indirect hold on the dnode. (An indirect - * hold is a hold on one of the dnode's dbufs, including the bonus buffer.) - * Without that, the dbuf_rele() could lead to a dnode_rele() followed by the - * dnode's parent dbuf evicting its dnode handles. 
+/** + * \note If you call dbuf_rele() you had better not be referencing + * the dnode handle unless you have some other direct or + * indirect hold on the dnode. (An indirect hold is a hold + * on one of the dnode's dbufs, including the bonus buffer.) + * Without that, the dbuf_rele() could lead to a dnode_rele() + * followed by the dnode's parent dbuf evicting its dnode + * handles. */ #pragma weak dmu_buf_rele = dbuf_rele void @@ -2033,7 +2065,7 @@ dbuf_rele(dmu_buf_impl_t *db, void *tag) dbuf_rele_and_unlock(db, tag); } -/* +/** * dbuf_rele() for an already-locked dbuf. This is necessary to allow * db_dirtycnt and db_holds to be updated atomically. */ @@ -2260,6 +2292,7 @@ dbuf_sync_indirect(dbuf_dirty_record_t * ASSERT(db->db_level > 0); DBUF_VERIFY(db); + /* Read the block if it hasn't been read yet. */ if (db->db_buf == NULL) { mutex_exit(&db->db_mtx); (void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED); @@ -2270,10 +2303,12 @@ dbuf_sync_indirect(dbuf_dirty_record_t * DB_DNODE_ENTER(db); dn = DB_DNODE(db); + /* Indirect block size must match what the dnode thinks it is. */ ASSERT3U(db->db.db_size, ==, 1<dn_phys->dn_indblkshift); dbuf_check_blkptr(dn, db); DB_DNODE_EXIT(db); + /* Provide the pending dirty record to child dbufs */ db->db_data_pending = dr; mutex_exit(&db->db_mtx); @@ -2403,6 +2438,7 @@ dbuf_sync_leaf(dbuf_dirty_record_t *dr, *datap = arc_buf_alloc(os->os_spa, blksz, db, type); bcopy(db->db.db_data, (*datap)->b_data, blksz); } + /* notify that the dirty record is about to write */ db->db_data_pending = dr; mutex_exit(&db->db_mtx); @@ -2560,6 +2596,10 @@ dbuf_write_done(zio_t *zio, arc_buf_t *b DBUF_VERIFY(db); + /* + * Now that the write is completed, the dirty record it resolves is + * no longer needed, so remove it. + */ drp = &db->db_last_dirty; while ((dr = *drp) != db->db_data_pending) drp = &dr->dr_next; @@ -2582,15 +2622,26 @@ dbuf_write_done(zio_t *zio, arc_buf_t *b } #endif + /* Clean up the dirty record. 
*/ if (db->db_level == 0) { ASSERT(db->db_blkid != DMU_BONUS_BLKID); ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN); if (db->db_state != DB_NOFILL) { - if (dr->dt.dl.dr_data != db->db_buf) + if (dr->dt.dl.dr_data != db->db_buf) { + /* + * What we wrote is already out of date, so + * just free the ARC buffer. + */ VERIFY(arc_buf_remove_ref(dr->dt.dl.dr_data, db) == 1); - else if (!arc_released(db->db_buf)) + } else if (!arc_released(db->db_buf)) { + /* + * Our dbuf has yet to be evicted, so + * register a callback to clean it up once + * its ARC buffer is released. + */ arc_set_callback(db->db_buf, dbuf_do_evict, db); + } } } else { dnode_t *dn; @@ -2598,7 +2649,15 @@ dbuf_write_done(zio_t *zio, arc_buf_t *b DB_DNODE_ENTER(db); dn = DB_DNODE(db); ASSERT(list_head(&dr->dt.di.dr_children) == NULL); + /* + * The size of an indirect block must match what its + * associated dnode thinks it should be. + */ ASSERT3U(db->db.db_size, ==, 1<dn_phys->dn_indblkshift); + /* + * If the dbuf's block pointer is not a hole, evict it when + * its last ARC buffer hold has been released. + */ if (!BP_IS_HOLE(db->db_blkptr)) { int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT; @@ -2660,6 +2719,9 @@ dbuf_write_override_done(zio_t *zio) dbuf_write_done(zio, NULL, db); } +/** + * Commit a dirty buffer to disk. + */ static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx) { @@ -2670,7 +2732,7 @@ dbuf_write(dbuf_dirty_record_t *dr, arc_ uint64_t txg = tx->tx_txg; zbookmark_t zb; zio_prop_t zp; - zio_t *zio; + zio_t *pio; /* parent I/O */ int wp_flag = 0; DB_DNODE_ENTER(db); @@ -2694,23 +2756,28 @@ dbuf_write(dbuf_dirty_record_t *dr, arc_ } if (parent != dn->dn_dbuf) { + /* Our parent is an indirect block. */ + /* We have a dirty parent that has been scheduled for write. */ ASSERT(parent && parent->db_data_pending); + /* Our parent's buffer is one level closer to the dnode. 
*/ ASSERT(db->db_level == parent->db_level-1); + /* Nobody can find the old parent in the ARC. */ ASSERT(arc_released(parent->db_buf)); - zio = parent->db_data_pending->dr_zio; + pio = parent->db_data_pending->dr_zio; } else { + /* Our parent is the dnode itself. */ ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 && db->db_blkid != DMU_SPILL_BLKID) || (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0)); if (db->db_blkid != DMU_SPILL_BLKID) ASSERT3P(db->db_blkptr, ==, &dn->dn_phys->dn_blkptr[db->db_blkid]); - zio = dn->dn_zio; + pio = dn->dn_zio; } ASSERT(db->db_level == 0 || data == db->db_buf); ASSERT3U(db->db_blkptr->blk_birth, <=, txg); - ASSERT(zio); + ASSERT(pio); SET_BOOKMARK(&zb, os->os_dsl_dataset ? os->os_dsl_dataset->ds_object : DMU_META_OBJSET, @@ -2725,7 +2792,7 @@ dbuf_write(dbuf_dirty_record_t *dr, arc_ if (db->db_level == 0 && dr->dt.dl.dr_override_state == DR_OVERRIDDEN) { ASSERT(db->db_state != DB_NOFILL); - dr->dr_zio = zio_write(zio, os->os_spa, txg, + dr->dr_zio = zio_write(pio, os->os_spa, txg, db->db_blkptr, data->b_data, arc_buf_size(data), &zp, dbuf_write_override_ready, dbuf_write_override_done, dr, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb); @@ -2736,14 +2803,14 @@ dbuf_write(dbuf_dirty_record_t *dr, arc_ mutex_exit(&db->db_mtx); } else if (db->db_state == DB_NOFILL) { ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF); - dr->dr_zio = zio_write(zio, os->os_spa, txg, + dr->dr_zio = zio_write(pio, os->os_spa, txg, db->db_blkptr, NULL, db->db.db_size, &zp, dbuf_write_nofill_ready, dbuf_write_nofill_done, db, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb); } else { ASSERT(arc_released(data)); - dr->dr_zio = arc_write(zio, os->os_spa, txg, + dr->dr_zio = arc_write(pio, os->os_spa, txg, db->db_blkptr, data, DBUF_IS_L2CACHEABLE(db), &zp, dbuf_write_ready, dbuf_write_done, db, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb); diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/ddt.c 
SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/ddt.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/ddt.c 2012-11-16 11:07:22.137456595 -0700 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/ddt.c 2012-11-15 18:17:56.858457201 -0700 @@ -37,8 +37,10 @@ #include #include -/* +/** * Enable/disable prefetching of dedup-ed blocks which are going to be freed. + * + * \ingroup tunables */ int zfs_dedup_prefetch = 1; diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu.c 2012-11-16 11:07:22.138456771 -0700 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu.c 2012-11-15 18:17:56.865456828 -0700 @@ -232,9 +232,6 @@ dmu_rm_spill(objset_t *os, uint64_t obje return (error); } -/* - * returns ENOENT, EIO, or 0. - */ int dmu_bonus_hold(objset_t *os, uint64_t object, void *tag, dmu_buf_t **dbp) { @@ -276,14 +273,14 @@ dmu_bonus_hold(objset_t *os, uint64_t ob return (0); } -/* - * returns ENOENT, EIO, or 0. - * +/** * This interface will allocate a blank spill dbuf when a spill blk * doesn't already exist on the dnode. * * if you only want to find an already existing spill db, then * dmu_spill_hold_existing() should be used. + * + * \return ENOENT, EIO, or 0. */ int dmu_spill_hold_by_dnode(dnode_t *dn, uint32_t flags, void *tag, dmu_buf_t **dbp) @@ -352,11 +349,11 @@ dmu_spill_hold_by_bonus(dmu_buf_t *bonus return (err); } -/* - * Note: longer-term, we should modify all of the dmu_buf_*() interfaces - * to take a held dnode rather than -- the lookup is wasteful, - * and can induce severe lock contention when writing to several files - * whose dnodes are in the same block. 
+/** + * \note Longer-term, we should modify all of the dmu_buf_*() interfaces + * to take a held dnode rather than -- the lookup is + * wasteful, and can induce severe lock contention when writing to + * several files whose dnodes are in the same block. */ static int dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset, uint64_t length, @@ -559,7 +556,7 @@ dmu_prefetch(objset_t *os, uint64_t obje dnode_rele(dn, FTAG); } -/* +/** * Get the next "chunk" of file data to free. We traverse the file from * the end so that the file gets shorter over time (if we crashes in the * middle, this will leave us in a better state). We find allocated file @@ -842,7 +839,7 @@ dmu_prealloc(objset_t *os, uint64_t obje dmu_buf_rele_array(dbp, numbufs, FTAG); } -/* +/** * DMU support for xuio */ kstat_t *xuio_ksp = NULL; @@ -886,7 +883,7 @@ dmu_xuio_fini(xuio_t *xuio) XUIOSTAT_INCR(xuiostat_onloan_wbuf, -nblk); } -/* +/** * Initialize iov[priv->next] and priv->bufs[priv->next] with { off, n, abuf } * and increase priv->next by 1. */ @@ -1175,7 +1172,7 @@ dmu_write_pages(objset_t *os, uint64_t o #endif /* sun */ #endif -/* +/** * Allocate a loaned anonymous arc buffer. */ arc_buf_t * @@ -1188,7 +1185,7 @@ dmu_request_arcbuf(dmu_buf_t *handle, in return (arc_loan_buf(spa, size)); } -/* +/** * Free a loaned arc buffer. */ void @@ -1198,7 +1195,7 @@ dmu_return_arcbuf(arc_buf_t *buf) VERIFY(arc_buf_remove_ref(buf, FTAG) == 1); } -/* +/** * When possible directly assign passed loaned arc buffer to a dbuf. * If this is not possible copy the contents of passed arc buf via * dmu_write(). @@ -1350,31 +1347,6 @@ dmu_sync_late_arrival(zio_t *pio, objset return (0); } -/* - * Intent log support: sync the block associated with db to disk. - * N.B. and XXX: the caller is responsible for making sure that the - * data isn't changing while dmu_sync() is writing it. - * - * Return values: - * - * EEXIST: this txg has already been synced, so there's nothing to to. 
- * The caller should not log the write. - * - * ENOENT: the block was dbuf_free_range()'d, so there's nothing to do. - * The caller should not log the write. - * - * EALREADY: this block is already in the process of being synced. - * The caller should track its progress (somehow). - * - * EIO: could not do the I/O. - * The caller should do a txg_wait_synced(). - * - * 0: the I/O has been initiated. - * The caller should log this blkptr in the done callback. - * It is possible that the I/O will fail, in which case - * the error will be reported to the done callback and - * propagated to pio from zio_done(). - */ int dmu_sync(zio_t *pio, uint64_t txg, dmu_sync_cb_t *done, zgd_t *zgd) { @@ -1660,10 +1632,6 @@ dmu_object_info_from_dnode(dnode_t *dn, rw_exit(&dn->dn_struct_rwlock); } -/* - * Get information on a DMU object. - * If doi is NULL, just indicates whether the object exists. - */ int dmu_object_info(objset_t *os, uint64_t object, dmu_object_info_t *doi) { @@ -1680,9 +1648,6 @@ dmu_object_info(objset_t *os, uint64_t o return (0); } -/* - * As above, but faster; can be used when you have a held dbuf in hand. - */ void dmu_object_info_from_db(dmu_buf_t *db_fake, dmu_object_info_t *doi) { @@ -1693,10 +1658,6 @@ dmu_object_info_from_db(dmu_buf_t *db_fa DB_DNODE_EXIT(db); } -/* - * Faster still when you only care about the size. - * This is specifically optimized for zfs_getattr(). 
- */ void dmu_object_size_from_db(dmu_buf_t *db_fake, uint32_t *blksize, u_longlong_t *nblk512) diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_objset.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_objset.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_objset.c 2012-11-16 11:07:22.139449076 -0700 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_objset.c 2012-11-15 18:17:56.872457480 -0700 @@ -44,7 +44,7 @@ #include #include -/* +/** * Needed to close a window in dnode_move() that allows the objset to be freed * before it can be safely accessed. */ @@ -1283,7 +1283,7 @@ dmu_objset_do_userquota_updates(objset_t } } -/* +/** * Returns a pointer to data to find uid/gid from * * If a dirty record for transaction group that is syncing can't @@ -1628,9 +1628,10 @@ findfunc(spa_t *spa, uint64_t dsobj, con return (fa->func(dsname, fa->arg)); } -/* +/** * Find all objsets under name, and for each, call 'func(child_name, arg)'. - * Perhaps change all callers to use dmu_objset_find_spa()? + * + * \todo Perhaps change all callers to use dmu_objset_find_spa()? 
*/ int dmu_objset_find(const char *name, int func(const char *, void *), void *arg, @@ -1642,7 +1643,7 @@ dmu_objset_find(const char *name, int fu return (dmu_objset_find_spa(NULL, name, findfunc, &fa, flags)); } -/* +/** * Find all objsets under name, call func on each */ int diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_send.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_send.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_send.c 2012-11-16 11:07:22.140455716 -0700 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_send.c 2012-11-15 18:17:56.879451731 -0700 @@ -797,7 +797,7 @@ dmu_recv_verify_features(dsl_dataset_t * (spa_version(dsl_dataset_get_spa(ds)) < SPA_VERSION_SA)); } -/* +/** * NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin() * succeeds; otherwise we will leak the holds on the datasets. */ @@ -1258,7 +1258,7 @@ restore_write(struct restorearg *ra, obj return (0); } -/* +/** * Handle a DRR_WRITE_BYREF record. This record is used in dedup'ed * streams to refer to a copy of the data that is already on the * system because it came in earlier in the stream. This function @@ -1385,7 +1385,7 @@ restore_free(struct restorearg *ra, objs return (err); } -/* +/** * NB: callers *must* call dmu_recv_end() if this succeeds. 
*/ int diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_traverse.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_traverse.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_traverse.c 2012-11-16 11:07:22.141456404 -0700 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_traverse.c 2012-11-16 22:20:15.772453014 -0700 @@ -138,13 +138,16 @@ typedef enum resume_skip { RESUME_SKIP_CHILDREN } resume_skip_t; -/* - * Returns RESUME_SKIP_ALL if td indicates that we are resuming a traversal and - * the block indicated by zb does not need to be visited at all. Returns - * RESUME_SKIP_CHILDREN if we are resuming a post traversal and we reach the - * resume point. This indicates that this block should be visited but not its - * children (since they must have been visited in a previous traversal). - * Otherwise returns RESUME_SKIP_NONE. +/** + * \retval RESUME_SKIP_ALL td indicates that we are resuming a traversal + * and the block indicated by zb does not need to + * be visited at all. + * \retval RESUME_SKIP_CHILDREN We are resuming a post traversal and we reach + * the resume point. This indicates that this + * block should be visited but not its children + * (since they must have been visited in a + * previous traversal). + * \retval RESUME_SKIP_NONE No skipping required. */ static resume_skip_t resume_skip_check(traverse_data_t *td, const dnode_phys_t *dnp, @@ -484,7 +487,7 @@ traverse_prefetch_thread(void *arg) mutex_exit(&td_main->td_pfd->pd_mtx); } -/* +/** * NB: dataset must not be changing on-disk (eg, is a snapshot or we are * in syncing context). */ @@ -555,7 +558,7 @@ traverse_impl(spa_t *spa, dsl_dataset_t return (err); } -/* +/** * NB: dataset must not be changing on-disk (eg, is a snapshot or we are * in syncing context). 
*/ @@ -576,7 +579,7 @@ traverse_dataset_destroyed(spa_t *spa, b blkptr, txg_start, resume, flags, func, arg)); } -/* +/** * NB: pool must not be changing on-disk (eg, from zdb or sync context). */ int diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_tx.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_tx.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_tx.c 2012-11-16 11:07:22.141456404 -0700 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_tx.c 2012-11-15 18:17:56.890456220 -0700 @@ -1053,21 +1053,20 @@ dmu_tx_unassign(dmu_tx_t *tx) tx->tx_txg = 0; } -/* - * Assign tx to a transaction group. txg_how can be one of: +/** + * Assign tx to a transaction group. * - * (1) TXG_WAIT. If the current open txg is full, waits until there's - * a new one. This should be used when you're not holding locks. - * If will only fail if we're truly out of space (or over quota). - * - * (2) TXG_NOWAIT. If we can't assign into the current open txg without - * blocking, returns immediately with ERESTART. This should be used - * whenever you're holding locks. On an ERESTART error, the caller - * should drop locks, do a dmu_tx_wait(tx), and try again. - * - * (3) A specific txg. Use this if you need to ensure that multiple - * transactions all sync in the same txg. Like TXG_NOWAIT, it - * returns ERESTART if it can't assign you into the requested txg. + * txg_how can be one of: + * -# TXG_WAIT. If the current open txg is full, waits until there's + * a new one. This should be used when you're not holding locks. + * If will only fail if we're truly out of space (or over quota). + * -# TXG_NOWAIT. If we can't assign into the current open txg without + * blocking, returns immediately with ERESTART. This should be used + * whenever you're holding locks. On an ERESTART error, the caller + * should drop locks, do a dmu_tx_wait(tx), and try again. + * -# A specific txg. 
Use this if you need to ensure that multiple + * transactions all sync in the same txg. Like TXG_NOWAIT, it + * returns ERESTART if it can't assign you into the requested txg. */ int dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how) @@ -1241,7 +1240,7 @@ dmu_tx_callback_register(dmu_tx_t *tx, d list_insert_tail(&tx->tx_callbacks, dcb); } -/* +/** * Call all the commit callbacks on a list, with a given error code. */ void @@ -1256,16 +1255,7 @@ dmu_tx_do_callbacks(list_t *cb_list, int } } -/* - * Interface to hold a bunch of attributes. - * used for creating new files. - * attrsize is the total size of all attributes - * to be added during object creation - * - * For updating/adding a single attribute dmu_tx_hold_sa() should be used. - */ - -/* +/** * hold necessary attribute name for attribute registration. * should be a very rare case where this is needed. If it does * happen it would only happen on the first write to the file system. @@ -1322,6 +1312,15 @@ dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t } } +/** + * Interface to hold a bunch of attributes. Used for creating new + * files. + * + * For updating/adding a single attribute dmu_tx_hold_sa() should be used. + * + * \param[in] attrsize The total size of all attributes to be added + * during object creation. 
+ */ void dmu_tx_hold_sa_create(dmu_tx_t *tx, int attrsize) { @@ -1350,7 +1349,7 @@ dmu_tx_hold_sa_create(dmu_tx_t *tx, int THT_SPILL, 0, 0); } -/* +/** * Hold SA attribute * * dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *, attribute, add, size) diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_zfetch.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_zfetch.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_zfetch.c 2012-10-17 17:00:59.730589598 -0600 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_zfetch.c 2012-11-15 15:59:19.977456426 -0700 @@ -35,17 +35,22 @@ * I'm against tune-ables, but these should probably exist as tweakable globals * until we can get this working the way we want it to. */ +/** + * \addtogroup tunables + * \{ + */ int zfs_prefetch_disable = 0; -/* max # of streams per zfetch */ +/** max # of streams per zfetch */ uint32_t zfetch_max_streams = 8; -/* min time before stream reclaim */ +/** min time before stream reclaim */ uint32_t zfetch_min_sec_reap = 2; -/* max number of blocks to fetch at a time */ +/** max number of blocks to fetch at a time */ uint32_t zfetch_block_cap = 256; -/* number of bytes in a array_read at which we stop prefetching (1Mb) */ +/** number of bytes in an array_read at which we stop prefetching (1Mb) */ uint64_t zfetch_array_rd_sz = 1024 * 1024; +/** \} */ SYSCTL_DECL(_vfs_zfs); SYSCTL_INT(_vfs_zfs, OID_AUTO, prefetch_disable, CTLFLAG_RW, @@ -111,7 +116,7 @@ static zfetch_stats_t zfetch_stats = { kstat_t *zfetch_ksp; -/* +/** * Given a zfetch structure and a zstream structure, determine whether the * blocks to be read are part of a co-linear pair of existing prefetch * streams. If a set is found, coalesce the streams, removing one, and @@ -122,7 +127,7 @@ kstat_t *zfetch_ksp; * last stream, then we are probably in a strided access pattern. So * combine the two sequential streams into a single strided stream. 
* - * If no co-linear streams are found, return NULL. + * \return NULL if no co-linear streams are found, 1 otherwise */ static int dmu_zfetch_colinear(zfetch_t *zf, zstream_t *zh) @@ -191,7 +196,7 @@ dmu_zfetch_colinear(zfetch_t *zf, zstrea return (0); } -/* +/** * Given a zstream_t, determine the bounds of the prefetch. Then call the * routine that actually prefetches the individual blocks. */ @@ -270,7 +275,9 @@ zfetch_fini(void) } } -/* +/** + * Sets up a zfetch data based on an associated dnode. + * * This takes a pointer to a zfetch structure and a dnode. It performs the * necessary setup for the zfetch structure, grokking data from the * associated dnode. @@ -292,7 +299,7 @@ dmu_zfetch_init(zfetch_t *zf, dnode_t *d rw_init(&zf->zf_rwlock, NULL, RW_DEFAULT, NULL); } -/* +/** * This function computes the actual size, in blocks, that can be prefetched, * and fetches it. */ @@ -311,8 +318,8 @@ dmu_zfetch_fetch(dnode_t *dn, uint64_t b return (fetchsz); } -/* - * this function returns the number of blocks that would be prefetched, based +/** + * This function returns the number of blocks that would be prefetched, based * upon the supplied dnode, blockid, and nblks. This is used so that we can * update streams in place, and then prefetch with their old value after the * fact. This way, we can delay the prefetch, but subsequent accesses to the @@ -339,8 +346,8 @@ dmu_zfetch_fetchsz(dnode_t *dn, uint64_t return (fetchsz); } -/* - * given a zfetch and a zstream structure, see if there is an associated zstream +/** + * Given a zfetch and a zstream structure, see if there is an associated zstream * for this block read. If so, it starts a prefetch for the stream it * located and returns true, otherwise it returns false */ @@ -536,7 +543,7 @@ out: return (rc); } -/* +/** * Clean-up state associated with a zfetch structure. This frees allocated * structure members, empties the zf_stream tree, and generally makes things * nice. 
This doesn't free the zfetch_t itself, that's left to the caller. @@ -562,7 +569,7 @@ dmu_zfetch_rele(zfetch_t *zf) zf->zf_dnode = NULL; } -/* +/** * Given a zfetch and zstream structure, insert the zstream structure into the * AVL tree contained within the zfetch structure. Peform the appropriate * book-keeping. It is possible that another thread has inserted a stream which @@ -592,7 +599,7 @@ dmu_zfetch_stream_insert(zfetch_t *zf, z } -/* +/** * Walk the list of zstreams in the given zfetch, find an old one (by time), and * reclaim it for use by the caller. */ @@ -623,7 +630,7 @@ dmu_zfetch_stream_reclaim(zfetch_t *zf) return (zs); } -/* +/** * Given a zfetch and zstream structure, remove the zstream structure from its * container in the zfetch structure. Perform the appropriate book-keeping. */ @@ -660,7 +667,7 @@ dmu_zfetch_streams_equal(zstream_t *zs1, return (1); } -/* +/** * This is the prefetch entry point. It calls all of the other dmu_zfetch * routines to create, delete, find, or operate upon prefetch streams. */ diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dnode.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dnode.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dnode.c 2012-11-16 11:07:22.143455925 -0700 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dnode.c 2012-11-15 18:17:56.918456393 -0700 @@ -39,7 +39,7 @@ static int free_range_compar(const void *node1, const void *node2); static kmem_cache_t *dnode_cache; -/* +/** * Define DNODE_STATS to turn on statistic gathering. By default, it is only * turned on when DEBUG is also defined. */ @@ -429,7 +429,7 @@ dnode_create(objset_t *os, dnode_phys_t return (dn); } -/* +/** * Caller must be holding the dnode handle, which is released upon return. */ static void @@ -999,11 +999,13 @@ dnode_buf_pageout(dmu_buf_t *db, void *a (epb - 1) * sizeof (dnode_handle_t)); } -/* - * errors: - * EINVAL - invalid object number. 
- * EIO - i/o error. - * succeeds even for free dnodes. +/** + * Succeeds even for free dnodes. + * + * \retval 0 Success + * \retval EINVAL invalid object number + * \retval EIO I/O error + * \retval other Other errors */ int dnode_hold_impl(objset_t *os, uint64_t object, int flag, @@ -1139,8 +1141,8 @@ dnode_hold_impl(objset_t *os, uint64_t o return (0); } -/* - * Return held dnode if the object is allocated, NULL if not. +/** + * \return held dnode if the object is allocated, NULL if not. */ int dnode_hold(objset_t *os, uint64_t object, void *tag, dnode_t **dnp) @@ -1148,10 +1150,11 @@ dnode_hold(objset_t *os, uint64_t object return (dnode_hold_impl(os, object, DNODE_MUST_BE_ALLOCATED, tag, dnp)); } -/* +/** * Can only add a reference if there is already at least one - * reference on the dnode. Returns FALSE if unable to add a - * new reference. + * reference on the dnode. + * + * \return FALSE if unable to add a new reference, TRUE otherwise */ boolean_t dnode_add_ref(dnode_t *dn, void *tag) @@ -1304,7 +1307,7 @@ dnode_free(dnode_t *dn, dmu_tx_t *tx) } } -/* +/** * Try to change the block size for the indicated dnode. 
This can only * succeed if there are no blocks allocated or dirty beyond first block */ @@ -1374,7 +1377,7 @@ fail: return (ENOTSUP); } -/* read-holding callers must not rely on the lock being continuously held */ +/** read-holding callers must not rely on the lock being continuously held */ void dnode_new_blkid(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx, boolean_t have_read) { @@ -1710,7 +1713,10 @@ dnode_spill_freed(dnode_t *dn) return (i < TXG_SIZE); } -/* return TRUE if this blkid was freed in a recent txg, or FALSE if it wasn't */ +/** + * \retval TRUE This blkid was freed in a recent txg + * \retval FALSE This blkid was not freed in a recent txg + */ uint64_t dnode_block_freed(dnode_t *dn, uint64_t blkid) { @@ -1754,7 +1760,9 @@ dnode_block_freed(dnode_t *dn, uint64_t return (i < TXG_SIZE); } -/* call from syncing context when we actually write/free space for this dnode */ +/** + * call from syncing context when we actually write/free space for this dnode + */ void dnode_diduse_space(dnode_t *dn, int64_t delta) { @@ -1783,7 +1791,7 @@ dnode_diduse_space(dnode_t *dn, int64_t mutex_exit(&dn->dn_mtx); } -/* +/** * Call when we think we're going to write/free space in open context. * Be conservative (ie. OK to write less than this or free more than * this, but don't write more or free less). @@ -1803,15 +1811,16 @@ dnode_willuse_space(dnode_t *dn, int64_t dmu_tx_willuse_space(tx, space); } -/* - * This function scans a block at the indicated "level" looking for - * a hole or data (depending on 'flags'). If level > 0, then we are - * scanning an indirect block looking at its pointers. If level == 0, - * then we are looking at a block of dnodes. If we don't find what we - * are looking for in the block, we return ESRCH. Otherwise, return - * with *offset pointing to the beginning (if searching forwards) or - * end (if searching backwards) of the range covered by the block - * pointer we matched on (or dnode). 
+/** + * Scans a block at the indicated "level" looking for + * a hole or data (depending on 'flags'). + * + * If level > 0, then we are scanning an indirect block looking at its + * pointers. If level == 0, then we are looking at a block of dnodes. + * If we don't find what we are looking for in the block, we return + * ESRCH. Otherwise, return with *offset pointing to the beginning + * (if searching forwards) or end (if searching backwards) of the range + * covered by the block pointer we matched on (or dnode). * * The basic search algorithm used below by dnode_next_offset() is to * use this function to search up the block tree (widen the search) until @@ -1927,7 +1936,7 @@ dnode_next_offset_level(dnode_t *dn, int return (error); } -/* +/** * Find the next hole, data, or sparse region at or after *offset. * The value 'blkfill' tells us how many items we expect to find * in an L0 data block; this value is 1 for normal objects, diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dnode_sync.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dnode_sync.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dnode_sync.c 2012-11-16 11:07:22.143455925 -0700 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dnode_sync.c 2012-11-15 18:17:56.922456902 -0700 @@ -301,9 +301,9 @@ free_children(dmu_buf_impl_t *db, uint64 return (all ? ALL : blocks_freed); } -/* - * free_range: Traverse the indicated range of the provided file - * and "free" all the blocks contained there. +/** + * Traverses the indicated range of the provided file + * and "free"s all the blocks contained there. */ static void dnode_sync_free_range(dnode_t *dn, uint64_t blkid, uint64_t nblks, dmu_tx_t *tx) @@ -369,8 +369,8 @@ dnode_sync_free_range(dnode_t *dn, uint6 } } -/* - * Try to kick all the dnodes dbufs out of the cache... +/** + * Tries to kick all the dnode's dbufs out of the cache... 
*/ void dnode_evict_dbufs(dnode_t *dn) @@ -522,8 +522,8 @@ dnode_sync_free(dnode_t *dn, dmu_tx_t *t */ } -/* - * Write out the dnode's dirty buffers. +/** + * Writes out the dnode's dirty buffers. */ void dnode_sync(dnode_t *dn, dmu_tx_t *tx) diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/doxygen.dox SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/doxygen.dox --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/doxygen.dox 1969-12-31 17:00:00.000000000 -0700 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/doxygen.dox 2012-11-15 17:56:36.662456919 -0700 @@ -0,0 +1,549 @@ +/* + * CDDL HEADER START + * + * The contents of this file are subject to the terms of the + * Common Development and Distribution License (the "License"). + * You may not use this file except in compliance with the License. + * + * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE + * or http://www.opensolaris.org/os/licensing. + * See the License for the specific language governing permissions + * and limitations under the License. + * + * When distributing Covered Code, include this CDDL HEADER in each + * file and include the License file at usr/src/OPENSOLARIS.LICENSE. + * If applicable, add the following below this CDDL HEADER, with the + * fields enclosed by brackets "[]" replaced with your own identifying + * information: Portions Copyright [yyyy] [name of copyright owner] + * + * CDDL HEADER END + */ +/* + * Copyright (c) 2011, 2012 Spectra Logic + * All rights reserved. + */ + +/* This file contains extra doxygen tags only; it has no code */ + +/** + * \defgroup tunables + * Knobs whose value can be set at compile time or kernel load time + */ + +/** + * \page "Documentation Format" + * The ZFS code base is documented with Doxygen styled C comments. 
+ * To ensure consistency, please adhere to the following guidelines when + * writing ZFS code: + * - General + * - Doxygen comments in ZFS use the Javadoc style. + * - For objects that support brief comments (files, functions, + * data structure fields), it is assumed that "auto-brief" is + * enabled and that the first sentence of the description will + * be rendered as a brief comment. + * - All comments should add information that cannot be easily + * discerned from the code directly. For example, don't add + * Doxygen comments to function parameters like + * boolean_t enable where the parameter's meaning is + * already well expressed by its type and/or name. + * - Files + * - At the top of every .c and .h file should be a \\file + * tag containing the filename. + * - Below that should be a one-line "brief" description of the + * file's purpose. + * - Below that should be the detailed description of the file. + * It should be as thorough as possible and visual markup is + * encouraged where appropriate. + * - Additional sections of file scope information may occur + * elsewhere in the file (e.g. the description of an algorithm + * implemented by a group of functions that follows), but must + * be tagged with \\file so that it is properly + * aggregated by Doxygen. + * - Functions + * - API documentation should be located immediately above the + * function declaration in the header exposing this API. + * - The API documentation for static/module local functions + * should be located immediately above the function definition + * (not declaration). + * - Implementation documentation should be located immediately + * above the function's definition (not declaration). + * - Every function's documentation may contain: + * - A "brief" one-line description. + * - A \\param tag for every parameter warranting + * additional documentation. Where it is not already + * clear from C context, the parameter's direction + * should be specified. 
The direction can be either + * [in] for an input to the function, + * [out] for an output, or [in,out] + * for a bidirectional parameter. + * - A description of the return value. This may be done + * using either a single \\return tag, or + * multiple \\retval tags. \\retval + * tags are preferred where the return value has a few + * discrete possibilities. + * - Any \\invariants, if applicable. + * - A detailed description including information + * pertinent to those modifying the function. + * - Any important \\notes or \\todos, + * if applicable. + * - Data structures + * - Structs should be documented at their definitions. The + * documentation block should contain a "brief" one-line + * description, and a detailed description (if necessary). + * - Members' documentation can be either above the member or + * in-lined using a "<" comment just after the member. + * - Tunables + * - ZFS contains multiple tunable knobs that can be set at + * module/kernel load time or during operation. They should + * be documented by placing their definitions in a + * \\addtogroup tunables block, or including an + * \\ingroup tunables directive in their documentation + * block. + * \todo Devise an elegant method for including the tunable's + * path in the documentation. For example, the + * documentation for zfs_no_write_throttle should tell + * you that its path is "vfs.zfs.no_write_throttle". + * + * Here is an example of the above guidelines: +\verbatim +/** + * \file example.c + * Examples on proper doxygen usage. + * + * This file contains examples of how to properly use doxygen markup in the + * ZFS module. Also, it can help you concatenate strings + */ + +/** + * Classroom Roster Element + * + * The classroom roster is a singly linked list that stores all students' + * information. This struct is one element in the list +*/ +struct roster_elem { + /** + * A key to the student's last report card in the database. 
+ * + * \note The database is volatile; the key will be invalid following a + * reboot. Do not save the key to persistent storage. + */ + int report_card_key; + char name[80]; /**< The student's name */ + bool is_male; /**< True iff the student is a boy */ + + /** + * pointer to next elem in list. + * A null value signifies end of list + */ + struct roster_elem* next; +}; + +/** + * Concatenate two strings + * + * Copies the C-string pointed to by src to the end of the C-string pointed + * to by dest. + * + * \warning Does not verify that dest has enough space available. This can + * lead to buffer overflows. It is recomended to use strncat or + * strlcat instead. + * + * \param[in,out] dest Pointer to the destination string + * \param[in] src Pointer to the source string + * + * \return A pointer to the resulting string dest + */ +char * +strcat(char *dest, const char *src) +{ + return (0); +} +\endverbatim + */ + +/** + * \page Glossary + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
+ * <tr><th>Term</th><th>Definition</th></tr>
+ * <tr><td>ARC</td><td>Adaptive Replacement Cache</td></tr>
+ * <tr><td>DVA</td><td>Data Virtual Address</td></tr>
+ * <tr><td>DMU</td><td>Data Management Unit</td></tr>
+ * <tr><td>SPA</td><td>Storage Pool Allocator</td></tr>
+ * <tr><td>überblock</td><td>The root block of the tree of blocks on each pool.</td></tr>
+ * <tr><td>ZAP</td><td>ZFS Attribute Processor</td></tr>
+ * <tr><td>ZIL</td><td>ZFS Intent Log</td></tr>
+ * <tr><td>ZPL</td><td>ZFS Posix Layer</td></tr>
+ */ + +/** + * \page RaidZ + * + *

+ * <center><b>Encoding</b></center>

+ * + * The raidz vdev supports single, double, and triple parity. For single + * parity, we use a simple XOR of all the data columns. For double or triple + * parity, we use a special case of Reed-Solomon coding. This extends the + * technique described in "The mathematics of RAID-6" by H. Peter Anvin by + * drawing on the system described in "A Tutorial on Reed-Solomon Coding for + * Fault-Tolerance in RAID-like Systems" by James S. Plank on which the former + * is also based. The latter is designed to provide higher performance for + * writes. + * + * Note that the Plank paper claimed to support arbitrary N+M, but was then + * amended six years later identifying a critical flaw that invalidates its + * claims. Nevertheless, the technique can be adapted to work for up to + * triple parity. For additional parity, the amendment "Note: Correction to + * the 1997 Tutorial on Reed-Solomon Coding" by James S. Plank and Ying Ding + * is viable, but the additional complexity means that write performance will + * suffer. + * + * All of the methods above operate on a Galois field, defined over the + * integers mod \f$2^N\f$. In our case we choose N=8 for \f$GF(2^8)\f$ so that + * all elements can be expressed with a single byte. Briefly, the operations on + * the field are defined as follows: + * - addition (+) is represented by a bitwise XOR + * - subtraction (-) is therefore identical to addition: \f$A + B = A - B\f$ + * - multiplication of A by 2 (defined as 00000010b, not 1+1) is defined by + * the following bitwise expression: + * \f[ + \begin{array}{rrl} + (A \cdot 2)_7 & = & A_6 \\ + (A \cdot 2)_6 & = & A_5 \\ + (A \cdot 2)_5 & = & A_4 \\ + (A \cdot 2)_4 & = & A_3 + A_7 \\ + (A \cdot 2)_3 & = & A_2 + A_7 \\ + (A \cdot 2)_2 & = & A_1 + A_7 \\ + (A \cdot 2)_1 & = & A_0 \\ + (A \cdot 2)_0 & = & A_7 \\ + \end{array} + \f] + * + * In C, multiplying by 2 is therefore (a << 1) ^ ((a & 0x80) ? 0x1d : + * 0). 
0). As an aside, this multiplication is derived from the error + * correcting primitive polynomial \f$x^8 + x^4 + x^3 + x^2 + 1\f$. + * + * Observe that any number in the field (except for 0) can be expressed as a + * power of 2 -- a generator for the field. We store a table of the powers of 2 + * and logs base 2 for quick look ups, and exploit the fact that + * \f$ A \cdot B \f$ can be rewritten as \f$2^{\log_2 A + \log_2 B }\f$ + * (where '+' is normal addition rather than field addition). The inverse of + * a field element \f$A\f$ (\f$A^{-1}\f$) is therefore + * \f$A ^ {255 - 1} = A^{254}\f$. + * + * The up-to-three parity columns, \f$P, Q, R\f$ over several data columns, + * \f$D_0, \ldots D_{n-1}\f$, can be expressed by field operations: + * + * \f[ + \begin{array}{rrl} + P & = & D_0 + D_1 + \ldots + D_{n-2} + D_{n-1} \\ + Q & = & 2^{n-1} D_0 + 2^{n-2} D_1 + \ldots + 2^1 D_{n-2} + + 2^0 D_{n-1} \\ + & = & ((\ldots((D_0) \cdot 2 + D_1) \cdot 2 + \ldots) + \cdot 2 + D_{n-2}) \cdot 2 + D_{n-1} \\ + R & = & 4^{n-1} D_0 + 4^{n-2} D_1 + \ldots + 4^1 D_{n-2} + + 4^0 D_{n-1} \\ + & = & ((\ldots((D_0) \cdot 4 + D_1) \cdot 4 + \ldots) + \cdot 4 + D_{n-2}) \cdot 4 + D_{n-1} + \end{array} + \f] + * + * We chose 1, 2, and 4 as our generators because 1 corresponds to the trivial + * XOR operation, and 2 and 4 can be computed quickly and generate linearly- + * independent coefficients. (There are no additional coefficients that have + * this property which is why the uncorrected Plank method breaks down.) + * + * See the reconstruction code below for how P, Q and R can be used individually + * or in concert to recover missing data columns. + * + *

+ * <center><b>Reconstruction</b></center>

+ * + * In the general case of reconstruction, we must solve the system of linear + * equations defined by the coeffecients used to generate parity as well as + * the contents of the data and parity disks. This can be expressed with + * vectors for the original data (\f$D\f$) and the actual data (\f$d\f$) and + * parity (\f$p\f$) and a matrix composed of the identity matrix (\f$I\f$) and + * a dispersal matrix (\f$V\f$): + * + \f[ + \left[ \begin{array}{c} + V \\ + I + \end{array} \right] + \times + \left[ \begin{array}{c} + D_0 \\ + \vdots \\ + D_{n-1} + \end{array} \right] + = + \left[ \begin{array}{c} + p_0 \\ + p_{m-1} \\ + d_0 \\ + \vdots \\ + d_{n-1} + \end{array}\right] + \f] + * + * \f$I\f$ is simply a square identity matrix of size \f$n\f$, and \f$V\f$ is a + * vandermonde matrix defined by the coeffecients we chose for the various + * parity columns (1, 2, 4). Note that these values were chosen both for + * simplicity, speedy computation as well as linear separability. + * + \f[ + \left[ \begin{array}{ccccc} + 1 & \ldots & 1 & 1 & 1 \\ + 2^{n-1} & \ldots & 4 & 2 & 1 \\ + 4^{n-1} & \ldots & 16 & 4 & 1 \\ + 1 & \ldots & 0 & 0 & 0 \\ + 0 & \ldots & 0 & 0 & 0 \\ + \vdots & & \vdots & \vdots & \vdots \\ + 0 & \ldots & 1 & 0 & 0 \\ + 0 & \ldots & 0 & 1 & 0 \\ + 0 & \ldots & 0 & 0 & 1 + \end{array} \right] + \times + \left[ \begin{array}{c} + D_0 \\ + D_1 \\ + D_2 \\ + \vdots \\ + D_{n-1} + \end{array} \right] + = + \left[ \begin{array}{c} + p_0 \\ + \vdots \\ + p_{m-1} \\ + d_0 \\ + d_1 \\ + d_2 \\ + \vdots \\ + d_{n-1} + \end{array} \right] + \f] + * + * Note that \f$I\f$, \f$V\f$, \f$d\f$, and \f$p\f$ are known. To compute + * \f$D\f$, we must invert the matrix and use the known data and parity values + * to reconstruct the unknown data values. 
We begin by removing the rows in + * \f$V|I\f$ and \f$d|p\f$ that correspond to failed or missing columns; we + * then make \f$V|I\f$ square (\f$n \times n\f$) and \f$d|p\f$ sized \f$n\f$ by + * removing rows corresponding to unused parity from the bottom up to generate + * \f$(V|I)'\f$ and \f$(d|p)'\f$. We can then generate the inverse of + * \f$(V|I)'\f$ using Gauss-Jordan elimination. In the example below we use + * \f$m=3\f$ parity columns, \f$n=8\f$ data columns, with errors in \f$d_1, + * d_2\f$, and \f$p_1\f$: + * + \f[ (V|I) = + \left[ \begin{array}{cccccccc} + 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 \\ + 128 & 64 & 32 & 16 & 8 & 4 & 2 & 1 \\ + 19 & 205 & 116 & 29 & 64 & 16 & 4 & 1 \\ + 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\ + 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 \\ + 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 \\ + 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 \\ + 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 \\ + 0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 \\ + 0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 \\ + 0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 + \end{array}\right] + \begin{array}{l} + \\ + \leftarrow missing \\ + \\ + \\ + \leftarrow missing \\ + \leftarrow missing \\ + \\ + \\ + \\ + \\ + \\ + \end{array} + \f] + + \f[ (V|I)' = + \left[ \begin{array}{cccccccc} + 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 \\ + 19 & 205 & 116 & 29 & 64 & 16 & 4 & 1 \\ + 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\ + 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 \\ + 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 \\ + 0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 \\ + 0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 \\ + 0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 + \end{array}\right] + \f] + * + * Here we employ Gauss-Jordan elimination to find the inverse of \f$(V|I)\f$'. + * We have carefully chosen the seed values 1, 2, and 4 to ensure that this + * matrix is not singular. 
+ \f[ + \left[ \begin{array}{ccccccccccccccccc} + 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & & + 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\ + 19 & 205 & 116 & 29 & 64 & 16 & 4 & 1 & & + 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 \\ + 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & & + 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 \\ + 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & & + 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 \\ + 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & & + 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 \\ + 0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 & & + 0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 \\ + 0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 & & + 0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 \\ + 0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 & & + 0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 + \end{array}\right] + \f] + + \f[ + \left[ \begin{array}{ccccccccccccccccc} + 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & & + 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 \\ + 1 & 1 & 1 & 1 & 1 & 1 & 1 & 1 & & + 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\ + 19 & 205 & 116 & 29 & 64 & 16 & 4 & 1 & & + 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 \\ + 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & & + 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 \\ + 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & & + 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 \\ + 0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 & & + 0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 \\ + 0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 & & + 0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 \\ + 0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 & & + 0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 + \end{array}\right] + \f] + + \f[ + \left[ \begin{array}{ccccccccccccccccc} + 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & & + 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 \\ + 0 & 1 & 1 & 0 & 0 & 0 & 0 & 0 & & + 1 & 0 & 1 & 1 & 1 & 1 & 1 & 1 \\ + 0 & 205 & 116 & 0 & 0 & 0 & 0 & 0 & & + 0 & 1 & 19 & 29 & 64 & 16 & 4 & 1 \\ + 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & & + 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 \\ + 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & & + 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 \\ + 0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 & & + 0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 \\ + 0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 & & + 0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 \\ + 0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 & & + 0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 + \end{array}\right] + \f] + + \f[ + \left[ \begin{array}{ccccccccccccccccc} + 
1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & & + 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 \\ + 0 & 1 & 1 & 0 & 0 & 0 & 0 & 0 & & + 1 & 0 & 1 & 1 & 1 & 1 & 1 & 1 \\ + 0 & 0 & 185 & 0 & 0 & 0 & 0 & 0 & & + 205 & 1 & 222 & 208 & 141 & 221 & 201 & 204 \\ + 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & & + 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 \\ + 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & & + 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 \\ + 0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 & & + 0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 \\ + 0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 & & + 0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 \\ + 0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 & & + 0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 + \end{array}\right] + \f] + + \f[ + \left[ \begin{array}{ccccccccccccccccc} + 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & & + 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 \\ + 0 & 1 & 1 & 0 & 0 & 0 & 0 & 0 & & + 1 & 0 & 1 & 1 & 1 & 1 & 1 & 1 \\ + 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & & + 166 & 100 & 4 & 40 & 158 & 168 & 216 & 209 \\ + 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & & + 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 \\ + 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & & + 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 \\ + 0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 & & + 0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 \\ + 0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 & & + 0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 \\ + 0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 & & + 0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 + \end{array}\right] + \f] + + \f[ + \left[ \begin{array}{ccccccccccccccccc} + 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & & + 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 \\ + 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & & + 167 & 100 & 5 & 41 & 159 & 169 & 217 & 208 \\ + 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & & + 166 & 100 & 4 & 40 & 158 & 168 & 216 & 209 \\ + 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & & + 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 \\ + 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & & + 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 \\ + 0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 & & + 0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 \\ + 0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 & & + 0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 \\ + 0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 & & + 0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 + \end{array}\right] + \f] + + \f[ (V|I)'^{-1} = + \left[ \begin{array}{cccccccc}\ + 0 & 0 & 1 & 
0 & 0 & 0 & 0 & 0 \\ + 167 & 100 & 5 & 41 & 159 & 169 & 217 & 208 \\ + 166 & 100 & 4 & 40 & 158 & 168 & 216 & 209 \\ + 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 \\ + 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 \\ + 0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 \\ + 0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 \\ + 0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 + \end{array}\right] + \f] + * We can then simply compute \f$D = (V|I)'^-1 \times (d|p)'\f$ to discover the + * values of the missing data. + * + * As is apparent from the example above, the only non-trivial rows in the + * inverse matrix correspond to the data disks that we're trying to + * reconstruct. Indeed, those are the only rows we need as the others would + * only be useful for reconstructing data known or assumed to be valid. For + * that reason, we only build the coefficients in the rows that correspond to + * targeted columns. + */ diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_dataset.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_dataset.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_dataset.c 2012-11-16 11:07:22.146457925 -0700 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_dataset.c 2012-11-15 22:28:00.999460072 -0700 @@ -69,7 +69,7 @@ static dsl_syncfunc_t dsl_dataset_set_re #define DSL_DATASET_IS_DESTROYED(ds) ((ds)->ds_owner == dsl_reaper) -/* +/** * Figure out how much of this delta should be propogated to the dsl_dir * layer. If there's a refreservation, that space has already been * partially accounted for in our ancestors. @@ -946,7 +946,9 @@ dmu_get_recursive_snaps_nvl(const char * /* FreeBSD ioctl compat end */ #endif /* __FreeBSD__ */ -/* +/** + * Destroy 'snapname' in all descendants of 'fsname'. + * * The snapshots must all be in the same pool. 
*/ int @@ -1029,11 +1031,12 @@ dsl_dataset_might_destroy_origin(dsl_dat return (might_destroy); } -/* +/** * If we're removing a clone, and these three conditions are true: - * 1) the clone's origin has no other children - * 2) the clone's origin has no user references - * 3) the clone's origin has been marked for deferred destruction + * -# the clone's origin has no other children + * -# the clone's origin has no user references + * -# the clone's origin has been marked for deferred destruction + * * Then, prepare to remove the origin as part of this sync task group. */ static int @@ -1068,7 +1071,7 @@ dsl_dataset_origin_rm_prep(struct dsl_ds return (0); } -/* +/** * ds must be opened as OWNER. On return (whether successful or not), * ds will be closed and caller can no longer dereference it. */ @@ -1269,7 +1272,7 @@ dsl_dataset_is_dirty(dsl_dataset_t *ds) return (B_FALSE); } -/* +/** * The unique space in the head dataset can be calculated by subtracting * the space used in the most recent snapshot, that is still being used * in this file system, from the space currently in use. To figure out @@ -1416,7 +1419,7 @@ dsl_dataset_origin_check(struct dsl_ds_d return (0); } -/* +/** * If you add new checks here, you may need to add * additional checks to the "temporary" case in * snapshot_check() in dmu_objset.c. @@ -3004,7 +3007,7 @@ dsl_dataset_promote_sync(void *arg1, voi } static char *snaplist_tag = "snaplist"; -/* +/** * Make a list of dsl_dataset_t's for the snapshots between first_obj * (exclusive) and last_obj (inclusive). The list will be in reverse * order (last_obj will be the list_head()). If first_obj == 0, do all @@ -3090,14 +3093,16 @@ snaplist_destroy(list_t *l, boolean_t ow list_destroy(l); } -/* - * Promote a clone. 
Nomenclature note: - * "clone" or "cds": the original clone which is being promoted - * "origin" or "ods": the snapshot which is originally clone's origin - * "origin head" or "ohds": the dataset which is the head - * (filesystem/volume) for the origin - * "origin origin": the origin of the origin's filesystem (typically - * NULL, indicating that the clone is not a clone of a clone). +/** + * Promote a clone. + * + * Nomenclature note: + * - "clone" or "cds": the original clone which is being promoted + * - "origin" or "ods": the snapshot which is originally clone's origin + * - "origin head" or "ohds": the dataset which is the head + * (filesystem/volume) for the origin + * - "origin origin": the origin of the origin's filesystem (typically + * NULL, indicating that the clone is not a clone of a clone). */ int dsl_dataset_promote(const char *name, char *conflsnap) @@ -3358,7 +3363,7 @@ dsl_dataset_clone_swap_sync(void *arg1, dsl_scan_ds_clone_swapped(csa->ohds, csa->cds, tx); } -/* +/** * Swap 'clone' with its origin head datasets. Used at the end of "zfs * recv" into an existing fs to swizzle the file system to the new * version, and by "zfs rollback". Can also be used to swap two @@ -3398,7 +3403,9 @@ retry: return (error); } -/* +/** + * Find the name of a dataset + * * Given a pool name and a dataset object number in that pool, * return the name of that dataset. */ @@ -3669,7 +3676,7 @@ dsl_register_onexit_hold_cleanup(dsl_dat dsl_dataset_user_release_onexit, ca, NULL)); } -/* +/** * If you add new checks here, you may need to add * additional checks to the "temporary" case in * snapshot_check() in dmu_objset.c. @@ -4113,9 +4120,9 @@ top: return (error); } -/* +/** * Called at spa_load time (with retry == B_FALSE) to release a stale - * temporary user hold. Also called by the onexit code (with retry == B_TRUE). + * temporary user hold. Also called by the onexit code (with retry == B_TRUE). 
*/ int dsl_dataset_user_release_tmp(dsl_pool_t *dp, uint64_t dsobj, char *htag, @@ -4189,11 +4196,11 @@ dsl_dataset_get_holds(const char *dsname return (0); } -/* - * Note, this function is used as the callback for dmu_objset_find(). We - * always return 0 so that we will continue to find and process - * inconsistent datasets, even if we encounter an error trying to - * process one of them. +/** + * \note This function is used as the callback for dmu_objset_find(). We + * always return 0 so that we will continue to find and process + * inconsistent datasets, even if we encounter an error trying to + * process one of them. */ /* ARGSUSED */ int diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_deadlist.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_deadlist.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_deadlist.c 2012-11-16 11:07:22.147457334 -0700 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_deadlist.c 2012-11-15 18:17:56.958456109 -0700 @@ -237,7 +237,7 @@ dsl_deadlist_insert(dsl_deadlist_t *dl, dle_enqueue(dl, dle, bp, tx); } -/* +/** * Insert new key in deadlist, which must be > all current entries. * mintxg is not inclusive. */ @@ -262,7 +262,7 @@ dsl_deadlist_add_key(dsl_deadlist_t *dl, mintxg, obj, tx)); } -/* +/** * Remove this key, merging its entries into the previous key. */ void @@ -289,7 +289,7 @@ dsl_deadlist_remove_key(dsl_deadlist_t * VERIFY3U(0, ==, zap_remove_int(dl->dl_os, dl->dl_object, mintxg, tx)); } -/* +/** * Walk ds's snapshots to regenerate generate ZAP & AVL. */ static void @@ -362,7 +362,7 @@ dsl_deadlist_space(dsl_deadlist_t *dl, mutex_exit(&dl->dl_lock); } -/* +/** * return space used in the range (mintxg, maxtxg]. * Includes maxtxg, does not include mintxg. 
* mintxg and maxtxg must both be keys in the deadlist (unless maxtxg is @@ -447,7 +447,7 @@ dsl_deadlist_insert_cb(void *arg, const return (0); } -/* +/** * Merge the deadlist pointed to by 'obj' into dl. obj will be left as * an empty deadlist. */ @@ -486,7 +486,7 @@ dsl_deadlist_merge(dsl_deadlist_t *dl, u dmu_buf_rele(bonus, FTAG); } -/* +/** * Remove entries on dl that are >= mintxg, and put them on the bpobj. */ void diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_deleg.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_deleg.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_deleg.c 2012-11-16 11:07:22.148457318 -0700 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_deleg.c 2012-11-15 18:17:56.963456218 -0700 @@ -23,7 +23,10 @@ * Copyright (c) 2012 by Delphix. All rights reserved. */ -/* +/** + * \file dsl_deleg.c + * DSL permissions + * * DSL permissions are stored in a two level zap attribute * mechanism. The first level identifies the "class" of * entry. The class is identified by the first 2 letters of @@ -82,7 +85,7 @@ #include "zfs_deleg.h" -/* +/** * Validate that user is allowed to delegate specified permissions. * * In order to delegate "create" you must have "create" @@ -116,7 +119,7 @@ dsl_deleg_can_allow(char *ddname, nvlist return (0); } -/* +/** * Validate that user is allowed to unallow specified permissions. They * must have the 'allow' permission, and even then can only unallow * perms for their uid. @@ -271,7 +274,7 @@ dsl_deleg_set(const char *ddname, nvlist return (error); } -/* +/** * Find all 'allow' permissions from a given point and then continue * traversing up to the root. * @@ -359,7 +362,7 @@ dsl_deleg_get(const char *ddname, nvlist return (0); } -/* +/** * Routines for dsl_deleg_access() -- access checking. */ typedef struct perm_set { @@ -381,7 +384,7 @@ perm_set_compare(const void *arg1, const return (val > 0 ? 
1 : -1); } -/* +/** * Determine whether a specified permission exists. * * First the base attribute has to be retrieved. i.e. ul$12 @@ -409,7 +412,7 @@ dsl_check_access(objset_t *mos, uint64_t return (error); } -/* +/** * check a specified user/group for a requested permission */ static int @@ -452,7 +455,7 @@ dsl_check_user_access(objset_t *mos, uin return (EPERM); } -/* +/** * Iterate over the sets specified in the specified zapobj * and load them into the permsets avl tree. */ @@ -492,7 +495,7 @@ dsl_load_sets(objset_t *mos, uint64_t za return (0); } -/* +/** * Load all permissions user based on cred belongs to. */ static void @@ -523,7 +526,7 @@ dsl_load_user_sets(objset_t *mos, uint64 } } -/* +/** * Check if user has requested permission. If descendent is set, must have * descendent perms. */ @@ -696,8 +699,8 @@ copy_create_perms(dsl_dir_t *dd, uint64_ zap_cursor_fini(&zc); } -/* - * set all create time permission on new dataset. +/** + * Set all create time permission on new dataset. 
*/ void dsl_deleg_set_create_perms(dsl_dir_t *sdd, dmu_tx_t *tx, cred_t *cr) diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_dir.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_dir.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_dir.c 2012-11-16 11:07:22.149455958 -0700 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_dir.c 2012-11-15 22:29:23.342456782 -0700 @@ -205,7 +205,7 @@ dsl_dir_close(dsl_dir_t *dd, void *tag) dmu_buf_rele(dd->dd_dbuf, tag); } -/* buf must be long enough (MAXNAMELEN + strlen(MOS_DIR_NAME) + 1 should do) */ +/** buf must be long enough (MAXNAMELEN + strlen(MOS_DIR_NAME) + 1 should do) */ void dsl_dir_name(dsl_dir_t *dd, char *buf) { @@ -228,7 +228,7 @@ dsl_dir_name(dsl_dir_t *dd, char *buf) } } -/* Calculate name length, avoiding all the strcat calls of dsl_dir_name */ +/** Calculate name length, avoiding all the strcat calls of dsl_dir_name */ int dsl_dir_namelen(dsl_dir_t *dd) { @@ -300,7 +300,7 @@ getcomponent(const char *path, char *com return (0); } -/* +/** * same as dsl_open_dir, ignore the first component of name and use the * spa instead */ @@ -399,7 +399,7 @@ dsl_dir_open_spa(spa_t *spa, const char return (err); } -/* +/** * Return the dsl_dir_t, and possibly the last component which couldn't * be found in *tail. Return NULL if the path is bogus, or if * tail==NULL and we couldn't parse the whole name. (*tail)[0] == '@' @@ -620,7 +620,7 @@ dsl_dir_space_towrite(dsl_dir_t *dd) return (space); } -/* +/** * How much space would dd have available if ancestor had delta applied * to it? If ondiskonly is set, we're only interested in what's * on-disk, not estimated pending changes. @@ -809,7 +809,7 @@ dsl_dir_tempreserve_impl(dsl_dir_t *dd, } } -/* +/** * Reserve space in this dsl_dir, to be used in this tx's txg. 
* After the space has been dirtied (and dsl_dir_willuse_space() * has been called), the reservation should be canceled, using @@ -870,7 +870,7 @@ dsl_dir_tempreserve_space(dsl_dir_t *dd, return (err); } -/* +/** * Clear a temporary reservation that we previously made with * dsl_dir_tempreserve_space(). */ @@ -927,7 +927,7 @@ dsl_dir_willuse_space_impl(dsl_dir_t *dd dsl_dir_willuse_space_impl(dd->dd_parent, parent_space, tx); } -/* +/** * Call in open context when we think we're going to write/free space, * eg. when dirtying data. Be conservative (ie. OK to write less than * this or free more than this, but don't write more or free less). @@ -939,7 +939,7 @@ dsl_dir_willuse_space(dsl_dir_t *dd, int dsl_dir_willuse_space_impl(dd, space, tx); } -/* call from syncing context when we actually write/free space for this dd */ +/** call from syncing context when we actually write/free space for this dd */ void dsl_dir_diduse_space(dsl_dir_t *dd, dd_used_t type, int64_t used, int64_t compressed, int64_t uncompressed, dmu_tx_t *tx) @@ -1221,7 +1221,7 @@ closest_common_ancestor(dsl_dir_t *ds1, return (NULL); } -/* +/** * If delta is applied to dd, how much of that delta would be applied to * ancestor? Syncing context only. 
*/ diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_pool.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_pool.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_pool.c 2012-11-16 11:07:22.150456711 -0700 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_pool.c 2012-11-16 17:50:26.717455525 -0700 @@ -44,14 +44,17 @@ #include #include +/** \addtogroup tunables */ +/* \{ */ int zfs_no_write_throttle = 0; -int zfs_write_limit_shift = 3; /* 1/8th of physical memory */ -int zfs_txg_synctime_ms = 1000; /* target millisecs to sync a txg */ +int zfs_write_limit_shift = 3; /**< 1/8th of physical memory */ +int zfs_txg_synctime_ms = 1000; /**< target millisecs to sync a txg */ -uint64_t zfs_write_limit_min = 32 << 20; /* min write limit is 32MB */ -uint64_t zfs_write_limit_max = 0; /* max data payload per txg */ +uint64_t zfs_write_limit_min = 32 << 20; /**< min write limit is 32MB */ +uint64_t zfs_write_limit_max = 0; /**< max data payload per txg */ uint64_t zfs_write_limit_inflated = 0; uint64_t zfs_write_limit_override = 0; +/* \} */ kmutex_t zfs_write_limit_lock; @@ -339,7 +342,7 @@ dsl_pool_create(spa_t *spa, nvlist_t *zp return (dp); } -/* +/** * Account for the meta-objset space in its placeholder dsl_dir. */ void @@ -568,7 +571,7 @@ dsl_pool_sync_done(dsl_pool_t *dp, uint6 ASSERT(!dmu_objset_is_dirty(dp->dp_meta_objset, txg)); } -/* +/** * TRUE if the current thread is the tx_sync_thread or if we * are being called from SPA context during pool initialization. */ @@ -852,7 +855,7 @@ dsl_pool_vnrele_taskq(dsl_pool_t *dp) return (dp->dp_vnrele_taskq); } -/* +/** * Walk through the pool-wide zap object of temporary snapshot user holds * and release them. */ @@ -883,7 +886,7 @@ dsl_pool_clean_tmp_userrefs(dsl_pool_t * zap_cursor_fini(&zc); } -/* +/** * Create the pool-wide zap object for storing temporary snapshot holds. 
*/ void @@ -933,7 +936,7 @@ dsl_pool_user_hold_rele_impl(dsl_pool_t return (error); } -/* +/** * Add a temporary hold for the given dataset object and tag. */ int @@ -943,7 +946,7 @@ dsl_pool_user_hold(dsl_pool_t *dp, uint6 return (dsl_pool_user_hold_rele_impl(dp, dsobj, tag, now, tx, B_TRUE)); } -/* +/** * Release a temporary hold for the given dataset object and tag. */ int diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_prop.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_prop.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_prop.c 2012-10-17 17:00:59.737595305 -0600 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_prop.c 2012-11-15 01:07:23.435881137 -0700 @@ -218,12 +218,12 @@ dsl_prop_get_ds(dsl_dataset_t *ds, const intsz, numints, buf, setpoint, snapshot)); } -/* +/** * Register interest in the named property. We'll call the callback * once to notify it of the current property value, and again each time * the property changes, until this callback is unregistered. * - * Return 0 on success, errno if the prop is not an integer value. + * \return 0 on success or errno if the prop is not an integer value. */ int dsl_prop_register(dsl_dataset_t *ds, const char *propname, @@ -283,13 +283,13 @@ dsl_prop_get(const char *dsname, const c return (err); } -/* +/** * Get the current property value. It may have changed by the time this * function returns, so it is NOT safe to follow up with * dsl_prop_register() and assume that the value has not changed in * between. * - * Return 0 on success, ENOENT if ddname is invalid. + * \return 0 on success or ENOENT if ddname is invalid. */ int dsl_prop_get_integer(const char *ddname, const char *propname, @@ -311,15 +311,15 @@ dsl_prop_setarg_init_uint64(dsl_prop_set psa->psa_effective_value = -1ULL; } -/* +/** * Predict the effective value of the given special property if it were set with * the given value and source. 
This is not a general purpose function. It exists * only to handle the special requirements of the quota and reservation * properties. The fact that these properties are non-inheritable greatly * simplifies the prediction logic. * - * Returns 0 on success, a positive error code on failure, or -1 if called with - * a property not handled by this function. + * \return 0 on success, a positive error code on failure, or -1 if called + * with a property not handled by this function. */ int dsl_prop_predict_sync(dsl_dir_t *dd, dsl_prop_setarg_t *psa) @@ -429,9 +429,11 @@ dsl_prop_check_prediction(dsl_dir_t *dd, } #endif -/* - * Unregister this callback. Return 0 on success, ENOENT if ddname is - * invalid, ENOMSG if no matching callback registered. +/** + * Unregister this callback. + * + * \return 0 on success, ENOENT if ddname is invalid, or ENOMSG if no matching + * callback registered. */ int dsl_prop_unregister(dsl_dataset_t *ds, const char *propname, @@ -463,7 +465,7 @@ dsl_prop_unregister(dsl_dataset_t *ds, c return (0); } -/* +/** * Return the number of callbacks that are registered for this dataset. */ int @@ -996,7 +998,7 @@ dsl_prop_get_all_impl(objset_t *mos, uin return (err); } -/* +/** * Iterate over all properties for this dataset and return them in an nvlist. */ static int @@ -1073,7 +1075,7 @@ dsl_prop_set_hasrecvd_impl(objset_t *os, dsl_prop_set_sync, ds, &psa, 2); } -/* +/** * Call after successfully receiving properties to ensure that only the first * receive on or after SPA_VERSION_RECVD_PROPS blows away local properties. 
*/ diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_scan.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_scan.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_scan.c 2012-11-16 11:07:22.151456247 -0700 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_scan.c 2012-11-16 17:53:16.046456388 -0700 @@ -58,17 +58,22 @@ static scan_cb_t dsl_scan_remove_cb; static dsl_syncfunc_t dsl_scan_cancel_sync; static void dsl_scan_sync_state(dsl_scan_t *, dmu_tx_t *tx); -unsigned int zfs_top_maxinflight = 32; /* maximum I/Os per top-level */ -unsigned int zfs_resilver_delay = 2; /* number of ticks to delay resilver */ -unsigned int zfs_scrub_delay = 4; /* number of ticks to delay scrub */ -unsigned int zfs_scan_idle = 50; /* idle window in clock ticks */ - -unsigned int zfs_scan_min_time_ms = 1000; /* min millisecs to scrub per txg */ -unsigned int zfs_free_min_time_ms = 1000; /* min millisecs to free per txg */ -unsigned int zfs_resilver_min_time_ms = 3000; /* min millisecs to resilver - per txg */ -boolean_t zfs_no_scrub_io = B_FALSE; /* set to disable scrub i/o */ -boolean_t zfs_no_scrub_prefetch = B_FALSE; /* set to disable srub prefetching */ +/** + * \addtogroup tunables + * \{ + */ +unsigned int zfs_top_maxinflight = 32; /**< maximum I/Os per top-level */ +unsigned int zfs_resilver_delay = 2; /**< number of ticks to delay resilver*/ +unsigned int zfs_scrub_delay = 4; /**< number of ticks to delay scrub */ +unsigned int zfs_scan_idle = 50; /**< idle window in clock ticks */ + +unsigned int zfs_scan_min_time_ms = 1000; /**< min millisecs to scrub per txg */ +unsigned int zfs_free_min_time_ms = 1000; /**< min millisecs to free per txg */ +/** min millisecs to resilver per txg */ +unsigned int zfs_resilver_min_time_ms = 3000; +boolean_t zfs_no_scrub_io = B_FALSE; /**< set to disable scrub i/o */ +/** set to disable scrub prefetching */ +boolean_t zfs_no_scrub_prefetch = B_FALSE; 
SYSCTL_DECL(_vfs_zfs); TUNABLE_INT("vfs.zfs.top_maxinflight", &zfs_top_maxinflight); @@ -98,7 +103,7 @@ SYSCTL_INT(_vfs_zfs, OID_AUTO, no_scrub_ TUNABLE_INT("vfs.zfs.no_scrub_prefetch", &zfs_no_scrub_prefetch); SYSCTL_INT(_vfs_zfs, OID_AUTO, no_scrub_prefetch, CTLFLAG_RW, &zfs_no_scrub_prefetch, 0, "Disable scrub prefetching"); - +/** \} */ enum ddt_class zfs_scrub_ddt_class_max = DDT_CLASS_DUPLICATE; #define DSL_SCAN_IS_SCRUB_RESILVER(scn) \ @@ -110,8 +115,8 @@ extern int zfs_txg_timeout; /* the order has to match pool_scan_type */ static scan_cb_t *scan_funcs[POOL_SCAN_FUNCS] = { NULL, - dsl_scan_scrub_cb, /* POOL_SCAN_SCRUB */ - dsl_scan_scrub_cb, /* POOL_SCAN_RESILVER */ + dsl_scan_scrub_cb, /**< POOL_SCAN_SCRUB */ + dsl_scan_scrub_cb, /**< POOL_SCAN_RESILVER */ }; int @@ -630,8 +635,9 @@ dsl_scan_check_resume(dsl_scan_t *scn, c } /* - * Return nonzero on i/o error. - * Return new buf to write out in *bufp. + * \param bufp return location for new buf to write out + * + * \return nonzero on i/o error. */ static int dsl_scan_recurse(dsl_scan_t *scn, dsl_dataset_t *ds, dmu_objset_type_t ostype, @@ -1181,7 +1187,7 @@ enqueue_cb(spa_t *spa, uint64_t dsobj, c return (0); } -/* +/** * Scrub/dedup interaction. * * If there are N references to a deduped block, we don't want to scrub it @@ -1562,7 +1568,7 @@ dsl_scan_sync(dsl_pool_t *dp, dmu_tx_t * dsl_scan_sync_state(scn, tx); } -/* +/** * This will start a new scan, or restart an existing one. */ void diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/lzjb.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/lzjb.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/lzjb.c 2012-10-17 17:00:59.739590088 -0600 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/lzjb.c 2012-10-17 16:27:36.802647072 -0600 @@ -23,7 +23,10 @@ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. 
*/ -/* +/** + * \file lzjb.c + * LZJB compression of on-disk data + * * We keep our own copy of this algorithm for 3 main reasons: * 1. If we didn't, anyone modifying common/os/compress.c would * directly break our on disk format diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/metaslab.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/metaslab.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/metaslab.c 2012-11-16 11:07:22.152456871 -0700 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/metaslab.c 2012-11-15 18:17:57.002456976 -0700 @@ -31,7 +31,7 @@ #include #include -/* +/** * Allow allocations to switch to gang blocks quickly. We do this to * avoid having to load lots of space_maps in a given txg. There are, * however, some cases where we want to avoid "fast" ganging and instead @@ -46,10 +46,12 @@ uint64_t metaslab_aliquot = 512ULL << 10; uint64_t metaslab_gang_bang = SPA_MAXBLOCKSIZE + 1; /* force gang blocks */ -/* +/** * This value defines the number of allowed allocation failures per vdev. * If a device reaches this threshold in a given txg then we consider skipping * allocations on that device. + * + * \ingroup tunables */ int zfs_mg_alloc_failures = 0; @@ -59,12 +61,12 @@ SYSCTL_INT(_vfs_zfs, OID_AUTO, mg_alloc_ "Number of allowed allocation failures per vdev"); TUNABLE_INT("vfs.zfs.mg_alloc_failures", &zfs_mg_alloc_failures); -/* +/** * Metaslab debugging: when set, keeps all space maps in core to verify frees. */ static int metaslab_debug = 0; -/* +/** * Minimum size which forces the dynamic allocator to change * it's allocation strategy. Once the space map cannot satisfy * an allocation of this size then it switches to using more @@ -72,7 +74,7 @@ static int metaslab_debug = 0; */ uint64_t metaslab_df_alloc_threshold = SPA_MAXBLOCKSIZE; -/* +/** * The minimum free space, in percent, which must be available * in a space map to continue allocations in a first-fit fashion. 
* Once the space_map's free space drops below this level we dynamically @@ -80,18 +82,18 @@ uint64_t metaslab_df_alloc_threshold = S */ int metaslab_df_free_pct = 4; -/* +/** * A metaslab is considered "free" if it contains a contiguous * segment which is greater than metaslab_min_alloc_size. */ uint64_t metaslab_min_alloc_size = DMU_MAX_ACCESS; -/* +/** * Max number of space_maps to prefetch. */ int metaslab_prefetch_limit = SPA_DVAS_PER_BP; -/* +/** * Percentage bonus multiplier for metaslabs that are in the bonus area. */ int metaslab_smo_bonus_pct = 150; @@ -376,7 +378,7 @@ metaslab_segsize_compare(const void *x1, return (0); } -/* +/** * This is a helper function that can be used by the allocator to find * a suitable block to allocate. This will search the specified AVL * tree looking for a block that matches the specified criteria. @@ -463,7 +465,7 @@ metaslab_pp_free(space_map_t *sm, uint64 /* No need to update cursor */ } -/* +/** * Return the maximum contiguous segment within the metaslab. */ uint64_t @@ -512,7 +514,8 @@ static space_map_ops_t metaslab_ff_ops = /* * ========================================================================== - * Dynamic block allocator - + * Dynamic block allocator + * * Uses the first fit allocation scheme until space get low and then * adjusts to a best fit allocation method. Uses metaslab_df_alloc_threshold * and metaslab_df_free_pct to determine when to switch the allocation scheme. @@ -925,7 +928,7 @@ metaslab_passivate(metaslab_t *msp, uint ASSERT((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0); } -/* +/** * Write a metaslab to disk in the context of the specified transaction group. */ void @@ -1023,7 +1026,7 @@ metaslab_sync(metaslab_t *msp, uint64_t dmu_tx_commit(tx); } -/* +/** * Called after a transaction group has completely synced to mark * all of the metaslab's free space as usable. 
*/ @@ -1281,7 +1284,7 @@ metaslab_group_alloc(metaslab_group_t *m return (offset); } -/* +/** * Allocate a block for the specified i/o. */ static int @@ -1469,7 +1472,7 @@ next: return (ENOSPC); } -/* +/** * Free the block represented by DVA in the context of the specified * transaction group. */ @@ -1515,7 +1518,7 @@ metaslab_free_dva(spa_t *spa, const dva_ mutex_exit(&msp->ms_lock); } -/* +/** * Intent log support: upon opening the pool after a crash, notify the SPA * of blocks that the intent log has allocated for immediate write, but * which are still considered free by the SPA because the last transaction diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/refcount.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/refcount.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/refcount.c 2012-10-17 17:00:59.740590136 -0600 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/refcount.c 2012-10-07 20:59:13.842587710 -0600 @@ -54,6 +54,7 @@ refcount_fini(void) kmem_cache_destroy(reference_history_cache); } +/** refcount_t objects must be initialized with refcount_create() */ void refcount_create(refcount_t *rc) { diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/rrwlock.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/rrwlock.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/rrwlock.c 2012-10-17 17:00:59.741590375 -0600 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/rrwlock.c 2012-11-15 07:51:33.545517134 -0700 @@ -26,9 +26,9 @@ #include #include -/* - * This file contains the implementation of a re-entrant read - * reader/writer lock (aka "rrwlock"). +/** + * \file rrwlock.c + * A re-entrant read reader/writer lock (aka "rrwlock"). * * This is a normal reader/writer lock with the additional feature * of allowing threads who have already obtained a read lock to @@ -68,7 +68,7 @@ * waiting writers. 
Hence, we do not starve writers. */ -/* global key for TSD */ +/** global key for TSD */ uint_t rrw_tsd_key; typedef struct rrw_node { @@ -91,7 +91,7 @@ rrn_find(rrwlock_t *rrl) return (NULL); } -/* +/** * Add a node to the head of the singly linked list. */ static void @@ -105,7 +105,7 @@ rrn_add(rrwlock_t *rrl) VERIFY(tsd_set(rrw_tsd_key, rn) == 0); } -/* +/** * If a node is found for 'rrl', then remove the node from this * thread's list and return TRUE; otherwise return FALSE. */ @@ -201,6 +201,11 @@ rrw_enter_write(rrwlock_t *rrl) mutex_exit(&rrl->rr_lock); } +/** + * \param[in] tag Used in reference count tracking. The value + * used in rrw_enter() must also be used any + * corresponding rrw_exit()s. + */ void rrw_enter(rrwlock_t *rrl, krw_t rw, void *tag) { @@ -210,6 +215,11 @@ rrw_enter(rrwlock_t *rrl, krw_t rw, void rrw_enter_write(rrl); } +/** + * \param[in] tag Used in reference count tracking. The value + * used in rrw_exit() must match that used by + * its corresponding rrw_enter(). + */ void rrw_exit(rrwlock_t *rrl, void *tag) { diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sa.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sa.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sa.c 2012-11-16 11:07:22.154458696 -0700 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sa.c 2012-11-15 18:17:57.010456650 -0700 @@ -43,8 +43,9 @@ #include #include -/* - * ZFS System attributes: +/** + * \file sa.c + * ZFS System attributes * * A generic mechanism to allow for arbitrary attributes * to be stored in a dnode. The data will be stored in the bonus buffer of @@ -55,7 +56,7 @@ * attributes that would be in the way of the blkptr_t will be relocated * into the spill block. * - * Attribute registration: + *

<h2>Attribute registration</h2>

* * Stored persistently on a per dataset basis * a mapping between attribute "string" names and their actual attribute @@ -64,7 +65,7 @@ * id value. If an attribute can have a variable size then the value * 0 will be used to indicate this. * - * Attribute Layout: + *

<h2>Attribute Layout</h2>

* * Attribute layouts are a way to compactly store multiple attributes, but * without taking the overhead associated with managing each attribute @@ -110,7 +111,8 @@ * data and special "data locator" function if the data isn't in a contiguous * location. * - * Byteswap implications: + *

<h2>Byteswap implications</h2>

+ * * Since the SA attributes are not entirely self describing we can't do * the normal byteswap processing. The special ZAP layout attribute and * attribute registration attributes define the byteswap function and the @@ -161,7 +163,9 @@ arc_byteswap_func_t *sa_bswap_table[] = sa_copy_data(f, s, t, l); \ } -/* +/** + * List of legacy attributes + * * This table is fixed and cannot be changed. Its purpose is to * allow the SA code to work with both old/new ZPL file systems. * It contains the list of legacy attributes. These attributes aren't @@ -188,18 +192,18 @@ sa_attr_reg_t sa_legacy_attrs[] = { {"ZPL_ZNODE_ACL", 88, SA_UINT8_ARRAY, 15}, }; -/* +/** * ZPL legacy layout + * * This is only used for objects of type DMU_OT_ZNODE */ sa_attr_type_t sa_legacy_zpl_layout[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }; -/* +/** * Special dummy layout used for buffers with no attributes. */ - sa_attr_type_t sa_dummy_zpl_layout[] = { 0 }; static int sa_legacy_attr_count = 16; @@ -318,11 +322,12 @@ sa_get_spill(sa_handle_t *hdl) return (rc); } -/* +/** * Main attribute lookup/update function - * returns 0 for success or non zero for failures * * Operates on bulk array, first failure will abort further processing + * + * \return 0 for success or non zero for failures */ int sa_attr_op(sa_handle_t *hdl, sa_bulk_attr_t *bulk, int count, @@ -537,7 +542,7 @@ sa_copy_data(sa_data_locator_t *func, vo } } -/* +/** * Determine several different sizes * first the sa header size * the number of bytes to be stored @@ -625,8 +630,9 @@ next: #define BUF_SPACE_NEEDED(total, header) (total + header) -/* +/** * Find layout that corresponds to ordering of attributes + * * If not found a new layout number is created and added to * persistent layout tables. */ @@ -1573,7 +1579,7 @@ sa_attr_register_sync(sa_handle_t *hdl, mutex_exit(&sa->sa_lock); } -/* +/** * Replace all attributes with attributes specified in template. 
* If dnode had a spill buffer then those attributes will be * also be replaced, possibly with just an empty spill block @@ -1606,7 +1612,7 @@ sa_replace_all_by_template(sa_handle_t * return (error); } -/* +/** * Add/remove a single attribute or replace a variable-sized attribute value * with a value of a different size, and then rewrite the entire set * of attributes. @@ -1769,7 +1775,7 @@ sa_bulk_update_impl(sa_handle_t *hdl, sa return (error); } -/* +/** * update or add new attribute */ int @@ -1808,10 +1814,9 @@ sa_update_from_cb(sa_handle_t *hdl, sa_a return (error); } -/* +/** * Return size of an attribute */ - int sa_size(sa_handle_t *hdl, sa_attr_type_t attr, int *size) { diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/spa.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/spa.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/spa.c 2012-11-16 17:55:16.251457580 -0700 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/spa.c 2012-11-16 17:56:36.248456186 -0700 @@ -24,7 +24,10 @@ * Copyright (c) 2012 by Delphix. All rights reserved. */ -/* +/** + * \file spa.c + * Storage Pool Allocator + * * This file contains all the routines used when modifying on-disk SPA state. * This includes opening, importing, destroying, exporting a pool, and syncing a * pool. @@ -73,14 +76,20 @@ #include "zfs_prop.h" #include "zfs_comutil.h" -/* Check hostid on import? */ -static int check_hostid = 1; +/** + * Check hostid on import? + * + * \ingroup tunables + */ +int check_hostid = 1; -/* +/** * The interval at which failed configuration cache file writes * should be retried. 
+ * + * \ingroup tunables */ -static int zfs_ccw_retry_interval = 300; +int zfs_ccw_retry_interval = 300; SYSCTL_DECL(_vfs_zfs); TUNABLE_INT("vfs.zfs.check_hostid", &check_hostid); @@ -92,10 +101,10 @@ SYSCTL_INT(_vfs_zfs, OID_AUTO, ccw_retry "Configuration cache file write, retry after failure, interval (seconds)"); typedef enum zti_modes { - zti_mode_fixed, /* value is # of threads (min 1) */ - zti_mode_online_percent, /* value is % of online CPUs */ - zti_mode_batch, /* cpu-intensive; value is ignored */ - zti_mode_null, /* don't create a taskq */ + zti_mode_fixed, /**< value is # of threads (min 1) */ + zti_mode_online_percent, /**< value is % of online CPUs */ + zti_mode_batch, /**< cpu-intensive; value is ignored */ + zti_mode_null, /**< don't create a taskq */ zti_nmodes } zti_modes_t; @@ -115,7 +124,7 @@ static const char *const zio_taskq_types "issue", "issue_high", "intr", "intr_high" }; -/* +/** * Define the taskq threads for the following I/O types: * NULL, READ, WRITE, FREE, CLAIM, and IOCTL */ @@ -139,18 +148,18 @@ static int spa_load_impl(spa_t *spa, uin char **ereport); static void spa_vdev_resilver_done(spa_t *spa); -uint_t zio_taskq_batch_pct = 100; /* 1 thread per cpu in pset */ +uint_t zio_taskq_batch_pct = 100; /**< 1 thread per cpu in pset */ #ifdef PSRSET_BIND id_t zio_taskq_psrset_bind = PS_NONE; #endif #ifdef SYSDC -boolean_t zio_taskq_sysdc = B_TRUE; /* use SDC scheduling class */ +boolean_t zio_taskq_sysdc = B_TRUE; /**< use SDC scheduling class */ #endif -uint_t zio_taskq_basedc = 80; /* base duty cycle */ +uint_t zio_taskq_basedc = 80; /**< base duty cycle */ -boolean_t spa_create_process = B_TRUE; /* no process ==> no sysdc */ +boolean_t spa_create_process = B_TRUE; /**< no process ==> no sysdc */ -/* +/** * This (illegal) pool name is used when temporarily importing a spa_t in order * to get the vdev stats associated with the imported devices. 
*/ @@ -162,7 +171,7 @@ boolean_t spa_create_process = B_TRUE; / * ========================================================================== */ -/* +/** * Add a (source=src, propname=propval) list to an nvlist. */ static void @@ -184,7 +193,7 @@ spa_prop_add_list(nvlist_t *nvl, zpool_p nvlist_free(propval); } -/* +/** * Get property values from the spa configuration. */ static void @@ -276,7 +285,7 @@ spa_prop_get_config(spa_t *spa, nvlist_t } } -/* +/** * Get zpool property values. */ int @@ -383,7 +392,7 @@ out: return (0); } -/* +/** * Validate the given pool properties nvlist and modify the list * for the property values to be set. */ @@ -682,7 +691,7 @@ spa_prop_set(spa_t *spa, nvlist_t *nvp) return (0); } -/* +/** * If the bootfs property value is dsobj, clear it. */ void @@ -746,7 +755,7 @@ spa_change_guid_sync(void *arg1, void *a #endif } -/* +/** * Change the GUID for the pool. This is done so that we can later * re-import a pool built from a clone of our own vdevs. We will modify * the root vdev's guid, our own pool guid, and then mark all of our @@ -801,7 +810,7 @@ spa_error_entry_compare(const void *a, c return (0); } -/* +/** * Utility function which retrieves copies of the current logs and * re-initializes them in the process. */ @@ -962,7 +971,7 @@ spa_thread(void *arg) #endif /* SPA_PROCESS */ #endif -/* +/** * Activate an uninitialized pool. */ static void @@ -1033,7 +1042,7 @@ spa_activate(spa_t *spa, int mode) offsetof(spa_error_entry_t, se_avl)); } -/* +/** * Opposite of spa_activate(). */ static void @@ -1109,7 +1118,7 @@ spa_deactivate(spa_t *spa) #endif /* SPA_PROCESS */ } -/* +/** * Verify a pool configuration, and construct the vdev tree appropriately. This * will create all the necessary vdevs in the appropriate layout, with each vdev * in the CLOSED state. This will prep the pool before open/creation/import. @@ -1156,7 +1165,7 @@ spa_config_parse(spa_t *spa, vdev_t **vd return (0); } -/* +/** * Opposite of spa_load(). 
*/ static void @@ -1257,7 +1266,7 @@ spa_unload(spa_t *spa) spa_config_exit(spa, SCL_ALL, FTAG); } -/* +/** * Load (or re-load) the current list of vdevs describing the active spares for * this pool. When this is called, we have some form of basic information in * 'spa_spares.sav_config'. We parse this into vdevs, try to open them, and @@ -1372,9 +1381,9 @@ spa_load_spares(spa_t *spa) kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *)); } -/* +/** * Load (or re-load) the current list of vdevs describing the active l2cache for - * this pool. When this is called, we have some form of basic information in + * this pool. When this is called, we have some form of basic information in * 'spa_l2cache.sav_config'. We parse this into vdevs, try to open them, and * then re-generate a more complete list including status information. * Devices which are already active have their details maintained, and are @@ -1525,7 +1534,7 @@ load_nvlist(spa_t *spa, uint64_t obj, nv return (error); } -/* +/** * Checks to see if the given vdev could not be opened, in which case we post a * sysevent to notify the autoreplace code that the device has been removed. */ @@ -1541,7 +1550,7 @@ spa_check_removed(vdev_t *vd) } } -/* +/** * Validate the current config against the MOS config */ static boolean_t @@ -1663,7 +1672,7 @@ spa_config_valid(spa_t *spa, nvlist_t *c return (rvd->vdev_guid_sum == spa->spa_uberblock.ub_guid_sum); } -/* +/** * Check for missing log devices */ static int @@ -1750,6 +1759,9 @@ spa_aux_check_removed(spa_aux_vdev_t *sa spa_check_removed(sav->sav_vdevs[i]); } +/** + * Log claim callback + */ void spa_claim_notify(zio_t *zio) { @@ -1858,7 +1870,7 @@ spa_load_verify(spa_t *spa) return (verify_ok ? 0 : EIO); } -/* +/** * Find a value in the pool props object. */ static void @@ -1868,7 +1880,7 @@ spa_prop_find(spa_t *spa, zpool_prop_t p zpool_prop_to_name(prop), sizeof (uint64_t), 1, val); } -/* +/** * Find a value in the pool directory object. 
*/ static int @@ -1885,7 +1897,7 @@ spa_vdev_err(vdev_t *vdev, vdev_aux_t au return (err); } -/* +/** * Fix up config after a partly-completed split. This is done with the * ZPOOL_CONFIG_SPLIT nvlist. Both the splitting pool and the split-off * pool have that entry in their config, but only the splitting one contains @@ -2032,7 +2044,7 @@ spa_load(spa_t *spa, spa_load_state_t st return (error); } -/* +/** * Load an existing storage pool, using the pool's builtin spa_config as a * source of configuration information. */ @@ -2676,7 +2688,7 @@ spa_load_retry(spa_t *spa, spa_load_stat return (spa_load(spa, state, SPA_IMPORT_EXISTING, mosconfig)); } -/* +/** * If spa_load() fails this function will try loading prior txg's. If * 'state' is SPA_LOAD_RECOVER and one of these loads succeeds the pool * will be rewound to that txg. If 'state' is not SPA_LOAD_RECOVER this @@ -2767,7 +2779,7 @@ spa_load_best(spa_t *spa, spa_load_state } } -/* +/** * Pool Open/Import * * The import case is identical to an open except that the configuration is sent @@ -2911,7 +2923,9 @@ spa_open(const char *name, spa_t **spapp return (spa_open_common(name, spapp, tag, NULL, NULL)); } -/* +/** + * Increment a SPA's inject count + * * Lookup the given spa_t, incrementing the inject count in the process, * preventing it from being exported or destroyed. */ @@ -2939,7 +2953,7 @@ spa_inject_delref(spa_t *spa) mutex_exit(&spa_namespace_lock); } -/* +/** * Add spares device information to the nvlist. */ static void @@ -2988,7 +3002,7 @@ spa_add_spares(spa_t *spa, nvlist_t *con } } -/* +/** * Add l2cache device information to the nvlist, including vdev stats. */ static void @@ -3152,7 +3166,7 @@ spa_get_stats(const char *name, nvlist_t return (error); } -/* +/** * Validate that the auxiliary device array is well formed. We must have an * array of nvlists, each which describes a valid leaf vdev. 
If this is an * import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be @@ -3303,7 +3317,7 @@ spa_set_aux_vdevs(spa_aux_vdev_t *sav, n } } -/* +/** * Stop and drop level 2 ARC devices */ void @@ -3325,7 +3339,7 @@ spa_l2cache_drop(spa_t *spa) } } -/* +/** * Pool Creation */ int @@ -3607,9 +3621,9 @@ spa_generate_rootconf(char *devpath, cha return (config); } -/* +/** * Walk the vdev tree and see if we can find a device with "better" - * configuration. A configuration is "better" if the label on that + * configuration. A configuration is "better" if the label on that * device has a more recent txg. */ static void @@ -3640,7 +3654,7 @@ spa_alt_rootvdev(vdev_t *vd, vdev_t **av } } -/* +/** * Import a root pool. * * For x86. devpath_list will consist of devid and/or physpath name of @@ -3873,7 +3887,7 @@ out: #endif /* sun */ #endif -/* +/** * Import a non-root pool into the system. */ int @@ -4155,7 +4169,7 @@ spa_tryimport(nvlist_t *tryconfig) return (config); } -/* +/** * Pool export/destroy * * The act of destroying or exporting a pool is very simple. We make sure there @@ -4264,7 +4278,7 @@ spa_export_common(char *pool, int new_st return (0); } -/* +/** * Destroy a storage pool. */ int @@ -4274,7 +4288,7 @@ spa_destroy(char *pool) B_FALSE, B_FALSE)); } -/* +/** * Export a storage pool. */ int @@ -4285,7 +4299,9 @@ spa_export(char *pool, nvlist_t **oldcon force, hardforce)); } -/* +/** + * Reset a storage pool. + * * Similar to spa_export(), this unloads the spa_t without actually removing it * from the namespace in any way. */ @@ -4302,7 +4318,7 @@ spa_reset(char *pool) * ========================================================================== */ -/* +/** * Add a device to a storage pool. */ int @@ -4404,7 +4420,7 @@ spa_vdev_add(spa_t *spa, nvlist_t *nvroo return (0); } -/* +/** * Attach a device to a mirror. The arguments are the path to any device * in the mirror, and the nvroot for the new device. 
If the path specifies * a device that is not mirrored, we automatically insert the mirror vdev. @@ -4611,8 +4627,9 @@ spa_vdev_attach(spa_t *spa, uint64_t gui return (0); } -/* +/** * Detach a device from a mirror or replacing vdev. + * * If 'replace_done' is specified, only detach if the parent * is a replacing vdev. */ @@ -4851,7 +4868,7 @@ spa_vdev_detach(spa_t *spa, uint64_t gui return (error); } -/* +/** * Split a set of devices from their mirrors, and create a new pool from them. */ int @@ -5181,7 +5198,7 @@ spa_vdev_remove_aux(nvlist_t *config, ch kmem_free(newdev, (count - 1) * sizeof (void *)); } -/* +/** * Evacuate the device. */ static int @@ -5224,7 +5241,7 @@ spa_vdev_remove_evacuate(spa_t *spa, vde return (0); } -/* +/** * Complete the removal by cleaning up the namespace. */ static void @@ -5267,19 +5284,17 @@ spa_vdev_remove_from_namespace(spa_t *sp vdev_reopen(rvd); } -/* - * Remove a device from the pool - +/** + * Remove a device from the pool. * * Removing a device from the vdev namespace requires several steps * and can take a significant amount of time. As a result we use * the spa_vdev_config_[enter/exit] functions which allow us to * grab and release the spa_config_lock while still holding the namespace * lock. During each step the configuration is synced out. - */ - -/* - * Remove a device from the pool. Currently, this supports removing only hot - * spares, slogs, and level 2 ARC devices. + * + * Currently, this supports removing only hot spares, slogs, and level 2 ARC + * devices. */ int spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare) @@ -5387,7 +5402,7 @@ spa_vdev_remove(spa_t *spa, uint64_t gui return (error); } -/* +/** * Find any device that's done replacing, or a vdev marked 'unspare' that's * current spared, so we can detach it. */ @@ -5502,7 +5517,7 @@ spa_vdev_resilver_done(spa_t *spa) spa_config_exit(spa, SCL_ALL, FTAG); } -/* +/** * Update the stored path or FRU for this vdev. 
*/ int @@ -5949,7 +5964,7 @@ spa_sync_version(void *arg1, void *arg2, vdev_config_dirty(spa->spa_root_vdev); } -/* +/** * Set zpool properties. */ static void @@ -6094,7 +6109,7 @@ spa_sync_props(void *arg1, void *arg2, d mutex_exit(&spa->spa_props_lock); } -/* +/** * Perform one-time upgrade on-disk changes. spa_version() does not * reflect the new version this txg, so there must be no changes this * txg to anything that the upgrade code depends on after it executes. @@ -6135,9 +6150,11 @@ spa_sync_upgrades(spa_t *spa, dmu_tx_t * } } -/* +/** * Sync the specified transaction group. New blocks may be dirtied as * part of the process, so we iterate until it converges. + * + * Only for DMU use */ void spa_sync(spa_t *spa, uint64_t txg) @@ -6363,7 +6380,7 @@ spa_sync(spa_t *spa, uint64_t txg) spa_async_dispatch(spa); } -/* +/** * Sync all pools. We don't want to hold the namespace lock across these * operations, so we take a reference on the spa_t and drop the lock during the * sync. @@ -6392,7 +6409,7 @@ spa_sync_allpools(void) * ========================================================================== */ -/* +/** * Remove all pools in the system. */ void @@ -6495,9 +6512,11 @@ spa_has_spare(spa_t *spa, uint64_t guid) return (B_FALSE); } -/* +/** * Check if a pool has an active shared spare device. - * Note: reference count of an active spare is 2, as a spare and as a replace + * + * \note The reference count of an active spare is 2, as a spare and as + * a replace. */ static boolean_t spa_has_active_shared_spare(spa_t *spa) @@ -6516,12 +6535,17 @@ spa_has_active_shared_spare(spa_t *spa) return (B_FALSE); } -/* - * Post a sysevent corresponding to the given event. The 'name' must be one of - * the event definitions in sys/sysevent/eventdefs.h. The payload will be - * filled in from the spa and (optionally) the vdev. This doesn't do anything - * in the userland libzpool, as we don't want consumers to misinterpret ztest - * or zdb as real changes. 
+/** + * Post a sysevent corresponding to the given event. + * + * \param[in] spa Provides information for the event payload + * \param[in] vd Optional. If not null, provides information for the + * event payload + * \param[in] name must be one of the event definitions in + * sys/sysevent/eventdefs.h + * + * This doesn't do anything in the userland libzpool, as we don't want + * consumers to misinterpret ztest or zdb as real changes. */ void spa_event_notify(spa_t *spa, vdev_t *vd, const char *name) diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/spa_config.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/spa_config.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/spa_config.c 2012-11-16 11:07:22.169503963 -0700 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/spa_config.c 2012-11-15 18:17:57.044456147 -0700 @@ -42,7 +42,8 @@ #include #endif -/* +/** + * \file spa_config.c * Pool configuration repository. * * Pool configuration is stored as a packed nvlist on the filesystem. By @@ -61,13 +62,13 @@ static uint64_t spa_config_generation = 1; -/* +/** * This can be overridden in userland to preserve an alternate namespace for * userland pools when doing testing. */ const char *spa_config_path = ZPOOL_CACHE; -/* +/** * Called when the module is first loaded, this routine loads the configuration * file into the SPA namespace. It does not actually open or load the pools; it * only populates the namespace. @@ -193,7 +194,7 @@ spa_config_write(spa_config_dirent_t *dp return (err); } -/* +/** * Synchronize pool configuration to disk. This must be called with the * namespace lock held. */ @@ -294,7 +295,7 @@ spa_config_sync(spa_t *target, boolean_t spa_event_notify(target, NULL, ESC_ZFS_CONFIG_SYNC); } -/* +/** * Sigh. Inside a local zone, we don't have access to /etc/zfs/zpool.cache, * and we don't want to allow the local zone to see all the pools anyway. 
* So we have to invent the ZFS_IOC_CONFIG ioctl to grab the configuration @@ -337,8 +338,9 @@ spa_config_set(spa_t *spa, nvlist_t *con mutex_exit(&spa->spa_props_lock); } -/* +/** * Generate the pool's configuration based on the current in-core state. + * * We infer whether to generate a complete config or just one top-level config * based on whether vd is the root vdev. */ @@ -479,7 +481,7 @@ spa_config_generate(spa_t *spa, vdev_t * return (config); } -/* +/** * Update all disk labels, generate a fresh config based on the current * in-core state, and sync the global config cache (do not sync the config * cache if this is a booting rootpool). diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/spa_errlog.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/spa_errlog.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/spa_errlog.c 2012-10-17 17:00:59.753592416 -0600 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/spa_errlog.c 2012-11-15 16:10:02.971455655 -0700 @@ -22,7 +22,8 @@ * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved. */ -/* +/** + * \file spa_errlog.c * Routines to manage the on-disk persistent error log. * * Each pool stores a log of all logical data errors seen during normal @@ -54,7 +55,7 @@ #include -/* +/** * Convert a bookmark to a string. */ static void @@ -65,7 +66,7 @@ bookmark_to_name(zbookmark_t *zb, char * (u_longlong_t)zb->zb_level, (u_longlong_t)zb->zb_blkid); } -/* +/** * Convert a string to a bookmark */ #ifdef _KERNEL @@ -83,7 +84,7 @@ name_to_bookmark(char *buf, zbookmark_t } #endif -/* +/** * Log an uncorrectable error to the persistent error log. We add it to the * spa's list of pending errors. The changes are actually synced out to disk * during spa_errlog_sync(). @@ -128,7 +129,7 @@ spa_log_error(spa_t *spa, zio_t *zio) mutex_exit(&spa->spa_errlist_lock); } -/* +/** * Return the number of errors currently in the error log. 
This is actually the * sum of both the last log and the current log, since we don't know the union * of these logs until we reach userland. @@ -215,7 +216,7 @@ process_error_list(avl_tree_t *list, voi } #endif -/* +/** * Copy all known errors to userland as an array of bookmarks. This is * actually a union of the on-disk last log and current log, as well as any * pending error requests. @@ -255,7 +256,7 @@ spa_get_errlog(spa_t *spa, void *uaddr, return (ret); } -/* +/** * Called when a scrub completes. This simply set a bit which tells which AVL * tree to add new errors. spa_errlog_sync() is responsible for actually * syncing the changes to the underlying objects. @@ -268,7 +269,7 @@ spa_errlog_rotate(spa_t *spa) mutex_exit(&spa->spa_errlist_lock); } -/* +/** * Discard any pending errors from the spa_t. Called when unloading a faulted * pool, as the errors encountered during the open cannot be synced to disk. */ @@ -292,7 +293,7 @@ spa_errlog_drain(spa_t *spa) mutex_exit(&spa->spa_errlist_lock); } -/* +/** * Process a list of errors into the current on-disk log. */ static void @@ -326,7 +327,7 @@ sync_error_list(spa_t *spa, avl_tree_t * } } -/* +/** * Sync the error log out to disk. This is a little tricky because the act of * writing the error log requires the spa_errlist_lock. So, we need to lock the * error lists, take a copy of the lists, and then reinitialize them. Then, we diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/spa_history.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/spa_history.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/spa_history.c 2012-11-16 11:07:22.170456460 -0700 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/spa_history.c 2012-11-15 18:17:57.065456245 -0700 @@ -38,7 +38,8 @@ #include #endif -/* +/** + * \file spa_history.c * Routines to manage the on-disk history log. 
* * The history log is stored as a dmu object containing @@ -67,7 +68,9 @@ * and permanently lost. */ -/* convert a logical offset to physical */ +/** + * convert a logical offset to physical + */ static uint64_t spa_history_log_to_phys(uint64_t log_off, spa_history_phys_t *shpp) { @@ -112,7 +115,7 @@ spa_history_create_obj(spa_t *spa, dmu_t dmu_buf_rele(dbp, FTAG); } -/* +/** * Change 'sh_bof' to the beginning of the next record. */ static int @@ -186,7 +189,7 @@ spa_history_zone() return ("global"); } -/* +/** * Write out a history event. */ /*ARGSUSED*/ @@ -294,7 +297,7 @@ spa_history_log_sync(void *arg1, void *a kmem_free(hap, sizeof (history_arg_t)); } -/* +/** * Write out a history event. */ int @@ -331,7 +334,7 @@ spa_history_log(spa_t *spa, const char * return (err); } -/* +/** * Read out the command history. */ int diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/spa_misc.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/spa_misc.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/spa_misc.c 2012-11-16 11:07:22.171456060 -0700 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/spa_misc.c 2012-11-15 22:34:56.673455558 -0700 @@ -50,45 +50,40 @@ #include "zfs_prop.h" #include "zfeature_common.h" -/* +/** + * \file spa_misc.c * SPA locking * * There are four basic locks for managing spa_t structures: * - * spa_namespace_lock (global mutex) - * - * This lock must be acquired to do any of the following: - * - * - Lookup a spa_t by name - * - Add or remove a spa_t from the namespace - * - Increase spa_refcount from non-zero - * - Check if spa_refcount is zero - * - Rename a spa_t - * - add/remove/attach/detach devices - * - Held for the duration of create/destroy/import/export - * - * It does not need to handle recursion. 
A create or destroy may - * reference objects (files or zvols) in other pools, but by - * definition they must have an existing reference, and will never need - * to lookup a spa_t by name. - * - * spa_refcount (per-spa refcount_t protected by mutex) - * - * This reference count keep track of any active users of the spa_t. The - * spa_t cannot be destroyed or freed while this is non-zero. Internally, - * the refcount is never really 'zero' - opening a pool implicitly keeps - * some references in the DMU. Internally we check against spa_minref, but - * present the image of a zero/non-zero value to consumers. - * - * spa_config_lock[] (per-spa array of rwlocks) + * - spa_namespace_lock (global mutex)
+ * This lock does not need to handle recursion. A create or destroy may + * reference objects (files or zvols) in other pools, but by + * definition they must have an existing reference, and will never need + * to lookup a spa_t by name. + * This lock must be acquired to do any of the following: + * - Lookup a spa_t by name + * - Add or remove a spa_t from the namespace + * - Increase spa_refcount from non-zero + * - Check if spa_refcount is zero + * - Rename a spa_t + * - add/remove/attach/detach devices + * - Held for the duration of create/destroy/import/export + * - spa_refcount (per-spa refcount_t protected by mutex)
+ * This reference count keeps track of any active users of the spa_t. The + * spa_t cannot be destroyed or freed while this is non-zero. Internally, + * the refcount is never really 'zero' - opening a pool implicitly keeps + * some references in the DMU. Internally we check against spa_minref, but + * present the image of a zero/non-zero value to consumers. + * - spa_config_lock[] (per-spa array of rwlocks)
+ * This protects the spa_t from config changes, and must be held in + * the following circumstances: + * - RW_READER to perform I/O to the spa + * - RW_WRITER to change the vdev config + * - spa_spare_lock (local mutex)
+ * Protects the AVL tree that stores spare vdevs * - * This protects the spa_t from config changes, and must be held in - * the following circumstances: - * - * - RW_READER to perform I/O to the spa - * - RW_WRITER to change the vdev config - * - * The locking order is fairly straightforward: + * The locking order is fairly straightforward: * * spa_namespace_lock -> spa_refcount * @@ -104,40 +99,33 @@ * * The namespace lock must always be taken before the config lock. * - * * The spa_namespace_lock can be acquired directly and is globally visible. * * The namespace is manipulated using the following functions, all of which * require the spa_namespace_lock to be held. - * - * spa_lookup() Lookup a spa_t by name. - * - * spa_add() Create a new spa_t in the namespace. - * - * spa_remove() Remove a spa_t from the namespace. This also - * frees up any memory associated with the spa_t. - * - * spa_next() Returns the next spa_t in the system, or the - * first if NULL is passed. - * - * spa_evict_all() Shutdown and remove all spa_t structures in - * the system. - * - * spa_guid_exists() Determine whether a pool/device guid exists. + * - spa_lookup() Lookup a spa_t by name. + * - spa_add() Create a new spa_t in the namespace. + * - spa_remove() Remove a spa_t from the namespace. + * This also frees up any memory associated + * with the spa_t. + * - spa_next() Returns the next spa_t in the system, + * or the first if NULL is passed. + * - spa_evict_all() Shutdown and remove all spa_t structures + * in the system. + * - spa_guid_exists() Determine whether a pool/device guid + * exists. * * The spa_refcount is manipulated using the following functions: - * - * spa_open_ref() Adds a reference to the given spa_t. Must be - * called with spa_namespace_lock held if the - * refcount is currently zero. - * - * spa_close() Remove a reference from the spa_t. This will - * not free the spa_t or remove it from the - * namespace. No locking is required. 
- * - * spa_refcount_zero() Returns true if the refcount is currently - * zero. Must be called with spa_namespace_lock - * held. + * - spa_open_ref() Adds a reference to the given spa_t. + * Must be called with spa_namespace_lock + * held if the refcount is currently zero. + * - spa_close() Remove a reference from the spa_t. + * This will not free the spa_t or remove + * it from the namespace. No locking is + * required. + * - spa_refcount_zero() Returns true if the refcount is + * currently zero. Must be called with + * spa_namespace_lock held. * * The spa_config_lock[] is an array of rwlocks, ordered as follows: * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV. @@ -161,33 +149,28 @@ * * The lock acquisition rules are as follows: * - * SCL_CONFIG + * - SCL_CONFIG
* Protects changes to the vdev tree topology, such as vdev * add/remove/attach/detach. Protects the dirty config list * (spa_config_dirty_list) and the set of spares and l2arc devices. - * - * SCL_STATE + * - SCL_STATE
* Protects changes to pool state and vdev state, such as vdev * online/offline/fault/degrade/clear. Protects the dirty state list * (spa_state_dirty_list) and global pool state (spa_state). - * - * SCL_ALLOC + * - SCL_ALLOC
* Protects changes to metaslab groups and classes. * Held as reader by metaslab_alloc() and metaslab_claim(). - * - * SCL_ZIO + * - SCL_ZIO
* Held by bp-level zios (those which have no io_vd upon entry) * to prevent changes to the vdev tree. The bp-level zio implicitly * protects all of its vdev child zios, which do not hold SCL_ZIO. - * - * SCL_FREE + * - SCL_FREE
* Protects changes to metaslab groups and classes. * Held as reader by metaslab_free(). SCL_FREE is distinct from * SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free * blocks in zio_done() while another i/o that holds either * SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete. - * - * SCL_VDEV + * - SCL_VDEV
* Held as reader to prevent changes to the vdev tree during trivial * inquiries such as bp_get_dsize(). SCL_VDEV is distinct from the * other locks, and lower than all of them, to ensure that it's safe @@ -195,10 +178,9 @@ * * In addition, the following rules apply: * - * (a) spa_props_lock protects pool properties, spa_config and spa_config_list. + * -# spa_props_lock protects pool properties, spa_config and spa_config_list. * The lock ordering is SCL_CONFIG > spa_props_lock. - * - * (b) I/O operations on leaf vdevs. For any zio operation that takes + * -# I/O operations on leaf vdevs. For any zio operation that takes * an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(), * or zio_write_phys() -- the caller must ensure that the config cannot * cannot change in the interim, and that the vdev cannot be reopened. @@ -206,12 +188,12 @@ * * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit(). * - * spa_vdev_enter() Acquire the namespace lock and the config lock - * for writing. - * - * spa_vdev_exit() Release the config lock, wait for all I/O - * to complete, sync the updated configs to the - * cache, and release the namespace lock. + * - spa_vdev_enter() Acquire the namespace lock and the + * config lock for writing. + * - spa_vdev_exit() Release the config lock, wait for all + * I/O to complete, sync the updated + * configs to the cache, and release the + * namespace lock. * * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit(). * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual @@ -242,10 +224,12 @@ int zfs_flags = ~ZFS_DEBUG_DPRINTF; int zfs_flags = 0; #endif -/* +/** * zfs_recover can be set to nonzero to attempt to recover from * otherwise-fatal errors, typically caused by on-disk corruption. When * set, calls to zfs_panic_recover() will turn into warning messages. 
+ * + * \ingroup tunables */ int zfs_recover = 0; SYSCTL_DECL(_vfs_zfs); @@ -387,7 +371,7 @@ spa_config_held(spa_t *spa, int locks, k * ========================================================================== */ -/* +/** * Lookup the named spa_t in the AVL tree. The spa_namespace_lock must be held. * Returns NULL if no matching spa_t is found. */ @@ -421,7 +405,7 @@ spa_lookup(const char *name) return (spa); } -/* +/** * Create an uninitialized spa_t with the given name. Requires * spa_namespace_lock. The caller must ensure that the spa_t doesn't already * exist by calling spa_lookup() first. @@ -508,10 +492,10 @@ spa_add(const char *name, nvlist_t *conf return (spa); } -/* +/** * Removes a spa_t from the namespace, freeing up any memory used. Requires - * spa_namespace_lock. This is called only after the spa_t has been closed and - * deactivated. + * spa_namespace_lock. This is called only after the spa_t has been closed + * and deactivated. */ void spa_remove(spa_t *spa) @@ -569,7 +553,9 @@ spa_remove(spa_t *spa) kmem_free(spa, sizeof (spa_t)); } -/* +/** + * Find the next pool. + * * Given a pool, return the next pool in the namespace, or NULL if there is * none. If 'prev' is NULL, return the first pool. */ @@ -590,7 +576,7 @@ spa_next(spa_t *prev) * ========================================================================== */ -/* +/** * Add a reference to the given spa_t. Must have at least one reference, or * have the namespace lock held. */ @@ -602,7 +588,7 @@ spa_open_ref(spa_t *spa, void *tag) (void) refcount_add(&spa->spa_refcount, tag); } -/* +/** * Remove a reference to the given spa_t. Must have at least one reference, or * have the namespace lock held. */ @@ -614,7 +600,7 @@ spa_close(spa_t *spa, void *tag) (void) refcount_remove(&spa->spa_refcount, tag); } -/* +/** * Check to see if the spa refcount is zero. Must be called with * spa_namespace_lock held. 
We really compare against spa_minref, which is the * number of references acquired when opening a pool @@ -736,7 +722,10 @@ spa_aux_activate(vdev_t *vd, avl_tree_t found->aux_pool = spa_guid(vd->vdev_spa); } -/* +/** + * \file spa_misc.c + *

Spare VDev list

+ * * Spares are tracked globally due to the following constraints: * * - A spare may be part of multiple pools. @@ -805,7 +794,10 @@ spa_spare_activate(vdev_t *vd) mutex_exit(&spa_spare_lock); } -/* +/** + * \file spa_misc.c + *

Level 2 ARC cache vdev list

+ * * Level 2 ARC devices are tracked globally for the same reasons as spares. * Cache devices currently only support one pool per cache device, and so * for these devices the aux reference count is currently unused beyond 1. @@ -864,8 +856,9 @@ spa_l2cache_activate(vdev_t *vd) * ========================================================================== */ -/* +/** * Lock the given spa_t for the purpose of adding or removing a vdev. + * * Grabs the global spa_namespace_lock plus the spa config lock for writing. * It returns the next transaction group for the spa_t. */ @@ -877,7 +870,7 @@ spa_vdev_enter(spa_t *spa) return (spa_vdev_config_enter(spa)); } -/* +/** * Internal implementation for spa_vdev_enter(). Used when a vdev * operation requires multiple syncs (i.e. removing a device) while * keeping the spa_namespace_lock held. @@ -892,7 +885,7 @@ spa_vdev_config_enter(spa_t *spa) return (spa_last_synced_txg(spa) + 1); } -/* +/** * Used in combination with spa_vdev_config_enter() to allow the syncing * of multiple transactions without releasing the spa_namespace_lock. */ @@ -955,7 +948,7 @@ spa_vdev_config_exit(spa_t *spa, vdev_t spa_config_sync(spa, B_FALSE, B_TRUE); } -/* +/** * Unlock the spa_t after adding or removing a vdev. Besides undoing the * locking of spa_vdev_enter(), we also want make sure the transactions have * synced to disk, and then update the global configuration cache with the new @@ -971,7 +964,7 @@ spa_vdev_exit(spa_t *spa, vdev_t *vd, ui return (error); } -/* +/** * Lock the given spa_t for the purpose of changing vdev state. */ void @@ -1063,7 +1056,7 @@ spa_deactivate_mos_feature(spa_t *spa, c vdev_config_dirty(spa->spa_root_vdev); } -/* +/** * Rename a spa_t. */ int @@ -1113,7 +1106,7 @@ spa_rename(const char *name, const char return (0); } -/* +/** * Return the spa_t associated with given pool_guid, if it exists. If * device_guid is non-zero, determine whether the pool exists *and* contains * a device with the specified device_guid. 
@@ -1153,7 +1146,7 @@ spa_by_guid(uint64_t pool_guid, uint64_t return (spa); } -/* +/** * Determine whether a pool with the given pool_guid exists. */ boolean_t @@ -1261,9 +1254,9 @@ zfs_panic_recover(const char *fmt, ...) va_end(adx); } -/* +/** * This is a stripped-down version of strtoull, suitable only for converting - * lowercase hexidecimal numbers that don't overflow. + * lowercase hexadecimal numbers that don't overflow. */ uint64_t zfs_strtonum(const char *str, char **nptr) @@ -1451,8 +1444,8 @@ spa_update_dspace(spa_t *spa) ddt_get_dedup_dspace(spa); } -/* - * Return the failure mode that has been set to this pool. The default +/** + * Return the failure mode that has been set to this pool. The default * behavior will be to block all I/Os when a complete failure occurs. */ uint8_t @@ -1654,8 +1647,8 @@ spa_fini(void) mutex_destroy(&spa_l2cache_lock); } -/* - * Return whether this pool has slogs. No locking needed. +/** + * Return whether this pool has slogs. No locking needed. * It's not a problem if the wrong answer is returned as it's only for * performance and not correctness */ @@ -1719,7 +1712,7 @@ spa_dedup_checksum(spa_t *spa) return (spa->spa_dedup_checksum); } -/* +/** * Reset pool scan stat per scan pass (or reboot). 
*/ void @@ -1731,7 +1724,7 @@ spa_scan_stat_init(spa_t *spa) vdev_scan_stat_init(spa->spa_root_vdev); } -/* +/** * Get scan stats for zpool status reports */ int diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/space_map.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/space_map.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/space_map.c 2012-11-16 11:07:22.171456060 -0700 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/space_map.c 2012-11-15 18:17:57.077456045 -0700 @@ -39,9 +39,11 @@ SYSCTL_INT(_vfs_zfs, OID_AUTO, space_map &space_map_last_hope, 0, "If kernel panic in space_map code on pool import, import the pool in readonly mode and backup all your data before trying this option."); -/* +/** + * \file space_map.c * Space map routines. - * NOTE: caller is responsible for all locking. + * + * \note Caller is responsible for all locking. */ static int space_map_seg_compare(const void *x1, const void *x2) @@ -276,7 +278,7 @@ space_map_walk(space_map_t *sm, space_ma func(mdest, ss->ss_start, ss->ss_end - ss->ss_start); } -/* +/** * Wait for any in-progress space_map_load() to complete. */ void @@ -290,7 +292,7 @@ space_map_load_wait(space_map_t *sm) } } -/* +/** * Note: space_map_load() will drop sm_lock across dmu_read() calls. * The caller must be OK with this. */ @@ -422,7 +424,7 @@ space_map_free(space_map_t *sm, uint64_t sm->sm_ops->smop_free(sm, start, size); } -/* +/** * Note: space_map_sync() will drop sm_lock across dmu_write() calls. */ void @@ -513,7 +515,7 @@ space_map_truncate(space_map_obj_t *smo, smo->smo_alloc = 0; } -/* +/** * Space map reference trees. * * A space map is a collection of integers. Every integer is either @@ -592,7 +594,7 @@ space_map_ref_add_seg(avl_tree_t *t, uin space_map_ref_add_node(t, end, -refcnt); } -/* +/** * Convert (or add) a space map into a reference tree. 
*/ void @@ -606,7 +608,7 @@ space_map_ref_add_map(avl_tree_t *t, spa space_map_ref_add_seg(t, ss->ss_start, ss->ss_end, refcnt); } -/* +/** * Convert a reference tree into a space map. The space map will contain * all members of the reference tree for which refcnt >= minref. */ diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/arc.h SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/arc.h --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/arc.h 2012-11-16 11:07:22.172456045 -0700 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/arc.h 2012-11-15 18:17:57.081456426 -0700 @@ -56,20 +56,20 @@ struct arc_buf { }; typedef enum arc_buf_contents { - ARC_BUFC_DATA, /* buffer contains data */ - ARC_BUFC_METADATA, /* buffer contains metadata */ + ARC_BUFC_DATA, /**< buffer contains data */ + ARC_BUFC_METADATA, /**< buffer contains metadata */ ARC_BUFC_NUMTYPES } arc_buf_contents_t; /* * These are the flags we pass into calls to the arc */ -#define ARC_WAIT (1 << 1) /* perform I/O synchronously */ -#define ARC_NOWAIT (1 << 2) /* perform I/O asynchronously */ -#define ARC_PREFETCH (1 << 3) /* I/O is a prefetch */ -#define ARC_CACHED (1 << 4) /* I/O was already in cache */ -#define ARC_L2CACHE (1 << 5) /* cache in L2ARC */ +#define ARC_WAIT (1 << 1) /**< perform I/O synchronously */ +#define ARC_NOWAIT (1 << 2) /**< perform I/O asynchronously */ +#define ARC_PREFETCH (1 << 3) /**< I/O is a prefetch */ +#define ARC_CACHED (1 << 4) /**< I/O was already in cache */ +#define ARC_L2CACHE (1 << 5) /**< cache in L2ARC */ -/* +/** * The following breakdows of arc_size exist for kstat only. 
*/ typedef enum arc_space_type { diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dbuf.h SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dbuf.h --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dbuf.h 2012-10-17 17:00:59.799592485 -0600 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dbuf.h 2012-11-15 16:18:17.333456520 -0700 @@ -40,8 +40,9 @@ extern "C" { #define IN_DMU_SYNC 2 -/* - * define flags for dbuf_read +/** + * \name Define flags for dbuf_read + * \{ */ #define DB_RF_MUST_SUCCEED (1 << 0) @@ -50,20 +51,23 @@ extern "C" { #define DB_RF_NOPREFETCH (1 << 3) #define DB_RF_NEVERWAIT (1 << 4) #define DB_RF_CACHED (1 << 5) +/** \} */ -/* +/** * The simplified state transition diagram for dbufs looks like: * - * +----> READ ----+ - * | | - * | V - * (alloc)-->UNCACHED CACHED-->EVICTING-->(free) - * | ^ ^ - * | | | - * +----> FILL ----+ | - * | | - * | | - * +--------> NOFILL -------+ + \verbatim + +----> READ ----+ + | | + | V + (alloc)-->UNCACHED CACHED-->EVICTING-->(free) + | ^ ^ + | | | + +----> FILL ----+ | + | | + | | + +--------> NOFILL -------+ + \endverbatim */ typedef enum dbuf_states { DB_UNCACHED, @@ -91,37 +95,45 @@ typedef enum override_states { DR_OVERRIDDEN } override_states_t; +/** + * The structure of dirty records (DR) mirror the dbufs they belong to. That + * is, a dnode, its indirect blocks, and its data (leaf) blocks all have + * their own DRs. Each can only have one for each in-flight TXG. Each can + * have a parent DR, which is associated with its parent dbuf. Indirects can + * have child DRs, each associated with its child dbufs. Finally, the leaf + * DRs contain the ARC buffer containing the data to be written. 
+ */ typedef struct dbuf_dirty_record { - /* link on our parents dirty list */ + /** link on our parents dirty list */ list_node_t dr_dirty_node; - /* transaction group this data will sync in */ + /** transaction group this data will sync in */ uint64_t dr_txg; - /* zio of outstanding write IO */ + /** zio of outstanding write IO */ zio_t *dr_zio; - /* pointer back to our dbuf */ + /** pointer back to our dbuf */ struct dmu_buf_impl *dr_dbuf; - /* pointer to next dirty record */ + /** pointer to next dirty record */ struct dbuf_dirty_record *dr_next; - /* pointer to parent dirty record */ + /** pointer to parent dirty record */ struct dbuf_dirty_record *dr_parent; union dirty_types { struct dirty_indirect { - /* protect access to list */ + /** protect access to list */ kmutex_t dr_mtx; - /* Our list of dirty children */ + /** Our list of dirty children */ list_t dr_children; } di; struct dirty_leaf { - /* + /** * dr_data is set when we dirty the buffer * so that we can retain the pointer even if it * gets COW'd in a subsequent transaction group. @@ -140,18 +152,18 @@ typedef struct dmu_buf_impl { * db.db_data, which is protected by db_mtx. */ - /* the publicly visible structure */ + /** the publicly visible structure */ dmu_buf_t db; - /* the objset we belong to */ + /** the objset we belong to */ struct objset *db_objset; - /* + /** * handle to safely access the dnode we belong to (NULL when evicted) */ struct dnode_handle *db_dnode_handle; - /* + /** * our parent buffer; if the dnode points to us directly, * db_parent == db_dnode_handle->dnh_dnode->dn_dbuf * only accessed by sync thread ??? @@ -161,21 +173,21 @@ typedef struct dmu_buf_impl { */ struct dmu_buf_impl *db_parent; - /* + /** * link for hash table of all dmu_buf_impl_t's */ struct dmu_buf_impl *db_hash_next; - /* our block number */ + /** our block number */ uint64_t db_blkid; - /* + /** * Pointer to the blkptr_t which points to us. May be NULL if we * don't have one yet. 
(NULL when evicted) */ blkptr_t *db_blkptr; - /* + /** * Our indirection level. Data buffers have db_level==0. * Indirect buffers which point to data buffers have * db_level==1. etc. Buffers which contain dnodes have @@ -183,39 +195,39 @@ typedef struct dmu_buf_impl { */ uint8_t db_level; - /* db_mtx protects the members below */ + /** db_mtx protects the members below */ kmutex_t db_mtx; - /* + /** * Current state of the buffer */ dbuf_states_t db_state; - /* + /** * Refcount accessed by dmu_buf_{hold,rele}. * If nonzero, the buffer can't be destroyed. * Protected by db_mtx. */ refcount_t db_holds; - /* buffer holding our data */ + /** buffer holding our data */ arc_buf_t *db_buf; kcondvar_t db_changed; dbuf_dirty_record_t *db_data_pending; - /* pointer to most recent dirty record for this buffer */ + /** pointer to most recent dirty record for this buffer */ dbuf_dirty_record_t *db_last_dirty; - /* + /** * Our link on the owner dnodes's dn_dbufs list. * Protected by its dn_dbufs_mtx. */ list_node_t db_link; - /* Data which is unique to data (leaf) blocks: */ + /** Data which is unique to data (leaf) blocks: */ - /* stuff we store for the user (see dmu_buf_set_user) */ + /** stuff we store for the user (see dmu_buf_set_user) */ void *db_user_ptr; void **db_user_data_ptr_ptr; dmu_buf_evict_func_t *db_evict_func; @@ -226,9 +238,9 @@ typedef struct dmu_buf_impl { uint8_t db_dirtycnt; } dmu_buf_impl_t; -/* Note: the dbuf hash table is exposed only for the mdb module */ #define DBUF_MUTEXES 256 #define DBUF_HASH_MUTEX(h, idx) (&(h)->hash_mutexes[(idx) & (DBUF_MUTEXES-1)]) +/** \note The dbuf hash table is exposed only for the mdb module. 
*/ typedef struct dbuf_hash_table { uint64_t hash_table_mask; dmu_buf_impl_t **hash_table; diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/ddt.h SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/ddt.h --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/ddt.h 2012-10-17 17:00:59.800592596 -0600 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/ddt.h 2012-11-15 16:20:34.515455311 -0700 @@ -35,7 +35,7 @@ extern "C" { #endif -/* +/** * On-disk DDT formats, in the desired search order (newest version first). */ enum ddt_type { @@ -43,7 +43,7 @@ enum ddt_type { DDT_TYPES }; -/* +/** * DDT classes, in the desired search order (highest replication level first). */ enum ddt_class { @@ -58,21 +58,24 @@ enum ddt_class { #define DDT_COMPRESS_BYTEORDER_MASK 0x80 #define DDT_COMPRESS_FUNCTION_MASK 0x7f -/* +/** * On-disk ddt entry: key (name) and physical storage (value). */ typedef struct ddt_key { - zio_cksum_t ddk_cksum; /* 256-bit block checksum */ - uint64_t ddk_prop; /* LSIZE, PSIZE, compression */ + zio_cksum_t ddk_cksum; /**< 256-bit block checksum */ + /** + * LSIZE, PSIZE, compression + * + * layout: + \verbatim + +-------+-------+-------+-------+-------+-------+-------+-------+ + | 0 | 0 | 0 | comp | PSIZE | LSIZE | + +-------+-------+-------+-------+-------+-------+-------+-------+ + \endverbatim + */ + uint64_t ddk_prop; /**< LSIZE, PSIZE, compression */ } ddt_key_t; -/* - * ddk_prop layout: - * - * +-------+-------+-------+-------+-------+-------+-------+-------+ - * | 0 | 0 | 0 | comp | PSIZE | LSIZE | - * +-------+-------+-------+-------+-------+-------+-------+-------+ - */ #define DDK_GET_LSIZE(ddk) \ BF64_GET_SB((ddk)->ddk_prop, 0, 16, SPA_MINBLOCKSHIFT, 1) #define DDK_SET_LSIZE(ddk, x) \ @@ -102,7 +105,7 @@ enum ddt_phys_type { DDT_PHYS_TYPES }; -/* +/** * In-core ddt entry */ struct ddt_entry { @@ -118,7 +121,7 @@ struct ddt_entry { avl_node_t dde_node; }; -/* +/** * 
In-core ddt */ struct ddt { @@ -136,7 +139,7 @@ struct ddt { avl_node_t ddt_node; }; -/* +/** * In-core and on-disk bookmark for DDT walks */ typedef struct ddt_bookmark { @@ -146,7 +149,7 @@ typedef struct ddt_bookmark { uint64_t ddb_cursor; } ddt_bookmark_t; -/* +/** * Ops vector to access a specific DDT object type. */ typedef struct ddt_ops { diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dmu.h SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dmu.h --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dmu.h 2012-11-16 11:07:22.174448653 -0700 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dmu.h 2012-11-16 17:58:06.962460357 -0700 @@ -31,7 +31,9 @@ #ifndef _SYS_DMU_H #define _SYS_DMU_H -/* +/** + * \file dmu.h + * * This file describes the interface that the DMU provides for its * consumers. * @@ -99,7 +101,7 @@ typedef enum dmu_object_byteswap { #define DMU_OT_METADATA 0x40 #define DMU_OT_BYTESWAP_MASK 0x3f -/* +/** * Defines a uint8_t object type. Object types specify if the data * in the object is metadata (boolean) and how to byteswap the data * (dmu_object_byteswap_t). 
@@ -124,67 +126,67 @@ typedef enum dmu_object_byteswap { typedef enum dmu_object_type { DMU_OT_NONE, /* general: */ - DMU_OT_OBJECT_DIRECTORY, /* ZAP */ - DMU_OT_OBJECT_ARRAY, /* UINT64 */ - DMU_OT_PACKED_NVLIST, /* UINT8 (XDR by nvlist_pack/unpack) */ - DMU_OT_PACKED_NVLIST_SIZE, /* UINT64 */ - DMU_OT_BPOBJ, /* UINT64 */ - DMU_OT_BPOBJ_HDR, /* UINT64 */ + DMU_OT_OBJECT_DIRECTORY, /**< ZAP */ + DMU_OT_OBJECT_ARRAY, /**< UINT64 */ + DMU_OT_PACKED_NVLIST, /**< UINT8 (XDR by nvlist_pack/unpack)*/ + DMU_OT_PACKED_NVLIST_SIZE, /**< UINT64 */ + DMU_OT_BPOBJ, /**< UINT64 */ + DMU_OT_BPOBJ_HDR, /**< UINT64 */ /* spa: */ - DMU_OT_SPACE_MAP_HEADER, /* UINT64 */ - DMU_OT_SPACE_MAP, /* UINT64 */ + DMU_OT_SPACE_MAP_HEADER, /**< UINT64 */ + DMU_OT_SPACE_MAP, /**< UINT64 */ /* zil: */ - DMU_OT_INTENT_LOG, /* UINT64 */ + DMU_OT_INTENT_LOG, /**< UINT64 */ /* dmu: */ - DMU_OT_DNODE, /* DNODE */ - DMU_OT_OBJSET, /* OBJSET */ + DMU_OT_DNODE, /**< DNODE */ + DMU_OT_OBJSET, /**< OBJSET */ /* dsl: */ - DMU_OT_DSL_DIR, /* UINT64 */ - DMU_OT_DSL_DIR_CHILD_MAP, /* ZAP */ - DMU_OT_DSL_DS_SNAP_MAP, /* ZAP */ - DMU_OT_DSL_PROPS, /* ZAP */ - DMU_OT_DSL_DATASET, /* UINT64 */ + DMU_OT_DSL_DIR, /**< UINT64 */ + DMU_OT_DSL_DIR_CHILD_MAP, /**< ZAP */ + DMU_OT_DSL_DS_SNAP_MAP, /**< ZAP */ + DMU_OT_DSL_PROPS, /**< ZAP */ + DMU_OT_DSL_DATASET, /**< UINT64 */ /* zpl: */ - DMU_OT_ZNODE, /* ZNODE */ - DMU_OT_OLDACL, /* Old ACL */ - DMU_OT_PLAIN_FILE_CONTENTS, /* UINT8 */ - DMU_OT_DIRECTORY_CONTENTS, /* ZAP */ - DMU_OT_MASTER_NODE, /* ZAP */ - DMU_OT_UNLINKED_SET, /* ZAP */ + DMU_OT_ZNODE, /**< ZNODE */ + DMU_OT_OLDACL, /**< Old ACL */ + DMU_OT_PLAIN_FILE_CONTENTS, /**< UINT8 */ + DMU_OT_DIRECTORY_CONTENTS, /**< ZAP */ + DMU_OT_MASTER_NODE, /**< ZAP */ + DMU_OT_UNLINKED_SET, /**< ZAP */ /* zvol: */ - DMU_OT_ZVOL, /* UINT8 */ - DMU_OT_ZVOL_PROP, /* ZAP */ + DMU_OT_ZVOL, /**< UINT8 */ + DMU_OT_ZVOL_PROP, /**< ZAP */ /* other; for testing only! 
*/ - DMU_OT_PLAIN_OTHER, /* UINT8 */ - DMU_OT_UINT64_OTHER, /* UINT64 */ - DMU_OT_ZAP_OTHER, /* ZAP */ + DMU_OT_PLAIN_OTHER, /**< UINT8 */ + DMU_OT_UINT64_OTHER, /**< UINT64 */ + DMU_OT_ZAP_OTHER, /**< ZAP */ /* new object types: */ - DMU_OT_ERROR_LOG, /* ZAP */ - DMU_OT_SPA_HISTORY, /* UINT8 */ - DMU_OT_SPA_HISTORY_OFFSETS, /* spa_his_phys_t */ - DMU_OT_POOL_PROPS, /* ZAP */ - DMU_OT_DSL_PERMS, /* ZAP */ - DMU_OT_ACL, /* ACL */ - DMU_OT_SYSACL, /* SYSACL */ - DMU_OT_FUID, /* FUID table (Packed NVLIST UINT8) */ - DMU_OT_FUID_SIZE, /* FUID table size UINT64 */ - DMU_OT_NEXT_CLONES, /* ZAP */ - DMU_OT_SCAN_QUEUE, /* ZAP */ - DMU_OT_USERGROUP_USED, /* ZAP */ - DMU_OT_USERGROUP_QUOTA, /* ZAP */ - DMU_OT_USERREFS, /* ZAP */ - DMU_OT_DDT_ZAP, /* ZAP */ - DMU_OT_DDT_STATS, /* ZAP */ - DMU_OT_SA, /* System attr */ - DMU_OT_SA_MASTER_NODE, /* ZAP */ - DMU_OT_SA_ATTR_REGISTRATION, /* ZAP */ - DMU_OT_SA_ATTR_LAYOUTS, /* ZAP */ - DMU_OT_SCAN_XLATE, /* ZAP */ - DMU_OT_DEDUP, /* fake dedup BP from ddt_bp_create() */ - DMU_OT_DEADLIST, /* ZAP */ - DMU_OT_DEADLIST_HDR, /* UINT64 */ - DMU_OT_DSL_CLONES, /* ZAP */ - DMU_OT_BPOBJ_SUBOBJ, /* UINT64 */ + DMU_OT_ERROR_LOG, /**< ZAP */ + DMU_OT_SPA_HISTORY, /**< UINT8 */ + DMU_OT_SPA_HISTORY_OFFSETS, /**< spa_his_phys_t */ + DMU_OT_POOL_PROPS, /**< ZAP */ + DMU_OT_DSL_PERMS, /**< ZAP */ + DMU_OT_ACL, /**< ACL */ + DMU_OT_SYSACL, /**< SYSACL */ + DMU_OT_FUID, /**< FUID table (Packed NVLIST UINT8) */ + DMU_OT_FUID_SIZE, /**< FUID table size UINT64 */ + DMU_OT_NEXT_CLONES, /**< ZAP */ + DMU_OT_SCAN_QUEUE, /**< ZAP */ + DMU_OT_USERGROUP_USED, /**< ZAP */ + DMU_OT_USERGROUP_QUOTA, /**< ZAP */ + DMU_OT_USERREFS, /**< ZAP */ + DMU_OT_DDT_ZAP, /**< ZAP */ + DMU_OT_DDT_STATS, /**< ZAP */ + DMU_OT_SA, /**< System attr */ + DMU_OT_SA_MASTER_NODE, /**< ZAP */ + DMU_OT_SA_ATTR_REGISTRATION, /**< ZAP */ + DMU_OT_SA_ATTR_LAYOUTS, /**< ZAP */ + DMU_OT_SCAN_XLATE, /**< ZAP */ + DMU_OT_DEDUP, /**db_data when + * the caller is allowed to access it. 
Note that + * db->db_data can change when dmu_buf_read, + * dmu_buf_tryupgrade, dmu_buf_will_dirty, or + * dmu_buf_will_fill are called. + * *user_data_ptr_ptr will be set to the new + * value when it changes. + * + * \param evict_func If not NULL, evict_func will be called + * when this buffer is being excised from the + * cache, so that the data structure pointed to + * by user_data_ptr_ptr can be cleaned up. * - * user_data_ptr_ptr should be NULL, or a pointer to a pointer which - * will be set to db->db_data when you are allowed to access it. Note - * that db->db_data (the pointer) can change when you do dmu_buf_read(), - * dmu_buf_tryupgrade(), dmu_buf_will_dirty(), or dmu_buf_will_fill(). - * *user_data_ptr_ptr will be set to the new value when it changes. - * - * If non-NULL, pageout func will be called when this buffer is being - * excised from the cache, so that you can clean up the data structure - * pointed to by user_ptr. + * \return NULL on success, or the existing user ptr if it's already + * been set. * - * dmu_evict_user() will call the pageout func for all buffers in a + * dmu_evict_user() will call the evict_func for all buffers in a * objset with a given pageout func. */ void *dmu_buf_set_user(dmu_buf_t *db, void *user_ptr, void *user_data_ptr_ptr, - dmu_buf_evict_func_t *pageout_func); -/* - * set_user_ie is the same as set_user, but request immediate eviction - * when hold count goes to zero. + dmu_buf_evict_func_t *evict_func); + +/** + * The same as set_user, but request immediate eviction when hold count goes + * to zero. */ void *dmu_buf_set_user_ie(dmu_buf_t *db, void *user_ptr, void *user_data_ptr_ptr, dmu_buf_evict_func_t *pageout_func); @@ -499,12 +521,12 @@ void *dmu_buf_update_user(dmu_buf_t *db_ dmu_buf_evict_func_t *pageout_func); void dmu_evict_user(objset_t *os, dmu_buf_evict_func_t *func); -/* - * Returns the user_ptr set with dmu_buf_set_user(), or NULL if not set. 
+/** + * \return The user_ptr set with dmu_buf_set_user(), or NULL if not set. */ void *dmu_buf_get_user(dmu_buf_t *db); -/* +/** * Indicate that you are going to modify the buffer's data (db_data). * * The transaction (tx) must be assigned to a txg (ie. you've called @@ -513,12 +535,15 @@ void *dmu_buf_get_user(dmu_buf_t *db); */ void dmu_buf_will_dirty(dmu_buf_t *db, dmu_tx_t *tx); -/* +/** * Tells if the given dbuf is freeable. */ boolean_t dmu_buf_freeable(dmu_buf_t *); -/* +/** + * \file dmu.h + *

Transactions

+ * * You must create a transaction, then hold the objects which you will * (or might) modify as part of this transaction. Then you must assign * the transaction to a transaction group. Once the transaction has @@ -553,12 +578,10 @@ int dmu_tx_assign(dmu_tx_t *tx, uint64_t void dmu_tx_wait(dmu_tx_t *tx); void dmu_tx_commit(dmu_tx_t *tx); -/* - * To register a commit callback, dmu_tx_callback_register() must be called. - * - * dcb_data is a pointer to caller private data that is passed on as a - * callback parameter. The caller is responsible for properly allocating and - * freeing it. +typedef void dmu_tx_callback_func_t(void *dcb_data, int error); + +/** + * Registers a commit callback. * * When registering a callback, the transaction must be already created, but * it cannot be committed or aborted. It can be assigned to a txg or not. @@ -567,13 +590,17 @@ void dmu_tx_commit(dmu_tx_t *tx); * to stable storage and will also be called if the dmu_tx is aborted. * If there is any error which prevents the transaction from being committed to * disk, the callback will be called with a value of error != 0. + * + * \param[in] func + * \param[in,out] data A pointer to caller private data that is + * passed on as a callback parameter. The caller + * is responsible for properly allocating and + * freeing it. */ -typedef void dmu_tx_callback_func_t(void *dcb_data, int error); - -void dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *dcb_func, - void *dcb_data); +void dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *func, + void *data); -/* +/** * Free up the data blocks for a defined range of a file. If size is * -1, the range from offset to end-of-file is freed. */ @@ -620,26 +647,26 @@ void xuio_stat_wbuf_nocopy(); extern int zfs_prefetch_disable; -/* +/** * Asynchronously try to read in the data. */ void dmu_prefetch(objset_t *os, uint64_t object, uint64_t offset, uint64_t len); +/** All sizes are in bytes unless otherwise indicated. 
*/ typedef struct dmu_object_info { - /* All sizes are in bytes unless otherwise indicated. */ uint32_t doi_data_block_size; uint32_t doi_metadata_block_size; dmu_object_type_t doi_type; dmu_object_type_t doi_bonus_type; uint64_t doi_bonus_size; - uint8_t doi_indirection; /* 2 = dnode->indirect->data */ + uint8_t doi_indirection; /**< 2 = dnode->indirect->data*/ uint8_t doi_checksum; uint8_t doi_compress; uint8_t doi_pad[5]; - uint64_t doi_physical_blocks_512; /* data + metadata, 512b blks */ + uint64_t doi_physical_blocks_512; /** os_obj_lock > dn_struct_rwlock > * dn_dbufs_mtx > hash_mutexes > db_mtx > dd_lock > leafs * * dp_config_rwlock - * must be held before: everything - * protects dd namespace changes - * protects property changes globally - * held from: - * dsl_dir_open/r: - * dsl_dir_create_sync/w: - * dsl_dir_sync_destroy/w: - * dsl_dir_rename_sync/w: - * dsl_prop_changed_notify/r: + * - must be held before: everything + * - protects dd namespace changes + * - protects property changes globally + * - held from: + * - dsl_dir_open/r: + * - dsl_dir_create_sync/w: + * - dsl_dir_sync_destroy/w: + * - dsl_dir_rename_sync/w: + * - dsl_prop_changed_notify/r: * * os_obj_lock - * must be held before: + * - must be held before: * everything except dp_config_rwlock - * protects os_obj_next - * held from: - * dmu_object_alloc: dn_dbufs_mtx, db_mtx, hash_mutexes, dn_struct_rwlock + * - protects os_obj_next + * - held from: + * - dmu_object_alloc: dn_dbufs_mtx, db_mtx, hash_mutexes, dn_struct_rwlock * * dn_struct_rwlock - * must be held before: + * - must be held before: * everything except dp_config_rwlock and os_obj_lock - * protects structure of dnode (eg. nlevels) - * db_blkptr can change when syncing out change to nlevels - * dn_maxblkid - * dn_nlevels - * dn_*blksz* - * phys nlevels, maxblkid, physical blkptr_t's (?) 
- * held from: - * callers of dbuf_read_impl, dbuf_hold[_impl], dbuf_prefetch - * dmu_object_info_from_dnode: dn_dirty_mtx (dn_datablksz) - * dmu_tx_count_free: - * dbuf_read_impl: db_mtx, dmu_zfetch() - * dmu_zfetch: zf_rwlock/r, zst_lock, dbuf_prefetch() - * dbuf_new_size: db_mtx - * dbuf_dirty: db_mtx - * dbuf_findbp: (callers, phys? - the real need) - * dbuf_create: dn_dbufs_mtx, hash_mutexes, db_mtx (phys?) - * dbuf_prefetch: dn_dirty_mtx, hash_mutexes, db_mtx, dn_dbufs_mtx - * dbuf_hold_impl: hash_mutexes, db_mtx, dn_dbufs_mtx, dbuf_findbp() - * dnode_sync/w (increase_indirection): db_mtx (phys) - * dnode_set_blksz/w: dn_dbufs_mtx (dn_*blksz*) - * dnode_new_blkid/w: (dn_maxblkid) - * dnode_free_range/w: dn_dirty_mtx (dn_maxblkid) - * dnode_next_offset: (phys) + * - protects structure of dnode (eg. nlevels) + * - db_blkptr can change when syncing out change to nlevels + * - dn_maxblkid + * - dn_nlevels + * - dn_*blksz* + * - phys nlevels, maxblkid, physical blkptr_t's (?) + * - held from: + * - callers of dbuf_read_impl, dbuf_hold[_impl], dbuf_prefetch + * - dmu_object_info_from_dnode: dn_dirty_mtx (dn_datablksz) + * - dmu_tx_count_free: + * - dbuf_read_impl: db_mtx, dmu_zfetch() + * - dmu_zfetch: zf_rwlock/r, zst_lock, dbuf_prefetch() + * - dbuf_new_size: db_mtx + * - dbuf_dirty: db_mtx + * - dbuf_findbp: (callers, phys? - the real need) + * - dbuf_create: dn_dbufs_mtx, hash_mutexes, db_mtx (phys?) 
+ * - dbuf_prefetch: dn_dirty_mtx, hash_mutexes, db_mtx, dn_dbufs_mtx + * - dbuf_hold_impl: hash_mutexes, db_mtx, dn_dbufs_mtx, dbuf_findbp() + * - dnode_sync/w (increase_indirection): db_mtx (phys) + * - dnode_set_blksz/w: dn_dbufs_mtx (dn_*blksz*) + * - dnode_new_blkid/w: (dn_maxblkid) + * - dnode_free_range/w: dn_dirty_mtx (dn_maxblkid) + * - dnode_next_offset: (phys) * * dn_dbufs_mtx - * must be held before: + * - must be held before: * db_mtx, hash_mutexes - * protects: - * dn_dbufs - * dn_evicted - * held from: - * dmu_evict_user: db_mtx (dn_dbufs) - * dbuf_free_range: db_mtx (dn_dbufs) - * dbuf_remove_ref: db_mtx, callees: + * - protects: + * - dn_dbufs + * - dn_evicted + * - held from: + * - dmu_evict_user: db_mtx (dn_dbufs) + * - dbuf_free_range: db_mtx (dn_dbufs) + * - dbuf_remove_ref: db_mtx, callees: * dbuf_hash_remove: hash_mutexes, db_mtx - * dbuf_create: hash_mutexes, db_mtx (dn_dbufs) - * dnode_set_blksz: (dn_dbufs) + * - dbuf_create: hash_mutexes, db_mtx (dn_dbufs) + * - dnode_set_blksz: (dn_dbufs) * * hash_mutexes (global) - * must be held before: + * - must be held before: * db_mtx - * protects dbuf_hash_table (global) and db_hash_next - * held from: - * dbuf_find: db_mtx - * dbuf_hash_insert: db_mtx - * dbuf_hash_remove: db_mtx + * - protects dbuf_hash_table (global) and db_hash_next + * - held from: + * - dbuf_find: db_mtx + * - dbuf_hash_insert: db_mtx + * - dbuf_hash_remove: db_mtx * * db_mtx (meta-leaf) - * must be held before: + * - must be held before: * dn_mtx, dn_dirty_mtx, dd_lock (leaf mutexes) - * protects: - * db_state - * db_holds - * db_buf - * db_changed - * db_data_pending - * db_dirtied - * db_link - * db_dirty_node (??) 
- * db_dirtycnt - * db_d.* - * db.* - * held from: - * dbuf_dirty: dn_mtx, dn_dirty_mtx - * dbuf_dirty->dsl_dir_willuse_space: dd_lock - * dbuf_dirty->dbuf_new_block->dsl_dataset_block_freeable: dd_lock - * dbuf_undirty: dn_dirty_mtx (db_d) - * dbuf_write_done: dn_dirty_mtx (db_state) - * dbuf_* - * dmu_buf_update_user: none (db_d) - * dmu_evict_user: none (db_d) (maybe can eliminate) - * dbuf_find: none (db_holds) - * dbuf_hash_insert: none (db_holds) - * dmu_buf_read_array_impl: none (db_state, db_changed) - * dmu_sync: none (db_dirty_node, db_d) - * dnode_reallocate: none (db) + * - protects: + * - db_state + * - db_holds + * - db_buf + * - db_changed + * - db_data_pending + * - db_dirtied + * - db_link + * - db_dirty_node (??) + * - db_dirtycnt + * - db_d.* + * - db.* + * - held from: + * - dbuf_dirty: dn_mtx, dn_dirty_mtx + * - dbuf_dirty->dsl_dir_willuse_space: dd_lock + * - dbuf_dirty->dbuf_new_block->dsl_dataset_block_freeable: dd_lock + * - dbuf_undirty: dn_dirty_mtx (db_d) + * - dbuf_write_done: dn_dirty_mtx (db_state) + * - dbuf_* + * - dmu_buf_update_user: none (db_d) + * - dmu_evict_user: none (db_d) (maybe can eliminate) + * - dbuf_find: none (db_holds) + * - dbuf_hash_insert: none (db_holds) + * - dmu_buf_read_array_impl: none (db_state, db_changed) + * - dmu_sync: none (db_dirty_node, db_d) + * - dnode_reallocate: none (db) * * dn_mtx (leaf) - * protects: - * dn_dirty_dbufs - * dn_ranges - * phys accounting - * dn_allocated_txg - * dn_free_txg - * dn_assigned_txg - * dd_assigned_tx - * dn_notxholds - * dn_dirtyctx - * dn_dirtyctx_firstset - * (dn_phys copy fields?) - * (dn_phys contents?) 
- * held from: - * dnode_* - * dbuf_dirty: none - * dbuf_sync: none (phys accounting) - * dbuf_undirty: none (dn_ranges, dn_dirty_dbufs) - * dbuf_write_done: none (phys accounting) - * dmu_object_info_from_dnode: none (accounting) - * dmu_tx_commit: none - * dmu_tx_hold_object_impl: none - * dmu_tx_try_assign: dn_notxholds(cv) - * dmu_tx_unassign: none + * - protects: + * - dn_dirty_dbufs + * - dn_ranges + * - phys accounting + * - dn_allocated_txg + * - dn_free_txg + * - dn_assigned_txg + * - dd_assigned_tx + * - dn_notxholds + * - dn_dirtyctx + * - dn_dirtyctx_firstset + * - (dn_phys copy fields?) + * - (dn_phys contents?) + * - held from: + * - dnode_* + * - dbuf_dirty: none + * - dbuf_sync: none (phys accounting) + * - dbuf_undirty: none (dn_ranges, dn_dirty_dbufs) + * - dbuf_write_done: none (phys accounting) + * - dmu_object_info_from_dnode: none (accounting) + * - dmu_tx_commit: none + * - dmu_tx_hold_object_impl: none + * - dmu_tx_try_assign: dn_notxholds(cv) + * - dmu_tx_unassign: none * * dd_lock - * must be held before: - * ds_lock - * ancestors' dd_lock - * protects: - * dd_prop_cbs - * dd_sync_* - * dd_used_bytes - * dd_tempreserved - * dd_space_towrite - * dd_myname - * dd_phys accounting? - * held from: - * dsl_dir_* - * dsl_prop_changed_notify: none (dd_prop_cbs) - * dsl_prop_register: none (dd_prop_cbs) - * dsl_prop_unregister: none (dd_prop_cbs) - * dsl_dataset_block_freeable: none (dd_sync_*) + * - must be held before: + * - ds_lock + * - ancestors' dd_lock + * - protects: + * - dd_prop_cbs + * - dd_sync_* + * - dd_used_bytes + * - dd_tempreserved + * - dd_space_towrite + * - dd_myname + * - dd_phys accounting? 
+ * - held from: + * - dsl_dir_* + * - dsl_prop_changed_notify: none (dd_prop_cbs) + * - dsl_prop_register: none (dd_prop_cbs) + * - dsl_prop_unregister: none (dd_prop_cbs) + * - dsl_dataset_block_freeable: none (dd_sync_*) * * os_lock (leaf) - * protects: - * os_dirty_dnodes - * os_free_dnodes - * os_dnodes - * os_downgraded_dbufs - * dn_dirtyblksz - * dn_dirty_link - * held from: - * dnode_create: none (os_dnodes) - * dnode_destroy: none (os_dnodes) - * dnode_setdirty: none (dn_dirtyblksz, os_*_dnodes) - * dnode_free: none (dn_dirtyblksz, os_*_dnodes) + * - protects: + * - os_dirty_dnodes + * - os_free_dnodes + * - os_dnodes + * - os_downgraded_dbufs + * - dn_dirtyblksz + * - dn_dirty_link + * - held from: + * - dnode_create: none (os_dnodes) + * - dnode_destroy: none (os_dnodes) + * - dnode_setdirty: none (dn_dirtyblksz, os_*_dnodes) + * - dnode_free: none (dn_dirtyblksz, os_*_dnodes) * * ds_lock - * protects: - * ds_objset - * ds_open_refcount - * ds_snapname - * ds_phys accounting - * ds_phys userrefs zapobj - * ds_reserved - * held from: - * dsl_dataset_* + * - protects: + * - ds_objset + * - ds_open_refcount + * - ds_snapname + * - ds_phys accounting + * - ds_phys userrefs zapobj + * - ds_reserved + * - held from: + * - dsl_dataset_* * * dr_mtx (leaf) - * protects: - * dr_children - * held from: - * dbuf_dirty - * dbuf_undirty - * dbuf_sync_indirect - * dnode_new_blkid + * - protects: + * - dr_children + * - held from: + * - dbuf_dirty + * - dbuf_undirty + * - dbuf_sync_indirect + * - dnode_new_blkid */ struct objset; @@ -244,13 +246,13 @@ typedef struct dmu_xuio { } dmu_xuio_t; typedef struct xuio_stats { - /* loaned yet not returned arc_buf */ + /** loaned yet not returned arc_buf */ kstat_named_t xuiostat_onloan_rbuf; kstat_named_t xuiostat_onloan_wbuf; - /* whether a copy is made when loaning out a read buffer */ + /** whether a copy is made when loaning out a read buffer */ kstat_named_t xuiostat_rbuf_copied; kstat_named_t xuiostat_rbuf_nocopy; - /* 
whether a copy is made when assigning a write buffer */ + /** whether a copy is made when assigning a write buffer */ kstat_named_t xuiostat_wbuf_copied; kstat_named_t xuiostat_wbuf_nocopy; } xuio_stats_t; diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dmu_objset.h SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dmu_objset.h --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dmu_objset.h 2012-11-16 11:07:22.174448653 -0700 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dmu_objset.h 2012-11-15 18:17:57.105456026 -0700 @@ -65,7 +65,9 @@ typedef struct objset_phys { } objset_phys_t; struct objset { - /* Immutable: */ + /** + * \name Immutable + * \{ */ struct dsl_dataset *os_dsl_dataset; spa_t *os_spa; arc_buf_t *os_phys_buf; @@ -81,7 +83,10 @@ struct objset { dnode_handle_t os_groupused_dnode; zilog_t *os_zil; - /* can change, under dsl_dir's locks: */ + /** + * \} + * \name can change, under dsl_dir's locks + * \{ */ uint8_t os_checksum; uint8_t os_compress; uint8_t os_copies; @@ -92,30 +97,46 @@ struct objset { uint8_t os_secondary_cache; uint8_t os_sync; - /* no lock needed: */ - struct dmu_tx *os_synctx; /* XXX sketchy */ + /** + * \} + * \name no lock needed + * \{ */ + struct dmu_tx *os_synctx; /**< XXX sketchy */ blkptr_t *os_rootbp; zil_header_t os_zil_header; list_t os_synced_dnodes; uint64_t os_flags; - /* Protected by os_obj_lock */ + /** + * \} + * \name Protected by os_obj_lock + * \{ */ kmutex_t os_obj_lock; uint64_t os_obj_next; - /* Protected by os_lock */ + /** + * \} + * \name Protected by os_lock + * \{*/ kmutex_t os_lock; list_t os_dirty_dnodes[TXG_SIZE]; list_t os_free_dnodes[TXG_SIZE]; list_t os_dnodes; list_t os_downgraded_dbufs; - /* stuff we store for the user */ + /** + * \} + * \name stuff we store for the user + * \{*/ kmutex_t os_user_ptr_lock; void *os_user_ptr; - /* SA layout/attribute registration */ + /** + * \} + * \name SA 
layout/attribute registration + * \{*/ sa_os_t *os_sa; + /** \} */ }; #define DMU_META_OBJSET 0 diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dmu_traverse.h SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dmu_traverse.h --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dmu_traverse.h 2012-11-16 11:07:22.175455613 -0700 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dmu_traverse.h 2012-11-15 18:17:57.108456616 -0700 @@ -50,7 +50,7 @@ typedef int (blkptr_cb_t)(spa_t *spa, zi #define TRAVERSE_PREFETCH (TRAVERSE_PREFETCH_METADATA | TRAVERSE_PREFETCH_DATA) #define TRAVERSE_HARD (1<<4) -/* Special traverse error return value to indicate skipping of children */ +/** Special traverse error return value to indicate skipping of children */ #define TRAVERSE_VISIT_NO_CHILDREN -1 int traverse_dataset(struct dsl_dataset *ds, diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dmu_tx.h SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dmu_tx.h --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dmu_tx.h 2012-10-17 17:00:59.842591836 -0600 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dmu_tx.h 2012-11-15 00:11:20.313457623 -0700 @@ -41,12 +41,12 @@ struct dsl_pool; struct dnode; struct dsl_dir; +/** + * No synchronization is needed because a tx can only be handled + * by one thread. + */ struct dmu_tx { - /* - * No synchronization is needed because a tx can only be handled - * by one thread. 
- */ - list_t tx_holds; /* list of dmu_tx_hold_t */ + list_t tx_holds; /**< list of dmu_tx_hold_t */ objset_t *tx_objset; struct dsl_dir *tx_dir; struct dsl_pool *tx_pool; @@ -56,7 +56,7 @@ struct dmu_tx { txg_handle_t tx_txgh; void *tx_tempreserve_cookie; struct dmu_tx_hold *tx_needassign_txh; - list_t tx_callbacks; /* list of dmu_tx_callback_t on this dmu_tx */ + list_t tx_callbacks; /**< list of dmu_tx_callback_t on this dmu_tx */ uint8_t tx_anyobj; int tx_err; #ifdef ZFS_DEBUG @@ -98,9 +98,9 @@ typedef struct dmu_tx_hold { } dmu_tx_hold_t; typedef struct dmu_tx_callback { - list_node_t dcb_node; /* linked to tx_callbacks list */ - dmu_tx_callback_func_t *dcb_func; /* caller function pointer */ - void *dcb_data; /* caller private data */ + list_node_t dcb_node; /**< linked to tx_callbacks list */ + dmu_tx_callback_func_t *dcb_func; /**< caller function pointer */ + void *dcb_data; /**< caller private data */ } dmu_tx_callback_t; /* @@ -113,8 +113,6 @@ void dmu_tx_abort(dmu_tx_t *tx); uint64_t dmu_tx_get_txg(dmu_tx_t *tx); void dmu_tx_wait(dmu_tx_t *tx); -void dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *dcb_func, - void *dcb_data); void dmu_tx_do_callbacks(list_t *cb_list, int error); /* diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dmu_zfetch.h SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dmu_zfetch.h --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dmu_zfetch.h 2012-10-17 17:00:59.842591836 -0600 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dmu_zfetch.h 2012-10-07 20:59:13.886591408 -0600 @@ -34,31 +34,31 @@ extern "C" { extern uint64_t zfetch_array_rd_sz; -struct dnode; /* so we can reference dnode */ +struct dnode; /**< so we can reference dnode */ typedef enum zfetch_dirn { - ZFETCH_FORWARD = 1, /* prefetch increasing block numbers */ - ZFETCH_BACKWARD = -1 /* prefetch decreasing block numbers */ + ZFETCH_FORWARD = 1, /**< prefetch 
increasing block numbers*/ + ZFETCH_BACKWARD = -1 /**< prefetch decreasing block numbers*/ } zfetch_dirn_t; typedef struct zstream { - uint64_t zst_offset; /* offset of starting block in range */ - uint64_t zst_len; /* length of range, in blocks */ - zfetch_dirn_t zst_direction; /* direction of prefetch */ - uint64_t zst_stride; /* length of stride, in blocks */ - uint64_t zst_ph_offset; /* prefetch offset, in blocks */ - uint64_t zst_cap; /* prefetch limit (cap), in blocks */ - kmutex_t zst_lock; /* protects stream */ - clock_t zst_last; /* lbolt of last prefetch */ - avl_node_t zst_node; /* embed avl node here */ + uint64_t zst_offset; /**< offset of starting block in range*/ + uint64_t zst_len; /**< length of range, in blocks */ + zfetch_dirn_t zst_direction; /**< direction of prefetch */ + uint64_t zst_stride; /**< length of stride, in blocks */ + uint64_t zst_ph_offset; /**< prefetch offset, in blocks */ + uint64_t zst_cap; /**< prefetch limit (cap), in blocks */ + kmutex_t zst_lock; /**< protects stream */ + clock_t zst_last; /**< lbolt of last prefetch */ + avl_node_t zst_node; /**< embed avl node here */ } zstream_t; typedef struct zfetch { - krwlock_t zf_rwlock; /* protects zfetch structure */ - list_t zf_stream; /* AVL tree of zstream_t's */ - struct dnode *zf_dnode; /* dnode that owns this zfetch */ - uint32_t zf_stream_cnt; /* # of active streams */ - uint64_t zf_alloc_fail; /* # of failed attempts to alloc strm */ + krwlock_t zf_rwlock; /**< protects zfetch structure */ + list_t zf_stream; /**< AVL tree of zstream_t's */ + struct dnode *zf_dnode; /**< dnode that owns this zfetch */ + uint32_t zf_stream_cnt; /**< # of active streams */ + uint64_t zf_alloc_fail; /**<# of failed attempts to alloc strm*/ } zfetch_t; void zfetch_init(void); diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dnode.h SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dnode.h --- 
SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dnode.h 2012-11-16 11:07:22.175455613 -0700 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dnode.h 2012-11-15 18:17:57.124458333 -0700 @@ -39,44 +39,53 @@ extern "C" { #endif -/* - * dnode_hold() flags. +/** + * \name dnode_hold() flags + * \{ */ #define DNODE_MUST_BE_ALLOCATED 1 #define DNODE_MUST_BE_FREE 2 -/* - * dnode_next_offset() flags. +/** + * \} + * \name dnode_next_offset() flags + * \{ */ #define DNODE_FIND_HOLE 1 #define DNODE_FIND_BACKWARDS 2 #define DNODE_FIND_HAVELOCK 4 -/* - * Fixed constants. +/** + * \} + * \name Fixed constants + * \{ */ -#define DNODE_SHIFT 9 /* 512 bytes */ -#define DN_MIN_INDBLKSHIFT 10 /* 1k */ -#define DN_MAX_INDBLKSHIFT 14 /* 16k */ -#define DNODE_BLOCK_SHIFT 14 /* 16k */ -#define DNODE_CORE_SIZE 64 /* 64 bytes for dnode sans blkptrs */ -#define DN_MAX_OBJECT_SHIFT 48 /* 256 trillion (zfs_fid_t limit) */ -#define DN_MAX_OFFSET_SHIFT 64 /* 2^64 bytes in a dnode */ - -/* - * dnode id flags +#define DNODE_SHIFT 9 /**< 512 bytes */ +#define DN_MIN_INDBLKSHIFT 10 /**< 1k */ +#define DN_MAX_INDBLKSHIFT 14 /**< 16k */ +#define DNODE_BLOCK_SHIFT 14 /**< 16k */ +#define DNODE_CORE_SIZE 64 /**< 64 bytes for dnode sans blkptrs */ +#define DN_MAX_OBJECT_SHIFT 48 /**< 256 trillion (zfs_fid_t limit) */ +#define DN_MAX_OFFSET_SHIFT 64 /**< 2^64 bytes in a dnode */ + +/** + * \} + * \name dnode id flags * - * Note: a file will never ever have its + * \note a file will never ever have its * ids moved from bonus->spill * and only in a crypto environment would it be on spill + * \{ */ #define DN_ID_CHKED_BONUS 0x1 #define DN_ID_CHKED_SPILL 0x2 #define DN_ID_OLD_EXIST 0x4 #define DN_ID_NEW_EXIST 0x8 -/* - * Derived constants. 
+/** + * \} + * \name Derived constants + * \{ */ #define DNODE_SIZE (1 << DNODE_SHIFT) #define DN_MAX_NBLKPTR ((DNODE_SIZE - DNODE_CORE_SIZE) >> SPA_BLKPTRSHIFT) @@ -89,6 +98,7 @@ extern "C" { #define DNODES_PER_BLOCK (1ULL << DNODES_PER_BLOCK_SHIFT) #define DNODES_PER_LEVEL_SHIFT (DN_MAX_INDBLKSHIFT - SPA_BLKPTRSHIFT) #define DNODES_PER_LEVEL (1ULL << DNODES_PER_LEVEL_SHIFT) +/** \} */ /* The +2 here is a cheesy way to round up */ #define DN_MAX_LEVELS (2 + ((DN_MAX_OFFSET_SHIFT - SPA_MINBLOCKSHIFT) / \ @@ -112,29 +122,29 @@ enum dnode_dirtycontext { DN_DIRTY_SYNC }; -/* Is dn_used in bytes? if not, it's in multiples of SPA_MINBLOCKSIZE */ +/** Is dn_used in bytes? if not, it's in multiples of SPA_MINBLOCKSIZE */ #define DNODE_FLAG_USED_BYTES (1<<0) #define DNODE_FLAG_USERUSED_ACCOUNTED (1<<1) -/* Does dnode have a SA spill blkptr in bonus? */ +/** Does dnode have a SA spill blkptr in bonus? */ #define DNODE_FLAG_SPILL_BLKPTR (1<<2) typedef struct dnode_phys { - uint8_t dn_type; /* dmu_object_type_t */ - uint8_t dn_indblkshift; /* ln2(indirect block size) */ - uint8_t dn_nlevels; /* 1=dn_blkptr->data blocks */ - uint8_t dn_nblkptr; /* length of dn_blkptr */ - uint8_t dn_bonustype; /* type of data in bonus buffer */ - uint8_t dn_checksum; /* ZIO_CHECKSUM type */ - uint8_t dn_compress; /* ZIO_COMPRESS type */ - uint8_t dn_flags; /* DNODE_FLAG_* */ - uint16_t dn_datablkszsec; /* data block size in 512b sectors */ - uint16_t dn_bonuslen; /* length of dn_bonus */ + uint8_t dn_type; /**< dmu_object_type_t */ + uint8_t dn_indblkshift; /**< ln2(indirect block size) */ + uint8_t dn_nlevels; /**< 1=dn_blkptr->data blocks */ + uint8_t dn_nblkptr; /**< length of dn_blkptr */ + uint8_t dn_bonustype; /**< type of data in bonus buffer */ + uint8_t dn_checksum; /**< ZIO_CHECKSUM type */ + uint8_t dn_compress; /**< ZIO_COMPRESS type */ + uint8_t dn_flags; /**< DNODE_FLAG_* */ + uint16_t dn_datablkszsec; /**< data block size in 512b sectors */ + uint16_t dn_bonuslen; /**< length 
of dn_bonus */ uint8_t dn_pad2[4]; /* accounting is protected by dn_dirty_mtx */ - uint64_t dn_maxblkid; /* largest allocated block ID */ - uint64_t dn_used; /* bytes (or sectors) of disk space */ + uint64_t dn_maxblkid; /**< largest allocated block ID */ + uint64_t dn_used; /**< bytes (or sectors) of disk space */ uint64_t dn_pad3[4]; @@ -144,56 +154,70 @@ typedef struct dnode_phys { } dnode_phys_t; typedef struct dnode { - /* - * dn_struct_rwlock protects the structure of the dnode, - * including the number of levels of indirection (dn_nlevels), - * dn_maxblkid, and dn_next_* + /** + * Protects the structure of the dnode, including the number of levels + * of indirection (dn_nlevels), dn_maxblkid, and dn_next_* */ krwlock_t dn_struct_rwlock; - /* Our link on dn_objset->os_dnodes list; protected by os_lock. */ + /** Our link on dn_objset->os_dnodes list; protected by os_lock. */ list_node_t dn_link; - /* immutable: */ + /** + * \name Immutable + * \{ */ struct objset *dn_objset; uint64_t dn_object; struct dmu_buf_impl *dn_dbuf; struct dnode_handle *dn_handle; - dnode_phys_t *dn_phys; /* pointer into dn->dn_dbuf->db.db_data */ + dnode_phys_t *dn_phys; /**< pointer into dn->dn_dbuf->db.db_data */ + /** \} */ - /* - * Copies of stuff in dn_phys. They're valid in the open - * context (eg. even before the dnode is first synced). - * Where necessary, these are protected by dn_struct_rwlock. + /** + * \name Copies of stuff in dn_phys + * They're valid in the open context (eg. even before the dnode is + * first synced). Where necessary, these are protected by + * dn_struct_rwlock. 
+ * \{ */ - dmu_object_type_t dn_type; /* object type */ - uint16_t dn_bonuslen; /* bonus length */ - uint8_t dn_bonustype; /* bonus type */ - uint8_t dn_nblkptr; /* number of blkptrs (immutable) */ - uint8_t dn_checksum; /* ZIO_CHECKSUM type */ - uint8_t dn_compress; /* ZIO_COMPRESS type */ + dmu_object_type_t dn_type; /**< object type */ + uint16_t dn_bonuslen; /**< bonus length */ + uint8_t dn_bonustype; /**< bonus type */ + uint8_t dn_nblkptr; /**< number of blkptrs (immutable) */ + uint8_t dn_checksum; /**< ZIO_CHECKSUM type */ + uint8_t dn_compress; /**< ZIO_COMPRESS type */ uint8_t dn_nlevels; uint8_t dn_indblkshift; - uint8_t dn_datablkshift; /* zero if blksz not power of 2! */ - uint8_t dn_moved; /* Has this dnode been moved? */ - uint16_t dn_datablkszsec; /* in 512b sectors */ - uint32_t dn_datablksz; /* in bytes */ + uint8_t dn_datablkshift; /**< zero if blksz not power of 2! */ + uint8_t dn_moved; /**< Has this dnode been moved? */ + uint16_t dn_datablkszsec; /**< in 512b sectors */ + uint32_t dn_datablksz; /**< in bytes */ uint64_t dn_maxblkid; uint8_t dn_next_nblkptr[TXG_SIZE]; uint8_t dn_next_nlevels[TXG_SIZE]; uint8_t dn_next_indblkshift[TXG_SIZE]; uint8_t dn_next_bonustype[TXG_SIZE]; - uint8_t dn_rm_spillblk[TXG_SIZE]; /* for removing spill blk */ + uint8_t dn_rm_spillblk[TXG_SIZE]; /**< for removing spill blk */ uint16_t dn_next_bonuslen[TXG_SIZE]; - uint32_t dn_next_blksz[TXG_SIZE]; /* next block size in bytes */ + uint32_t dn_next_blksz[TXG_SIZE]; /**< next block size in bytes */ + /** \} */ - /* protected by dn_dbufs_mtx; declared here to fill 32-bit hole */ - uint32_t dn_dbufs_count; /* count of dn_dbufs */ - - /* protected by os_lock: */ - list_node_t dn_dirty_link[TXG_SIZE]; /* next on dataset's dirty */ - - /* protected by dn_mtx: */ + /** + * \name Protected by dn_dbufs_mtx + * declared here to fill 32-bit hole + * \{ */ + uint32_t dn_dbufs_count; /**< count of dn_dbufs */ + /** \} */ + + /** + * \name Protected by os_lock + * \{ */ + 
list_node_t dn_dirty_link[TXG_SIZE]; /**< next on dataset's dirty */ + /** \} */ + + /** + * \name Protected by dn_mtx + * \{ */ kmutex_t dn_mtx; list_t dn_dirty_records[TXG_SIZE]; avl_tree_t dn_ranges[TXG_SIZE]; @@ -202,48 +226,61 @@ typedef struct dnode { uint64_t dn_assigned_txg; kcondvar_t dn_notxholds; enum dnode_dirtycontext dn_dirtyctx; - uint8_t *dn_dirtyctx_firstset; /* dbg: contents meaningless */ + uint8_t *dn_dirtyctx_firstset; /**< dbg: contents meaningless */ + /** \} */ - /* protected by own devices */ + /** + * \name Protected by own devices + * \{ */ refcount_t dn_tx_holds; refcount_t dn_holds; + /** \} */ kmutex_t dn_dbufs_mtx; - list_t dn_dbufs; /* descendent dbufs */ - - /* protected by dn_struct_rwlock */ - struct dmu_buf_impl *dn_bonus; /* bonus buffer dbuf */ + list_t dn_dbufs; /**< descendent dbufs */ - boolean_t dn_have_spill; /* have spill or are spilling */ - - /* parent IO for current sync write */ + /** + * \name Protected by dn_struct_rwlock + * \{*/ + struct dmu_buf_impl *dn_bonus; /**< bonus buffer dbuf */ + /** \} */ + + boolean_t dn_have_spill; /**< have spill or are spilling */ + + /** + * \name Parent IO for current sync write + * \{*/ zio_t *dn_zio; + /** \} */ - /* used in syncing context */ - uint64_t dn_oldused; /* old phys used bytes */ - uint64_t dn_oldflags; /* old phys dn_flags */ + /** + * \name Used in syncing context + * \{*/ + uint64_t dn_oldused; /**< old phys used bytes */ + uint64_t dn_oldflags; /**< old phys dn_flags */ uint64_t dn_olduid, dn_oldgid; uint64_t dn_newuid, dn_newgid; int dn_id_flags; + /** \} */ - /* holds prefetch structure */ + /** holds prefetch structure */ struct zfetch dn_zfetch; } dnode_t; -/* +/** * Adds a level of indirection between the dbuf and the dnode to avoid * iterating descendent dbufs in dnode_move(). Handles are not allocated * individually, but as an array of child dnodes in dnode_hold_impl(). 
*/ typedef struct dnode_handle { - /* Protects dnh_dnode from modification by dnode_move(). */ + /** Protects dnh_dnode from modification by dnode_move(). */ zrlock_t dnh_zrlock; dnode_t *dnh_dnode; } dnode_handle_t; typedef struct dnode_children { - size_t dnc_count; /* number of children */ - dnode_handle_t dnc_children[1]; /* sized dynamically */ + size_t dnc_count; /**< number of children */ + dnode_handle_t dnc_children[1]; /**< sized dynamically */ } dnode_children_t; typedef struct free_range { diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dsl_dataset.h SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dsl_dataset.h --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dsl_dataset.h 2012-11-16 11:07:22.176456494 -0700 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dsl_dataset.h 2012-11-15 18:17:57.129455946 -0700 @@ -49,7 +49,7 @@ struct dsl_pool; #define DS_FLAG_INCONSISTENT (1ULL<<0) #define DS_IS_INCONSISTENT(ds) \ ((ds)->ds_phys->ds_flags & DS_FLAG_INCONSISTENT) -/* +/** * NB: nopromote can not yet be set, but we want support for it in this * on-disk version, so that we don't need to upgrade for it later. It * will be needed when we implement 'zfs split' (where the split off @@ -57,14 +57,14 @@ struct dsl_pool; */ #define DS_FLAG_NOPROMOTE (1ULL<<1) -/* +/** * DS_FLAG_UNIQUE_ACCURATE is set if ds_unique_bytes has been correctly * calculated for head datasets (starting with SPA_VERSION_UNIQUE_ACCURATE, * refquota/refreservations). */ #define DS_FLAG_UNIQUE_ACCURATE (1ULL<<2) -/* +/** * DS_FLAG_DEFER_DESTROY is set after 'zfs destroy -d' has been called * on a dataset. This allows the dataset to be destroyed using 'zfs release'. 
*/ @@ -72,22 +72,22 @@ struct dsl_pool; #define DS_IS_DEFER_DESTROY(ds) \ ((ds)->ds_phys->ds_flags & DS_FLAG_DEFER_DESTROY) -/* +/** * DS_FLAG_CI_DATASET is set if the dataset contains a file system whose * name lookups should be performed case-insensitively. */ #define DS_FLAG_CI_DATASET (1ULL<<16) typedef struct dsl_dataset_phys { - uint64_t ds_dir_obj; /* DMU_OT_DSL_DIR */ - uint64_t ds_prev_snap_obj; /* DMU_OT_DSL_DATASET */ + uint64_t ds_dir_obj; /**< DMU_OT_DSL_DIR */ + uint64_t ds_prev_snap_obj; /**< DMU_OT_DSL_DATASET */ uint64_t ds_prev_snap_txg; - uint64_t ds_next_snap_obj; /* DMU_OT_DSL_DATASET */ - uint64_t ds_snapnames_zapobj; /* DMU_OT_DSL_DS_SNAP_MAP 0 for snaps */ - uint64_t ds_num_children; /* clone/snap children; ==0 for head */ - uint64_t ds_creation_time; /* seconds since 1970 */ + uint64_t ds_next_snap_obj; /**< DMU_OT_DSL_DATASET */ + uint64_t ds_snapnames_zapobj;/**< DMU_OT_DSL_DS_SNAP_MAP 0 for snaps */ + uint64_t ds_num_children;/**< clone/snap children; ==0 for head */ + uint64_t ds_creation_time; /**< seconds since 1970 */ uint64_t ds_creation_txg; - uint64_t ds_deadlist_obj; /* DMU_OT_DEADLIST */ + uint64_t ds_deadlist_obj; /**< DMU_OT_DEADLIST */ /* * ds_referenced_bytes, ds_compressed_bytes, and ds_uncompressed_bytes * include all blocks referenced by this dataset, including those @@ -96,85 +96,113 @@ typedef struct dsl_dataset_phys { uint64_t ds_referenced_bytes; uint64_t ds_compressed_bytes; uint64_t ds_uncompressed_bytes; - uint64_t ds_unique_bytes; /* only relevant to snapshots */ - /* - * The ds_fsid_guid is a 56-bit ID that can change to avoid - * collisions. The ds_guid is a 64-bit ID that will never - * change, so there is a small probability that it will collide. - */ + uint64_t ds_unique_bytes; /**< only relevant to snapshots */ + /** A 56-bit ID that can change to avoid collisions. */ uint64_t ds_fsid_guid; + /** + * A 64-bit ID that will never change, so there is a small probability + * that it will collide. 
+ */ uint64_t ds_guid; - uint64_t ds_flags; /* DS_FLAG_* */ + uint64_t ds_flags; /**< DS_FLAG_* */ blkptr_t ds_bp; - uint64_t ds_next_clones_obj; /* DMU_OT_DSL_CLONES */ - uint64_t ds_props_obj; /* DMU_OT_DSL_PROPS for snaps */ - uint64_t ds_userrefs_obj; /* DMU_OT_USERREFS */ + uint64_t ds_next_clones_obj; /**< DMU_OT_DSL_CLONES */ + uint64_t ds_props_obj; /**< DMU_OT_DSL_PROPS for snaps */ + uint64_t ds_userrefs_obj; /**< DMU_OT_USERREFS */ uint64_t ds_pad[5]; /* pad out to 320 bytes for good measure */ } dsl_dataset_phys_t; typedef struct dsl_dataset { - /* Immutable: */ + /** + * \name Immutable + * \{ */ struct dsl_dir *ds_dir; dsl_dataset_phys_t *ds_phys; dmu_buf_t *ds_dbuf; uint64_t ds_object; uint64_t ds_fsid_guid; - /* only used in syncing context, only valid for non-snapshots: */ + /** + * \} + * \name Only used in syncing context, only valid for non-snapshots + * \{ */ struct dsl_dataset *ds_prev; - /* has internal locking: */ + /** + * \} + * \name Has internal locking + * \{ */ dsl_deadlist_t ds_deadlist; bplist_t ds_pending_deadlist; - /* to protect against multiple concurrent incremental recv */ + /** + * \} + * \name To protect against multiple concurrent incremental recv + * \{*/ kmutex_t ds_recvlock; - /* protected by lock on pool's dp_dirty_datasets list */ + /** + * \} + * \name Protected by lock on pool's dp_dirty_datasets list + * \{ */ txg_node_t ds_dirty_link; list_node_t ds_synced_link; - /* + /** + * \} + * \name Protected by ds_lock * ds_phys->ds_ is also protected by ds_lock. 
- * Protected by ds_lock: + * \{ */ kmutex_t ds_lock; objset_t *ds_objset; uint64_t ds_userrefs; - /* - * ds_owner is protected by the ds_rwlock and the ds_lock + /** + * \} + * \name ds_owner is protected by the ds_rwlock and the ds_lock + * \{ */ krwlock_t ds_rwlock; kcondvar_t ds_exclusive_cv; void *ds_owner; - /* no locking; only for making guesses */ + /** + * \} + * \name no locking; only for making guesses + * \{ */ uint64_t ds_trysnap_txg; - /* for objset_open() */ + /** + * \} + * \name for objset_open() + * \{*/ kmutex_t ds_opening_lock; + /** \} */ - uint64_t ds_reserved; /* cached refreservation */ - uint64_t ds_quota; /* cached refquota */ + uint64_t ds_reserved; /**< cached refreservation */ + uint64_t ds_quota; /**< cached refquota */ kmutex_t ds_sendstream_lock; list_t ds_sendstreams; - /* Protected by ds_lock; keep at end of struct for better locality */ + /** + * \name Protected by ds_lock; keep at end of struct for better locality + * \{ */ char ds_snapname[MAXNAMELEN]; + /** \} */ } dsl_dataset_t; struct dsl_ds_destroyarg { - dsl_dataset_t *ds; /* ds to destroy */ - dsl_dataset_t *rm_origin; /* also remove our origin? */ - boolean_t is_origin_rm; /* set if removing origin snap */ - boolean_t defer; /* destroy -d requested? */ - boolean_t releasing; /* destroying due to release? */ - boolean_t need_prep; /* do we need to retry due to EBUSY? */ + dsl_dataset_t *ds; /**< ds to destroy */ + dsl_dataset_t *rm_origin; /**< also remove our origin? */ + boolean_t is_origin_rm; /**< set if removing origin snap */ + boolean_t defer; /**< destroy -d requested? */ + boolean_t releasing; /**< destroying due to release? */ + boolean_t need_prep; /**< do we need to retry due to EBUSY?*/ }; -/* +/** * The max length of a temporary tag prefix is the number of hex digits * required to express UINT64_MAX plus one for the hyphen. 
*/ diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dsl_deadlist.h SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dsl_deadlist.h --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dsl_deadlist.h 2012-10-17 17:00:59.844592442 -0600 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dsl_deadlist.h 2012-10-07 20:59:13.887592223 -0600 @@ -39,7 +39,7 @@ typedef struct dsl_deadlist_phys { uint64_t dl_used; uint64_t dl_comp; uint64_t dl_uncomp; - uint64_t dl_pad[37]; /* pad out to 320b for future expansion */ + uint64_t dl_pad[37]; /**< pad out to 320b for future expansion */ } dsl_deadlist_phys_t; typedef struct dsl_deadlist { @@ -51,7 +51,7 @@ typedef struct dsl_deadlist { dsl_deadlist_phys_t *dl_phys; kmutex_t dl_lock; - /* if it's the old on-disk format: */ + /** if it's the old on-disk format: */ bpobj_t dl_bpobj; boolean_t dl_oldfmt; } dsl_deadlist_t; diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dsl_dir.h SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dsl_dir.h --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dsl_dir.h 2012-10-17 17:00:59.845592426 -0600 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dsl_dir.h 2012-10-07 20:59:13.888591311 -0600 @@ -51,55 +51,67 @@ typedef enum dd_used { #define DD_FLAG_USED_BREAKDOWN (1<<0) typedef struct dsl_dir_phys { - uint64_t dd_creation_time; /* not actually used */ + uint64_t dd_creation_time; /**< not actually used */ uint64_t dd_head_dataset_obj; uint64_t dd_parent_obj; uint64_t dd_origin_obj; uint64_t dd_child_dir_zapobj; - /* + /** * how much space our children are accounting for; for leaf * datasets, == physical space used by fs + snaps */ uint64_t dd_used_bytes; uint64_t dd_compressed_bytes; uint64_t dd_uncompressed_bytes; - /* Administrative quota setting */ + /** Administrative quota setting */ uint64_t dd_quota; - /* 
Administrative reservation setting */ + /** Administrative reservation setting */ uint64_t dd_reserved; uint64_t dd_props_zapobj; - uint64_t dd_deleg_zapobj; /* dataset delegation permissions */ + uint64_t dd_deleg_zapobj; /**< dataset delegation permissions */ uint64_t dd_flags; uint64_t dd_used_breakdown[DD_USED_NUM]; - uint64_t dd_clones; /* dsl_dir objects */ - uint64_t dd_pad[13]; /* pad out to 256 bytes for good measure */ + uint64_t dd_clones; /**< dsl_dir objects */ + uint64_t dd_pad[13]; /**< pad out to 256 bytes for good measure */ } dsl_dir_phys_t; struct dsl_dir { - /* These are immutable; no lock needed: */ + /** + * \name These are immutable; no lock needed + * \{ */ uint64_t dd_object; dsl_dir_phys_t *dd_phys; dmu_buf_t *dd_dbuf; dsl_pool_t *dd_pool; - /* protected by lock on pool's dp_dirty_dirs list */ + /** + * \} + * \name Protected by lock on pool's dp_dirty_dirs list + * \{*/ txg_node_t dd_dirty_link; - /* protected by dp_config_rwlock */ + /** + * \} + * \name protected by dp_config_rwlock + * \{ */ dsl_dir_t *dd_parent; - /* Protected by dd_lock */ + /** + * \} + * \name Protected by dd_lock + * \{*/ kmutex_t dd_lock; - list_t dd_prop_cbs; /* list of dsl_prop_cb_record_t's */ - timestruc_t dd_snap_cmtime; /* last time snapshot namespace changed */ + list_t dd_prop_cbs; /**< list of dsl_prop_cb_record_t's */ + timestruc_t dd_snap_cmtime; /**< last time snapshot namespace changed */ uint64_t dd_origin_txg; + /** \} */ - /* gross estimate of space used by in-flight tx's */ + /** gross estimate of space used by in-flight tx's */ uint64_t dd_tempreserved[TXG_SIZE]; - /* amount of space we expect to write; == amount of dirty data */ + /** amount of space we expect to write; == amount of dirty data */ int64_t dd_space_towrite[TXG_SIZE]; - /* protected by dd_lock; keep at end of struct for better locality */ + /** protected by dd_lock; keep at end of struct for better locality */ char dd_myname[MAXNAMELEN]; }; diff -Nurp 
SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dsl_pool.h SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dsl_pool.h --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dsl_pool.h 2012-11-16 11:07:22.176456494 -0700 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dsl_pool.h 2012-11-16 18:01:27.835453892 -0700 @@ -48,9 +48,9 @@ struct dsl_pool; struct dmu_tx; struct dsl_scan; -/* These macros are for indexing into the zfs_all_blkstats_t. */ +/* For indexing into the zfs_all_blkstats_t. */ #define DMU_OT_DEFERRED DMU_OT_NONE -#define DMU_OT_OTHER DMU_OT_NUMTYPES /* place holder for DMU_OT() types */ +#define DMU_OT_OTHER DMU_OT_NUMTYPES /**< place holder for DMU_OT() types */ #define DMU_OT_TOTAL (DMU_OT_NUMTYPES + 1) typedef struct zfs_blkstat { @@ -70,7 +70,9 @@ typedef struct zfs_all_blkstats { typedef struct dsl_pool { - /* Immutable */ + /** + * \name Immutable + * \{ */ spa_t *dp_spa; struct objset *dp_meta_objset; struct dsl_dir *dp_root_dir; @@ -80,19 +82,26 @@ typedef struct dsl_pool { uint64_t dp_root_dir_obj; struct taskq *dp_vnrele_taskq; - /* No lock needed - sync context only */ + /** + * \} + * \name Sync context only + * No lock needed + * \{ */ blkptr_t dp_meta_rootbp; hrtime_t dp_read_overhead; - uint64_t dp_throughput; /* bytes per millisec */ + uint64_t dp_throughput; /**< bytes per millisec */ uint64_t dp_write_limit; uint64_t dp_tmp_userrefs_obj; bpobj_t dp_free_bpobj; uint64_t dp_bptree_obj; uint64_t dp_empty_bpobj; + /** \} */ struct dsl_scan *dp_scan; - /* Uses dp_lock */ + /** + * \name Uses dp_lock + * \{ */ kmutex_t dp_lock; uint64_t dp_space_towrite[TXG_SIZE]; uint64_t dp_tempreserved[TXG_SIZE]; @@ -100,15 +109,20 @@ typedef struct dsl_pool { uint64_t dp_mos_compressed_delta; uint64_t dp_mos_uncompressed_delta; - /* Has its own locking */ + /** + * \} + * \name Has its own locking + * \{ */ tx_state_t dp_tx; txg_list_t dp_dirty_datasets; txg_list_t 
dp_dirty_zilogs; txg_list_t dp_dirty_dirs; txg_list_t dp_sync_tasks; - /* - * Protects administrative changes (properties, namespace) + /** \} */ + /** + * \brief Protects administrative changes (properties, namespace) + * * It is only held for write in syncing context. Therefore * syncing context does not need to ever have it for read, since * nobody else could possibly have it for write. diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dsl_prop.h SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dsl_prop.h --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dsl_prop.h 2012-10-17 17:00:59.847590793 -0600 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dsl_prop.h 2012-10-07 20:59:13.888591311 -0600 @@ -37,11 +37,11 @@ extern "C" { struct dsl_dataset; struct dsl_dir; -/* The callback func may not call into the DMU or DSL! */ +/** The callback func may not call into the DMU or DSL! */ typedef void (dsl_prop_changed_cb_t)(void *arg, uint64_t newval); typedef struct dsl_prop_cb_record { - list_node_t cbr_node; /* link on dd_prop_cbs */ + list_node_t cbr_node; /**< link on dd_prop_cbs */ struct dsl_dataset *cbr_ds; const char *cbr_propname; dsl_prop_changed_cb_t *cbr_func; @@ -60,7 +60,7 @@ typedef struct dsl_prop_set_arg { int psa_numints; const void *psa_value; - /* + /** * Used to handle the special requirements of the quota and reservation * properties. */ diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dsl_scan.h SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dsl_scan.h --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dsl_scan.h 2012-11-16 11:07:22.177457694 -0700 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/dsl_scan.h 2012-11-15 22:37:24.382482192 -0700 @@ -46,8 +46,8 @@ struct dmu_tx; * purposes. 
*/ typedef struct dsl_scan_phys { - uint64_t scn_func; /* pool_scan_func_t */ - uint64_t scn_state; /* dsl_scan_state_t */ + uint64_t scn_func; /**< pool_scan_func_t */ + uint64_t scn_state; /**< dsl_scan_state_t */ uint64_t scn_queue_obj; uint64_t scn_min_txg; uint64_t scn_max_txg; @@ -55,15 +55,15 @@ typedef struct dsl_scan_phys { uint64_t scn_cur_max_txg; uint64_t scn_start_time; uint64_t scn_end_time; - uint64_t scn_to_examine; /* total bytes to be scanned */ - uint64_t scn_examined; /* bytes scanned so far */ + uint64_t scn_to_examine; /**< total bytes to be scanned */ + uint64_t scn_examined; /**< bytes scanned so far */ uint64_t scn_to_process; uint64_t scn_processed; - uint64_t scn_errors; /* scan I/O error count */ + uint64_t scn_errors; /**< scan I/O error count */ uint64_t scn_ddt_class_max; ddt_bookmark_t scn_ddt_bookmark; zbookmark_t scn_bookmark; - uint64_t scn_flags; /* dsl_scan_flags_t */ + uint64_t scn_flags; /**< dsl_scan_flags_t */ } dsl_scan_phys_t; #define SCAN_PHYS_NUMINTS (sizeof (dsl_scan_phys_t) / sizeof (uint64_t)) @@ -80,10 +80,10 @@ typedef struct dsl_scan { uint64_t scn_sync_start_time; zio_t *scn_zio_root; - /* for freeing blocks */ + /** for freeing blocks */ boolean_t scn_is_bptree; - /* for debugging / information */ + /** for debugging / information */ uint64_t scn_visited_this_txg; dsl_scan_phys_t scn_phys; diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/metaslab_impl.h SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/metaslab_impl.h --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/metaslab_impl.h 2012-10-17 17:00:59.848591608 -0600 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/metaslab_impl.h 2012-10-07 20:59:13.889590655 -0600 @@ -42,10 +42,10 @@ struct metaslab_class { metaslab_group_t *mc_rotor; space_map_ops_t *mc_ops; uint64_t mc_aliquot; - uint64_t mc_alloc; /* total allocated space */ - uint64_t mc_deferred; /* total 
deferred frees */ - uint64_t mc_space; /* total space (alloc + free) */ - uint64_t mc_dspace; /* total deflated space */ + uint64_t mc_alloc; /**< total allocated space */ + uint64_t mc_deferred; /**< total deferred frees */ + uint64_t mc_space; /** #include -/* +/** * A reader-writer lock implementation that allows re-entrant reads, but * still gives writers priority on "new" reads. * diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/sa.h SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/sa.h --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/sa.h 2012-10-17 17:00:59.858591442 -0600 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/sa.h 2012-11-15 16:57:40.884458345 -0700 @@ -28,7 +28,7 @@ #include #include -/* +/** * Currently available byteswap functions. * If it all possible new attributes should used * one of the already defined byteswap functions. @@ -46,21 +46,21 @@ typedef enum sa_bswap_type { typedef uint16_t sa_attr_type_t; -/* +/** * Attribute to register support for. */ typedef struct sa_attr_reg { - char *sa_name; /* attribute name */ + char *sa_name; /**< attribute name */ uint16_t sa_length; - sa_bswap_type_t sa_byteswap; /* bswap functon enum */ - sa_attr_type_t sa_attr; /* filled in during registration */ + sa_bswap_type_t sa_byteswap; /**< bswap functon enum */ + sa_attr_type_t sa_attr; /**< filled in during registration */ } sa_attr_reg_t; typedef void (sa_data_locator_t)(void **, uint32_t *, uint32_t, boolean_t, void *userptr); -/* +/** * array of attributes to store. * * This array should be treated as opaque/private data. @@ -84,14 +84,14 @@ typedef struct sa_bulk_attr { } sa_bulk_attr_t; -/* +/** * special macro for adding entries for bulk attr support - * bulk - sa_bulk_attr_t - * count - integer that will be incremented during each add - * attr - attribute to manipulate - * func - function for accessing data. - * data - pointer to data. 
- * len - length of data + * - bulk - sa_bulk_attr_t + * - count - integer that will be incremented during each add + * - attr - attribute to manipulate + * - func - function for accessing data. + * - data - pointer to data. + * - len - length of data */ #define SA_ADD_BULK_ATTR(b, idx, attr, func, data, len) \ diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/sa_impl.h SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/sa_impl.h --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/sa_impl.h 2012-11-16 11:07:22.177457694 -0700 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/sa_impl.h 2012-11-15 18:17:57.150456043 -0700 @@ -30,7 +30,7 @@ #include #include -/* +/** * Array of known attributes and their * various characteristics. */ @@ -42,27 +42,29 @@ typedef struct sa_attr_table { char *sa_name; } sa_attr_table_t; -/* - * Zap attribute format for attribute registration - * - * 64 56 48 40 32 24 16 8 0 - * +-------+-------+-------+-------+-------+-------+-------+-------+ - * | unused | len | bswap | attr num | - * +-------+-------+-------+-------+-------+-------+-------+-------+ +/** + * \file sa_impl.h * + * Zap attribute format for attribute registration + \verbatim + 64 56 48 40 32 24 16 8 0 + +-------+-------+-------+-------+-------+-------+-------+-------+ + | unused | len | bswap | attr num | + +-------+-------+-------+-------+-------+-------+-------+-------+ + \endverbatim * Zap attribute format for layout information. * * layout information is stored as an array of attribute numbers * The name of the attribute is the layout number (0, 1, 2, ...) - * - * 16 0 - * +---- ---+ - * | attr # | - * +--------+ - * | attr # | - * +--- ----+ - * ...... - * + \verbatim + 16 0 + +---- ---+ + | attr # | + +--------+ + | attr # | + +--- ----+ + ...... 
+ \endverbatim */ #define ATTR_BSWAP(x) BF32_GET(x, 16, 8) @@ -88,7 +90,7 @@ typedef struct sa_attr_table { #define SA_LAYOUTS "LAYOUTS" #define SA_REGISTRY "REGISTRY" -/* +/** * Each unique layout will have their own table * sa_lot (layout_table) */ @@ -97,23 +99,23 @@ typedef struct sa_lot { avl_node_t lot_hash_node; uint64_t lot_num; uint64_t lot_hash; - sa_attr_type_t *lot_attrs; /* array of attr #'s */ - uint32_t lot_var_sizes; /* how many aren't fixed size */ - uint32_t lot_attr_count; /* total attr count */ - list_t lot_idx_tab; /* should be only a couple of entries */ - int lot_instance; /* used with lot_hash to identify entry */ + sa_attr_type_t *lot_attrs; /**< array of attr #'s */ + uint32_t lot_var_sizes; /**< how many aren't fixed size */ + uint32_t lot_attr_count; /**< total attr count */ + list_t lot_idx_tab; /**< should be only a couple of entries */ + int lot_instance; /**< used with lot_hash to identify entry */ } sa_lot_t; -/* index table of offsets */ +/** index table of offsets */ typedef struct sa_idx_tab { list_node_t sa_next; sa_lot_t *sa_layout; uint16_t *sa_variable_lengths; refcount_t sa_refcount; - uint32_t *sa_idx_tab; /* array of offsets */ + uint32_t *sa_idx_tab; /**< array of offsets */ } sa_idx_tab_t; -/* +/** * Since the offset/index information into the actual data * will usually be identical we can share that information with * all handles that have the exact same offsets. 
@@ -140,47 +142,48 @@ struct sa_os { uint64_t sa_reg_attr_obj; uint64_t sa_layout_attr_obj; int sa_num_attrs; - sa_attr_table_t *sa_attr_table; /* private attr table */ + sa_attr_table_t *sa_attr_table; /**< private attr table */ sa_update_cb_t *sa_update_cb; - avl_tree_t sa_layout_num_tree; /* keyed by layout number */ - avl_tree_t sa_layout_hash_tree; /* keyed by layout hash value */ + avl_tree_t sa_layout_num_tree; /**< keyed by layout number */ + avl_tree_t sa_layout_hash_tree; /**< keyed by layout hash value */ int sa_user_table_sz; - sa_attr_type_t *sa_user_table; /* user name->attr mapping table */ + sa_attr_type_t *sa_user_table; /**< user name->attr mapping table */ }; -/* +#define SA_MAGIC 0x2F505A /* ZFS SA */ +/** * header for all bonus and spill buffers. + * * The header has a fixed portion with a variable number * of "lengths" depending on the number of variable sized * attribues which are determined by the "layout number" */ - -#define SA_MAGIC 0x2F505A /* ZFS SA */ typedef struct sa_hdr_phys { uint32_t sa_magic; - uint16_t sa_layout_info; /* Encoded with hdrsize and layout number */ - uint16_t sa_lengths[1]; /* optional sizes for variable length attrs */ + /** + * Encoded with hdrsize and layout number + * sa_hdr_phys -> sa_layout_info + * + \verbatim + 16 10 0 + +--------+-------+ + | hdrsz |layout | + +--------+-------+ + \endverbatim + * + * Bits 0-10 are the layout number + * Bits 11-16 are the size of the header. + * The hdrsize is the number * 8 + * + * For example. + * - hdrsz of 1 ==> 8 byte header + * - hdrsz of 2 ==> 16 byte header + */ + uint16_t sa_layout_info; + uint16_t sa_lengths[1]; /**< optional sizes for variable length attrs */ /* ... Data follows the lengths. */ } sa_hdr_phys_t; -/* - * sa_hdr_phys -> sa_layout_info - * - * 16 10 0 - * +--------+-------+ - * | hdrsz |layout | - * +--------+-------+ - * - * Bits 0-10 are the layout number - * Bits 11-16 are the size of the header. 
- * The hdrsize is the number * 8 - * - * For example. - * hdrsz of 1 ==> 8 byte header - * 2 ==> 16 byte header - * - */ - #define SA_HDR_LAYOUT_NUM(hdr) BF32_GET(hdr->sa_layout_info, 0, 10) #define SA_HDR_SIZE(hdr) BF32_GET_SB(hdr->sa_layout_info, 10, 6, 3, 0) #define SA_HDR_LAYOUT_INFO_ENCODE(x, num, size) \ @@ -202,20 +205,19 @@ typedef enum sa_data_op { SA_REMOVE } sa_data_op_t; -/* +/** * Opaque handle used for most sa functions * * This needs to be kept as small as possible. */ - struct sa_handle { kmutex_t sa_lock; dmu_buf_t *sa_bonus; dmu_buf_t *sa_spill; objset_t *sa_os; void *sa_userp; - sa_idx_tab_t *sa_bonus_tab; /* idx of bonus */ - sa_idx_tab_t *sa_spill_tab; /* only present if spill activated */ + sa_idx_tab_t *sa_bonus_tab; /**< idx of bonus */ + sa_idx_tab_t *sa_spill_tab; /**< only present if spill activated */ }; #define SA_GET_DB(hdl, type) \ diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/spa.h SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/spa.h --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/spa.h 2012-11-16 11:07:22.178456142 -0700 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/spa.h 2012-11-15 18:17:57.156453447 -0700 @@ -53,8 +53,9 @@ typedef struct ddt ddt_t; typedef struct ddt_entry ddt_entry_t; struct dsl_pool; -/* - * General-purpose 32-bit and 64-bit bitfield encodings. +/** + * \name General-purpose 32-bit and 64-bit bitfield encodings. + * \{ */ #define BF32_DECODE(x, low, len) P2PHASE((x) >> (low), 1U << (len)) #define BF64_DECODE(x, low, len) P2PHASE((x) >> (low), 1ULL << (len)) @@ -78,36 +79,37 @@ struct dsl_pool; BF32_SET(x, low, len, ((val) >> (shift)) - (bias)) #define BF64_SET_SB(x, low, len, shift, bias, val) \ BF64_SET(x, low, len, ((val) >> (shift)) - (bias)) +/** \} */ -/* - * We currently support nine block sizes, from 512 bytes to 128K. 
- * We could go higher, but the benefits are near-zero and the cost - * of COWing a giant block to modify one byte would become excessive. - */ #define SPA_MINBLOCKSHIFT 9 #define SPA_MAXBLOCKSHIFT 17 #define SPA_MINBLOCKSIZE (1ULL << SPA_MINBLOCKSHIFT) #define SPA_MAXBLOCKSIZE (1ULL << SPA_MAXBLOCKSHIFT) +/** + * We currently support nine block sizes, from 512 bytes to 128K. + * We could go higher, but the benefits are near-zero and the cost + * of COWing a giant block to modify one byte would become excessive. + */ #define SPA_BLOCKSIZES (SPA_MAXBLOCKSHIFT - SPA_MINBLOCKSHIFT + 1) -/* +/** * Size of block to hold the configuration data (a packed nvlist) */ #define SPA_CONFIG_BLOCKSIZE (1ULL << 14) -/* +#define SPA_LSIZEBITS 16 /* LSIZE up to 32M (2^16 * 512) */ +#define SPA_PSIZEBITS 16 /* PSIZE up to 32M (2^16 * 512) */ +/** * The DVA size encodings for LSIZE and PSIZE support blocks up to 32MB. * The ASIZE encoding should be at least 64 times larger (6 more bits) * to support up to 4-way RAID-Z mirror mode with worst-case gang block * overhead, three DVAs per bp, plus one more bit in case we do anything * else that expands the ASIZE. */ -#define SPA_LSIZEBITS 16 /* LSIZE up to 32M (2^16 * 512) */ -#define SPA_PSIZEBITS 16 /* PSIZE up to 32M (2^16 * 512) */ #define SPA_ASIZEBITS 24 /* ASIZE up to 64 times larger */ -/* +/** * All SPA data is represented by 128-bit data virtual addresses (DVAs). * The members of the dva_t should be considered opaque outside the SPA. */ @@ -115,88 +117,89 @@ typedef struct dva { uint64_t dva_word[2]; } dva_t; -/* +/** * Each block has a 256-bit checksum -- strong enough for cryptographic hashes. */ typedef struct zio_cksum { uint64_t zc_word[4]; } zio_cksum_t; -/* +#define SPA_BLKPTRSHIFT 7 /**< blkptr_t is 128 bytes */ +#define SPA_DVAS_PER_BP 3 /**< Number of DVAs in a bp */ +/** * Each block is described by its DVAs, time of birth, checksum, etc. 
* The word-by-word, bit-by-bit layout of the blkptr is as follows: - * - * 64 56 48 40 32 24 16 8 0 - * +-------+-------+-------+-------+-------+-------+-------+-------+ - * 0 | vdev1 | GRID | ASIZE | - * +-------+-------+-------+-------+-------+-------+-------+-------+ - * 1 |G| offset1 | - * +-------+-------+-------+-------+-------+-------+-------+-------+ - * 2 | vdev2 | GRID | ASIZE | - * +-------+-------+-------+-------+-------+-------+-------+-------+ - * 3 |G| offset2 | - * +-------+-------+-------+-------+-------+-------+-------+-------+ - * 4 | vdev3 | GRID | ASIZE | - * +-------+-------+-------+-------+-------+-------+-------+-------+ - * 5 |G| offset3 | - * +-------+-------+-------+-------+-------+-------+-------+-------+ - * 6 |BDX|lvl| type | cksum | comp | PSIZE | LSIZE | - * +-------+-------+-------+-------+-------+-------+-------+-------+ - * 7 | padding | - * +-------+-------+-------+-------+-------+-------+-------+-------+ - * 8 | padding | - * +-------+-------+-------+-------+-------+-------+-------+-------+ - * 9 | physical birth txg | - * +-------+-------+-------+-------+-------+-------+-------+-------+ - * a | logical birth txg | - * +-------+-------+-------+-------+-------+-------+-------+-------+ - * b | fill count | - * +-------+-------+-------+-------+-------+-------+-------+-------+ - * c | checksum[0] | - * +-------+-------+-------+-------+-------+-------+-------+-------+ - * d | checksum[1] | - * +-------+-------+-------+-------+-------+-------+-------+-------+ - * e | checksum[2] | - * +-------+-------+-------+-------+-------+-------+-------+-------+ - * f | checksum[3] | - * +-------+-------+-------+-------+-------+-------+-------+-------+ - * + \verbatim + 64 56 48 40 32 24 16 8 0 + +-------+-------+-------+-------+-------+-------+-------+-------+ + 0 | vdev1 | GRID | ASIZE | + +-------+-------+-------+-------+-------+-------+-------+-------+ + 1 |G| offset1 | + +-------+-------+-------+-------+-------+-------+-------+-------+ + 2 | 
vdev2 | GRID | ASIZE | + +-------+-------+-------+-------+-------+-------+-------+-------+ + 3 |G| offset2 | + +-------+-------+-------+-------+-------+-------+-------+-------+ + 4 | vdev3 | GRID | ASIZE | + +-------+-------+-------+-------+-------+-------+-------+-------+ + 5 |G| offset3 | + +-------+-------+-------+-------+-------+-------+-------+-------+ + 6 |BDX|lvl| type | cksum | comp | PSIZE | LSIZE | + +-------+-------+-------+-------+-------+-------+-------+-------+ + 7 | padding | + +-------+-------+-------+-------+-------+-------+-------+-------+ + 8 | padding | + +-------+-------+-------+-------+-------+-------+-------+-------+ + 9 | physical birth txg | + +-------+-------+-------+-------+-------+-------+-------+-------+ + a | logical birth txg | + +-------+-------+-------+-------+-------+-------+-------+-------+ + b | fill count | + +-------+-------+-------+-------+-------+-------+-------+-------+ + c | checksum[0] | + +-------+-------+-------+-------+-------+-------+-------+-------+ + d | checksum[1] | + +-------+-------+-------+-------+-------+-------+-------+-------+ + e | checksum[2] | + +-------+-------+-------+-------+-------+-------+-------+-------+ + f | checksum[3] | + +-------+-------+-------+-------+-------+-------+-------+-------+ + \endverbatim * Legend: - * - * vdev virtual device ID - * offset offset into virtual device - * LSIZE logical size - * PSIZE physical size (after compression) - * ASIZE allocated size (including RAID-Z parity and gang block headers) - * GRID RAID-Z layout information (reserved for future use) - * cksum checksum function - * comp compression function - * G gang block indicator - * B byteorder (endianness) - * D dedup - * X unused - * lvl level of indirection - * type DMU object type - * phys birth txg of block allocation; zero if same as logical birth txg - * log. 
birth transaction group in which the block was logically born - * fill count number of non-zero blocks under this bp - * checksum[4] 256-bit checksum of the data this bp describes + * - vdev virtual device ID + * - offset offset into virtual device + * - LSIZE logical size + * - PSIZE physical size (after compression) + * - ASIZE allocated size (including RAID-Z parity and gang block + * headers) + * - GRID RAID-Z layout information (reserved for future use) + * - cksum checksum function + * - comp compression function + * - G gang block indicator + * - B byteorder (endianness) + * - D dedup + * - X unused + * - lvl level of indirection + * - type DMU object type + * - phys birth txg of block allocation; zero if same as logical birth + * txg + * - log. birth transaction group in which the block was logically born + * - fill count number of non-zero blocks under this bp + * - checksum[4] 256-bit checksum of the data this bp describes */ -#define SPA_BLKPTRSHIFT 7 /* blkptr_t is 128 bytes */ -#define SPA_DVAS_PER_BP 3 /* Number of DVAs in a bp */ - typedef struct blkptr { - dva_t blk_dva[SPA_DVAS_PER_BP]; /* Data Virtual Addresses */ - uint64_t blk_prop; /* size, compression, type, etc */ - uint64_t blk_pad[2]; /* Extra space for the future */ - uint64_t blk_phys_birth; /* txg when block was allocated */ - uint64_t blk_birth; /* transaction group at birth */ - uint64_t blk_fill; /* fill count */ - zio_cksum_t blk_cksum; /* 256-bit checksum */ + dva_t blk_dva[SPA_DVAS_PER_BP]; /**< Data Virtual Addresses */ + uint64_t blk_prop; /**< size, compression, type, etc */ + uint64_t blk_pad[2]; /**< Extra space for the future */ + uint64_t blk_phys_birth; /**< txg when block was allocated */ + uint64_t blk_birth; /**< transaction group at birth */ + uint64_t blk_fill; /**< fill count */ + zio_cksum_t blk_cksum; /**< 256-bit checksum */ } blkptr_t; -/* - * Macros to get and set fields in a bp or DVA. +/** + * \name Macros to get and set fields in a bp or DVA. 
+ * \{ */ #define DVA_GET_ASIZE(dva) \ BF64_GET_SB((dva)->dva_word[0], 0, 24, SPA_MINBLOCKSHIFT, 0) @@ -304,8 +307,9 @@ typedef struct blkptr { #define BP_IDENTITY(bp) (&(bp)->blk_dva[0]) #define BP_IS_GANG(bp) DVA_GET_GANG(BP_IDENTITY(bp)) #define BP_IS_HOLE(bp) ((bp)->blk_birth == 0) +/** \} */ -/* BP_IS_RAIDZ(bp) assumes no block compression */ +/** BP_IS_RAIDZ(bp) assumes no block compression */ #define BP_IS_RAIDZ(bp) (DVA_GET_ASIZE(&(bp)->blk_dva[0]) > \ BP_GET_PSIZE(bp)) @@ -326,9 +330,9 @@ typedef struct blkptr { ZIO_SET_CHECKSUM(&(bp)->blk_cksum, 0, 0, 0, 0); \ } -/* - * Note: the byteorder is either 0 or -1, both of which are palindromes. - * This simplifies the endianness handling a bit. +/** + * \note The byteorder is either 0 or -1, both of which are palindromes. + * This simplifies the endianness handling a bit. */ #if BYTE_ORDER == _BIG_ENDIAN #define ZFS_HOST_BYTEORDER (0ULL) @@ -340,7 +344,7 @@ typedef struct blkptr { #define BP_SPRINTF_LEN 320 -/* +/** * This macro allows code sharing between zfs, libzpool, and mdb. * 'func' is either snprintf() or mdb_snprintf(). * 'ws' (whitespace) can be ' ' for single-line format, '\n' for multi-line. @@ -449,11 +453,13 @@ extern int spa_scan_get_stats(spa_t *spa #define SPA_ASYNC_REMOVE_DONE 0x40 #define SPA_ASYNC_REMOVE_STOP 0x80 -/* - * Controls the behavior of spa_vdev_remove(). +/** + * \name Controls the behavior of spa_vdev_remove(). + * \{ */ #define SPA_REMOVE_UNSPARE 0x01 #define SPA_REMOVE_DONE 0x02 +/** \} */ /* device manipulation */ extern int spa_vdev_add(spa_t *spa, nvlist_t *nvroot); @@ -486,18 +492,20 @@ extern int spa_scan(spa_t *spa, pool_sca extern int spa_scan_stop(spa_t *spa); /* spa syncing */ -extern void spa_sync(spa_t *spa, uint64_t txg); /* only for DMU use */ +extern void spa_sync(spa_t *spa, uint64_t txg); extern void spa_sync_allpools(void); -/* +/** + * Defer frees after this pass + * * DEFERRED_FREE must be large enough that regular blocks are not * deferred. 
XXX so can't we change it back to 1? */ -#define SYNC_PASS_DEFERRED_FREE 2 /* defer frees after this pass */ -#define SYNC_PASS_DONT_COMPRESS 4 /* don't compress after this pass */ -#define SYNC_PASS_REWRITE 1 /* rewrite new bps after this pass */ +#define SYNC_PASS_DEFERRED_FREE 2 +#define SYNC_PASS_DONT_COMPRESS 4 /**< don't compress after this pass */ +#define SYNC_PASS_REWRITE 1 /**< rewrite new bps after this pass */ -/* spa namespace global mutex */ +/** spa namespace global mutex */ extern kmutex_t spa_namespace_lock; /* @@ -533,7 +541,7 @@ extern boolean_t spa_refcount_zero(spa_t #define SCL_NONE 0x00 #define SCL_CONFIG 0x01 #define SCL_STATE 0x02 -#define SCL_L2ARC 0x04 /* hack until L2ARC 2.0 */ +#define SCL_L2ARC 0x04 /**< hack until L2ARC 2.0 */ #define SCL_ALLOC 0x08 #define SCL_ZIO 0x10 #define SCL_FREE 0x20 @@ -559,19 +567,18 @@ extern int spa_vdev_exit(spa_t *spa, vde extern void spa_vdev_state_enter(spa_t *spa, int oplock); extern int spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error); -/* Log state */ +/** Log state */ typedef enum spa_log_state { - SPA_LOG_UNKNOWN = 0, /* unknown log state */ - SPA_LOG_MISSING, /* missing log(s) */ - SPA_LOG_CLEAR, /* clear the log(s) */ - SPA_LOG_GOOD, /* log(s) are good */ + SPA_LOG_UNKNOWN = 0, /**< unknown log state */ + SPA_LOG_MISSING, /**< missing log(s) */ + SPA_LOG_CLEAR, /**< clear the log(s) */ + SPA_LOG_GOOD, /**< log(s) are good */ } spa_log_state_t; extern spa_log_state_t spa_get_log_state(spa_t *spa); extern void spa_set_log_state(spa_t *spa, spa_log_state_t state); extern int spa_offline_log(spa_t *spa); -/* Log claim callback */ extern void spa_claim_notify(zio_t *zio); /* Accessor functions */ @@ -637,7 +644,7 @@ extern int spa_mode(spa_t *spa); extern uint64_t zfs_strtonum(const char *str, char **nptr); #define strtonum(str, nptr) zfs_strtonum((str), (nptr)) -/* history logging */ +/** History logging */ typedef enum history_log_type { LOG_CMD_POOL_CREATE, LOG_CMD_NORMAL, @@ -716,7 +723,8 
@@ extern boolean_t spa_debug_enabled(spa_t zfs_dbgmsg(__VA_ARGS__); \ } -extern int spa_mode_global; /* mode, e.g. FREAD | FWRITE */ +/** mode, e.g. FREAD | FWRITE */ +extern int spa_mode_global; #ifdef __cplusplus } diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/spa_impl.h SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/spa_impl.h --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/spa_impl.h 2012-11-16 11:07:22.178456142 -0700 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/spa_impl.h 2012-11-15 23:09:16.568929036 -0700 @@ -50,21 +50,21 @@ typedef struct spa_error_entry { } spa_error_entry_t; typedef struct spa_history_phys { - uint64_t sh_pool_create_len; /* ending offset of zpool create */ - uint64_t sh_phys_max_off; /* physical EOF */ - uint64_t sh_bof; /* logical BOF */ - uint64_t sh_eof; /* logical EOF */ - uint64_t sh_records_lost; /* num of records overwritten */ + uint64_t sh_pool_create_len; /**< ending offset of zpool create */ + uint64_t sh_phys_max_off; /**< physical EOF */ + uint64_t sh_bof; /**< logical BOF */ + uint64_t sh_eof; /**< logical EOF */ + uint64_t sh_records_lost; /**< num of records overwritten */ } spa_history_phys_t; struct spa_aux_vdev { - uint64_t sav_object; /* MOS object for device list */ - nvlist_t *sav_config; /* cached device config */ - vdev_t **sav_vdevs; /* devices */ - int sav_count; /* number devices */ - boolean_t sav_sync; /* sync the device list */ - nvlist_t **sav_pending; /* pending device additions */ - uint_t sav_npending; /* # pending devices */ + uint64_t sav_object; /** PROC_CREATED spa_activate() - * PROC_CREATED -> PROC_ACTIVE spa_thread() - * PROC_ACTIVE -> PROC_DEACTIVATE spa_deactivate() - * PROC_DEACTIVATE -> PROC_GONE spa_thread() - * PROC_GONE -> PROC_NONE spa_deactivate() + * The states transitions are done as follows: + * \verbatim + From To Routine + PROC_NONE -> PROC_CREATED spa_activate() + 
PROC_CREATED -> PROC_ACTIVE spa_thread() + PROC_ACTIVE -> PROC_DEACTIVATE spa_deactivate() + PROC_DEACTIVATE -> PROC_GONE spa_thread() + PROC_GONE -> PROC_NONE spa_deactivate() + \endverbatim */ typedef enum spa_proc_state { - SPA_PROC_NONE, /* spa_proc = &p0, no process created */ - SPA_PROC_CREATED, /* spa_activate() has proc, is waiting */ - SPA_PROC_ACTIVE, /* taskqs created, spa_proc set */ - SPA_PROC_DEACTIVATE, /* spa_deactivate() requests process exit */ - SPA_PROC_GONE /* spa_thread() is exiting, spa_proc = &p0 */ + SPA_PROC_NONE, /**< spa_proc = &p0, no process created */ + SPA_PROC_CREATED, /**< spa_activate() has proc, is waiting */ + SPA_PROC_ACTIVE, /**< taskqs created, spa_proc set */ + SPA_PROC_DEACTIVATE, /**< spa_deactivate() requests process exit */ + SPA_PROC_GONE /**< spa_thread() is exiting, spa_proc = &p0 */ } spa_proc_state_t; struct spa { - /* - * Fields protected by spa_namespace_lock. + /** + * \name Protected by spa_namespace_lock + * \{ */ - char spa_name[MAXNAMELEN]; /* pool name */ - char *spa_comment; /* comment */ - avl_node_t spa_avl; /* node in spa_namespace_avl */ - nvlist_t *spa_config; /* last synced config */ - nvlist_t *spa_config_syncing; /* currently syncing config */ - nvlist_t *spa_config_splitting; /* config for splitting */ - nvlist_t *spa_load_info; /* info and errors from load */ - uint64_t spa_config_txg; /* txg of last config change */ - int spa_sync_pass; /* iterate-to-convergence */ - pool_state_t spa_state; /* pool state */ - int spa_inject_ref; /* injection references */ - uint8_t spa_sync_on; /* sync threads are running */ - spa_load_state_t spa_load_state; /* current load operation */ - uint64_t spa_import_flags; /* import specific flags */ + char spa_name[MAXNAMELEN]; /**< pool name */ + char *spa_comment; /**< comment */ + avl_node_t spa_avl; /**< node in spa_namespace_avl*/ + nvlist_t *spa_config; /**< last synced config */ + nvlist_t *spa_config_syncing; /**< currently syncing config */ + nvlist_t 
*spa_config_splitting; /**< config for splitting */ + nvlist_t *spa_load_info; /**< info and errors from load*/ + uint64_t spa_config_txg; /**< txg of last config change*/ + int spa_sync_pass; /**< iterate-to-convergence */ + pool_state_t spa_state; /**< pool state */ + int spa_inject_ref; /**< injection references */ + uint8_t spa_sync_on; /**< sync threads are running */ + spa_load_state_t spa_load_state; /**< current load operation */ + uint64_t spa_import_flags; /**< import specific flags */ taskq_t *spa_zio_taskq[ZIO_TYPES][ZIO_TASKQ_TYPES]; dsl_pool_t *spa_dsl_pool; - boolean_t spa_is_initializing; /* true while opening pool */ - metaslab_class_t *spa_normal_class; /* normal data class */ - metaslab_class_t *spa_log_class; /* intent log data class */ - uint64_t spa_first_txg; /* first txg after spa_open() */ - uint64_t spa_final_txg; /* txg of export/destroy */ - uint64_t spa_freeze_txg; /* freeze pool at this txg */ - uint64_t spa_load_max_txg; /* best initial ub_txg */ - uint64_t spa_claim_max_txg; /* highest claimed birth txg */ - timespec_t spa_loaded_ts; /* 1st successful open time */ - objset_t *spa_meta_objset; /* copy of dp->dp_meta_objset */ - txg_list_t spa_vdev_txg_list; /* per-txg dirty vdev list */ - vdev_t *spa_root_vdev; /* top-level vdev container */ - uint64_t spa_config_guid; /* config pool guid */ - uint64_t spa_load_guid; /* spa_load initialized guid */ - uint64_t spa_last_synced_guid; /* last synced guid */ - list_t spa_config_dirty_list; /* vdevs with dirty config */ - list_t spa_state_dirty_list; /* vdevs with dirty state */ - spa_aux_vdev_t spa_spares; /* hot spares */ - spa_aux_vdev_t spa_l2cache; /* L2ARC cache devices */ - nvlist_t *spa_label_features; /* Features for reading MOS */ - uint64_t spa_config_object; /* MOS object for pool config */ - uint64_t spa_config_generation; /* config generation number */ - uint64_t spa_syncing_txg; /* txg currently syncing */ - bpobj_t spa_deferred_bpobj; /* deferred-free bplist */ - bplist_t 
spa_free_bplist[TXG_SIZE]; /* bplist of stuff to free */ - uberblock_t spa_ubsync; /* last synced uberblock */ - uberblock_t spa_uberblock; /* current uberblock */ - boolean_t spa_extreme_rewind; /* rewind past deferred frees */ - uint64_t spa_last_io; /* lbolt of last non-scan I/O */ - kmutex_t spa_scrub_lock; /* resilver/scrub lock */ - uint64_t spa_scrub_inflight; /* in-flight scrub I/Os */ - kcondvar_t spa_scrub_io_cv; /* scrub I/O completion */ - uint8_t spa_scrub_active; /* active or suspended? */ - uint8_t spa_scrub_type; /* type of scrub we're doing */ - uint8_t spa_scrub_finished; /* indicator to rotate logs */ - uint8_t spa_scrub_started; /* started since last boot */ - uint8_t spa_scrub_reopen; /* scrub doing vdev_reopen */ - uint64_t spa_scan_pass_start; /* start time per pass/reboot */ - uint64_t spa_scan_pass_exam; /* examined bytes per pass */ - kmutex_t spa_async_lock; /* protect async state */ - kthread_t *spa_async_thread; /* thread doing async task */ - int spa_async_suspended; /* async tasks suspended */ - kcondvar_t spa_async_cv; /* wait for thread_exit() */ - uint16_t spa_async_tasks; /* async task mask */ - char *spa_root; /* alternate root directory */ - uint64_t spa_ena; /* spa-wide ereport ENA */ - int spa_last_open_failed; /* error if last open failed */ - uint64_t spa_last_ubsync_txg; /* "best" uberblock txg */ - uint64_t spa_last_ubsync_txg_ts; /* timestamp from that ub */ - uint64_t spa_load_txg; /* ub txg that loaded */ - uint64_t spa_load_txg_ts; /* timestamp from that ub */ - uint64_t spa_load_meta_errors; /* verify metadata err count */ - uint64_t spa_load_data_errors; /* verify data err count */ - uint64_t spa_verify_min_txg; /* start txg of verify scrub */ - kmutex_t spa_errlog_lock; /* error log lock */ - uint64_t spa_errlog_last; /* last error log object */ - uint64_t spa_errlog_scrub; /* scrub error log object */ - kmutex_t spa_errlist_lock; /* error list/ereport lock */ - avl_tree_t spa_errlist_last; /* last error list */ - 
avl_tree_t spa_errlist_scrub; /* scrub error list */ - uint64_t spa_deflate; /* should we deflate? */ - uint64_t spa_history; /* history object */ - kmutex_t spa_history_lock; /* history lock */ - vdev_t *spa_pending_vdev; /* pending vdev additions */ - kmutex_t spa_props_lock; /* property lock */ - uint64_t spa_pool_props_object; /* object for properties */ - uint64_t spa_bootfs; /* default boot filesystem */ - uint64_t spa_failmode; /* failure mode for the pool */ - uint64_t spa_delegation; /* delegation on/off */ - list_t spa_config_list; /* previous cache file(s) */ - zio_t *spa_async_zio_root; /* root of all async I/O */ - zio_t *spa_suspend_zio_root; /* root of all suspended I/O */ - kmutex_t spa_suspend_lock; /* protects suspend_zio_root */ - kcondvar_t spa_suspend_cv; /* notification of resume */ - uint8_t spa_suspended; /* pool is suspended */ - uint8_t spa_claiming; /* pool is doing zil_claim() */ - boolean_t spa_debug; /* debug enabled? */ - boolean_t spa_is_root; /* pool is root */ - int spa_minref; /* num refs when first opened */ - int spa_mode; /* FREAD | FWRITE */ - spa_log_state_t spa_log_state; /* log state */ - uint64_t spa_autoexpand; /* lun expansion on/off */ - ddt_t *spa_ddt[ZIO_CHECKSUM_FUNCTIONS]; /* in-core DDTs */ - uint64_t spa_ddt_stat_object; /* DDT statistics */ - uint64_t spa_dedup_ditto; /* dedup ditto threshold */ - uint64_t spa_dedup_checksum; /* default dedup checksum */ - uint64_t spa_dspace; /* dspace in normal class */ - kmutex_t spa_vdev_top_lock; /* dueling offline/remove */ - kmutex_t spa_proc_lock; /* protects spa_proc* */ - kcondvar_t spa_proc_cv; /* spa_proc_state transitions */ - spa_proc_state_t spa_proc_state; /* see definition */ - struct proc *spa_proc; /* "zpool-poolname" process */ - uint64_t spa_did; /* if procp != p0, did of t1 */ - kthread_t *spa_trim_thread; /* thread sending TRIM I/Os */ - kmutex_t spa_trim_lock; /* protects spa_trim_cv */ - kcondvar_t spa_trim_cv; /* used to notify TRIM thread */ - boolean_t 
spa_autoreplace; /* autoreplace set in open */ - int spa_vdev_locks; /* locks grabbed */ - uint64_t spa_creation_version; /* version at pool creation */ - uint64_t spa_prev_software_version; /* See ub_software_version */ - uint64_t spa_feat_for_write_obj; /* required to write to pool */ - uint64_t spa_feat_for_read_obj; /* required to read from pool */ - uint64_t spa_feat_desc_obj; /* Feature descriptions */ - int64_t spa_ccw_fail_time; /* Conf cache write fail time */ + boolean_t spa_is_initializing; /**< true while opening pool */ + metaslab_class_t *spa_normal_class; /**< normal data class */ + metaslab_class_t *spa_log_class; /**< intent log data class */ + uint64_t spa_first_txg; /**dp_meta_objset*/ + txg_list_t spa_vdev_txg_list; /**< per-txg dirty vdev list */ + vdev_t *spa_root_vdev; /**< top-level vdev container */ + uint64_t spa_config_guid; /**< config pool guid */ + uint64_t spa_load_guid; /**Implementation / Performance Notes * * The ZAP is intended to operate most efficiently on attributes with * short (49 bytes or less) names and single 8-byte values, for which @@ -85,35 +86,41 @@ extern "C" { #endif -/* - * The matchtype specifies which entry will be accessed. - * MT_EXACT: only find an exact match (non-normalized) - * MT_FIRST: find the "first" normalized (case and Unicode - * form) match; the designated "first" match will not change as long - * as the set of entries with this normalization doesn't change - * MT_BEST: if there is an exact match, find that, otherwise find the - * first normalized match +/** + * Specifies matching criteria for ZAP lookups. */ typedef enum matchtype { + /** Only find an exact match (non-normalized) */ MT_EXACT, + /** + * If there is an exact match, find that, otherwise find the + * first normalized match. + */ MT_BEST, + /** + * Find the "first" normalized (case and Unicode form) match; + * the designated "first" match will not change as long as the + * set of entries with this normalization doesn't change. 
+ */ MT_FIRST } matchtype_t; typedef enum zap_flags { - /* Use 64-bit hash value (serialized cursors will always use 64-bits) */ + /** + * Use 64-bit hash value (serialized cursors will always use 64-bits) + */ ZAP_FLAG_HASH64 = 1 << 0, - /* Key is binary, not string (zap_add_uint64() can be used) */ + /** Key is binary, not string (zap_add_uint64() can be used) */ ZAP_FLAG_UINT64_KEY = 1 << 1, - /* + /** * First word of key (which must be an array of uint64) is * already randomly distributed. */ ZAP_FLAG_PRE_HASHED_KEY = 1 << 2, } zap_flags_t; -/* +/** * Create a new zapobj with no attributes and return its object number. * MT_EXACT will cause the zap object to only support MT_EXACT lookups, * otherwise any matchtype can be used for lookups. @@ -136,7 +143,7 @@ uint64_t zap_create_flags(objset_t *os, uint64_t zap_create_link(objset_t *os, dmu_object_type_t ot, uint64_t parent_obj, const char *name, dmu_tx_t *tx); -/* +/** * Create a new zapobj with no attributes from the given (unallocated) * object number. */ @@ -151,7 +158,7 @@ int zap_create_claim_norm(objset_t *ds, * following routines. */ -/* +/** * Destroy this zapobj and all its attributes. * * Frees the object number using dmu_object_free. @@ -164,39 +171,37 @@ int zap_destroy(objset_t *ds, uint64_t z * 'integer_size' is in bytes, and must be 1, 2, 4, or 8. */ -/* +/** * Retrieve the contents of the attribute with the given name. * - * If the requested attribute does not exist, the call will fail and - * return ENOENT. - * - * If 'integer_size' is smaller than the attribute's integer size, the - * call will fail and return EINVAL. - * - * If 'integer_size' is equal to or larger than the attribute's integer - * size, the call will succeed and return 0. * When converting to a - * larger integer size, the integers will be treated as unsigned (ie. no - * sign-extension will be performed). - * - * 'num_integers' is the length (in integers) of 'buf'. 
+ * When converting to a larger integer size, the integers will be treated as + * unsigned (ie. no sign-extension will be performed). * * If the attribute is longer than the buffer, as many integers as will - * fit will be transferred to 'buf'. If the entire attribute was not - * transferred, the call will return EOVERFLOW. + * fit will be transferred to 'buf'. * - * If rn_len is nonzero, realname will be set to the name of the found - * entry (which may be different from the requested name if matchtype is - * not MT_EXACT). + * \param[in] num_integers The length (in integers) of 'buf' * - * If normalization_conflictp is not NULL, it will be set if there is - * another name with the same case/unicode normalized form. + * \retval 0 Success + * \retval ENOENT The requested attribute does not exist + * \retval EINVAL integer_size is smaller than the attribute's integer + * size + * \retval EOVERFLOW The entire attribute was not transferred */ int zap_lookup(objset_t *ds, uint64_t zapobj, const char *name, uint64_t integer_size, uint64_t num_integers, void *buf); + +/** + * \param[in] rn_len If nonzero, realname will be set to the name of the + * found entry (which may be different from the requested + * name if matchtype is not MT_EXACT). + * \param[in,out] ncp If not NULL, it will be set if there is another name + * with the same case/unicode normalized form. 
+ */ int zap_lookup_norm(objset_t *ds, uint64_t zapobj, const char *name, uint64_t integer_size, uint64_t num_integers, void *buf, matchtype_t mt, char *realname, int rn_len, - boolean_t *normalization_conflictp); + boolean_t *ncp); int zap_lookup_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key, int key_numints, uint64_t integer_size, uint64_t num_integers, void *buf); int zap_contains(objset_t *ds, uint64_t zapobj, const char *name); @@ -206,11 +211,10 @@ int zap_prefetch_uint64(objset_t *os, ui int zap_count_write(objset_t *os, uint64_t zapobj, const char *name, int add, uint64_t *towrite, uint64_t *tooverwrite); -/* +/** * Create an attribute with the given name and value. * - * If an attribute with the given name already exists, the call will - * fail and return EEXIST. + * \retval EEXIST An attribute with the given name already exists */ int zap_add(objset_t *ds, uint64_t zapobj, const char *key, int integer_size, uint64_t num_integers, @@ -219,7 +223,7 @@ int zap_add_uint64(objset_t *ds, uint64_ int key_numints, int integer_size, uint64_t num_integers, const void *val, dmu_tx_t *tx); -/* +/** * Set the attribute with the given name to the given value. If an * attribute with the given name does not exist, it will be created. If * an attribute with the given name already exists, the previous value @@ -233,23 +237,21 @@ int zap_update_uint64(objset_t *os, uint int key_numints, int integer_size, uint64_t num_integers, const void *val, dmu_tx_t *tx); -/* +/** * Get the length (in integers) and the integer size of the specified * attribute. * - * If the requested attribute does not exist, the call will fail and - * return ENOENT. 
+ * \retval ENOENT The requested attribute does not exist */ int zap_length(objset_t *ds, uint64_t zapobj, const char *name, uint64_t *integer_size, uint64_t *num_integers); int zap_length_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key, int key_numints, uint64_t *integer_size, uint64_t *num_integers); -/* +/** * Remove the specified attribute. * - * If the specified attribute does not exist, the call will fail and - * return ENOENT. + * \retval ENOENT The specified attribute does not exist */ int zap_remove(objset_t *ds, uint64_t zapobj, const char *name, dmu_tx_t *tx); int zap_remove_norm(objset_t *ds, uint64_t zapobj, const char *name, @@ -257,37 +259,47 @@ int zap_remove_norm(objset_t *ds, uint64 int zap_remove_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key, int key_numints, dmu_tx_t *tx); -/* - * Returns (in *count) the number of attributes in the specified zap - * object. +/** + * Get the number of attributes in the specified zap object. + * + * \param[out] count The number of attributes */ int zap_count(objset_t *ds, uint64_t zapobj, uint64_t *count); -/* - * Returns (in name) the name of the entry whose (value & mask) - * (za_first_integer) is value, or ENOENT if not found. The string - * pointed to by name must be at least 256 bytes long. If mask==0, the - * match must be exact (ie, same as mask=-1ULL). +/** + * Search for an entry by za_first_integer + * + * \param[in] mask If mask==0, the match must be exact (ie, same as + * mask=-1ULL) + * \param[out] name The name of the entry whose (za_first_integer & mask) == + * (value & mask). The string pointed to by name must be + * at least 256 bytes long. + * + * \retval ENOENT No entry was found for the given value and mask */ int zap_value_search(objset_t *os, uint64_t zapobj, uint64_t value, uint64_t mask, char *name); -/* +/** * Transfer all the entries from fromobj into intoobj. Only works on * int_size=8 num_integers=1 values. Fails if there are any duplicated * entries. 
 */ int zap_join(objset_t *os, uint64_t fromobj, uint64_t intoobj, dmu_tx_t *tx); -/* Same as zap_join, but set the values to 'value'. */ +/**
+ * Same as zap_join, but set the values to 'value'.
+ */ int zap_join_key(objset_t *os, uint64_t fromobj, uint64_t intoobj, uint64_t value, dmu_tx_t *tx); -/* Same as zap_join, but add together any duplicated entries. */ +/**
+ * Same as zap_join, but add together any duplicated entries.
+ */ int zap_join_increment(objset_t *os, uint64_t fromobj, uint64_t intoobj, dmu_tx_t *tx); -/* +/** * Manipulate entries where the name + value are the "same" (the name is * a stringified version of the value). */ @@ -297,7 +309,9 @@ int zap_lookup_int(objset_t *os, uint64_ int zap_increment_int(objset_t *os, uint64_t obj, uint64_t key, int64_t delta, dmu_tx_t *tx); -/* Here the key is an int and the value is a different int. */ +/**
+ * Here the key is an int and the value is a different int.
+ */ int zap_add_int_key(objset_t *os, uint64_t obj, uint64_t key, uint64_t value, dmu_tx_t *tx); int zap_update_int_key(objset_t *os, uint64_t obj, @@ -323,13 +337,13 @@ typedef struct zap_cursor { typedef struct { int za_integer_length; - /* + /** * za_normalization_conflict will be set if there are additional * entries with this normalized form (eg, "foo" and "Foo"). */ boolean_t za_normalization_conflict; uint64_t za_num_integers; - uint64_t za_first_integer; /* no sign extension for <8byte ints */ + uint64_t za_first_integer;/**< no sign extension for <8byte ints */ char za_name[MAXNAMELEN]; } zap_attribute_t; @@ -340,25 +354,26 @@ typedef struct { * persistent across system calls (and across reboot, even). */ -/* +/** * Initialize a zap cursor, pointing to the "first" attribute of the * zapobj. You must _fini the cursor when you are done with it. */ void zap_cursor_init(zap_cursor_t *zc, objset_t *ds, uint64_t zapobj); void zap_cursor_fini(zap_cursor_t *zc); -/* - * Get the attribute currently pointed to by the cursor.
Returns - * ENOENT if at the end of the attributes. +/** + * Get the attribute currently pointed to by the cursor. + * + * \retval ENOENT At the end of the attributes. */ int zap_cursor_retrieve(zap_cursor_t *zc, zap_attribute_t *za); -/* +/** * Advance the cursor to the next attribute. */ void zap_cursor_advance(zap_cursor_t *zc); -/* +/** * Get a persistent cookie pointing to the current position of the zap * cursor. The low 4 bits in the cookie are always zero, and thus can * be used as to differentiate a serialized cookie from a different type @@ -386,16 +401,16 @@ void zap_cursor_init_serialized(zap_curs #define ZAP_HISTOGRAM_SIZE 10 typedef struct zap_stats { - /* + /** * Size of the pointer table (in number of entries). * This is always a power of 2, or zero if it's a microzap. * In general, it should be considerably greater than zs_num_leafs. */ uint64_t zs_ptrtbl_len; - uint64_t zs_blocksize; /* size of zap blocks */ + uint64_t zs_blocksize; /**< size of zap blocks */ - /* + /** * The number of blocks used. Note that some blocks may be * wasted because old ptrtbl's and large name/value blocks are * not reused. (Although their space is reclaimed, we don't @@ -403,39 +418,45 @@ typedef struct zap_stats { */ uint64_t zs_num_blocks; - /* - * Pointer table values from zap_ptrtbl in the zap_phys_t - */ - uint64_t zs_ptrtbl_nextblk; /* next (larger) copy start block */ - uint64_t zs_ptrtbl_blks_copied; /* number source blocks copied */ - uint64_t zs_ptrtbl_zt_blk; /* starting block number */ - uint64_t zs_ptrtbl_zt_numblks; /* number of blocks */ - uint64_t zs_ptrtbl_zt_shift; /* bits to index it */ - - /* - * Values of the other members of the zap_phys_t - */ - uint64_t zs_block_type; /* ZBT_HEADER */ - uint64_t zs_magic; /* ZAP_MAGIC */ - uint64_t zs_num_leafs; /* The number of leaf blocks */ - uint64_t zs_num_entries; /* The number of zap entries */ - uint64_t zs_salt; /* salt to stir into hash function */ - - /* - * Histograms. 
For all histograms, the last index - * (ZAP_HISTOGRAM_SIZE-1) includes any values which are greater - * than what can be represented. For example - * zs_leafs_with_n5_entries[ZAP_HISTOGRAM_SIZE-1] is the number + /** + * \name Pointer table values from zap_ptrtbl in the zap_phys_t + * \{ + */ + uint64_t zs_ptrtbl_nextblk; /**< next (larger) copy start block */ + uint64_t zs_ptrtbl_blks_copied; /**< number source blocks copied */ + uint64_t zs_ptrtbl_zt_blk; /**< starting block number */ + uint64_t zs_ptrtbl_zt_numblks; /**< number of blocks */ + uint64_t zs_ptrtbl_zt_shift; /**< bits to index it */ + /** \} */ + + /** + * \name Values of the other members of the zap_phys_t + * \{ + */ + uint64_t zs_block_type; /**< ZBT_HEADER */ + uint64_t zs_magic; /**< ZAP_MAGIC */ + uint64_t zs_num_leafs; /**< The number of leaf blocks */ + uint64_t zs_num_entries; /**< The number of zap entries */ + uint64_t zs_salt; /**< salt to stir into hash function */ + /** \} */ + + /** + * \name Histograms + * + * For all histograms, the last index (ZAP_HISTOGRAM_SIZE-1) includes + * any values which are greater than what can be represented. For + * example zs_leafs_with_n5_entries[ZAP_HISTOGRAM_SIZE-1] is the number * of leafs with more than 45 entries. + * \{ */ - /* + /** * zs_leafs_with_n_pointers[n] is the number of leafs with * 2^n pointers to it. */ uint64_t zs_leafs_with_2n_pointers[ZAP_HISTOGRAM_SIZE]; - /* + /** * zs_leafs_with_n_entries[n] is the number of leafs with * [n*5, (n+1)*5) entries. In the current implementation, there * can be at most 55 entries in any block, but there may be @@ -444,33 +465,35 @@ typedef struct zap_stats { */ uint64_t zs_blocks_with_n5_entries[ZAP_HISTOGRAM_SIZE]; - /* + /** * zs_leafs_n_tenths_full[n] is the number of leafs whose * fullness is in the range [n/10, (n+1)/10). */ uint64_t zs_blocks_n_tenths_full[ZAP_HISTOGRAM_SIZE]; - /* + /** * zs_entries_using_n_chunks[n] is the number of entries which * consume n 24-byte chunks. 
(Note, large names/values only use * one chunk, but contribute to zs_num_blocks_large.) */ uint64_t zs_entries_using_n_chunks[ZAP_HISTOGRAM_SIZE]; - /* + /** * zs_buckets_with_n_entries[n] is the number of buckets (each * leaf has 64 buckets) with n entries. * zs_buckets_with_n_entries[1] should be very close to * zs_num_entries. */ uint64_t zs_buckets_with_n_entries[ZAP_HISTOGRAM_SIZE]; + /** \} */ } zap_stats_t; -/* - * Get statistics about a ZAP object. Note: you need to be aware of the - * internal implementation of the ZAP to correctly interpret some of the - * statistics. This interface shouldn't be relied on unless you really - * know what you're doing. +/** + * Get statistics about a ZAP object. + * + * \note You need to be aware of the internal implementation of the ZAP to + * correctly interpret some of the statistics. This interface + * shouldn't be relied on unless you really know what you're doing. */ int zap_get_stats(objset_t *ds, uint64_t zapobj, zap_stats_t *zs); diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zap_impl.h SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zap_impl.h --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zap_impl.h 2012-10-17 17:00:59.864590895 -0600 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zap_impl.h 2012-11-15 09:31:38.001131922 -0700 @@ -49,24 +49,24 @@ extern int fzap_default_block_shift; typedef struct mzap_ent_phys { uint64_t mze_value; uint32_t mze_cd; - uint16_t mze_pad; /* in case we want to chain them someday */ + uint16_t mze_pad; /**< in case we want to chain them someday */ char mze_name[MZAP_NAME_LEN]; } mzap_ent_phys_t; typedef struct mzap_phys { - uint64_t mz_block_type; /* ZBT_MICRO */ + uint64_t mz_block_type; /**< ZBT_MICRO */ uint64_t mz_salt; uint64_t mz_normflags; uint64_t mz_pad[5]; + /** actually variable size depending on block size */ mzap_ent_phys_t mz_chunk[1]; - /* actually variable size 
depending on block size */ } mzap_phys_t; typedef struct mzap_ent { avl_node_t mze_node; int mze_chunkid; uint64_t mze_hash; - uint32_t mze_cd; /* copy from mze_phys->mze_cd */ + uint32_t mze_cd; /**< copy from mze_phys->mze_cd */ } mzap_ent_t; #define MZE_PHYS(zap, mze) \ @@ -92,13 +92,13 @@ struct zap_leaf; #define ZBT_MICRO ((1ULL << 63) + 3) /* any other values are ptrtbl blocks */ -/* +/** * the embedded pointer table takes up half a block: * block size / entry size (2^3) / 2 */ #define ZAP_EMBEDDED_PTRTBL_SHIFT(zap) (FZAP_BLOCK_SHIFT(zap) - 3 - 1) -/* +/** * The embedded pointer table starts half-way through the block. Since * the pointer table itself is half the block, it starts at (64-bit) * word number (1<zap_f.zap_phys) \ [(idx) + (1<l_bs) - hash entry size (2) * number of hash * entries - header space (2*chunksize) @@ -49,7 +49,7 @@ struct zap_stats; (((1<<(l)->l_bs) - 2*ZAP_LEAF_HASH_NUMENTRIES(l)) / \ ZAP_LEAF_CHUNKSIZE - 2) -/* +/** * The amount of space within the chunk available for the array is: * chunk size - space for type (1) - space for next pointer (2) */ @@ -58,7 +58,7 @@ struct zap_stats; #define ZAP_LEAF_ARRAY_NCHUNKS(bytes) \ (((bytes)+ZAP_LEAF_ARRAY_BYTES-1)/ZAP_LEAF_ARRAY_BYTES) -/* +/** * Low water mark: when there are only this many chunks free, start * growing the ptrtbl. Ideally, this should be larger than a * "reasonably-sized" entry. 20 chunks is more than enough for the @@ -67,7 +67,7 @@ struct zap_stats; */ #define ZAP_LEAF_LOW_WATER (20) -/* +/** * The leaf hash table has block size / 2^5 (32) number of entries, * which should be more than enough for the maximum number of entries, * which is less than block size / CHUNKSIZE (24) / minimum number of @@ -76,7 +76,7 @@ struct zap_stats; #define ZAP_LEAF_HASH_SHIFT(l) ((l)->l_bs - 5) #define ZAP_LEAF_HASH_NUMENTRIES(l) (1 << ZAP_LEAF_HASH_SHIFT(l)) -/* +/** * The chunks start immediately after the hash table. 
The end of the * hash table is at l_hash + HASH_NUMENTRIES, which we simply cast to a * chunk_t. @@ -95,28 +95,32 @@ typedef enum zap_chunk_type { #define ZLF_ENTRIES_CDSORTED (1<<0) -/* - * TAKE NOTE: - * If zap_leaf_phys_t is modified, zap_leaf_byteswap() must be modified. +/** + * \note If zap_leaf_phys_t is modified, zap_leaf_byteswap() must be modified. */ typedef struct zap_leaf_phys { struct zap_leaf_header { - uint64_t lh_block_type; /* ZBT_LEAF */ + /** + * \name Accessible to ZAP + * \{ */ + uint64_t lh_block_type; /**< ZBT_LEAF */ uint64_t lh_pad1; - uint64_t lh_prefix; /* hash prefix of this leaf */ - uint32_t lh_magic; /* ZAP_LEAF_MAGIC */ - uint16_t lh_nfree; /* number free chunks */ - uint16_t lh_nentries; /* number of entries */ - uint16_t lh_prefix_len; /* num bits used to id this */ - -/* above is accessable to zap, below is zap_leaf private */ - - uint16_t lh_freelist; /* chunk head of free list */ - uint8_t lh_flags; /* ZLF_* flags */ + uint64_t lh_prefix; /**< hash prefix of this leaf */ + uint32_t lh_magic; /**< ZAP_LEAF_MAGIC */ + uint16_t lh_nfree; /**< number free chunks */ + uint16_t lh_nentries; /**< number of entries */ + uint16_t lh_prefix_len; /**< num bits used to id this */ + /** + * \} + * \name Private to zap_leaf + * \{ */ + uint16_t lh_freelist; /**< chunk head of free list */ + uint8_t lh_flags; /**< ZLF_* flags */ uint8_t lh_pad2[11]; - } l_hdr; /* 2 24-byte chunks */ + /** \} */ + } l_hdr; /**< 2 24-byte chunks */ - /* + /** * The header is followed by a hash table with * ZAP_LEAF_HASH_NUMENTRIES(zap) entries. 
The hash table is * followed by an array of ZAP_LEAF_NUMCHUNKS(zap) @@ -129,69 +133,81 @@ typedef struct zap_leaf_phys { typedef union zap_leaf_chunk { struct zap_leaf_entry { - uint8_t le_type; /* always ZAP_CHUNK_ENTRY */ - uint8_t le_value_intlen; /* size of value's ints */ - uint16_t le_next; /* next entry in hash chain */ - uint16_t le_name_chunk; /* first chunk of the name */ - uint16_t le_name_numints; /* ints in name (incl null) */ - uint16_t le_value_chunk; /* first chunk of the value */ - uint16_t le_value_numints; /* value length in ints */ - uint32_t le_cd; /* collision differentiator */ - uint64_t le_hash; /* hash value of the name */ + uint8_t le_type; /**< always ZAP_CHUNK_ENTRY */ + uint8_t le_value_intlen; /**< size of value's ints */ + uint16_t le_next; /**< next entry in hash chain */ + uint16_t le_name_chunk; /**< first chunk of the name */ + uint16_t le_name_numints; /**< ints in name (incl null) */ + uint16_t le_value_chunk; /**< first chunk of the value */ + uint16_t le_value_numints; /**< value length in ints */ + uint32_t le_cd; /**< collision differentiator */ + uint64_t le_hash; /**< hash value of the name */ } l_entry; struct zap_leaf_array { - uint8_t la_type; /* always ZAP_CHUNK_ARRAY */ + uint8_t la_type; /**< always ZAP_CHUNK_ARRAY */ uint8_t la_array[ZAP_LEAF_ARRAY_BYTES]; - uint16_t la_next; /* next blk or CHAIN_END */ + uint16_t la_next; /**< next blk or CHAIN_END */ } l_array; struct zap_leaf_free { - uint8_t lf_type; /* always ZAP_CHUNK_FREE */ + uint8_t lf_type; /**< always ZAP_CHUNK_FREE */ uint8_t lf_pad[ZAP_LEAF_ARRAY_BYTES]; - uint16_t lf_next; /* next in free list, or CHAIN_END */ + uint16_t lf_next; /**< next in free list, or CHAIN_END */ } l_free; } zap_leaf_chunk_t; typedef struct zap_leaf { krwlock_t l_rwlock; - uint64_t l_blkid; /* 1<z_fuid_size + (SPA_MINBLOCKSIZE << 1)) #define FUID_INDEX(x) ((x) >> 32) #define FUID_RID(x) ((x) & 0xffffffff) #define FUID_ENCODE(idx, rid) (((uint64_t)(idx) << 32) | (rid)) -/* +/** + 
* \file zfs_fuid.h + * * FUIDs cause problems for the intent log * we need to replay the creation of the FUID, * but we can't count on the idmapper to be around @@ -65,26 +67,26 @@ typedef enum { * just the unique 12. */ -/* +/** * The FUIDs in the log will index into * domain string table and the bottom half will be the rid. * Used for mapping ephemeral uid/gid during ACL setting to FUIDs */ typedef struct zfs_fuid { list_node_t z_next; - uint64_t z_id; /* uid/gid being converted to fuid */ - uint64_t z_domidx; /* index in AVL domain table */ - uint64_t z_logfuid; /* index for domain in log */ + uint64_t z_id; /**< uid/gid being converted to fuid */ + uint64_t z_domidx; /**< index in AVL domain table */ + uint64_t z_logfuid; /**< index for domain in log */ } zfs_fuid_t; -/* list of unique domains */ +/** list of unique domains */ typedef struct zfs_fuid_domain { list_node_t z_next; - uint64_t z_domidx; /* AVL tree idx */ - const char *z_domain; /* domain string */ + uint64_t z_domidx; /**< AVL tree idx */ + const char *z_domain; /**< domain string */ } zfs_fuid_domain_t; -/* +/** * FUID information necessary for logging create, setattr, and setacl. 
*/ typedef struct zfs_fuid_info { @@ -92,10 +94,10 @@ typedef struct zfs_fuid_info { list_t z_domains; uint64_t z_fuid_owner; uint64_t z_fuid_group; - char **z_domain_table; /* Used during replay */ - uint32_t z_fuid_cnt; /* How many fuids in z_fuids */ - uint32_t z_domain_cnt; /* How many domains */ - size_t z_domain_str_sz; /* len of domain strings z_domain list */ + char **z_domain_table; /**< Used during replay */ + uint32_t z_fuid_cnt; /**< How many fuids in z_fuids */ + uint32_t z_domain_cnt; /**< How many domains */ + size_t z_domain_str_sz; /**< len of domain strings z_domain list */ } zfs_fuid_info_t; #ifdef _KERNEL diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zfs_ioctl.h SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zfs_ioctl.h --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zfs_ioctl.h 2012-10-17 17:00:59.866590477 -0600 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zfs_ioctl.h 2012-11-15 11:40:16.398963426 -0700 @@ -51,7 +51,7 @@ extern "C" { * send stream header. */ -/* +/** * Header types for zfs send streams. */ typedef enum drr_headertype { @@ -73,32 +73,15 @@ typedef enum drr_headertype { #define DMU_BACKUP_FEATURE_DEDUPPROPS (0x2) #define DMU_BACKUP_FEATURE_SA_SPILL (0x4) -/* +/** * Mask of all supported backup features */ #define DMU_BACKUP_FEATURE_MASK (DMU_BACKUP_FEATURE_DEDUP | \ DMU_BACKUP_FEATURE_DEDUPPROPS | DMU_BACKUP_FEATURE_SA_SPILL) -/* Are all features in the given flag word currently supported? */ +/** Are all features in the given flag word currently supported? 
*/ #define DMU_STREAM_SUPPORTED(x) (!((x) & ~DMU_BACKUP_FEATURE_MASK)) -/* - * The drr_versioninfo field of the dmu_replay_record has the - * following layout: - * - * 64 56 48 40 32 24 16 8 0 - * +-------+-------+-------+-------+-------+-------+-------+-------+ - * | reserved | feature-flags |C|S| - * +-------+-------+-------+-------+-------+-------+-------+-------+ - * - * The low order two bits indicate the header type: SUBSTREAM (0x1) - * or COMPOUNDSTREAM (0x2). Using two bits for this is historical: - * this field used to be a version number, where the two version types - * were 1 and 2. Using two bits for this allows earlier versions of - * the code to be able to recognize send streams that don't use any - * of the features indicated by feature flags. - */ - #define DMU_BACKUP_MAGIC 0x2F5bacbacULL #define DRR_FLAG_CLONE (1<<0) @@ -112,12 +95,32 @@ typedef enum drr_headertype { #define DRR_IS_DEDUP_CAPABLE(flags) ((flags) & DRR_CHECKSUM_DEDUP) -/* - * zfs ioctl command structure +/** + * \brief zfs ioctl command structure */ struct drr_begin { uint64_t drr_magic; - uint64_t drr_versioninfo; /* was drr_version */ + /** + * formerly named drr_version + * + * The drr_versioninfo field of the dmu_replay_record has the + * following layout: + * + \verbatim + 64 56 48 40 32 24 16 8 0 + +-------+-------+-------+-------+-------+-------+-------+-------+ + | reserved | feature-flags |C|S| + +-------+-------+-------+-------+-------+-------+-------+-------+ + \endverbatim + * + * The low order two bits indicate the header type: SUBSTREAM (0x1) + * or COMPOUNDSTREAM (0x2). Using two bits for this is historical: + * this field used to be a version number, where the two version types + * were 1 and 2. Using two bits for this allows earlier versions of + * the code to be able to recognize send streams that don't use any + * of the features indicated by feature flags. 
+ */ + uint64_t drr_versioninfo; uint64_t drr_creation_time; dmu_objset_type_t drr_type; uint32_t drr_flags; @@ -131,6 +134,7 @@ struct drr_end { uint64_t drr_toguid; }; +/** bonus content follows drr-toguid */ struct drr_object { uint64_t drr_object; dmu_object_type_t drr_type; @@ -141,7 +145,6 @@ struct drr_object { uint8_t drr_compress; uint8_t drr_pad[6]; uint64_t drr_toguid; - /* bonus content follows */ }; struct drr_freeobjects { @@ -150,6 +153,7 @@ struct drr_freeobjects { uint64_t drr_toguid; }; +/** content follows drr_key */ struct drr_write { uint64_t drr_object; dmu_object_type_t drr_type; @@ -160,8 +164,7 @@ struct drr_write { uint8_t drr_checksumtype; uint8_t drr_checksumflags; uint8_t drr_pad2[6]; - ddt_key_t drr_key; /* deduplication key */ - /* content follows */ + ddt_key_t drr_key; /**< deduplication key */ }; struct drr_free { @@ -172,28 +175,37 @@ struct drr_free { }; struct drr_write_byref { - /* where to put the data */ + /** + * \name Where to put the data + * \{ */ uint64_t drr_object; uint64_t drr_offset; uint64_t drr_length; uint64_t drr_toguid; - /* where to find the prior copy of the data */ + /** + * \} + * \name Where to find the prior copy of the data + * \{ */ uint64_t drr_refguid; uint64_t drr_refobject; uint64_t drr_refoffset; - /* properties of the data */ + /** + * \} + * \name Properties of the data + * \{ */ uint8_t drr_checksumtype; uint8_t drr_checksumflags; uint8_t drr_pad2[6]; - ddt_key_t drr_key; /* deduplication key */ + ddt_key_t drr_key; /**< deduplication key */ + /** \} */ }; +/** spill data follows drr_pad */ struct drr_spill { uint64_t drr_object; uint64_t drr_length; uint64_t drr_toguid; - uint64_t drr_pad[4]; /* needed for crypto */ - /* spill data follows */ + uint64_t drr_pad[4]; /**< needed for crypto */ }; typedef struct dmu_replay_record { @@ -215,15 +227,15 @@ typedef struct dmu_replay_record { } drr_u; } dmu_replay_record_t; -/* diff record range types */ +/** \brief diff record range types */ typedef 
enum diff_type { DDR_NONE = 0x1, DDR_INUSE = 0x2, DDR_FREE = 0x4 } diff_type_t; -/* - * The diff reports back ranges of free or in-use objects. +/** + * \brief The diff reports back ranges of free or in-use objects. */ typedef struct dmu_diff_record { uint64_t ddr_type; @@ -255,11 +267,11 @@ typedef struct zinject_record { typedef struct zfs_share { uint64_t z_exportdata; uint64_t z_sharedata; - uint64_t z_sharetype; /* 0 = share, 1 = unshare */ - uint64_t z_sharemax; /* max length of share string */ + uint64_t z_sharetype; /**< 0 = share, 1 = unshare */ + uint64_t z_sharemax; /**< max length of share string */ } zfs_share_t; -/* +/** * ZFS file systems may behave the usual, POSIX-compliant way, where * name lookups are case-sensitive. They may also be set up so that * all the name lookups are case-insensitive, or so that only some @@ -277,20 +289,20 @@ typedef struct zfs_cmd { char zc_string[MAXNAMELEN]; char zc_top_ds[MAXPATHLEN]; uint64_t zc_guid; - uint64_t zc_nvlist_conf; /* really (char *) */ + uint64_t zc_nvlist_conf; /**< really (char *) */ uint64_t zc_nvlist_conf_size; - uint64_t zc_nvlist_src; /* really (char *) */ + uint64_t zc_nvlist_src; /**< really (char *) */ uint64_t zc_nvlist_src_size; - uint64_t zc_nvlist_dst; /* really (char *) */ + uint64_t zc_nvlist_dst; /**< really (char *) */ uint64_t zc_nvlist_dst_size; uint64_t zc_cookie; uint64_t zc_objset_type; uint64_t zc_perm_action; - uint64_t zc_history; /* really (char *) */ + uint64_t zc_history; /**< really (char *) */ uint64_t zc_history_len; uint64_t zc_history_offset; uint64_t zc_obj; - uint64_t zc_iflags; /* internal to zfs(7fs) */ + uint64_t zc_iflags; /**< internal to zfs(7fs) */ zfs_share_t zc_share; uint64_t zc_jailid; dmu_objset_stats_t zc_objset_stats; @@ -334,7 +346,7 @@ extern int zfs_secpolicy_destroy_perms(c extern int zfs_busy(void); extern int zfs_unmount_snap(const char *, void *); -/* +/** * ZFS minor numbers can refer to either a control device instance or * a zvol. 
Depending on the value of zss_type, zss_data points to either * a zvol_state_t or a zfs_onexit_t. diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zfs_rlock.h SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zfs_rlock.h --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zfs_rlock.h 2012-10-17 17:00:59.867589885 -0600 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zfs_rlock.h 2012-11-15 11:42:37.558455955 -0700 @@ -43,40 +43,44 @@ typedef enum { } rl_type_t; typedef struct rl { - znode_t *r_zp; /* znode this lock applies to */ - avl_node_t r_node; /* avl node link */ - uint64_t r_off; /* file range offset */ - uint64_t r_len; /* file range length */ - uint_t r_cnt; /* range reference count in tree */ - rl_type_t r_type; /* range type */ - kcondvar_t r_wr_cv; /* cv for waiting writers */ - kcondvar_t r_rd_cv; /* cv for waiting readers */ - uint8_t r_proxy; /* acting for original range */ - uint8_t r_write_wanted; /* writer wants to lock this range */ - uint8_t r_read_wanted; /* reader wants to lock this range */ + znode_t *r_zp; /**< znode this lock applies to */ + avl_node_t r_node; /**< avl node link */ + uint64_t r_off; /**< file range offset */ + uint64_t r_len; /**< file range length */ + uint_t r_cnt; /**< range reference count in tree */ + rl_type_t r_type; /**< range type */ + kcondvar_t r_wr_cv; /**< cv for waiting writers */ + kcondvar_t r_rd_cv; /**< cv for waiting readers */ + uint8_t r_proxy; /**< acting for original range */ + uint8_t r_write_wanted; /**< writer wants to lock this range */ + uint8_t r_read_wanted; /**< reader wants to lock this range */ } rl_t; -/* - * Lock a range (offset, length) as either shared (READER) - * or exclusive (WRITER or APPEND). APPEND is a special type that - * is converted to WRITER that specified to lock from the start of the - * end of file. zfs_range_lock() returns the range lock structure. 
+/** + * Lock an object range + * + * \param off Offset into the file that begins the range + * \param len Length of the range to lock + * \param type Either shared (RL_READER) or exclusive (RL_WRITER or + * RL_APPEND). APPEND is a special type that is converted + * to WRITER that specified to lock from the start of the + * end of file. + * + * \return The range lock structure for later unlocking or reduce range + * (if entire file previously locked as RL_WRITER). */ rl_t *zfs_range_lock(znode_t *zp, uint64_t off, uint64_t len, rl_type_t type); -/* - * Unlock range and destroy range lock structure. - */ void zfs_range_unlock(rl_t *rl); -/* - * Reduce range locked as RW_WRITER from whole file to specified range. - * Asserts the whole file was previously locked. +/** + * Reduce range locked as RW_WRITER from whole file to specified range. */ void zfs_range_reduce(rl_t *rl, uint64_t off, uint64_t len); -/* - * AVL comparison function used to compare range locks +/** + * AVL comparison function used to order range locks + * Locks are ordered on the start offset of the range. */ int zfs_range_compare(const void *arg1, const void *arg2); diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zfs_sa.h SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zfs_sa.h --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zfs_sa.h 2012-10-17 17:00:59.867589885 -0600 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zfs_sa.h 2012-11-15 11:49:27.848456124 -0700 @@ -41,15 +41,16 @@ extern "C" { #endif -/* - * This is the list of known attributes - * to the ZPL. The values of the actual - * attributes are not defined by the order - * the enums. It is controlled by the attribute - * registration mechanism. Two different file system - * could have different numeric values for the same - * attributes. this list is only used for dereferencing - * into the table that will hold the actual numeric value.
+/** + * List of attributes known to the ZPL. + * + * The values of the actual attributes are not defined + * by the order the enums. It is controlled by the + * attribute registration mechanism. Two different + * file systems could have different numeric values for + * the same attributes. This list is only used for + * dereferencing into the table that will hold the + * actual numeric value. */ typedef enum zpl_attr { ZPL_ATIME, @@ -89,39 +90,40 @@ typedef enum zpl_attr { extern sa_attr_reg_t zfs_attr_table[ZPL_END + 1]; extern sa_attr_reg_t zfs_legacy_attr_table[ZPL_END + 1]; -/* +/** * This is a deprecated data structure that only exists for * dealing with file systems create prior to ZPL version 5. + * + * Data may pad out any remaining bytes in the znode buffer, eg: + \verbatim + + |<---------------------- dnode_phys (512) ------------------------>| + |<-- dnode (192) --->|<----------- "bonus" buffer (320) ---------->| + |<---- znode (264) ---->|<---- data (56) ---->| + + \endverbatim + * At present, we use this space for the following: + * - symbolic links + * - 32-byte anti-virus scanstamp (regular files only) */ typedef struct znode_phys { - uint64_t zp_atime[2]; /* 0 - last file access time */ - uint64_t zp_mtime[2]; /* 16 - last file modification time */ - uint64_t zp_ctime[2]; /* 32 - last file change time */ - uint64_t zp_crtime[2]; /* 48 - creation time */ - uint64_t zp_gen; /* 64 - generation (txg of creation) */ - uint64_t zp_mode; /* 72 - file mode bits */ - uint64_t zp_size; /* 80 - size of file */ - uint64_t zp_parent; /* 88 - directory parent (`..') */ - uint64_t zp_links; /* 96 - number of links to file */ - uint64_t zp_xattr; /* 104 - DMU object for xattrs */ - uint64_t zp_rdev; /* 112 - dev_t for VBLK & VCHR files */ - uint64_t zp_flags; /* 120 - persistent flags */ - uint64_t zp_uid; /* 128 - file owner */ - uint64_t zp_gid; /* 136 - owning group */ - uint64_t zp_zap; /* 144 - extra attributes */ - uint64_t zp_pad[3]; /* 152 - future */ - 
zfs_acl_phys_t zp_acl; /* 176 - 263 ACL */ - /* - * Data may pad out any remaining bytes in the znode buffer, eg: - * - * |<---------------------- dnode_phys (512) ------------------------>| - * |<-- dnode (192) --->|<----------- "bonus" buffer (320) ---------->| - * |<---- znode (264) ---->|<---- data (56) ---->| - * - * At present, we use this space for the following: - * - symbolic links - * - 32-byte anti-virus scanstamp (regular files only) - */ + uint64_t zp_atime[2]; /**< 0 - last file access time */ + uint64_t zp_mtime[2]; /**< 16 - last file modification time */ + uint64_t zp_ctime[2]; /**< 32 - last file change time */ + uint64_t zp_crtime[2]; /**< 48 - creation time */ + uint64_t zp_gen; /**< 64 - generation (txg of creation) */ + uint64_t zp_mode; /**< 72 - file mode bits */ + uint64_t zp_size; /**< 80 - size of file */ + uint64_t zp_parent; /**< 88 - directory parent (`..') */ + uint64_t zp_links; /**< 96 - number of links to file */ + uint64_t zp_xattr; /**< 104 - DMU object for xattrs */ + uint64_t zp_rdev; /**< 112 - dev_t for VBLK & VCHR files */ + uint64_t zp_flags; /**< 120 - persistent flags */ + uint64_t zp_uid; /**< 128 - file owner */ + uint64_t zp_gid; /**< 136 - owning group */ + uint64_t zp_zap; /**< 144 - extra attributes */ + uint64_t zp_pad[3]; /**< 152 - future */ + zfs_acl_phys_t zp_acl; /**< 176 - 263 ACL */ } znode_phys_t; #ifdef _KERNEL diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zfs_stat.h SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zfs_stat.h --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zfs_stat.h 2012-10-17 17:00:59.867589885 -0600 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zfs_stat.h 2012-10-07 20:59:13.918590501 -0600 @@ -34,7 +34,7 @@ extern "C" { #endif -/* +/** * A limited number of zpl level stats are retrievable * with an ioctl. zfs diff is the current consumer. 
*/ diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zfs_vfsops.h SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zfs_vfsops.h --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zfs_vfsops.h 2012-10-17 17:00:59.867589885 -0600 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zfs_vfsops.h 2012-10-07 20:59:13.918590501 -0600 @@ -42,51 +42,53 @@ typedef struct zfsvfs zfsvfs_t; struct znode; struct zfsvfs { - vfs_t *z_vfs; /* generic fs struct */ - zfsvfs_t *z_parent; /* parent fs */ - objset_t *z_os; /* objset reference */ - uint64_t z_root; /* id of root znode */ - uint64_t z_unlinkedobj; /* id of unlinked zapobj */ - uint64_t z_max_blksz; /* maximum block size for files */ - uint64_t z_fuid_obj; /* fuid table object number */ - uint64_t z_fuid_size; /* fuid table size */ - avl_tree_t z_fuid_idx; /* fuid tree keyed by index */ - avl_tree_t z_fuid_domain; /* fuid tree keyed by domain */ - krwlock_t z_fuid_lock; /* fuid lock */ - boolean_t z_fuid_loaded; /* fuid tables are loaded */ - boolean_t z_fuid_dirty; /* need to sync fuid table ? 
*/ - struct zfs_fuid_info *z_fuid_replay; /* fuid info for replay */ - zilog_t *z_log; /* intent log pointer */ - uint_t z_acl_mode; /* acl chmod/mode behavior */ - uint_t z_acl_inherit; /* acl inheritance behavior */ - zfs_case_t z_case; /* case-sense */ - boolean_t z_utf8; /* utf8-only */ - int z_norm; /* normalization flags */ - boolean_t z_atime; /* enable atimes mount option */ - boolean_t z_unmounted; /* unmounted */ + vfs_t *z_vfs; /**< generic fs struct */ + zfsvfs_t *z_parent; /**< parent fs */ + objset_t *z_os; /**< objset reference */ + uint64_t z_root; /**< id of root znode */ + uint64_t z_unlinkedobj; /**< id of unlinked zapobj */ + uint64_t z_max_blksz; /**< maximum block size for files */ + uint64_t z_fuid_obj; /**< fuid table object number */ + uint64_t z_fuid_size; /**< fuid table size */ + avl_tree_t z_fuid_idx; /**< fuid tree keyed by index */ + avl_tree_t z_fuid_domain; /**< fuid tree keyed by domain */ + krwlock_t z_fuid_lock; /**< fuid lock */ + boolean_t z_fuid_loaded; /**< fuid tables are loaded */ + boolean_t z_fuid_dirty; /**< need to sync fuid table ? 
*/ + struct zfs_fuid_info *z_fuid_replay; /**< fuid info for replay */ + zilog_t *z_log; /**< intent log pointer */ + uint_t z_acl_mode; /**< acl chmod/mode behavior */ + uint_t z_acl_inherit; /**< acl inheritance behavior */ + zfs_case_t z_case; /**< case-sense */ + boolean_t z_utf8; /**< utf8-only */ + int z_norm; /**< normalization flags */ + boolean_t z_atime; /**< enable atimes mount option */ + boolean_t z_unmounted; /**< unmounted */ rrwlock_t z_teardown_lock; krwlock_t z_teardown_inactive_lock; - list_t z_all_znodes; /* all vnodes in the fs */ - kmutex_t z_znodes_lock; /* lock for z_all_znodes */ - vnode_t *z_ctldir; /* .zfs directory pointer */ - boolean_t z_show_ctldir; /* expose .zfs in the root dir */ - boolean_t z_issnap; /* true if this is a snapshot */ - boolean_t z_vscan; /* virus scan on/off */ - boolean_t z_use_fuids; /* version allows fuids */ - boolean_t z_replay; /* set during ZIL replay */ - boolean_t z_use_sa; /* version allow system attributes */ - uint64_t z_version; /* ZPL version */ - uint64_t z_shares_dir; /* hidden shares dir */ + list_t z_all_znodes; /**< all vnodes in the fs */ + kmutex_t z_znodes_lock; /**< lock for z_all_znodes */ + vnode_t *z_ctldir; /**< .zfs directory pointer */ + boolean_t z_show_ctldir; /**< expose .zfs in the root dir */ + boolean_t z_issnap; /**< true if this is a snapshot */ + boolean_t z_vscan; /**< virus scan on/off */ + boolean_t z_use_fuids; /**< version allows fuids */ + boolean_t z_replay; /**< set during ZIL replay */ + boolean_t z_use_sa; /**< version allow system attributes */ + uint64_t z_version; /**< ZPL version */ + uint64_t z_shares_dir; /**< hidden shares dir */ kmutex_t z_lock; uint64_t z_userquota_obj; uint64_t z_groupquota_obj; - uint64_t z_replay_eof; /* New end of file - replay only */ - sa_attr_type_t *z_attr_table; /* SA attr mapping->id */ + uint64_t z_replay_eof; /**< New end of file - replay only */ + sa_attr_type_t *z_attr_table; /**< SA attr mapping->id */ #define ZFS_OBJ_MTX_SZ 64 
- kmutex_t z_hold_mtx[ZFS_OBJ_MTX_SZ]; /* znode hold locks */ + kmutex_t z_hold_mtx[ZFS_OBJ_MTX_SZ]; /**< znode hold locks */ }; -/* +/** + * \brief File IDs for normal filesystems + * * Normal filesystems (those not under .zfs/snapshot) have a total * file ID size limited to 12 bytes (including the length field) due to * NFSv2 protocol's limitation of 32 bytes for a filehandle. For historical @@ -105,11 +107,13 @@ struct zfsvfs { */ typedef struct zfid_short { uint16_t zf_len; - uint8_t zf_object[6]; /* obj[i] = obj >> (8 * i) */ - uint8_t zf_gen[4]; /* gen[i] = gen >> (8 * i) */ + uint8_t zf_object[6]; /**< obj[i] = obj >> (8 * i) */ + uint8_t zf_gen[4]; /**< gen[i] = gen >> (8 * i) */ } zfid_short_t; -/* +/** + * \brief File IDs for snapshot filesystems + * * Filesystems under .zfs/snapshot have a total file ID size of 22 bytes * (including the length field). This makes files under .zfs/snapshot * accessible by NFSv3 and NFSv4, but not NFSv2. @@ -127,8 +131,8 @@ typedef struct zfid_short { */ typedef struct zfid_long { zfid_short_t z_fid; - uint8_t zf_setid[6]; /* obj[i] = obj >> (8 * i) */ - uint8_t zf_setgen[2]; /* gen[i] = gen >> (8 * i) */ + uint8_t zf_setid[6]; /**< obj[i] = obj >> (8 * i) */ + uint8_t zf_setgen[2]; /**< gen[i] = gen >> (8 * i) */ } zfid_long_t; #define SHORT_FID_LEN (sizeof (zfid_short_t) - sizeof (uint16_t)) diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zfs_znode.h SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zfs_znode.h --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zfs_znode.h 2012-10-17 17:00:59.868590444 -0600 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zfs_znode.h 2012-11-15 11:51:53.449454830 -0700 @@ -41,9 +41,10 @@ extern "C" { #endif -/* - * Additional file level attributes, that are stored - * in the upper half of zp_flags +/** + * \name Additional file level attributes + * Stored in the upper half of zp_flags + * \{ */ 
#define ZFS_READONLY 0x0000000100000000 #define ZFS_HIDDEN 0x0000000200000000 @@ -59,6 +60,7 @@ extern "C" { #define ZFS_REPARSE 0x0000080000000000 #define ZFS_OFFLINE 0x0000100000000000 #define ZFS_SPARSE 0x0000200000000000 +/** \} */ #define ZFS_ATTR_SET(zp, attr, value, pflags, tx) \ { \ @@ -70,18 +72,20 @@ extern "C" { &pflags, sizeof (pflags), tx)); \ } -/* - * Define special zfs pflags - */ -#define ZFS_XATTR 0x1 /* is an extended attribute */ -#define ZFS_INHERIT_ACE 0x2 /* ace has inheritable ACEs */ -#define ZFS_ACL_TRIVIAL 0x4 /* files ACL is trivial */ -#define ZFS_ACL_OBJ_ACE 0x8 /* ACL has CMPLX Object ACE */ -#define ZFS_ACL_PROTECTED 0x10 /* ACL protected */ -#define ZFS_ACL_DEFAULTED 0x20 /* ACL should be defaulted */ -#define ZFS_ACL_AUTO_INHERIT 0x40 /* ACL should be inherited */ -#define ZFS_BONUS_SCANSTAMP 0x80 /* Scanstamp in bonus area */ -#define ZFS_NO_EXECS_DENIED 0x100 /* exec was given to everyone */ +/** + * \name Special zfs pflags + * \{ + */ +#define ZFS_XATTR 0x1 /**< is an extended attribute */ +#define ZFS_INHERIT_ACE 0x2 /**< ace has inheritable ACEs */ +#define ZFS_ACL_TRIVIAL 0x4 /**< files ACL is trivial */ +#define ZFS_ACL_OBJ_ACE 0x8 /**< ACL has CMPLX Object ACE */ +#define ZFS_ACL_PROTECTED 0x10 /**< ACL protected */ +#define ZFS_ACL_DEFAULTED 0x20 /**< ACL should be defaulted */ +#define ZFS_ACL_AUTO_INHERIT 0x40 /**< ACL should be inherited */ +#define ZFS_BONUS_SCANSTAMP 0x80 /**< Scanstamp in bonus area */ +#define ZFS_NO_EXECS_DENIED 0x100 /**< exec was given to everyone */ +/** \} */ #define SA_ZPL_ATIME(z) z->z_attr_table[ZPL_ATIME] #define SA_ZPL_MTIME(z) z->z_attr_table[ZPL_MTIME] @@ -104,12 +108,12 @@ extern "C" { #define SA_ZPL_ZNODE_ACL(z) z->z_attr_table[ZPL_ZNODE_ACL] #define SA_ZPL_PAD(z) z->z_attr_table[ZPL_PAD] -/* +/** * Is ID ephemeral? */ #define IS_EPHEMERAL(x) (x > MAXUID) -/* +/** * Should we use FUIDs?
*/ #define USE_FUIDS(version, os) (version >= ZPL_VERSION_FUID && \ @@ -119,10 +123,11 @@ extern "C" { #define MASTER_NODE_OBJ 1 -/* - * Special attributes for master node. +/** + * \name Special attributes for master node. * "userquota@" and "groupquota@" are also valid (from * zfs_userquota_prop_prefixes[]). + * \{ */ #define ZFS_FSID "FSID" #define ZFS_UNLINKED_SET "DELETE_QUEUE" @@ -131,11 +136,12 @@ extern "C" { #define ZFS_FUID_TABLES "FUID" #define ZFS_SHARES_DIR "SHARES" #define ZFS_SA_ATTRS "SA_ATTRS" - +/** \} */ #define ZFS_MAX_BLOCKSIZE (SPA_MAXBLOCKSIZE) -/* Path component length */ -/* +/** + * Path component length + * * The generic fs code uses MAXNAMELEN to represent * what the largest component length is. Unfortunately, * this length includes the terminating NULL. ZFS needs @@ -144,7 +150,7 @@ extern "C" { */ #define ZFS_MAXNAMELEN (MAXNAMELEN - 1) -/* +/** * Convert mode bits (zp_mode) to BSD-style DT_* values for storing in * the directory entries. */ @@ -152,78 +158,85 @@ extern "C" { #define IFTODT(mode) (((mode) & S_IFMT) >> 12) #endif -/* +/** + * \name Directory Entries + * * The directory entry has the type (currently unused on Solaris) in the * top 4 bits, and the object number in the low 48 bits. The "middle" * 12 bits are unused. + * \{ */ #define ZFS_DIRENT_TYPE(de) BF64_GET(de, 60, 4) #define ZFS_DIRENT_OBJ(de) BF64_GET(de, 0, 48) +/** \} */ -/* +#ifdef _KERNEL +/** + * Directory Entry Locks + * + * Directory entry locks control access to directory entries. * They are used to protect creates, deletes, and renames. * Each directory znode has a mutex and a list of locked names.
*/ -#ifdef _KERNEL typedef struct zfs_dirlock { - char *dl_name; /* directory entry being locked */ - uint32_t dl_sharecnt; /* 0 if exclusive, > 0 if shared */ - uint8_t dl_namelock; /* 1 if z_name_lock is NOT held */ - uint16_t dl_namesize; /* set if dl_name was allocated */ - kcondvar_t dl_cv; /* wait for entry to be unlocked */ - struct znode *dl_dzp; /* directory znode */ - struct zfs_dirlock *dl_next; /* next in z_dirlocks list */ + char *dl_name; /**< directory entry being locked */ + uint32_t dl_sharecnt; /**< 0 if exclusive, > 0 if shared */ + uint8_t dl_namelock; /**< 1 if z_name_lock is NOT held */ + uint16_t dl_namesize; /**< set if dl_name was allocated */ + kcondvar_t dl_cv; /**< wait for entry to be unlocked */ + struct znode *dl_dzp; /**< directory znode */ + struct zfs_dirlock *dl_next; /**< next in z_dirlocks list */ } zfs_dirlock_t; typedef struct znode { struct zfsvfs *z_zfsvfs; vnode_t *z_vnode; - uint64_t z_id; /* object ID for this znode */ - kmutex_t z_lock; /* znode modification lock */ - krwlock_t z_parent_lock; /* parent lock for directories */ - krwlock_t z_name_lock; /* "master" lock for dirent locks */ - zfs_dirlock_t *z_dirlocks; /* directory entry lock list */ - kmutex_t z_range_lock; /* protects changes to z_range_avl */ - avl_tree_t z_range_avl; /* avl tree of file range locks */ - uint8_t z_unlinked; /* file has been unlinked */ - uint8_t z_atime_dirty; /* atime needs to be synced */ - uint8_t z_zn_prefetch; /* Prefetch znodes? */ - uint8_t z_moved; /* Has this znode been moved? 
*/ - uint_t z_blksz; /* block size in bytes */ - uint_t z_seq; /* modification sequence number */ - uint64_t z_mapcnt; /* number of pages mapped to file */ - uint64_t z_gen; /* generation (cached) */ - uint64_t z_size; /* file size (cached) */ - uint64_t z_atime[2]; /* atime (cached) */ - uint64_t z_links; /* file links (cached) */ - uint64_t z_pflags; /* pflags (cached) */ - uint64_t z_uid; /* uid fuid (cached) */ - uint64_t z_gid; /* gid fuid (cached) */ - mode_t z_mode; /* mode (cached) */ - uint32_t z_sync_cnt; /* synchronous open count */ - kmutex_t z_acl_lock; /* acl data lock */ - zfs_acl_t *z_acl_cached; /* cached acl */ - list_node_t z_link_node; /* all znodes in fs link */ - sa_handle_t *z_sa_hdl; /* handle to sa data */ - boolean_t z_is_sa; /* are we native sa? */ - /* FreeBSD-specific field. */ + uint64_t z_id; /**< object ID for this znode */ + kmutex_t z_lock; /**< znode modification lock */ + krwlock_t z_parent_lock; /**< parent lock for directories */ + krwlock_t z_name_lock; /**< "master" lock for dirent locks */ + zfs_dirlock_t *z_dirlocks; /**< directory entry lock list */ + kmutex_t z_range_lock; /**< protects changes to z_range_avl */ + avl_tree_t z_range_avl; /**< avl tree of file range locks */ + uint8_t z_unlinked; /**< file has been unlinked */ + uint8_t z_atime_dirty; /**< atime needs to be synced */ + uint8_t z_zn_prefetch; /**< Prefetch znodes? */ + uint8_t z_moved; /**< Has this znode been moved? 
*/ + uint_t z_blksz; /**< block size in bytes */ + uint_t z_seq; /**< modification sequence number */ + uint64_t z_mapcnt; /**< number of pages mapped to file */ + uint64_t z_gen; /**< generation (cached) */ + uint64_t z_size; /**< file size (cached) */ + uint64_t z_atime[2]; /**< atime (cached) */ + uint64_t z_links; /**< file links (cached) */ + uint64_t z_pflags; /**< pflags (cached) */ + uint64_t z_uid; /**< uid fuid (cached) */ + uint64_t z_gid; /**< gid fuid (cached) */ + mode_t z_mode; /**< mode (cached) */ + uint32_t z_sync_cnt; /**< synchronous open count */ + kmutex_t z_acl_lock; /**< acl data lock */ + zfs_acl_t *z_acl_cached; /**< cached acl */ + list_node_t z_link_node; /**< all znodes in fs link */ + sa_handle_t *z_sa_hdl; /**< handle to sa data */ + boolean_t z_is_sa; /**< are we native sa? */ + /** FreeBSD-specific field. */ struct task z_task; } znode_t; -/* - * Range locking rules - * -------------------- - * 1. When truncating a file (zfs_create, zfs_setattr, zfs_space) the whole +/** + * \file zfs_znode.h + * + *

Range locking rules

+ * -# When truncating a file (zfs_create, zfs_setattr, zfs_space) the whole * file range needs to be locked as RL_WRITER. Only then can the pages be * freed etc and zp_size reset. zp_size must be set within range lock. - * 2. For writes and punching holes (zfs_write & zfs_space) just the range + * -# For writes and punching holes (zfs_write & zfs_space) just the range * being written or freed needs to be locked as RL_WRITER. * Multiple writes at the end of the file must coordinate zp_size updates * to ensure data isn't lost. A compare and swap loop is currently used * to ensure the file size is at least the offset last written. - * 3. For reads (zfs_read, zfs_get_data & zfs_putapage) just the range being + * -# For reads (zfs_read, zfs_get_data & zfs_putapage) just the range being * read needs to be locked as RL_READER. A check against zp_size can then * be made for reading beyond end of file. */ @@ -253,12 +266,7 @@ VTOZ(vnode_t *vp) #define VTOZ(VP) ((znode_t *)(VP)->v_data) #endif -/* - * ZFS_ENTER() is called on entry to each ZFS vnode and vfs operation. - * ZFS_ENTER_NOERROR() is called when we can't return EIO. - * ZFS_EXIT() must be called before exitting the vop. - * ZFS_VERIFY_ZP() verifies the znode is valid. - */ +/** Called on entry to each ZFS vnode and vfs operation */ #define ZFS_ENTER(zfsvfs) \ { \ rrw_enter(&(zfsvfs)->z_teardown_lock, RW_READER, FTAG); \ @@ -268,19 +276,23 @@ VTOZ(vnode_t *vp) } \ } +/** Called when we can't return EIO. 
*/ #define ZFS_ENTER_NOERROR(zfsvfs) \ rrw_enter(&(zfsvfs)->z_teardown_lock, RW_READER, FTAG) +/** Must be called before exitting the vop */ #define ZFS_EXIT(zfsvfs) rrw_exit(&(zfsvfs)->z_teardown_lock, FTAG) +/** Verifies the znode is valid */ #define ZFS_VERIFY_ZP(zp) \ if ((zp)->z_sa_hdl == NULL) { \ ZFS_EXIT((zp)->z_zfsvfs); \ return (EIO); \ } \ -/* - * Macros for dealing with dmu_buf_hold +/** + * \name Macros for dealing with dmu_buf_hold + * \{ */ #define ZFS_OBJ_HASH(obj_num) ((obj_num) & (ZFS_OBJ_MTX_SZ - 1)) #define ZFS_OBJ_MUTEX(zfsvfs, obj_num) \ @@ -291,9 +303,10 @@ VTOZ(vnode_t *vp) mutex_tryenter(ZFS_OBJ_MUTEX((zfsvfs), (obj_num))) #define ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num) \ mutex_exit(ZFS_OBJ_MUTEX((zfsvfs), (obj_num))) +/** \} */ -/* - * Macros to encode/decode ZFS stored time values from/to struct timespec +/** + * Encode ZFS stored time value from a struct timespec */ #define ZFS_TIME_ENCODE(tp, stmp) \ { \ @@ -301,18 +314,23 @@ VTOZ(vnode_t *vp) (stmp)[1] = (uint64_t)(tp)->tv_nsec; \ } +/** + * Decode ZFS stored time value to a struct timespec + */ #define ZFS_TIME_DECODE(tp, stmp) \ { \ (tp)->tv_sec = (time_t)(stmp)[0]; \ (tp)->tv_nsec = (long)(stmp)[1]; \ } -/* - * Timestamp defines +/** + * \name Timestamp defines + * \{ */ #define ACCESSED (AT_ATIME) #define STATE_CHANGED (AT_CTIME) #define CONTENT_MODIFIED (AT_MTIME | AT_CTIME) +/** \} */ #define ZFS_ACCESSTIME_STAMP(zfsvfs, zp) \ if ((zfsvfs)->z_atime && !((zfsvfs)->z_vfs->vfs_flag & VFS_RDONLY)) \ @@ -347,7 +365,7 @@ extern int zfs_log_create_txtype(zil_cre vattr_t *vap); extern void zfs_log_remove(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype, znode_t *dzp, char *name, uint64_t foid); -#define ZFS_NO_OBJECT 0 /* no object id */ +#define ZFS_NO_OBJECT 0 /**< no object id */ extern void zfs_log_link(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype, znode_t *dzp, znode_t *zp, char *name); extern void zfs_log_symlink(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype, diff -Nurp 
SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zil.h SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zil.h --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zil.h 2012-11-16 11:07:22.182456015 -0700 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zil.h 2012-11-15 18:17:57.219456173 -0700 @@ -37,8 +37,9 @@ extern "C" { #endif -/* - * Intent log format: +/** + * \file zil.h + * Intent log format * * Each objset has its own intent log. The log header (zil_header_t) * for objset N's intent log is kept in the Nth object of the SPA's @@ -49,28 +50,32 @@ extern "C" { * with a common structure that defines the type, length, and txg. */ -/* - * Intent log header - this on disk structure holds fields to manage - * the log. All fields are 64 bit to easily handle cross architectures. +/** + * Intent log header + * + * This on disk structure holds fields to manage the log. All fields are 64 + * bit to easily handle cross architectures. 
*/ typedef struct zil_header { - uint64_t zh_claim_txg; /* txg in which log blocks were claimed */ - uint64_t zh_replay_seq; /* highest replayed sequence number */ - blkptr_t zh_log; /* log chain */ - uint64_t zh_claim_blk_seq; /* highest claimed block sequence number */ - uint64_t zh_flags; /* header flags */ - uint64_t zh_claim_lr_seq; /* highest claimed lr sequence number */ + uint64_t zh_claim_txg; /**< txg in which log blocks were claimed */ + uint64_t zh_replay_seq; /**< highest replayed sequence number */ + blkptr_t zh_log; /**< log chain */ + uint64_t zh_claim_blk_seq; /**< highest claimed block sequence number */ + uint64_t zh_flags; /**< header flags */ + uint64_t zh_claim_lr_seq; /**< highest claimed lr sequence number */ uint64_t zh_pad[3]; } zil_header_t; -/* - * zh_flags bit settings - */ -#define ZIL_REPLAY_NEEDED 0x1 /* replay needed - internal only */ -#define ZIL_CLAIM_LR_SEQ_VALID 0x2 /* zh_claim_lr_seq field is valid */ +/** + * \name zh_flags bit settings + * \{ + */ +#define ZIL_REPLAY_NEEDED 0x1 /**< replay needed - internal only */ +#define ZIL_CLAIM_LR_SEQ_VALID 0x2 /**< zh_claim_lr_seq field is valid */ +/** \} */ -/* - * Log block chaining. +/** + * Log block chaining * * Log blocks are chained together. Originally they were chained at the * end of the block. For performance reasons the chain was moved to the @@ -84,21 +89,23 @@ typedef struct zil_header { */ typedef struct zil_chain { uint64_t zc_pad; - blkptr_t zc_next_blk; /* next block in chain */ - uint64_t zc_nused; /* bytes in log block used */ - zio_eck_t zc_eck; /* block trailer */ + blkptr_t zc_next_blk; /**< next block in chain */ + uint64_t zc_nused; /**< bytes in log block used */ + zio_eck_t zc_eck; /**< block trailer */ } zil_chain_t; #define ZIL_MIN_BLKSZ 4096ULL #define ZIL_MAX_BLKSZ SPA_MAXBLOCKSIZE -/* - * The words of a log block checksum. +/** + * \name The words of a log block checksum. 
+ * \{ */ #define ZIL_ZC_GUID_0 0 #define ZIL_ZC_GUID_1 1 #define ZIL_ZC_OBJSET 2 #define ZIL_ZC_SEQ 3 +/** \} */ typedef enum zil_create { Z_FILE, @@ -106,8 +113,9 @@ typedef enum zil_create { Z_XATTRDIR, } zil_create_t; -/* +/** * size of xvattr log section. + * * its composed of lr_attr_t + xvattr bitmap + 2 64 bit timestamps * for create time and a single 64 bit integer for all of the attributes, * and 4 64 bit integers (32 bytes) for the scanstamp. @@ -118,46 +126,48 @@ typedef enum zil_create { sizeof (lr_attr_t) + (sizeof (uint32_t) * (mapsize - 1)) + \ (sizeof (uint64_t) * 7) -/* +/** * Size of ACL in log. The ACE data is padded out to properly align * on 8 byte boundary. */ #define ZIL_ACE_LENGTH(x) (roundup(x, sizeof (uint64_t))) -/* - * Intent log transaction types and record structures - */ -#define TX_CREATE 1 /* Create file */ -#define TX_MKDIR 2 /* Make directory */ -#define TX_MKXATTR 3 /* Make XATTR directory */ -#define TX_SYMLINK 4 /* Create symbolic link to a file */ -#define TX_REMOVE 5 /* Remove file */ -#define TX_RMDIR 6 /* Remove directory */ -#define TX_LINK 7 /* Create hard link to a file */ -#define TX_RENAME 8 /* Rename a file */ -#define TX_WRITE 9 /* File write */ -#define TX_TRUNCATE 10 /* Truncate a file */ -#define TX_SETATTR 11 /* Set file attributes */ -#define TX_ACL_V0 12 /* Set old formatted ACL */ -#define TX_ACL 13 /* Set ACL */ -#define TX_CREATE_ACL 14 /* create with ACL */ -#define TX_CREATE_ATTR 15 /* create + attrs */ -#define TX_CREATE_ACL_ATTR 16 /* create with ACL + attrs */ -#define TX_MKDIR_ACL 17 /* mkdir with ACL */ -#define TX_MKDIR_ATTR 18 /* mkdir with attr */ -#define TX_MKDIR_ACL_ATTR 19 /* mkdir with ACL + attrs */ -#define TX_WRITE2 20 /* dmu_sync EALREADY write */ -#define TX_MAX_TYPE 21 /* Max transaction type */ +/** + * \name Intent log transaction types and record structures + * \{ + */ +#define TX_CREATE 1 /**< Create file */ +#define TX_MKDIR 2 /**< Make directory */ +#define TX_MKXATTR 3 /**< Make 
XATTR directory */ +#define TX_SYMLINK 4 /**< Create symbolic link to a file */ +#define TX_REMOVE 5 /**< Remove file */ +#define TX_RMDIR 6 /**< Remove directory */ +#define TX_LINK 7 /**< Create hard link to a file */ +#define TX_RENAME 8 /**< Rename a file */ +#define TX_WRITE 9 /**< File write */ +#define TX_TRUNCATE 10 /**< Truncate a file */ +#define TX_SETATTR 11 /**< Set file attributes */ +#define TX_ACL_V0 12 /**< Set old formatted ACL */ +#define TX_ACL 13 /**< Set ACL */ +#define TX_CREATE_ACL 14 /**< create with ACL */ +#define TX_CREATE_ATTR 15 /**< create + attrs */ +#define TX_CREATE_ACL_ATTR 16 /**< create with ACL + attrs */ +#define TX_MKDIR_ACL 17 /**< mkdir with ACL */ +#define TX_MKDIR_ATTR 18 /**< mkdir with attr */ +#define TX_MKDIR_ACL_ATTR 19 /**< mkdir with ACL + attrs */ +#define TX_WRITE2 20 /**< dmu_sync EALREADY write */ +#define TX_MAX_TYPE 21 /**< Max transaction type */ +/** \} */ -/* +/** * The transactions for mkdir, symlink, remove, rmdir, link, and rename * may have the following bit set, indicating the original request * specified case-insensitive handling of names. */ #define TX_CI ((uint64_t)0x1 << 63) /* case-insensitive behavior requested */ -/* +/** * Transactions for write, truncate, setattr, acl_v0, and acl can be logged * out of order. For convenience in the code, all such records must have * lr_foid at the same offset. @@ -170,7 +180,7 @@ typedef enum zil_create { (txtype) == TX_ACL || \ (txtype) == TX_WRITE2) -/* +/** * Format of log records. * The fields are carefully defined to allow them to be aligned * and sized the same on sparc & intel architectures. @@ -179,22 +189,22 @@ typedef enum zil_create { * The log record on disk (lrc_seq) holds the sequence number of all log * records which is used to ensure we don't replay the same record. 
*/ -typedef struct { /* common log record header */ - uint64_t lrc_txtype; /* intent log transaction type */ - uint64_t lrc_reclen; /* transaction record length */ - uint64_t lrc_txg; /* dmu transaction group number */ - uint64_t lrc_seq; /* see comment above */ +typedef struct { /**< common log record header */ + uint64_t lrc_txtype; /**< intent log transaction type */ + uint64_t lrc_reclen; /**< transaction record length */ + uint64_t lrc_txg; /**< dmu transaction group number */ + uint64_t lrc_seq; /**< see comment above */ } lr_t; -/* +/** * Common start of all out-of-order record types (TX_OOO() above). */ typedef struct { - lr_t lr_common; /* common portion of log record */ - uint64_t lr_foid; /* object id */ + lr_t lr_common; /**< common portion of log record */ + uint64_t lr_foid; /**< object id */ } lr_ooo_t; -/* +/** * Handle option extended vattr attributes. * * Whenever new attributes are added the version number @@ -202,25 +212,25 @@ typedef struct { * zfs_log.c and zfs_replay.c */ typedef struct { - uint32_t lr_attr_masksize; /* number of elements in array */ - uint32_t lr_attr_bitmap; /* First entry of array */ + uint32_t lr_attr_masksize; /**< number of elements in array */ + uint32_t lr_attr_bitmap; /**< First entry of array */ /* remainder of array and any additional fields */ } lr_attr_t; -/* +/** * log record for creates without optional ACL. * This log record does support optional xvattr_t attributes. 
*/ typedef struct { - lr_t lr_common; /* common portion of log record */ - uint64_t lr_doid; /* object id of directory */ - uint64_t lr_foid; /* object id of created file object */ - uint64_t lr_mode; /* mode of object */ - uint64_t lr_uid; /* uid of object */ - uint64_t lr_gid; /* gid of object */ - uint64_t lr_gen; /* generation (txg of creation) */ - uint64_t lr_crtime[2]; /* creation time */ - uint64_t lr_rdev; /* rdev of object to create */ + lr_t lr_common; /**< common portion of log record */ + uint64_t lr_doid; /**< object id of directory */ + uint64_t lr_foid; /**< object id of created file object */ + uint64_t lr_mode; /**< mode of object */ + uint64_t lr_uid; /**< uid of object */ + uint64_t lr_gid; /**< gid of object */ + uint64_t lr_gen; /**< generation (txg of creation) */ + uint64_t lr_crtime[2]; /**< creation time */ + uint64_t lr_rdev; /**< rdev of object to create */ /* name of object to create follows this */ /* for symlinks, link content follows name */ /* for creates with xvattr data, the name follows the xvattr info */ @@ -236,105 +246,103 @@ typedef struct { * since it may not be available). */ -/* +/** * Log record for creates with optional ACL * This log record is also used for recording any FUID * information needed for replaying the create. If the * file doesn't have any actual ACEs then the lr_aclcnt * would be zero. + * + * After lr_acl_flags, there are a lr_acl_bytes number of variable sized ace's. + * If create is also setting xvattr's, then acl data follows xvattr. If ACE + * FUIDs are needed then they will follow the xvattr_t. Following the FUIDs will + * be the domain table information. The FUIDs for the owner and group will be + * in the lr_create portion of the record.
name follows ACL data */ typedef struct { - lr_create_t lr_create; /* common create portion */ - uint64_t lr_aclcnt; /* number of ACEs in ACL */ - uint64_t lr_domcnt; /* number of unique domains */ - uint64_t lr_fuidcnt; /* number of real fuids */ - uint64_t lr_acl_bytes; /* number of bytes in ACL */ - uint64_t lr_acl_flags; /* ACL flags */ - /* lr_acl_bytes number of variable sized ace's follows */ - /* if create is also setting xvattr's, then acl data follows xvattr */ - /* if ACE FUIDs are needed then they will follow the xvattr_t */ - /* Following the FUIDs will be the domain table information. */ - /* The FUIDs for the owner and group will be in the lr_create */ - /* portion of the record. */ - /* name follows ACL data */ + lr_create_t lr_create; /**< common create portion */ + uint64_t lr_aclcnt; /**< number of ACEs in ACL */ + uint64_t lr_domcnt; /**< number of unique domains */ + uint64_t lr_fuidcnt; /**< number of real fuids */ + uint64_t lr_acl_bytes; /**< number of bytes in ACL */ + uint64_t lr_acl_flags; /**< ACL flags */ } lr_acl_create_t; +/** \note name of object to remove follows lr_doid */ typedef struct { - lr_t lr_common; /* common portion of log record */ - uint64_t lr_doid; /* obj id of directory */ - /* name of object to remove follows this */ + lr_t lr_common; /**< common portion of log record */ + uint64_t lr_doid; /**< obj id of directory */ } lr_remove_t; +/** \note name of object to link follows lr_link_obj */ typedef struct { - lr_t lr_common; /* common portion of log record */ - uint64_t lr_doid; /* obj id of directory */ - uint64_t lr_link_obj; /* obj id of link */ - /* name of object to link follows this */ + lr_t lr_common; /**< common portion of log record */ + uint64_t lr_doid; /**< obj id of directory */ + uint64_t lr_link_obj; /**< obj id of link */ } lr_link_t; +/** \note 2 strings: names of source and destination follow lr_tdoid */ typedef struct { - lr_t lr_common; /* common portion of log record */ - uint64_t lr_sdoid; /* obj 
id of source directory */ - uint64_t lr_tdoid; /* obj id of target directory */ - /* 2 strings: names of source and destination follow this */ + lr_t lr_common; /**< common portion of log record */ + uint64_t lr_sdoid; /**< obj id of source directory */ + uint64_t lr_tdoid; /**< obj id of target directory */ } lr_rename_t; +/** \note write data will follow lr_blkptr for small writes */ typedef struct { - lr_t lr_common; /* common portion of log record */ - uint64_t lr_foid; /* file object to write */ - uint64_t lr_offset; /* offset to write to */ - uint64_t lr_length; /* user data length to write */ - uint64_t lr_blkoff; /* no longer used */ - blkptr_t lr_blkptr; /* spa block pointer for replay */ - /* write data will follow for small writes */ + lr_t lr_common; /**< common portion of log record */ + uint64_t lr_foid; /**< file object to write */ + uint64_t lr_offset; /**< offset to write to */ + uint64_t lr_length; /**< user data length to write */ + uint64_t lr_blkoff; /**< no longer used */ + blkptr_t lr_blkptr; /**< spa block pointer for replay */ } lr_write_t; typedef struct { - lr_t lr_common; /* common portion of log record */ - uint64_t lr_foid; /* object id of file to truncate */ - uint64_t lr_offset; /* offset to truncate from */ - uint64_t lr_length; /* length to truncate */ + lr_t lr_common; /**< common portion of log record */ + uint64_t lr_foid; /**< object id of file to truncate */ + uint64_t lr_offset; /**< offset to truncate from */ + uint64_t lr_length; /**< length to truncate */ } lr_truncate_t; +/** \note optional attribute lr_attr_t may follow lr_mtime */ typedef struct { - lr_t lr_common; /* common portion of log record */ - uint64_t lr_foid; /* file object to change attributes */ - uint64_t lr_mask; /* mask of attributes to set */ - uint64_t lr_mode; /* mode to set */ - uint64_t lr_uid; /* uid to set */ - uint64_t lr_gid; /* gid to set */ - uint64_t lr_size; /* size to set */ - uint64_t lr_atime[2]; /* access time */ - uint64_t lr_mtime[2];
/* modification time */ - /* optional attribute lr_attr_t may be here */ + lr_t lr_common; /**< common portion of log record */ + uint64_t lr_foid; /**< file object to change attributes */ + uint64_t lr_mask; /**< mask of attributes to set */ + uint64_t lr_mode; /**< mode to set */ + uint64_t lr_uid; /**< uid to set */ + uint64_t lr_gid; /**< gid to set */ + uint64_t lr_size; /**< size to set */ + uint64_t lr_atime[2]; /**< access time */ + uint64_t lr_mtime[2]; /**< modification time */ } lr_setattr_t; +/** \note lr_aclcnt number of ace_t entries follow lr_aclcnt */ typedef struct { - lr_t lr_common; /* common portion of log record */ - uint64_t lr_foid; /* obj id of file */ - uint64_t lr_aclcnt; /* number of acl entries */ - /* lr_aclcnt number of ace_t entries follow this */ + lr_t lr_common; /**< common portion of log record */ + uint64_t lr_foid; /**< obj id of file */ + uint64_t lr_aclcnt; /**< number of acl entries */ } lr_acl_v0_t; +/** \note lr_acl_bytes number of variable sized ace's follows lr_acl_flags */ typedef struct { - lr_t lr_common; /* common portion of log record */ - uint64_t lr_foid; /* obj id of file */ - uint64_t lr_aclcnt; /* number of ACEs in ACL */ - uint64_t lr_domcnt; /* number of unique domains */ - uint64_t lr_fuidcnt; /* number of real fuids */ - uint64_t lr_acl_bytes; /* number of bytes in ACL */ - uint64_t lr_acl_flags; /* ACL flags */ - /* lr_acl_bytes number of variable sized ace's follows */ + lr_t lr_common; /**< common portion of log record */ + uint64_t lr_foid; /**< obj id of file */ + uint64_t lr_aclcnt; /**< number of ACEs in ACL */ + uint64_t lr_domcnt; /**< number of unique domains */ + uint64_t lr_fuidcnt; /**< number of real fuids */ + uint64_t lr_acl_bytes; /**< number of bytes in ACL */ + uint64_t lr_acl_flags; /**< ACL flags */ } lr_acl_t; /* * ZIL structure definitions, interface function prototype and globals. */ -/* +/** * Writes are handled in three different ways: - * - * WR_INDIRECT: + * - WR_INDIRECT:
* In this mode, if we need to commit the write later, then the block * is immediately written into the file system (using dmu_sync), * and a pointer to the block is put into the log record. @@ -345,32 +353,35 @@ typedef struct { * - not using slogs (as slogs are assumed to always be faster * than writing into the main pool) * - the write occupies only one block - * WR_COPIED: + * - WR_COPIED:
* If we know we'll immediately be committing the * transaction (FSYNC or FDSYNC), the we allocate a larger * log record here for the data and copy the data in. - * WR_NEED_COPY: + * - WR_NEED_COPY:
* Otherwise we don't allocate a buffer, and *if* we need to * flush the write later then a buffer is allocated and * we retrieve the data using the dmu. */ typedef enum { - WR_INDIRECT, /* indirect - a large write (dmu_sync() data */ - /* and put blkptr in log, rather than actual data) */ - WR_COPIED, /* immediate - data is copied into lr_write_t */ - WR_NEED_COPY, /* immediate - data needs to be copied if pushed */ - WR_NUM_STATES /* number of states */ + WR_INDIRECT, /**< indirect - a large write (dmu_sync() data */ + /**< and put blkptr in log, rather than actual data) */ + WR_COPIED, /**< immediate - data is copied into lr_write_t */ + WR_NEED_COPY, /**< immediate - data needs to be copied if pushed */ + WR_NUM_STATES /**< number of states */ } itx_wr_state_t; +/** + * \note itx_lr is followed by type-specific part of lr_xx_t + * and its immediate data + */ typedef struct itx { - list_node_t itx_node; /* linkage on zl_itx_list */ - void *itx_private; /* type-specific opaque data */ - itx_wr_state_t itx_wr_state; /* write state */ - uint8_t itx_sync; /* synchronous transaction */ - uint64_t itx_sod; /* record size on disk */ - uint64_t itx_oid; /* object id */ - lr_t itx_lr; /* common part of log record */ - /* followed by type-specific part of lr_xx_t and its immediate data */ + list_node_t itx_node; /**< linkage on zl_itx_list */ + void *itx_private; /**< type-specific opaque data */ + itx_wr_state_t itx_wr_state; /**< write state */ + uint8_t itx_sync; /**< synchronous transaction */ + uint64_t itx_sod; /**< record size on disk */ + uint64_t itx_oid; /**< object id */ + lr_t itx_lr; /**< common part of log record */ } itx_t; typedef int zil_parse_blk_func_t(zilog_t *zilog, blkptr_t *bp, void *arg, diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zil_impl.h SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zil_impl.h --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zil_impl.h 2012-11-16 
11:07:22.183456448 -0700 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zil_impl.h 2012-11-15 22:51:06.154723162 -0700 @@ -35,103 +35,103 @@ extern "C" { #endif -/* +/** * Log write buffer. */ typedef struct lwb { - zilog_t *lwb_zilog; /* back pointer to log struct */ - blkptr_t lwb_blk; /* on disk address of this log blk */ - int lwb_nused; /* # used bytes in buffer */ - int lwb_sz; /* size of block and buffer */ - char *lwb_buf; /* log write buffer */ - zio_t *lwb_zio; /* zio for this buffer */ - dmu_tx_t *lwb_tx; /* tx for log block allocation */ - uint64_t lwb_max_txg; /* highest txg in this lwb */ - list_node_t lwb_node; /* zilog->zl_lwb_list linkage */ + zilog_t *lwb_zilog; /**< back pointer to log struct */ + blkptr_t lwb_blk; /**< on disk address of this log blk */ + int lwb_nused; /**< # used bytes in buffer */ + int lwb_sz; /**< size of block and buffer */ + char *lwb_buf; /**< log write buffer */ + zio_t *lwb_zio; /**< zio for this buffer */ + dmu_tx_t *lwb_tx; /**< tx for log block allocation */ + uint64_t lwb_max_txg; /**< highest txg in this lwb */ + list_node_t lwb_node; /**< zilog->zl_lwb_list linkage */ } lwb_t; -/* +/** * Intent log transaction lists */ typedef struct itxs { - list_t i_sync_list; /* list of synchronous itxs */ - avl_tree_t i_async_tree; /* tree of foids for async itxs */ + list_t i_sync_list; /**< list of synchronous itxs */ + avl_tree_t i_async_tree; /**< tree of foids for async itxs */ } itxs_t; typedef struct itxg { - kmutex_t itxg_lock; /* lock for this structure */ - uint64_t itxg_txg; /* txg for this chain */ - uint64_t itxg_sod; /* total size on disk for this txg */ - itxs_t *itxg_itxs; /* sync and async itxs */ + kmutex_t itxg_lock; /**< lock for this structure */ + uint64_t itxg_txg; /**< txg for this chain */ + uint64_t itxg_sod; /**< total size on disk for this txg */ + itxs_t *itxg_itxs; /**< sync and async itxs */ } itxg_t; -/* for async nodes we build up an AVL tree of lists of async itxs 
per file */ +/** for async nodes we build up an AVL tree of lists of async itxs per file */ typedef struct itx_async_node { - uint64_t ia_foid; /* file object id */ - list_t ia_list; /* list of async itxs for this foid */ - avl_node_t ia_node; /* AVL tree linkage */ + uint64_t ia_foid; /**< file object id */ + list_t ia_list; /**< list of async itxs for this foid */ + avl_node_t ia_node; /**< AVL tree linkage */ } itx_async_node_t; -/* +/** * Vdev flushing: during a zil_commit(), we build up an AVL tree of the vdevs * we've touched so we know which ones need a write cache flush at the end. */ typedef struct zil_vdev_node { - uint64_t zv_vdev; /* vdev to be flushed */ - avl_node_t zv_node; /* AVL tree linkage */ + uint64_t zv_vdev; /**< vdev to be flushed */ + avl_node_t zv_node; /**< AVL tree linkage */ } zil_vdev_node_t; #define ZIL_PREV_BLKS 16 -/* +/** * Stable storage intent log management structure. One per dataset. */ struct zilog { - kmutex_t zl_lock; /* protects most zilog_t fields */ - struct dsl_pool *zl_dmu_pool; /* DSL pool */ - spa_t *zl_spa; /* handle for read/write log */ - const zil_header_t *zl_header; /* log header buffer */ - objset_t *zl_os; /* object set we're logging */ - zil_get_data_t *zl_get_data; /* callback to get object content */ - zio_t *zl_root_zio; /* log writer root zio */ - uint64_t zl_lr_seq; /* on-disk log record sequence number */ - uint64_t zl_commit_lr_seq; /* last committed on-disk lr seq */ - uint64_t zl_destroy_txg; /* txg of last zil_destroy() */ - uint64_t zl_replayed_seq[TXG_SIZE]; /* last replayed rec seq */ - uint64_t zl_replaying_seq; /* current replay seq number */ - uint32_t zl_suspend; /* log suspend count */ - kcondvar_t zl_cv_writer; /* log writer thread completion */ - kcondvar_t zl_cv_suspend; /* log suspend completion */ - uint8_t zl_suspending; /* log is currently suspending */ - uint8_t zl_keep_first; /* keep first log block in destroy */ - uint8_t zl_replay; /* replaying records while set */ - uint8_t 
zl_stop_sync; /* for debugging */ - uint8_t zl_writer; /* boolean: write setup in progress */ - uint8_t zl_logbias; /* latency or throughput */ - uint8_t zl_sync; /* synchronous or asynchronous */ - int zl_parse_error; /* last zil_parse() error */ - uint64_t zl_parse_blk_seq; /* highest blk seq on last parse */ - uint64_t zl_parse_lr_seq; /* highest lr seq on last parse */ - uint64_t zl_parse_blk_count; /* number of blocks parsed */ - uint64_t zl_parse_lr_count; /* number of log records parsed */ - uint64_t zl_next_batch; /* next batch number */ - uint64_t zl_com_batch; /* committed batch number */ - kcondvar_t zl_cv_batch[2]; /* batch condition variables */ - itxg_t zl_itxg[TXG_SIZE]; /* intent log txg chains */ - list_t zl_itx_commit_list; /* itx list to be committed */ - uint64_t zl_itx_list_sz; /* total size of records on list */ - uint64_t zl_cur_used; /* current commit log size used */ - list_t zl_lwb_list; /* in-flight log write list */ - kmutex_t zl_vdev_lock; /* protects zl_vdev_tree */ - avl_tree_t zl_vdev_tree; /* vdevs to flush in zil_commit() */ - taskq_t *zl_clean_taskq; /* runs lwb and itx clean tasks */ - avl_tree_t zl_bp_tree; /* track bps during log parse */ - clock_t zl_replay_time; /* lbolt of when replay started */ - uint64_t zl_replay_blks; /* number of log blocks replayed */ - zil_header_t zl_old_header; /* debugging aid */ - uint_t zl_prev_blks[ZIL_PREV_BLKS]; /* size - sector rounded */ - uint_t zl_prev_rotor; /* rotor for zl_prev[] */ - txg_node_t zl_dirty_link; /* protected by dp_dirty_zilogs list */ + kmutex_t zl_lock; /**< protects most zilog_t fields */ + struct dsl_pool *zl_dmu_pool; /**< DSL pool */ + spa_t *zl_spa; /**< handle for read/write log */ + const zil_header_t *zl_header; /**< log header buffer */ + objset_t *zl_os; /**< object set we're logging */ + zil_get_data_t *zl_get_data; /**< callback to get object content */ + zio_t *zl_root_zio; /**< log writer root zio */ + uint64_t zl_lr_seq; /** that uniquely * identifies any 
block in the pool. By convention, the meta-objset (MOS) * is objset 0, and the meta-dnode is object 0. This covers all blocks @@ -242,12 +242,12 @@ extern char *zio_type_name[ZIO_TYPES]; * ZIL blocks are bookmarked . * dmu_sync()ed ZIL data blocks are bookmarked . * - * Note: this structure is called a bookmark because its original purpose - * was to remember where to resume a pool-wide traverse. + * \note This structure is called a bookmark because its original purpose + * was to remember where to resume a pool-wide traverse. * - * Note: this structure is passed between userland and the kernel. - * Therefore it must not change size or alignment between 32/64 bit - * compilation options. + * \note This structure is passed between userland and the kernel. + * Therefore it must not change size or alignment between 32/64 bit + * compilation options. */ typedef struct zbookmark { uint64_t zb_objset; @@ -305,14 +305,14 @@ struct zio_cksum_report { nvlist_t *zcr_ereport; nvlist_t *zcr_detector; void *zcr_cbdata; - size_t zcr_cbinfo; /* passed to zcr_free() */ + size_t zcr_cbinfo; /**< passed to zcr_free() */ uint64_t zcr_align; uint64_t zcr_length; zio_cksum_finish_f *zcr_finish; zio_cksum_free_f *zcr_free; /* internal use only */ - struct zio_bad_cksum *zcr_ckinfo; /* information from failure */ + struct zio_bad_cksum *zcr_ckinfo; /**< information from failure */ }; typedef void zio_vsd_cksum_report_f(zio_t *zio, zio_cksum_report_t *zcr, @@ -345,7 +345,7 @@ typedef struct zio_transform { typedef int zio_pipe_stage_t(zio_t *zio); -/* +/** * The io_reexecute flags are distinct from io_flags because the child must * be able to propagate them to the parent. 
The normal io_flags are local * to the zio, not protected by any lock, and not modifiable by children; @@ -396,7 +396,11 @@ extern zio_trim_stats_t zio_trim_stats; ZIO_TRIM_STAT_INCR(stat, 1); struct zio { - /* Core information about this I/O */ + /** + * \name Core + * Core information about this I/O + * \{ + */ zbookmark_t io_bookmark; zio_prop_t io_prop; zio_type_t io_type; @@ -416,20 +420,32 @@ struct zio { zio_t *io_logical; zio_transform_t *io_transform_stack; - /* Callback info */ + /** + * \} + * \name Callback + * Callback info + * \{ */ zio_done_func_t *io_ready; zio_done_func_t *io_done; void *io_private; - int64_t io_prev_space_delta; /* DMU private */ + int64_t io_prev_space_delta; /**< DMU private */ blkptr_t io_bp_orig; - /* Data represented by this I/O */ + /** + * \} + * \name Data + * Data represented by this I/O + * \{ */ void *io_data; void *io_orig_data; uint64_t io_size; uint64_t io_orig_size; - /* Stuff for the vdev stack */ + /** + * \} + * \name Stack + * Stuff for the vdev stack + * \{ */ vdev_t *io_vd; void *io_vsd; const zio_vsd_ops_t *io_vsd_ops; @@ -440,7 +456,11 @@ struct zio { avl_node_t io_deadline_node; avl_tree_t *io_vdev_tree; - /* Internal pipeline state */ + /** + * \} + * \name Pipeline + * Internal pipeline state + * \{ */ enum zio_flag io_flags; enum zio_stage io_stage; enum zio_stage io_pipeline; @@ -460,13 +480,23 @@ struct zio { kmutex_t io_lock; kcondvar_t io_cv; - /* FMA state */ + /** + * \} + * \name FMA + * FMA state + * \{*/ zio_cksum_report_t *io_cksum_report; uint64_t io_ena; + /** \} */ + /** \} */ #ifdef _KERNEL - /* FreeBSD only. */ + /** + * \name OS + * FreeBSD only. 
+ * \{ */ struct ostask io_task; + /** \} */ #endif avl_node_t io_trim_node; list_node_t io_trim_link; @@ -600,12 +630,12 @@ extern void zfs_ereport_finish_checksum( extern void zfs_ereport_send_interim_checksum(zio_cksum_report_t *report); extern void zfs_ereport_free_checksum(zio_cksum_report_t *report); -/* If we have the good data in hand, this function can be used */ +/** If we have the good data in hand, this function can be used */ extern void zfs_ereport_post_checksum(spa_t *spa, vdev_t *vd, struct zio *zio, uint64_t offset, uint64_t length, const void *good_data, const void *bad_data, struct zio_bad_cksum *info); -/* Called from spa_sync(), but primarily an injection handler */ +/** Called from spa_sync(), but primarily an injection handler */ extern void spa_handle_ignored_writes(spa_t *spa); /* zbookmark functions */ diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zio_checksum.h SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zio_checksum.h --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zio_checksum.h 2012-10-17 17:00:59.869590556 -0600 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zio_checksum.h 2012-11-15 12:03:17.557457014 -0700 @@ -31,20 +31,20 @@ extern "C" { #endif -/* +/** * Signature for checksum functions. */ typedef void zio_checksum_t(const void *data, uint64_t size, zio_cksum_t *zcp); -/* +/** * Information about each checksum function. */ typedef struct zio_checksum_info { - zio_checksum_t *ci_func[2]; /* checksum function for each byteorder */ - int ci_correctable; /* number of correctable bits */ - int ci_eck; /* uses zio embedded checksum? */ - int ci_dedup; /* strong enough for dedup? */ - char *ci_name; /* descriptive name */ + zio_checksum_t *ci_func[2]; /**< checksum function for each byteorder*/ + int ci_correctable; /**< number of correctable bits */ + int ci_eck; /**< uses zio embedded checksum? 
*/ + int ci_dedup; /**< strong enough for dedup? */ + char *ci_name; /**< descriptive name */ } zio_checksum_info_t; typedef struct zio_bad_cksum { @@ -53,7 +53,7 @@ typedef struct zio_bad_cksum { const char *zbc_checksum_name; uint8_t zbc_byteswapped; uint8_t zbc_injected; - uint8_t zbc_has_cksum; /* expected/actual valid */ + uint8_t zbc_has_cksum; /**< expected/actual valid */ } zio_bad_cksum_t; extern zio_checksum_info_t zio_checksum_table[ZIO_CHECKSUM_FUNCTIONS]; diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zio_compress.h SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zio_compress.h --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zio_compress.h 2012-10-17 17:00:59.869590556 -0600 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zio_compress.h 2012-11-15 12:03:57.696456224 -0700 @@ -33,22 +33,25 @@ extern "C" { #endif -/* - * Common signature for all zio compress/decompress functions. +/** + * Common signature for all zio compress functions. */ typedef size_t zio_compress_func_t(void *src, void *dst, size_t s_len, size_t d_len, int); +/** + * Common signature for all zio decompress functions. + */ typedef int zio_decompress_func_t(void *src, void *dst, size_t s_len, size_t d_len, int); -/* +/** * Information about each compression function. 
*/ typedef struct zio_compress_info { - zio_compress_func_t *ci_compress; /* compression function */ - zio_decompress_func_t *ci_decompress; /* decompression function */ - int ci_level; /* level parameter */ - char *ci_name; /* algorithm name */ + zio_compress_func_t *ci_compress; /**< compression function */ + zio_decompress_func_t *ci_decompress; /**< decompression function */ + int ci_level; /**< level parameter */ + char *ci_name; /**< algorithm name */ } zio_compress_info_t; extern zio_compress_info_t zio_compress_table[ZIO_COMPRESS_FUNCTIONS]; diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zio_impl.h SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zio_impl.h --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zio_impl.h 2012-11-16 11:07:22.184456368 -0700 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/zio_impl.h 2012-11-15 22:51:45.716588190 -0700 @@ -33,40 +33,40 @@ extern "C" { #endif -/* +/** * zio pipeline stage definitions */ enum zio_stage { - ZIO_STAGE_OPEN = 1 << 0, /* RWFCI */ + ZIO_STAGE_OPEN = 1 << 0, /**< RWFCI */ - ZIO_STAGE_READ_BP_INIT = 1 << 1, /* R---- */ - ZIO_STAGE_FREE_BP_INIT = 1 << 2, /* --F-- */ - ZIO_STAGE_ISSUE_ASYNC = 1 << 3, /* RWF-- */ - ZIO_STAGE_WRITE_BP_INIT = 1 << 4, /* -W--- */ + ZIO_STAGE_READ_BP_INIT = 1 << 1, /**< R---- */ + ZIO_STAGE_FREE_BP_INIT = 1 << 2, /**< --F-- */ + ZIO_STAGE_ISSUE_ASYNC = 1 << 3, /**< RWF-- */ + ZIO_STAGE_WRITE_BP_INIT = 1 << 4, /**< -W--- */ - ZIO_STAGE_CHECKSUM_GENERATE = 1 << 5, /* -W--- */ + ZIO_STAGE_CHECKSUM_GENERATE = 1 << 5, /**< -W--- */ - ZIO_STAGE_DDT_READ_START = 1 << 6, /* R---- */ - ZIO_STAGE_DDT_READ_DONE = 1 << 7, /* R---- */ - ZIO_STAGE_DDT_WRITE = 1 << 8, /* -W--- */ - ZIO_STAGE_DDT_FREE = 1 << 9, /* --F-- */ + ZIO_STAGE_DDT_READ_START = 1 << 6, /**< R---- */ + ZIO_STAGE_DDT_READ_DONE = 1 << 7, /**< R---- */ + ZIO_STAGE_DDT_WRITE = 1 << 8, /**< -W--- */ + ZIO_STAGE_DDT_FREE = 1 << 9, 
/**< --F-- */ - ZIO_STAGE_GANG_ASSEMBLE = 1 << 10, /* RWFC- */ - ZIO_STAGE_GANG_ISSUE = 1 << 11, /* RWFC- */ + ZIO_STAGE_GANG_ASSEMBLE = 1 << 10, /**< RWFC- */ + ZIO_STAGE_GANG_ISSUE = 1 << 11, /**< RWFC- */ - ZIO_STAGE_DVA_ALLOCATE = 1 << 12, /* -W--- */ - ZIO_STAGE_DVA_FREE = 1 << 13, /* --F-- */ - ZIO_STAGE_DVA_CLAIM = 1 << 14, /* ---C- */ + ZIO_STAGE_DVA_ALLOCATE = 1 << 12, /**< -W--- */ + ZIO_STAGE_DVA_FREE = 1 << 13, /**< --F-- */ + ZIO_STAGE_DVA_CLAIM = 1 << 14, /**< ---C- */ - ZIO_STAGE_READY = 1 << 15, /* RWFCI */ + ZIO_STAGE_READY = 1 << 15, /**< RWFCI */ - ZIO_STAGE_VDEV_IO_START = 1 << 16, /* RWF-I */ - ZIO_STAGE_VDEV_IO_DONE = 1 << 17, /* RWF-- */ - ZIO_STAGE_VDEV_IO_ASSESS = 1 << 18, /* RWF-I */ + ZIO_STAGE_VDEV_IO_START = 1 << 16, /**< RWF-I */ + ZIO_STAGE_VDEV_IO_DONE = 1 << 17, /**< RWF-- */ + ZIO_STAGE_VDEV_IO_ASSESS = 1 << 18, /**< RWF-I */ - ZIO_STAGE_CHECKSUM_VERIFY = 1 << 19, /* R---- */ + ZIO_STAGE_CHECKSUM_VERIFY = 1 << 19, /**< R---- */ - ZIO_STAGE_DONE = 1 << 20 /* RWFCI */ + ZIO_STAGE_DONE = 1 << 20 /**< RWFCI */ }; #define ZIO_INTERLOCK_STAGES \ diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/txg.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/txg.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/txg.c 2012-11-16 11:07:22.185457056 -0700 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/txg.c 2012-11-15 18:17:57.235456994 -0700 @@ -32,14 +32,20 @@ #include #include -/* +/** + * \file txg.c * Pool-wide transaction groups. 
*/ static void txg_sync_thread(void *arg); static void txg_quiesce_thread(void *arg); -int zfs_txg_timeout = 5; /* max seconds worth of delta per txg */ +/** + * max seconds worth of delta per txg + * + * \ingroup tunables + */ +int zfs_txg_timeout = 5; SYSCTL_DECL(_vfs_zfs); SYSCTL_NODE(_vfs_zfs, OID_AUTO, txg, CTLFLAG_RW, 0, "ZFS TXG"); @@ -47,8 +53,8 @@ TUNABLE_INT("vfs.zfs.txg.timeout", &zfs_ SYSCTL_INT(_vfs_zfs_txg, OID_AUTO, timeout, CTLFLAG_RW, &zfs_txg_timeout, 0, "Maximum seconds worth of delta per txg"); -/* - * Prepare the txg subsystem. +/** + * Prepares the txg subsystem. */ void txg_init(dsl_pool_t *dp, uint64_t txg) @@ -83,8 +89,8 @@ txg_init(dsl_pool_t *dp, uint64_t txg) tx->tx_open_txg = txg; } -/* - * Close down the txg subsystem. +/** + * Closes down the txg subsystem. */ void txg_fini(dsl_pool_t *dp) @@ -120,8 +126,8 @@ txg_fini(dsl_pool_t *dp) bzero(tx, sizeof (tx_state_t)); } -/* - * Start syncing transaction groups. +/** + * Starts syncing transaction groups. */ void txg_sync_start(dsl_pool_t *dp) @@ -181,8 +187,8 @@ txg_thread_wait(tx_state_t *tx, callb_cp CALLB_CPR_SAFE_END(cpr, &tx->tx_sync_lock); } -/* - * Stop syncing transaction groups. +/** + * Stops syncing transaction groups. */ void txg_sync_stop(dsl_pool_t *dp) @@ -273,7 +279,12 @@ txg_rele_to_sync(txg_handle_t *th) th->th_cpu = NULL; /* defensive */ } -/* Quiesce, adj.: to render temporarily inactive or disabled */ +/** + * Blocks until all transactions in the group are released. + * + * On exit, the transaction group has reached a stable state in which it can + * then be passed off to the syncing context. + */ static void txg_quiesce(dsl_pool_t *dp, uint64_t txg) { @@ -322,8 +333,11 @@ txg_do_callbacks(void *arg) kmem_free(cb_list, sizeof (list_t)); } -/* - * Dispatch the commit callbacks registered on this txg to worker threads. +/** + * Dispatch the commit callbacks registered on this txg. + * + * If no callbacks are registered for a given TXG, nothing happens. 
+ * This function creates a taskq for the associated pool, if needed. */ static void txg_dispatch_callbacks(dsl_pool_t *dp, uint64_t txg) @@ -334,7 +348,10 @@ txg_dispatch_callbacks(dsl_pool_t *dp, u for (c = 0; c < max_ncpus; c++) { tx_cpu_t *tc = &tx->tx_cpu[c]; - /* No need to lock tx_cpu_t at this point */ + /* + * No need to lock tx_cpu_t at this point, since this can + * only be called once a txg has been synced. + */ int g = txg & TXG_MASK; @@ -483,7 +500,7 @@ txg_quiesce_thread(void *arg) } } -/* +/** * Delay this thread by 'ticks' if we are still in the open transaction * group and there is already a waiting txg quiesing or quiesced. Abort * the delay if this txg stalls or enters the quiesing state. @@ -494,7 +511,7 @@ txg_delay(dsl_pool_t *dp, uint64_t txg, tx_state_t *tx = &dp->dp_tx; clock_t timeout = ddi_get_lbolt() + ticks; - /* don't delay if this txg could transition to quiesing immediately */ + /* don't delay if this txg could transition to quiescing immediately */ if (tx->tx_open_txg > txg || tx->tx_syncing_txg == txg-1 || tx->tx_synced_txg == txg-1) return; @@ -572,7 +589,7 @@ txg_sync_waiting(dsl_pool_t *dp) tx->tx_quiesced_txg != 0); } -/* +/** * Per-txg object lists. */ void @@ -605,9 +622,10 @@ txg_list_empty(txg_list_t *tl, uint64_t return (tl->tl_head[txg & TXG_MASK] == NULL); } -/* +/** * Add an entry to the list. - * Returns 0 if it's a new entry, 1 if it's already there. + * + * \return 0 if it's a new entry, 1 if it's already there. */ int txg_list_add(txg_list_t *tl, void *p, uint64_t txg) @@ -628,9 +646,10 @@ txg_list_add(txg_list_t *tl, void *p, ui return (already_on_list); } -/* +/** * Add an entry to the end of the list (walks list to find end). - * Returns 0 if it's a new entry, 1 if it's already there. + * + * \return 0 if it's a new entry, 1 if it's already there. 
*/ int txg_list_add_tail(txg_list_t *tl, void *p, uint64_t txg) @@ -656,7 +675,7 @@ txg_list_add_tail(txg_list_t *tl, void * return (already_on_list); } -/* +/** * Remove the head of the list and return it. */ void * @@ -678,7 +697,7 @@ txg_list_remove(txg_list_t *tl, uint64_t return (p); } -/* +/** * Remove a specific item from the list and return it. */ void * @@ -713,8 +732,8 @@ txg_list_member(txg_list_t *tl, void *p, return (tn->tn_member[t]); } -/* - * Walk a txg list -- only safe if you know it's not changing. +/** + * Walk a txg list - only safe if you know it's not changing. */ void * txg_list_head(txg_list_t *tl, uint64_t txg) diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev.c 2012-11-16 11:07:22.187456705 -0700 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev.c 2012-11-15 18:17:57.274457368 -0700 @@ -44,13 +44,14 @@ #include #include -SYSCTL_DECL(_vfs_zfs); -SYSCTL_NODE(_vfs_zfs, OID_AUTO, vdev, CTLFLAG_RW, 0, "ZFS VDEV"); - -/* +/** + * \file vdev.c * Virtual device management. */ +SYSCTL_DECL(_vfs_zfs); +SYSCTL_NODE(_vfs_zfs, OID_AUTO, vdev, CTLFLAG_RW, 0, "ZFS VDEV"); + static vdev_ops_t *vdev_ops_table[] = { &vdev_root_ops, &vdev_raidz_ops, @@ -68,8 +69,7 @@ static vdev_ops_t *vdev_ops_table[] = { NULL }; - -/* +/** * Given a vdev type, return the appropriate ops vector. */ static vdev_ops_t * @@ -84,9 +84,11 @@ vdev_getops(const char *type) return (ops); } -/* - * Default asize function: return the MAX of psize with the asize of - * all children. This is what's used by anything other than RAID-Z. +/** + * Default asize function + * + * Return the MAX of psize with the asize of all children. + * This is what's used by anything other than RAID-Z. 
*/ uint64_t vdev_default_asize(vdev_t *vd, uint64_t psize) @@ -102,8 +104,8 @@ vdev_default_asize(vdev_t *vd, uint64_t return (asize); } -/* - * Get the minimum allocatable size. We define the allocatable size as +/** + * Get the minimum allocatable size. We define the allocatable size as * the vdev's asize rounded to the nearest metaslab. This allows us to * replace or attach devices which don't have the same physical size but * can still satisfy the same number of allocations. @@ -251,7 +253,7 @@ vdev_remove_child(vdev_t *pvd, vdev_t *c pvd->vdev_guid_sum -= cvd->vdev_guid_sum; } -/* +/** * Remove any holes in the child array. */ void @@ -281,7 +283,7 @@ vdev_compact_children(vdev_t *pvd) pvd->vdev_children = newc; } -/* +/** * Allocate and minimally initialize a vdev_t. */ vdev_t * @@ -339,7 +341,7 @@ vdev_alloc_common(spa_t *spa, uint_t id, return (vd); } -/* +/** * Allocate a new vdev. The 'alloctype' is used to control whether we are * creating a new vdev or loading an existing one - the behavior is slightly * different for each case. @@ -649,7 +651,7 @@ vdev_free(vdev_t *vd) kmem_free(vd, sizeof (vdev_t)); } -/* +/** * Transfer top-level vdev state from svd to tvd. */ static void @@ -727,7 +729,7 @@ vdev_top_update(vdev_t *tvd, vdev_t *vd) vdev_top_update(tvd, vd->vdev_child[c]); } -/* +/** * Add a mirror/replacing vdev above an existing vdev. */ vdev_t * @@ -760,7 +762,7 @@ vdev_add_parent(vdev_t *cvd, vdev_ops_t return (mvd); } -/* +/** * Remove a 1-way mirror/replacing vdev from the tree. */ void @@ -961,10 +963,12 @@ vdev_probe_done(zio_t *zio) } } -/* - * Determine whether this device is accessible by reading and writing - * to several known locations: the pad regions of each vdev label - * but the first (which we leave alone in case it contains a VTOC). +/** + * Determine whether this device is accessible. 
+ * + * Read and write to several known locations: the pad regions of each + * vdev label but the first (which we leave alone in case it contains + * a VTOC). */ zio_t * vdev_probe(vdev_t *vd, zio_t *zio) @@ -1106,7 +1110,7 @@ vdev_open_children(vdev_t *vd) taskq_destroy(tq); } -/* +/** * Prepare a virtual device for access. */ int @@ -1299,7 +1303,9 @@ vdev_open(vdev_t *vd) return (0); } -/* +/** + * Validates vdev label contents + * * Called once the vdevs are all opened, this routine validates the label * contents. This needs to be done before vdev_load() so that we don't * inadvertently do repair I/Os to the wrong device. @@ -1423,7 +1429,7 @@ vdev_validate(vdev_t *vd, boolean_t stri return (0); } -/* +/** * Close a virtual device. */ void @@ -1491,7 +1497,7 @@ vdev_rele(vdev_t *vd) vd->vdev_ops->vdev_op_rele(vd); } -/* +/** * Reopen all interior vdevs and any unopened leaves. We don't actually * reopen leaf vdevs which had previously been opened as they might deadlock * on the spa_config_lock. Instead we only obtain the leaf's physical size. @@ -1669,7 +1675,7 @@ vdev_dtl_empty(vdev_t *vd, vdev_dtl_type return (empty); } -/* +/** * Reassess DTLs after a config change or scrub completion. */ void @@ -1860,7 +1866,7 @@ vdev_dtl_sync(vdev_t *vd, uint64_t txg) dmu_tx_commit(tx); } -/* +/** * Determine whether the specified vdev can be offlined/detached/removed * without losing data. */ @@ -1894,7 +1900,7 @@ vdev_dtl_required(vdev_t *vd) return (required); } -/* +/** * Determine if resilver is needed, and if so the txg range. */ boolean_t @@ -1963,7 +1969,7 @@ vdev_load(vdev_t *vd) VDEV_AUX_CORRUPT_DATA); } -/* +/** * The special vdev case is used for hot spares and l2cache devices. Its * sole purpose it to set the vdev state for the associated vdev. 
To do this, * we make sure that we can open the underlying device, then try to read the @@ -2099,7 +2105,7 @@ vdev_psize_to_asize(vdev_t *vd, uint64_t return (vd->vdev_ops->vdev_op_asize(vd, psize)); } -/* +/** * Mark the given vdev faulted. A faulted vdev behaves as if the device could * not be opened, and no I/O is attempted. */ @@ -2154,7 +2160,7 @@ vdev_fault(spa_t *spa, uint64_t guid, vd return (spa_vdev_state_exit(spa, vd, 0)); } -/* +/** * Mark the given vdev degraded. A degraded vdev is purely an indication to the * user that something is wrong. The vdev continues to operate as normal as far * as I/O is concerned. @@ -2186,11 +2192,13 @@ vdev_degrade(spa_t *spa, uint64_t guid, return (spa_vdev_state_exit(spa, vd, 0)); } -/* - * Online the given vdev. If 'unspare' is set, it implies two things. First, - * any attached spare device should be detached when the device finishes - * resilvering. Second, the online should be treated like a 'test' online case, - * so no FMA events are generated if the device fails to open. +/** + * Online the given vdev. + * + * If 'ZFS_ONLINE_UNSPARE' is set, it implies two things. First, any attached + * spare device should be detached when the device finishes resilvering. + * Second, the online should be treated like a 'test' online case, so no FMA + * events are generated if the device fails to open. */ int vdev_online(spa_t *spa, uint64_t guid, uint64_t flags, vdev_state_t *newstate) @@ -2349,7 +2357,7 @@ vdev_offline(spa_t *spa, uint64_t guid, return (error); } -/* +/** * Clear the error counts associated with this vdev. Unlike vdev_online() and * vdev_offline(), we assume the spa config is locked. We also clear all * children. If 'vd' is NULL, then the user wants to clear all vdevs. @@ -2676,7 +2684,7 @@ vdev_stat_update(zio_t *zio, uint64_t ps } } -/* +/** * Update the in-core space usage stats for this vdev, its metaslab class, * and the root vdev. 
*/ @@ -2726,7 +2734,7 @@ vdev_space_update(vdev_t *vd, int64_t al } } -/* +/** * Mark a top-level vdev's config as dirty, placing it on the dirty list * so that it will be written out next time the vdev configuration is synced. * If the root vdev is specified (vdev_top == NULL), dirty all top-level vdevs. @@ -2817,7 +2825,7 @@ vdev_config_clean(vdev_t *vd) list_remove(&spa->spa_config_dirty_list, vd); } -/* +/** * Mark a top-level vdev's state as dirty, so that the next pass of * spa_sync() can convert this into vdev_config_dirty(). We distinguish * the state changes from larger config changes because they require @@ -2858,7 +2866,7 @@ vdev_state_clean(vdev_t *vd) list_remove(&spa->spa_state_dirty_list, vd); } -/* +/** * Propagate vdev state up from children to parent. */ void @@ -2917,7 +2925,7 @@ vdev_propagate_state(vdev_t *vd) vdev_propagate_state(vd->vdev_parent); } -/* +/** * Set a vdev's state. If this is during an open, we don't update the parent * state, because we're in the process of opening children depth-first. * Otherwise, we propagate the change to the parent. @@ -3052,7 +3060,7 @@ vdev_set_state(vdev_t *vd, boolean_t iso vdev_propagate_state(vd->vdev_parent); } -/* +/** * Check the vdev configuration to ensure that it's capable of supporting * a root pool. * @@ -3091,7 +3099,7 @@ vdev_is_bootable(vdev_t *vd) return (B_TRUE); } -/* +/** * Load the state from the original vdev tree (ovd) which * we've retrieved from the MOS config object. If the original * vdev was offline or faulted then we transfer that state to the @@ -3120,7 +3128,7 @@ vdev_load_log_state(vdev_t *nvd, vdev_t } } -/* +/** * Determine if a log device has valid content. If the vdev was * removed or faulted in the MOS config then we know that * the content on the log device has already been written to the pool. @@ -3139,7 +3147,7 @@ vdev_log_state_valid(vdev_t *vd) return (B_FALSE); } -/* +/** * Expand a vdev if possible. 
*/ void @@ -3154,7 +3162,7 @@ vdev_expand(vdev_t *vd, uint64_t txg) } } -/* +/** * Split a vdev. */ void diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_cache.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_cache.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_cache.c 2012-10-17 17:00:59.873590233 -0600 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_cache.c 2012-11-15 17:21:07.783456643 -0700 @@ -29,7 +29,8 @@ #include #include -/* +/** + * \file vdev_cache.c * Virtual device read-ahead caching. * * This file implements a simple LRU read-ahead cache. When the DMU reads @@ -66,22 +67,27 @@ /* * These tunables are for performance analysis. */ -/* +/** + * \addtogroup tunables + * \{ + */ +/** * All i/os smaller than zfs_vdev_cache_max will be turned into * 1<ve_data + cache_phase, zio->io_data, zio->io_size); } -/* +/** * Fill a previously allocated cache entry with data. */ static void @@ -256,8 +262,10 @@ vdev_cache_fill(zio_t *fio) mutex_exit(&vc->vc_lock); } -/* - * Read data from the cache. Returns 0 on cache hit, errno on a miss. +/** + * Read data from the cache. + * + * \return 0 on cache hit, errno on a miss. */ int vdev_cache_read(zio_t *zio) @@ -333,7 +341,7 @@ vdev_cache_read(zio_t *zio) return (0); } -/* +/** * Update cache contents upon write completion. */ void diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_disk.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_disk.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_disk.c 2012-10-17 17:00:59.874591369 -0600 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_disk.c 2012-11-15 12:21:16.725455333 -0700 @@ -34,7 +34,8 @@ #include #include -/* +/** + * \file * Virtual device vector for disks. 
*/ @@ -572,7 +573,7 @@ vdev_ops_t vdev_disk_ops = { B_TRUE /* leaf vdev */ }; -/* +/** * Given the root disk device devid or pathname, read the label from * the device, and construct a configuration nvlist. */ diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_file.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_file.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_file.c 2012-11-16 11:07:22.187456705 -0700 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_file.c 2012-11-15 18:17:57.299457271 -0700 @@ -31,7 +31,8 @@ #include #include -/* +/** + * \file vdev_file.c * Virtual device vector for files. */ diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_geom.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_geom.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_geom.c 2012-11-16 11:07:22.188457457 -0700 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_geom.c 2012-11-15 22:53:24.208955159 -0700 @@ -38,8 +38,11 @@ #include #include -/* +/** + * \file vdev_geom.c * Virtual device vector for GEOM. + * + * Creates a GEOM class to represent vdevs. */ struct g_class zfs_vdev_class = { @@ -50,12 +53,24 @@ struct g_class zfs_vdev_class = { DECLARE_GEOM_CLASS(zfs_vdev_class, zfs_vdev); SYSCTL_DECL(_vfs_zfs_vdev); -/* Don't send BIO_FLUSH. */ +/** + * Don't send BIO_FLUSH. + * + * If set, the zvs_vdev class will not send BIO_FLUSH. + * + * \ingroup tunables + */ static int vdev_geom_bio_flush_disable = 0; TUNABLE_INT("vfs.zfs.vdev.bio_flush_disable", &vdev_geom_bio_flush_disable); SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, bio_flush_disable, CTLFLAG_RW, &vdev_geom_bio_flush_disable, 0, "Disable BIO_FLUSH"); -/* Don't send BIO_DELETE. */ +/** + * Don't send BIO_DELETE. + * + * If set, the zvs_vdev class will not send BIO_DELETE. 
+ * + * \ingroup tunables + */ static int vdev_geom_bio_delete_disable = 0; TUNABLE_INT("vfs.zfs.vdev.bio_delete_disable", &vdev_geom_bio_delete_disable); SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, bio_delete_disable, CTLFLAG_RW, @@ -901,6 +916,10 @@ vdev_geom_rele(vdev_t *vd) { } +/** + * Vector table for the vdev_geom module. This is the only entry point for + * vdev_geom + */ vdev_ops_t vdev_geom_ops = { vdev_geom_open, vdev_geom_close, diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_label.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_label.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_label.c 2012-11-16 11:07:22.189456289 -0700 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_label.c 2012-11-16 18:03:10.058455295 -0700 @@ -24,26 +24,22 @@ * Copyright (c) 2012 by Delphix. All rights reserved. */ -/* +/** + * \file vdev_label.c * Virtual Device Labels - * --------------------- * * The vdev label serves several distinct purposes: * - * 1. Uniquely identify this device as part of a ZFS pool and confirm its + * -# Uniquely identify this device as part of a ZFS pool and confirm its * identity within the pool. - * - * 2. Verify that all the devices given in a configuration are present - * within the pool. - * - * 3. Determine the uberblock for the pool. - * - * 4. In case of an import operation, determine the configuration of the - * toplevel vdev of which it is a part. - * - * 5. If an import operation cannot find all the devices in the pool, - * provide enough information to the administrator to determine which - * devices are missing. + * -# Verify that all the devices given in a configuration are present + * within the pool. + * -# Determine the uberblock for the pool. + * -# In case of an import operation, determine the configuration of the + * toplevel vdev of which it is a part. 
+ * -# If an import operation cannot find all the devices in the pool, + * provide enough information to the administrator to determine which + * devices are missing. * * It is important to note that while the kernel is responsible for writing the * label, it only consumes the information in the first three cases. The @@ -51,8 +47,7 @@ * configuration to import a pool. * * - * Label Organization - * ------------------ + *

<b>Label Organization</b>

* * Before describing the contents of the label, it's important to understand how * the labels are written and updated with respect to the uberblock. @@ -63,12 +58,14 @@ * are updated before and after the uberblock is synced. Assuming we have * labels and an uberblock with the following transaction groups: * - * L1 UB L2 - * +------+ +------+ +------+ - * | | | | | | - * | t10 | | t10 | | t10 | - * | | | | | | - * +------+ +------+ +------+ + * \verbatim + L1 UB L2 + +------+ +------+ +------+ + | | | | | | + | t10 | | t10 | | t10 | + | | | | | | + +------+ +------+ +------+ + \endverbatim * * In this stable state, the labels and the uberblock were all updated within * the same transaction group (10). Each label is mirrored and checksummed, so @@ -77,9 +74,9 @@ * In order to identify which labels are valid, the labels are written in the * following manner: * - * 1. For each vdev, update 'L1' to the new label - * 2. Update the uberblock - * 3. For each vdev, update 'L2' to the new label + * -# For each vdev, update 'L1' to the new label + * -# Update the uberblock + * -# For each vdev, update 'L2' to the new label * * Given arbitrary failure, we can determine the correct label to use based on * the transaction group. If we fail after updating L1 but before updating the @@ -95,8 +92,7 @@ * on another vdev. * * - * On-disk Format - * -------------- + *

<b>On-disk Format</b>

* * The vdev label consists of two distinct parts, and is wrapped within the * vdev_label_t structure. The label includes 8k of padding to permit legacy @@ -112,24 +108,25 @@ * vdev for the 'best' uberblock. * * - * Configuration Information - * ------------------------- + *

<b>Configuration Information</b>

* * The nvlist describing the pool and vdev contains the following elements: * - * version ZFS on-disk version - * name Pool name - * state Pool state - * txg Transaction group in which this label was written - * pool_guid Unique identifier for this pool - * vdev_tree An nvlist describing vdev tree. - * features_for_read + * - version ZFS on-disk version + * - name Pool name + * - state Pool state + * - txg Transaction group in which this label was + * written + * - pool_guid Unique identifier for this pool + * - vdev_tree An nvlist describing vdev tree. + * - features_for_read * An nvlist of the features necessary for reading the MOS. * * Each leaf device label also contains the following: * - * top_guid Unique ID for top-level vdev in which this is contained - * guid Unique ID for the leaf vdev + * - top_guid Unique ID for top-level vdev in which this is + * contained + * - guid Unique ID for the leaf vdev * * The 'vs' configuration follows the format described in 'spa_config.c'. */ @@ -148,7 +145,7 @@ #include #include -/* +/** * Basic routines to read and write from a vdev label. * Used throughout the rest of this file. */ @@ -162,7 +159,7 @@ vdev_label_offset(uint64_t psize, int l, 0 : psize - VDEV_LABELS * sizeof (vdev_label_t))); } -/* +/** * Returns back the vdev label associated with the passed in offset. */ int @@ -208,7 +205,7 @@ vdev_label_write(zio_t *zio, vdev_t *vd, ZIO_PRIORITY_SYNC_WRITE, flags, B_TRUE)); } -/* +/** * Generate the nvlist representing this vdev's config. */ nvlist_t * @@ -400,7 +397,7 @@ vdev_config_generate(spa_t *spa, vdev_t return (nv); } -/* +/** * Generate a view of the top-level vdevs. If we currently have holes * in the namespace, then generate an array which contains a list of holey * vdevs. 
Additionally, add the number of top-level children that currently @@ -433,7 +430,7 @@ vdev_top_config_generate(spa_t *spa, nvl kmem_free(array, rvd->vdev_children * sizeof (uint64_t)); } -/* +/** * Returns the configuration from the label of the given vdev. For vdevs * which don't have a txg value stored on their label (i.e. spares/cache) * or have not been completely initialized (txg = 0) just return @@ -510,7 +507,7 @@ retry: return (config); } -/* +/** * Determine if a device is in use. The 'spare_guid' parameter will be filled * in with the device guid if this spare is active elsewhere on the system. */ @@ -624,7 +621,7 @@ vdev_inuse(vdev_t *vd, uint64_t crtxg, v return (state == POOL_STATE_ACTIVE); } -/* +/** * Initialize a vdev label. We check to make sure each leaf device is not in * use, and writable. We put down an initial label which we will later * overwrite with a complete label. Note that it's important to do this @@ -872,7 +869,7 @@ retry: * ========================================================================== */ -/* +/** * Consider the following situation: txg is safely synced to disk. We've * written the first uberblock for txg + 1, and then we lose power. When we * come back up, we fail to see the uberblock for txg + 1 because, say, @@ -953,7 +950,7 @@ vdev_uberblock_load_impl(zio_t *zio, vde } } -/* +/** * Reads the 'best' uberblock from disk along with its associated * configuration. First, we read the uberblock array of each label of each * vdev, keeping track of the uberblock with the highest txg in each array. @@ -993,7 +990,7 @@ vdev_uberblock_load(vdev_t *rvd, uberblo spa_config_exit(spa, SCL_ALL, FTAG); } -/* +/** * On success, increment root zio's count of good writes. * We only get credit for writes to known-visible vdevs; see spa_vdev_add(). */ @@ -1006,7 +1003,7 @@ vdev_uberblock_sync_done(zio_t *zio) atomic_add_64(good_writes, 1); } -/* +/** * Write the uberblock to all labels of all leaves of the specified vdev. 
*/ static void @@ -1039,6 +1036,9 @@ vdev_uberblock_sync(zio_t *zio, uberbloc zio_buf_free(ubbuf, VDEV_UBERBLOCK_SIZE(vd)); } +/** + * Sync the uberblocks to all vdevs in svd[] + */ int vdev_uberblock_sync_list(vdev_t **svd, int svdcount, uberblock_t *ub, int flags) { @@ -1068,7 +1068,7 @@ vdev_uberblock_sync_list(vdev_t **svd, i return (good_writes >= 1 ? 0 : EIO); } -/* +/** * On success, increment the count of good writes for our top-level vdev. */ static void @@ -1080,7 +1080,7 @@ vdev_label_sync_done(zio_t *zio) atomic_add_64(good_writes, 1); } -/* +/** * If there weren't enough good writes, indicate failure to the parent. */ static void @@ -1094,7 +1094,7 @@ vdev_label_sync_top_done(zio_t *zio) kmem_free(good_writes, sizeof (uint64_t)); } -/* +/** * We ignore errors for log and cache devices, simply free the private data. */ static void @@ -1103,7 +1103,7 @@ vdev_label_sync_ignore_done(zio_t *zio) kmem_free(zio->io_private, sizeof (uint64_t)); } -/* +/** * Write all even or odd labels to all leaves of the specified vdev. */ static void @@ -1190,7 +1190,7 @@ vdev_label_sync_list(spa_t *spa, int l, return (error); } -/* +/** * Sync the uberblock and any changes to the vdev configuration. * * The order of operations is carefully crafted to ensure that diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_mirror.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_mirror.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_mirror.c 2012-11-16 11:07:22.190455890 -0700 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_mirror.c 2012-11-15 18:17:57.326455668 -0700 @@ -33,7 +33,8 @@ #include #include -/* +/** + * \file vdev_mirror.c * Virtual device vector for mirroring. */ @@ -208,7 +209,7 @@ vdev_mirror_scrub_done(zio_t *zio) mc->mc_skipped = 0; } -/* +/** * Try to find a child whose DTL doesn't contain the block we want to read. 
* If we can't, try the read on any vdev we haven't already tried. */ diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_missing.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_missing.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_missing.c 2012-10-17 17:00:59.876591144 -0600 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_missing.c 2012-11-14 13:17:35.674114010 -0700 @@ -27,7 +27,10 @@ * Copyright (c) 2012 by Delphix. All rights reserved. */ -/* +/** + * \file vdev_missing.c + * Routines for handling the special "missing" vdev + * * The 'missing' vdev is a special vdev type used only during import. It * signifies a placeholder in the root vdev for some vdev that we know is * missing. We pass it down to the kernel to allow the rest of the diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_queue.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_queue.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_queue.c 2012-10-17 17:00:59.876591144 -0600 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_queue.c 2012-11-14 23:41:39.659455080 -0700 @@ -28,25 +28,29 @@ #include #include -/* - * These tunables are for performance analysis. +/* These tunables are for performance analysis. */ +/** + * \addtogroup tunables + * \{ */ -/* - * zfs_vdev_max_pending is the maximum number of i/os concurrently - * pending to each device. zfs_vdev_min_pending is the initial number - * of i/os pending to each device (before it starts ramping up to - * max_pending). +/** + * The maximum number of i/os concurrently pending to each device. */ int zfs_vdev_max_pending = 10; + +/** + * The initial number of i/os pending to each device (before it starts ramping + * up to max_pending). 
+ */ int zfs_vdev_min_pending = 4; -/* deadline = pri + ddi_get_lbolt64() >> time_shift) */ +/** deadline = pri + ddi_get_lbolt64() >> time_shift) */ int zfs_vdev_time_shift = 6; -/* exponential I/O issue ramp-up rate */ +/** exponential I/O issue ramp-up rate */ int zfs_vdev_ramp_rate = 2; -/* +/** * To reduce IOPs, we aggregate small adjacent I/Os into one large I/O. * For read I/Os, we also aggregate across small adjacency gaps; for writes * we include spans of optional I/Os to aid aggregation at the disk even when @@ -55,6 +59,7 @@ int zfs_vdev_ramp_rate = 2; int zfs_vdev_aggregation_limit = SPA_MAXBLOCKSIZE; int zfs_vdev_read_gap_limit = 32 << 10; int zfs_vdev_write_gap_limit = 4 << 10; +/** \} */ SYSCTL_DECL(_vfs_zfs_vdev); TUNABLE_INT("vfs.zfs.vdev.max_pending", &zfs_vdev_max_pending); @@ -83,7 +88,7 @@ SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, writ &zfs_vdev_write_gap_limit, 0, "Acceptable gap between two writes being aggregated"); -/* +/** * Virtual device vector for disk I/O scheduling. */ int @@ -189,7 +194,7 @@ vdev_queue_agg_io_done(zio_t *aio) zio_buf_free(aio->io_data, aio->io_size); } -/* +/** * Compute the range spanned by two i/os, which is the endpoint of the last * (lio->io_offset + lio->io_size) minus start of the first (fio->io_offset). * Conveniently, the gap between fio and lio is given by -IO_SPAN(lio, fio); diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_raidz.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_raidz.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_raidz.c 2012-11-16 11:07:22.191456642 -0700 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_raidz.c 2012-11-15 18:17:57.338457260 -0700 @@ -32,9 +32,12 @@ #include #include -/* +/** + * \file vdev_raidz.c * Virtual device vector for RAID-Z. * + *

<b>Encoding</b>

+ * * This vdev supports single, double, and triple parity. For single parity, * we use a simple XOR of all the data columns. For double or triple parity, * we use a special case of Reed-Solomon coding. This extends the @@ -56,18 +59,20 @@ * integers mod 2^N. In our case we choose N=8 for GF(8) so that all elements * can be expressed with a single byte. Briefly, the operations on the * field are defined as follows: + * - addition (+) is represented by a bitwise XOR + * - subtraction (-) is therefore identical to addition: A + B = A - B + * - multiplication of A by 2 is defined by the following bitwise expression: * - * o addition (+) is represented by a bitwise XOR - * o subtraction (-) is therefore identical to addition: A + B = A - B - * o multiplication of A by 2 is defined by the following bitwise expression: - * (A * 2)_7 = A_6 - * (A * 2)_6 = A_5 - * (A * 2)_5 = A_4 - * (A * 2)_4 = A_3 + A_7 - * (A * 2)_3 = A_2 + A_7 - * (A * 2)_2 = A_1 + A_7 - * (A * 2)_1 = A_0 - * (A * 2)_0 = A_7 + * \verbatim + (A * 2)_7 = A_6 + (A * 2)_6 = A_5 + (A * 2)_5 = A_4 + (A * 2)_4 = A_3 + A_7 + (A * 2)_3 = A_2 + A_7 + (A * 2)_2 = A_1 + A_7 + (A * 2)_1 = A_0 + (A * 2)_0 = A_7 + \endverbatim * * In C, multiplying by 2 is therefore ((a << 1) ^ ((a & 0x80) ? 0x1d : 0)). * As an aside, this multiplication is derived from the error correcting @@ -83,11 +88,13 @@ * The up-to-three parity columns, P, Q, R over several data columns, * D_0, ... D_n-1, can be expressed by field operations: * - * P = D_0 + D_1 + ... + D_n-2 + D_n-1 - * Q = 2^n-1 * D_0 + 2^n-2 * D_1 + ... + 2^1 * D_n-2 + 2^0 * D_n-1 - * = ((...((D_0) * 2 + D_1) * 2 + ...) * 2 + D_n-2) * 2 + D_n-1 - * R = 4^n-1 * D_0 + 4^n-2 * D_1 + ... + 4^1 * D_n-2 + 4^0 * D_n-1 - * = ((...((D_0) * 4 + D_1) * 4 + ...) * 4 + D_n-2) * 4 + D_n-1 + * \verbatim + P = D_0 + D_1 + ... + D_n-2 + D_n-1 + Q = 2^n-1 * D_0 + 2^n-2 * D_1 + ... + 2^1 * D_n-2 + 2^0 * D_n-1 + = ((...((D_0) * 2 + D_1) * 2 + ...) 
* 2 + D_n-2) * 2 + D_n-1 + R = 4^n-1 * D_0 + 4^n-2 * D_1 + ... + 4^1 * D_n-2 + 4^0 * D_n-1 + = ((...((D_0) * 4 + D_1) * 4 + ...) * 4 + D_n-2) * 4 + D_n-1 + \endverbatim * * We chose 1, 2, and 4 as our generators because 1 corresponds to the trival * XOR operation, and 2 and 4 can be computed quickly and generate linearly- @@ -99,31 +106,31 @@ */ typedef struct raidz_col { - uint64_t rc_devidx; /* child device index for I/O */ - uint64_t rc_offset; /* device offset */ - uint64_t rc_size; /* I/O size */ - void *rc_data; /* I/O data */ - void *rc_gdata; /* used to store the "good" version */ - int rc_error; /* I/O error for this device */ - uint8_t rc_tried; /* Did we attempt this I/O column? */ - uint8_t rc_skipped; /* Did we skip this I/O column? */ + uint64_t rc_devidx; /**< child device index for I/O */ + uint64_t rc_offset; /**< device offset */ + uint64_t rc_size; /**< I/O size */ + void *rc_data; /**< I/O data */ + void *rc_gdata; /**< used to store the "good" version */ + int rc_error; /**< I/O error for this device */ + uint8_t rc_tried; /**< Did we attempt this I/O column? */ + uint8_t rc_skipped; /**< Did we skip this I/O column? 
*/ } raidz_col_t; typedef struct raidz_map { - uint64_t rm_cols; /* Regular column count */ - uint64_t rm_scols; /* Count including skipped columns */ - uint64_t rm_bigcols; /* Number of oversized columns */ - uint64_t rm_asize; /* Actual total I/O size */ - uint64_t rm_missingdata; /* Count of missing data devices */ - uint64_t rm_missingparity; /* Count of missing parity devices */ - uint64_t rm_firstdatacol; /* First data column/parity count */ - uint64_t rm_nskip; /* Skipped sectors for padding */ - uint64_t rm_skipstart; /* Column index of padding start */ - void *rm_datacopy; /* rm_asize-buffer of copied data */ - uintptr_t rm_reports; /* # of referencing checksum reports */ - uint8_t rm_freed; /* map no longer has referencing ZIO */ - uint8_t rm_ecksuminjected; /* checksum error was injected */ - raidz_col_t rm_col[1]; /* Flexible array of I/O columns */ + uint64_t rm_cols; /**< Regular column count */ + uint64_t rm_scols; /**< Count including skipped columns */ + uint64_t rm_bigcols; /**< Number of oversized columns */ + uint64_t rm_asize; /**< Actual total I/O size */ + uint64_t rm_missingdata; /**< Count of missing data devices */ + uint64_t rm_missingparity; /**< Count of missing parity devices */ + uint64_t rm_firstdatacol; /**< First data column/parity count */ + uint64_t rm_nskip; /**< Skipped sectors for padding */ + uint64_t rm_skipstart; /**< Column index of padding start */ + void *rm_datacopy; /**< rm_asize-buffer of copied data */ + uintptr_t rm_reports; /**< # of referencing checksum reports*/ + uint8_t rm_freed; /**< map no longer has referencing ZIO*/ + uint8_t rm_ecksuminjected; /**< checksum error was injected */ + raidz_col_t rm_col[1]; /**< Flexible array of I/O columns */ } raidz_map_t; #define VDEV_RAIDZ_P 0 @@ -133,7 +140,7 @@ typedef struct raidz_map { #define VDEV_RAIDZ_MUL_2(x) (((x) << 1) ^ (((x) & 0x80) ? 
0x1d : 0)) #define VDEV_RAIDZ_MUL_4(x) (VDEV_RAIDZ_MUL_2(VDEV_RAIDZ_MUL_2(x))) -/* +/** * We provide a mechanism to perform the field multiplication operation on a * 64-bit value all at once rather than a byte at a time. This works by * creating a mask from the top bit in each byte and using that to @@ -153,15 +160,12 @@ typedef struct raidz_map { VDEV_RAIDZ_64MUL_2((x), mask); \ } -/* +/** * Force reconstruction to use the general purpose method. */ int vdev_raidz_default_to_general; -/* - * These two tables represent powers and logs of 2 in the Galois field defined - * above. These values were computed by repeatedly multiplying by 2 as above. - */ +/** Powers of 2 in the Galois field defined above. */ static const uint8_t vdev_raidz_pow2[256] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1d, 0x3a, 0x74, 0xe8, 0xcd, 0x87, 0x13, 0x26, @@ -196,6 +200,7 @@ static const uint8_t vdev_raidz_pow2[256 0x2c, 0x58, 0xb0, 0x7d, 0xfa, 0xe9, 0xcf, 0x83, 0x1b, 0x36, 0x6c, 0xd8, 0xad, 0x47, 0x8e, 0x01 }; +/** Logs of 2 in the Galois field defined above. */ static const uint8_t vdev_raidz_log2[256] = { 0x00, 0x00, 0x01, 0x19, 0x02, 0x32, 0x1a, 0xc6, 0x03, 0xdf, 0x33, 0xee, 0x1b, 0x68, 0xc7, 0x4b, @@ -233,7 +238,7 @@ static const uint8_t vdev_raidz_log2[256 static void vdev_raidz_generate_parity(raidz_map_t *rm); -/* +/** * Multiply a given number by 2 raised to the given power. */ static uint8_t @@ -375,7 +380,7 @@ vdev_raidz_cksum_finish(zio_cksum_report zfs_ereport_finish_checksum(zcr, good, bad, B_TRUE); } -/* +/** * Invoked indirectly by zfs_ereport_start_checksum(), called * below when our read operation fails completely. 
The main point * is to keep a copy of everything we read from disk, so that at @@ -433,12 +438,18 @@ static const zio_vsd_ops_t vdev_raidz_vs vdev_raidz_cksum_report }; +/** + * Divides the IO evenly across all child vdevs + * + * \param[in] dcols Usually, the number of children in the target vdev + */ static raidz_map_t * vdev_raidz_map_alloc(zio_t *zio, uint64_t unit_shift, uint64_t dcols, uint64_t nparity) { raidz_map_t *rm; uint64_t b = zio->io_offset >> unit_shift; + /* The zio's size in units of the vdev's preferred sector size */ uint64_t s = zio->io_size >> unit_shift; uint64_t f = b % dcols; uint64_t o = (b / dcols) << unit_shift; @@ -701,7 +712,7 @@ vdev_raidz_generate_parity_pqr(raidz_map } } -/* +/** * Generate RAID parity in the first virtual columns according to the number of * parity columns available. */ @@ -911,38 +922,45 @@ vdev_raidz_reconstruct_pq(raidz_map_t *r return ((1 << VDEV_RAIDZ_P) | (1 << VDEV_RAIDZ_Q)); } -/* BEGIN CSTYLED */ -/* +/** + * \file vdev_raidz.c + * + *
 + * Reconstruction
+ * * In the general case of reconstruction, we must solve the system of linear * equations defined by the coeffecients used to generate parity as well as * the contents of the data and parity disks. This can be expressed with * vectors for the original data (D) and the actual data (d) and parity (p) * and a matrix composed of the identity matrix (I) and a dispersal matrix (V): * - * __ __ __ __ - * | | __ __ | p_0 | - * | V | | D_0 | | p_m-1 | - * | | x | : | = | d_0 | - * | I | | D_n-1 | | : | - * | | ~~ ~~ | d_n-1 | - * ~~ ~~ ~~ ~~ + * \verbatim + __ __ __ __ + | | __ __ | p_0 | + | V | | D_0 | | p_m-1 | + | | x | : | = | d_0 | + | I | | D_n-1 | | : | + | | ~~ ~~ | d_n-1 | + ~~ ~~ ~~ ~~ + \endverbatim * * I is simply a square identity matrix of size n, and V is a vandermonde * matrix defined by the coeffecients we chose for the various parity columns * (1, 2, 4). Note that these values were chosen both for simplicity, speedy * computation as well as linear separability. * - * __ __ __ __ - * | 1 .. 1 1 1 | | p_0 | - * | 2^n-1 .. 4 2 1 | __ __ | : | - * | 4^n-1 .. 16 4 1 | | D_0 | | p_m-1 | - * | 1 .. 0 0 0 | | D_1 | | d_0 | - * | 0 .. 0 0 0 | x | D_2 | = | d_1 | - * | : : : : | | : | | d_2 | - * | 0 .. 1 0 0 | | D_n-1 | | : | - * | 0 .. 0 1 0 | ~~ ~~ | : | - * | 0 .. 0 0 1 | | d_n-1 | - * ~~ ~~ ~~ ~~ + * \verbatim + __ __ __ __ + | 1 .. 1 1 1 | | p_0 | + | 2^n-1 .. 4 2 1 | __ __ | : | + | 4^n-1 .. 16 4 1 | | D_0 | | p_m-1 | + | 1 .. 0 0 0 | | D_1 | | d_0 | + | 0 .. 0 0 0 | x | D_2 | = | d_1 | + | : : : : | | : | | d_2 | + | 0 .. 1 0 0 | | D_n-1 | | : | + | 0 .. 0 1 0 | ~~ ~~ | : | + | 0 .. 0 0 1 | | d_n-1 | + ~~ ~~ ~~ ~~ + \endverbatim * * Note that I, V, d, and p are known. To compute D, we must invert the * matrix and use the known data and parity values to reconstruct the unknown @@ -952,106 +970,110 @@ vdev_raidz_reconstruct_pq(raidz_map_t *r * to generate (V|I)' and (d|p)'. We can then generate the inverse of (V|I)' * using Gauss-Jordan elimination. 
In the example below we use m=3 parity * columns, n=8 data columns, with errors in d_1, d_2, and p_1: - * __ __ - * | 1 1 1 1 1 1 1 1 | - * | 128 64 32 16 8 4 2 1 | <-----+-+-- missing disks - * | 19 205 116 29 64 16 4 1 | / / - * | 1 0 0 0 0 0 0 0 | / / - * | 0 1 0 0 0 0 0 0 | <--' / - * (V|I) = | 0 0 1 0 0 0 0 0 | <---' - * | 0 0 0 1 0 0 0 0 | - * | 0 0 0 0 1 0 0 0 | - * | 0 0 0 0 0 1 0 0 | - * | 0 0 0 0 0 0 1 0 | - * | 0 0 0 0 0 0 0 1 | - * ~~ ~~ - * __ __ - * | 1 1 1 1 1 1 1 1 | - * | 128 64 32 16 8 4 2 1 | - * | 19 205 116 29 64 16 4 1 | - * | 1 0 0 0 0 0 0 0 | - * | 0 1 0 0 0 0 0 0 | - * (V|I)' = | 0 0 1 0 0 0 0 0 | - * | 0 0 0 1 0 0 0 0 | - * | 0 0 0 0 1 0 0 0 | - * | 0 0 0 0 0 1 0 0 | - * | 0 0 0 0 0 0 1 0 | - * | 0 0 0 0 0 0 0 1 | - * ~~ ~~ + * \verbatim + __ __ + | 1 1 1 1 1 1 1 1 | + | 128 64 32 16 8 4 2 1 | <-----+-+-- missing disks + | 19 205 116 29 64 16 4 1 | / / + | 1 0 0 0 0 0 0 0 | / / + | 0 1 0 0 0 0 0 0 | <--' / + (V|I) = | 0 0 1 0 0 0 0 0 | <---' + | 0 0 0 1 0 0 0 0 | + | 0 0 0 0 1 0 0 0 | + | 0 0 0 0 0 1 0 0 | + | 0 0 0 0 0 0 1 0 | + | 0 0 0 0 0 0 0 1 | + ~~ ~~ + __ __ + | 1 1 1 1 1 1 1 1 | + | 128 64 32 16 8 4 2 1 | + | 19 205 116 29 64 16 4 1 | + | 1 0 0 0 0 0 0 0 | + | 0 1 0 0 0 0 0 0 | + (V|I)' = | 0 0 1 0 0 0 0 0 | + | 0 0 0 1 0 0 0 0 | + | 0 0 0 0 1 0 0 0 | + | 0 0 0 0 0 1 0 0 | + | 0 0 0 0 0 0 1 0 | + | 0 0 0 0 0 0 0 1 | + ~~ ~~ + \endverbatim * * Here we employ Gauss-Jordan elimination to find the inverse of (V|I)'. We * have carefully chosen the seed values 1, 2, and 4 to ensure that this * matrix is not singular. 
- * __ __ - * | 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 | - * | 19 205 116 29 64 16 4 1 0 1 0 0 0 0 0 0 | - * | 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 | - * | 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 | - * | 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 | - * | 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 | - * | 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 | - * | 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 | - * ~~ ~~ - * __ __ - * | 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 | - * | 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 | - * | 19 205 116 29 64 16 4 1 0 1 0 0 0 0 0 0 | - * | 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 | - * | 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 | - * | 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 | - * | 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 | - * | 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 | - * ~~ ~~ - * __ __ - * | 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 | - * | 0 1 1 0 0 0 0 0 1 0 1 1 1 1 1 1 | - * | 0 205 116 0 0 0 0 0 0 1 19 29 64 16 4 1 | - * | 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 | - * | 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 | - * | 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 | - * | 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 | - * | 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 | - * ~~ ~~ - * __ __ - * | 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 | - * | 0 1 1 0 0 0 0 0 1 0 1 1 1 1 1 1 | - * | 0 0 185 0 0 0 0 0 205 1 222 208 141 221 201 204 | - * | 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 | - * | 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 | - * | 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 | - * | 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 | - * | 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 | - * ~~ ~~ - * __ __ - * | 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 | - * | 0 1 1 0 0 0 0 0 1 0 1 1 1 1 1 1 | - * | 0 0 1 0 0 0 0 0 166 100 4 40 158 168 216 209 | - * | 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 | - * | 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 | - * | 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 | - * | 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 | - * | 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 | - * ~~ ~~ - * __ __ - * | 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 | - * | 0 1 0 0 0 0 0 0 167 100 5 41 159 169 217 208 | - * | 0 0 1 0 0 0 0 0 166 100 4 40 158 168 216 209 | - * | 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 | - * | 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 | - * | 0 0 
0 0 0 1 0 0 0 0 0 0 0 1 0 0 | - * | 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 | - * | 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 | - * ~~ ~~ - * __ __ - * | 0 0 1 0 0 0 0 0 | - * | 167 100 5 41 159 169 217 208 | - * | 166 100 4 40 158 168 216 209 | - * (V|I)'^-1 = | 0 0 0 1 0 0 0 0 | - * | 0 0 0 0 1 0 0 0 | - * | 0 0 0 0 0 1 0 0 | - * | 0 0 0 0 0 0 1 0 | - * | 0 0 0 0 0 0 0 1 | - * ~~ ~~ + * \verbatim + __ __ + | 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 | + | 19 205 116 29 64 16 4 1 0 1 0 0 0 0 0 0 | + | 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 | + | 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 | + | 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 | + | 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 | + | 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 | + | 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 | + ~~ ~~ + __ __ + | 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 | + | 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 | + | 19 205 116 29 64 16 4 1 0 1 0 0 0 0 0 0 | + | 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 | + | 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 | + | 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 | + | 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 | + | 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 | + ~~ ~~ + __ __ + | 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 | + | 0 1 1 0 0 0 0 0 1 0 1 1 1 1 1 1 | + | 0 205 116 0 0 0 0 0 0 1 19 29 64 16 4 1 | + | 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 | + | 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 | + | 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 | + | 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 | + | 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 | + ~~ ~~ + __ __ + | 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 | + | 0 1 1 0 0 0 0 0 1 0 1 1 1 1 1 1 | + | 0 0 185 0 0 0 0 0 205 1 222 208 141 221 201 204 | + | 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 | + | 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 | + | 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 | + | 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 | + | 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 | + ~~ ~~ + __ __ + | 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 | + | 0 1 1 0 0 0 0 0 1 0 1 1 1 1 1 1 | + | 0 0 1 0 0 0 0 0 166 100 4 40 158 168 216 209 | + | 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 | + | 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 | + | 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 | + | 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 | + | 0 0 
0 0 0 0 0 1 0 0 0 0 0 0 0 1 | + ~~ ~~ + __ __ + | 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 | + | 0 1 0 0 0 0 0 0 167 100 5 41 159 169 217 208 | + | 0 0 1 0 0 0 0 0 166 100 4 40 158 168 216 209 | + | 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 | + | 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 | + | 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 | + | 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 | + | 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 | + ~~ ~~ + __ __ + | 0 0 1 0 0 0 0 0 | + | 167 100 5 41 159 169 217 208 | + | 166 100 4 40 158 168 216 209 | + (V|I)'^-1 = | 0 0 0 1 0 0 0 0 | + | 0 0 0 0 1 0 0 0 | + | 0 0 0 0 0 1 0 0 | + | 0 0 0 0 0 0 1 0 | + | 0 0 0 0 0 0 0 1 | + ~~ ~~ + \endverbatim * * We can then simply compute D = (V|I)'^-1 x (d|p)' to discover the values * of the missing data. @@ -1063,7 +1085,6 @@ vdev_raidz_reconstruct_pq(raidz_map_t *r * that reason, we only build the coefficients in the rows that correspond to * targeted columns. */ -/* END CSTYLED */ static void vdev_raidz_matrix_init(raidz_map_t *rm, int n, int nmap, int *map, @@ -1449,6 +1470,9 @@ vdev_raidz_reconstruct(raidz_map_t *rm, return (code); } +/** + * Called (via vector tables) by vdev_open. + */ static int vdev_raidz_open(vdev_t *vd, uint64_t *asize, uint64_t *max_asize, uint64_t *ashift) @@ -1518,6 +1542,9 @@ vdev_raidz_asize(vdev_t *vd, uint64_t ps return (asize); } +/** + * Record the completion of a column's child IO. + */ static void vdev_raidz_child_done(zio_t *zio) { @@ -1528,6 +1555,22 @@ vdev_raidz_child_done(zio_t *zio) rc->rc_skipped = 0; } +/** + * Start an IO operation on a RAIDZ VDev + * + * Outline: + * - For Write operations: + * -# Generate the parity data + * -# Send async write operations to each column (data and parity)'s VDev + * -# If the column skips any sectors for padding, generate dummy write + * operations for those areas to improve aggregation continuity. + * - For read operations: + * -# Send an async read operation to each data column's VDev to read the + * range of data required for zio. 
+ * -# If this is a scrub or resilver operation, or if any of the data + * vdevs have had errors, then send async read operations to the parity + * columns' VDevs as well. + */ static int vdev_raidz_io_start(zio_t *zio) { @@ -1628,7 +1671,7 @@ vdev_raidz_io_start(zio_t *zio) } -/* +/** * Report a checksum error for a child of a RAID-Z device. */ static void @@ -1653,7 +1696,7 @@ raidz_checksum_error(zio_t *zio, raidz_c } } -/* +/** * We keep track of whether or not there were any injected errors, so that * any ereports we generate can note it. */ @@ -1670,8 +1713,8 @@ raidz_checksum_verify(zio_t *zio) return (ret); } -/* - * Generate the parity from the data columns. If we tried and were able to +/** + * Generate the parity from the data columns. If we tried and were able to * read the parity without error, verify that the generated parity matches the * data we read. If it doesn't, we fire off a checksum error. Return the * number such failures. @@ -1708,7 +1751,7 @@ raidz_parity_verify(zio_t *zio, raidz_ma return (ret); } -/* +/** * Keep statistics on all the ways that we used parity to correct data. */ static uint64_t raidz_corrected[1 << VDEV_RAIDZ_MAXPARITY]; @@ -1724,7 +1767,7 @@ vdev_raidz_worst_error(raidz_map_t *rm) return (error); } -/* +/** * Iterate over all combinations of bad data and attempt a reconstruction. * Note that the algorithm below is non-optimal because it doesn't take into * account how reconstruction is actually performed. For example, with @@ -1880,6 +1923,26 @@ done: return (ret); } +/** + * Complete an IO operation on a RAIDZ VDev + * + * Outline: + * - For write ops: + * -# Check for errors on the child IOs. + * -# Return, setting an error code if too few child VDevs were written + * to reconstruct the data later. + * - For read ops: + * -# Check for errors on the child IOs. + * -# If data errors occurred: + * -# Try to reassemble the data from the parity available. + * -# If we haven't yet read the parity drives, read them now. 
+ * -# If all parity drives have been read but the data still doesn't + * reassemble with a correct checksum, then try combinatorial + * reconstruction. + * -# If that doesn't work, return an error. + * -# If there were unexpected errors or this is a resilver operation, + * rewrite the vdevs that had errors. + */ static void vdev_raidz_io_done(zio_t *zio) { @@ -1922,15 +1985,15 @@ vdev_raidz_io_done(zio_t *zio) } if (zio->io_type == ZIO_TYPE_WRITE) { - /* - * XXX -- for now, treat partial writes as a success. - * (If we couldn't write enough columns to reconstruct - * the data, the I/O failed. Otherwise, good enough.) - * - * Now that we support write reallocation, it would be better - * to treat partial failure as real failure unless there are - * no non-degraded top-level vdevs left, and not update DTLs - * if we intend to reallocate. + /** + * \todo For now, treat partial writes as a success. + * (If we couldn't write enough columns to + * reconstruct the data, the I/O failed. Otherwise, + * good enough.) Now that we support write reallocation, + * it would be better to treat partial failure as real + * failure unless there are no non-degraded top-level + * vdevs left, and not update DTLs if we intend to + * reallocate. */ /* XXPOLICY */ if (total_errors > rm->rm_firstdatacol) diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_root.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_root.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_root.c 2012-10-17 17:00:59.878590214 -0600 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/vdev_root.c 2012-10-17 16:12:43.142605287 -0600 @@ -33,11 +33,12 @@ #include #include -/* +/** + * \file vdev_root.c * Virtual device vector for the pool's root vdev. */ -/* +/** * We should be able to tolerate one failure with absolutely no damage * to our metadata. 
Two failures will take out space maps, a bunch of * indirect block trees, meta dnodes, dnodes, etc. Probably not a happy diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zap.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zap.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zap.c 2012-11-16 11:07:22.192457906 -0700 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zap.c 2012-11-15 18:17:57.344461832 -0700 @@ -23,9 +23,11 @@ * Copyright (c) 2012 by Delphix. All rights reserved. */ -/* +/** + * \file zap.c + * * This file contains the top half of the zfs directory structure - * implementation. The bottom half is in zap_leaf.c. + * implementation. The bottom half is in zap_leaf.c. * * The zdir is an extendable hash data structure. There is a table of * pointers to buckets (zap_t->zd_data->zd_leafs). The buckets are @@ -137,7 +139,7 @@ zap_tryupgradedir(zap_t *zap, dmu_tx_t * return (0); } -/* +/** * Generic routines for dealing with the pointer & cookie tables. */ @@ -300,7 +302,7 @@ zap_table_load(zap_t *zap, zap_table_phy return (err); } -/* +/** * Routines for growing the ptrtbl. */ @@ -748,7 +750,7 @@ fzap_check(zap_name_t *zn, uint64_t inte return (fzap_checksize(integer_size, num_integers)); } -/* +/** * Routines for manipulating attributes. */ int diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zap_leaf.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zap_leaf.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zap_leaf.c 2012-10-17 17:00:59.879589814 -0600 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zap_leaf.c 2012-11-15 17:24:08.324457420 -0700 @@ -22,7 +22,8 @@ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. */ -/* +/** + * \file zap_leaf.c * The 512-byte leaf is broken into 32 16-byte chunks. 
* chunk number n means l_chunk[n], even though the header precedes it. * the names are stored null-terminated. @@ -269,7 +270,10 @@ zap_leaf_array_free(zap_leaf_t *l, uint1 } } -/* array_len and buf_len are in integers, not bytes */ +/** + * \param array_len In units of integers, not bytes + * \param buf_len In units of integers, not bytes + */ static void zap_leaf_array_read(zap_leaf_t *l, uint16_t chunk, int array_int_len, int array_len, int buf_int_len, uint64_t buf_len, @@ -658,8 +662,9 @@ zap_entry_create(zap_leaf_t *l, zap_name return (0); } -/* +/** * Determine if there is another entry with the same normalized form. + * * For performance purposes, either zn or name must be provided (the * other can be NULL). Note, there usually won't be any hash * conflicts, in which case we don't need the concatenated/normalized @@ -784,7 +789,7 @@ zap_leaf_transfer_entry(zap_leaf_t *l, i nl->l_phys->l_hdr.lh_nentries++; } -/* +/** * Transfer the entries whose hash prefix ends in 1 to the new leaf. */ void diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zap_micro.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zap_micro.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zap_micro.c 2012-11-16 11:07:22.193456035 -0700 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zap_micro.c 2012-11-15 18:17:57.361451565 -0700 @@ -721,7 +721,7 @@ zap_count(objset_t *os, uint64_t zapobj, return (err); } -/* +/** * zn may be NULL; if not specified, it will be computed if needed. * See also the comment above zap_entry_normalization_conflict(). */ @@ -762,7 +762,7 @@ again: return (B_FALSE); } -/* +/** * Routines for manipulating attributes. 
*/ diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_acl.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_acl.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_acl.c 2012-10-17 17:00:59.882590724 -0600 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_acl.c 2012-11-16 14:47:07.036454551 -0700 @@ -319,12 +319,15 @@ static acl_ops_t zfs_acl_fuid_ops = { zfs_ace_fuid_data }; -/* - * The following three functions are provided for compatibility with - * older ZPL version in order to determine if the file use to have - * an external ACL and what version of ACL previously existed on the - * file. Would really be nice to not need this, sigh. +/** + * \name External ACL (Old ZPL Version) Compatibility + * + * Provided for compatibility with older ZPL versions in order to determine if + * the file used to have an external ACL and what version of ACL previously + * existed on the file. Would really be nice to not need this, sigh. + * \{ */ + uint64_t zfs_external_acl(znode_t *zp) { @@ -356,7 +359,7 @@ zfs_external_acl(znode_t *zp) } } -/* +/** * Determine size of ACL in bytes * * This is more complicated than it should be since we have to deal @@ -429,6 +432,7 @@ zfs_znode_acl_version(znode_t *zp) } } } +/** \} */ static int zfs_acl_version(int version) @@ -647,8 +651,9 @@ zfs_acl_curr_node(zfs_acl_t *aclp) return (aclp->z_curr_node); } -/* +/** * Copy ACE to internal ZFS format. + * * While processing the ACL each ACE will be validated for correctness. * ACE FUIDs will be created later. 
*/ @@ -711,7 +716,7 @@ zfs_copy_ace_2_fuid(zfsvfs_t *zfsvfs, vt return (0); } -/* +/** * Copy ZFS ACEs to fixed size ace_t layout */ static void @@ -794,8 +799,8 @@ zfs_copy_ace_2_oldace(vtype_t obj_type, return (0); } -/* - * convert old ACL format to new +/** + * Convert old ACL format to new */ void zfs_acl_xform(znode_t *zp, zfs_acl_t *aclp, cred_t *cr) @@ -850,7 +855,7 @@ zfs_acl_xform(znode_t *zp, zfs_acl_t *ac } -/* +/** * Convert unix access mask to v4 access mask */ static uint32_t @@ -881,7 +886,7 @@ zfs_set_ace(zfs_acl_t *aclp, void *acep, aclp->z_ops.ace_who_set(acep, fuid); } -/* +/** * Determine mode of file based on ACL. * Also, create FUIDs for any User/Group ACEs */ @@ -1052,7 +1057,7 @@ zfs_mode_compute(uint64_t fmode, zfs_acl return (mode); } -/* +/** * Read an external acl object. If the intent is to modify, always * create a new acl and leave any cached acl in place. */ @@ -1169,8 +1174,8 @@ zfs_acl_chown_setattr(znode_t *zp) return (error); } -/* - * common code for setting ACLs. +/** + * Common code for setting ACLs. * * This function is called from zfs_mode_update, zfs_perm_init, and zfs_setacl. * zfs_setacl passes a non-NULL inherit pointer (ihp) to indicate that it's @@ -1460,8 +1465,8 @@ zfs_acl_chmod_setattr(znode_t *zp, zfs_a return (error); } -/* - * strip off write_owner and write_acl +/** + * Strip off write_owner and write_acl */ static void zfs_restricted_update(zfsvfs_t *zfsvfs, zfs_acl_t *aclp, void *acep) @@ -1475,7 +1480,7 @@ zfs_restricted_update(zfsvfs_t *zfsvfs, } } -/* +/** * Should ACE be inherited? 
*/ static int @@ -1491,8 +1496,8 @@ zfs_ace_can_use(vtype_t vtype, uint16_t return (0); } -/* - * inherit inheritable ACEs from parent +/** + * Inherit inheritable ACEs from parent */ static zfs_acl_t * zfs_acl_inherit(zfsvfs_t *zfsvfs, vtype_t vtype, zfs_acl_t *paclp, @@ -1610,7 +1615,7 @@ zfs_acl_inherit(zfsvfs_t *zfsvfs, vtype_ return (aclp); } -/* +/** * Create file system object initial permissions * including inheritable ACEs. */ @@ -1744,7 +1749,7 @@ zfs_acl_ids_create(znode_t *dzp, int fla return (0); } -/* +/** * Free ACL and fuid_infop, but not the acl_ids structure */ void @@ -1765,7 +1770,7 @@ zfs_acl_ids_overquota(zfsvfs_t *zfsvfs, zfs_fuid_overquota(zfsvfs, B_TRUE, acl_ids->z_fgid)); } -/* +/** * Retrieve a files ACL */ int @@ -1920,8 +1925,8 @@ zfs_vsec_2_aclp(zfsvfs_t *zfsvfs, vtype_ return (0); } -/* - * Set a files ACL +/** + * Set a file's ACL */ int zfs_setacl(znode_t *zp, vsecattr_t *vsecp, boolean_t skipaclchk, cred_t *cr) @@ -2025,10 +2030,12 @@ done: return (error); } -/* +/** * Check accesses of interest (AoI) against attributes of the dataset - * such as read-only. Returns zero if no AoI conflict with dataset - * attributes, otherwise an appropriate errno is returned. + * such as read-only. + * + * \return zero if no AoI conflict with dataset attributes, + * otherwise an appropriate errno is returned. */ static int zfs_zaccess_dataset_check(znode_t *zp, uint32_t v4_mode) @@ -2076,7 +2083,7 @@ zfs_zaccess_dataset_check(znode_t *zp, u return (0); } -/* +/** * The primary usage of this function is to loop through all of the * ACEs in the znode, determining what accesses of interest (AoI) to * the caller are allowed or denied. The AoI are expressed as bits in @@ -2220,7 +2227,7 @@ zfs_zaccess_aces_check(znode_t *zp, uint return (0); } -/* +/** * Return true if any access whatsoever granted, we don't actually * care what access is granted. 
*/ @@ -2352,8 +2359,9 @@ slow: return (error); } -/* +/** * Determine whether Access should be granted/denied. + * * The least priv subsytem is always consulted as a basic privilege * can define any form of access. */ @@ -2516,7 +2524,7 @@ zfs_zaccess(znode_t *zp, int mode, int f return (error); } -/* +/** * Translate traditional unix VREAD/VWRITE/VEXEC mode into * native ACL format and call zfs_zaccess() */ @@ -2526,7 +2534,7 @@ zfs_zaccess_rwx(znode_t *zp, mode_t mode return (zfs_zaccess(zp, zfs_unix_to_v4(mode >> 6), flags, B_FALSE, cr)); } -/* +/** * Access function for secpolicy_vnode_setattr */ int @@ -2555,7 +2563,7 @@ zfs_delete_final_check(znode_t *zp, znod return (error); } -/* +/** * Determine whether Access should be granted/deny, without * consulting least priv subsystem. * @@ -2563,32 +2571,36 @@ zfs_delete_final_check(znode_t *zp, znod * The following chart is the recommended NFSv4 enforcement for * ability to delete an object. * - * ------------------------------------------------------- - * | Parent Dir | Target Object Permissions | - * | permissions | | - * ------------------------------------------------------- - * | | ACL Allows | ACL Denies| Delete | - * | | Delete | Delete | unspecified| - * ------------------------------------------------------- - * | ACL Allows | Permit | Permit | Permit | - * | DELETE_CHILD | | - * ------------------------------------------------------- - * | ACL Denies | Permit | Deny | Deny | - * | DELETE_CHILD | | | | - * ------------------------------------------------------- - * | ACL specifies | | | | - * | only allow | Permit | Permit | Permit | - * | write and | | | | - * | execute | | | | - * ------------------------------------------------------- - * | ACL denies | | | | - * | write and | Permit | Deny | Deny | - * | execute | | | | - * ------------------------------------------------------- - * ^ - * | - * No search privilege, can't even look up file? 
+ * \verbatim + ------------------------------------------------------- + | Parent Dir | Target Object Permissions | + | permissions | | + ------------------------------------------------------- + | | ACL Allows | ACL Denies| Delete | + | | Delete | Delete | unspecified| + ------------------------------------------------------- + | ACL Allows | Permit | Permit | Permit | + | DELETE_CHILD | | + ------------------------------------------------------- + | ACL Denies | Permit | Deny | Deny | + | DELETE_CHILD | | | | + ------------------------------------------------------- + | ACL specifies | | | | + | only allow | Permit | Permit | Permit | + | write and | | | | + | execute | | | | + ------------------------------------------------------- + | ACL denies | | | | + | write and | Permit | Deny | Deny | + | execute | | | | + ------------------------------------------------------- + ^ + | + No search privilege, can't even look up file? + \endverbatim * + * \note If the parent dir's ACL denies write and execute permissions, + * then it may be impossible to even lookup the file */ int zfs_zaccess_delete(znode_t *dzp, znode_t *zp, cred_t *cr) diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_byteswap.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_byteswap.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_byteswap.c 2012-10-17 17:00:59.882590724 -0600 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_byteswap.c 2012-10-07 21:13:24.091589747 -0600 @@ -43,7 +43,7 @@ zfs_oldace_byteswap(ace_t *ace, int ace_ } } -/* +/** * swap ace_t and ace_oject_t */ void diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_ctldir.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_ctldir.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_ctldir.c 2012-11-16 11:07:22.195456387 -0700 +++ 
SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_ctldir.c 2012-11-15 18:17:57.368449464 -0700 @@ -24,7 +24,8 @@ * All rights reserved. */ -/* +/** + * \file zfs_ctldir.c * ZFS control directory (a.k.a. ".zfs") * * This directory provides a common location for all ZFS meta-objects. @@ -36,11 +37,13 @@ * this would take up a huge amount of space in /etc/mnttab. We have three * types of objects: * - * ctldir ------> snapshotdir -------> snapshot - * | - * | - * V - * mounted fs + \verbatim + ctldir ------> snapshotdir -------> snapshot + | + | + V + mounted fs + \endverbatim * * The 'snapshot' node contains just enough information to lookup '..' and act * as a mountpoint for the snapshot. Whenever we lookup a specific snapshot, we @@ -82,7 +85,7 @@ typedef struct zfsctl_node { gfs_dir_t zc_gfs_private; uint64_t zc_id; - timestruc_t zc_cmtime; /* ctime and mtime, always the same */ + timestruc_t zc_cmtime; /**< ctime and mtime, always the same */ } zfsctl_node_t; typedef struct zfsctl_snapdir { @@ -147,7 +150,7 @@ static gfs_opsvec_t zfsctl_opsvec[] = { }; #endif /* sun */ -/* +/** * Root directory elements. We only have two entries * snapshot and shares. */ @@ -162,7 +165,7 @@ static gfs_dirent_t zfsctl_root_entries[ sizeof (gfs_dirent_t)) + 1) -/* +/** * Initialize the various GFS pieces we'll need to create and manipulate .zfs * directories. This is called from the ZFS init routine, and initializes the * vnode ops vectors that we'll be using. @@ -212,7 +215,7 @@ zfsctl_is_node(vnode_t *vp) } -/* +/** * Return the inode number associated with the 'snapshot' or * 'shares' directory. */ @@ -230,7 +233,7 @@ zfsctl_root_inode_cb(vnode_t *vp, int in return (zfsvfs->z_shares_dir); } -/* +/** * Create the '.zfs' directory. This directory is cached as part of the VFS * structure. This results in a hold on the vfs_t. The code in zfs_umount() * therefore checks against a vfs_count of 2 instead of 1. 
This reference @@ -269,7 +272,7 @@ zfsctl_create(zfsvfs_t *zfsvfs) VOP_UNLOCK(vp, 0); } -/* +/** * Destroy the '.zfs' directory. Only called when the filesystem is unmounted. * There might still be more references if we were force unmounted, but only * new zfs_inactive() calls can occur and they don't reference .zfs @@ -281,7 +284,7 @@ zfsctl_destroy(zfsvfs_t *zfsvfs) zfsvfs->z_ctldir = NULL; } -/* +/** * Given a root znode, retrieve the associated .zfs directory. * Add a hold to the vnode and return it. */ @@ -293,7 +296,7 @@ zfsctl_root(znode_t *zp) return (zp->z_zfsvfs->z_ctldir); } -/* +/** * Common open routine. Disallow any write access. */ /* ARGSUSED */ @@ -308,7 +311,7 @@ zfsctl_common_open(struct vop_open_args return (0); } -/* +/** * Common close routine. Nothing to do here. */ /* ARGSUSED */ @@ -318,7 +321,7 @@ zfsctl_common_close(struct vop_close_arg return (0); } -/* +/** * Common access routine. Disallow writes. */ /* ARGSUSED */ @@ -348,7 +351,7 @@ zfsctl_common_access(ap) return (0); } -/* +/** * Common getattr function. Fill in basic information. */ static void @@ -464,7 +467,7 @@ zfsctl_common_reclaim(ap) return (0); } -/* +/** * .zfs inode namespace * * We need to generate unique inode numbers for all files and directories @@ -478,7 +481,7 @@ zfsctl_common_reclaim(ap) #define ZFSCTL_INO_SNAP(id) (id) -/* +/** * Get root directory attributes. */ /* ARGSUSED */ @@ -507,7 +510,7 @@ zfsctl_root_getattr(ap) return (0); } -/* +/** * Special case the handling of "..". */ /* ARGSUSED */ @@ -576,7 +579,7 @@ static const fs_operation_def_t zfsctl_t }; #endif /* sun */ -/* +/** * Special case the handling of "..". */ /* ARGSUSED */ @@ -858,7 +861,7 @@ zfsctl_snapdir_remove(vnode_t *dvp, char } #endif /* sun */ -/* +/** * This creates a snapshot under '.zfs/snapshot'. */ /* ARGSUSED */ @@ -910,7 +913,7 @@ zfsctl_freebsd_snapdir_mkdir(ap) ap->a_vpp, ap->a_cnp->cn_cred, NULL, 0, NULL)); } -/* +/** * Lookup entry point for the 'snapshot' directory. 
Try to open the * snapshot if it exist, creating the pseudo filesystem vnode as necessary. * Perform a mount of the associated dataset on top of the vnode. @@ -1202,12 +1205,13 @@ zfsctl_shares_readdir(ap) return (error); } -/* - * pvp is the '.zfs' directory (zfsctl_node_t). +/** * Creates vp, which is '.zfs/snapshot' (zfsctl_snapdir_t). * * This function is the callback to create a GFS vnode for '.zfs/snapshot' * when a lookup is performed on .zfs for "snapshot". + * + * \param pvp the '.zfs' directory (zfsctl_node_t) */ vnode_t * zfsctl_mknode_snapdir(vnode_t *pvp) @@ -1395,12 +1399,12 @@ static struct vop_vector zfsctl_ops_shar }; #endif /* !sun */ -/* - * pvp is the GFS vnode '.zfs/snapshot'. - * +/** * This creates a GFS node under '.zfs/snapshot' representing each * snapshot. This newly created GFS node is what we mount snapshot * vfs_t's ontop of. + * + * \param pvp the GFS vnode '.zfs/snapshot' */ static vnode_t * zfsctl_snapshot_mknode(vnode_t *pvp, uint64_t objset) @@ -1603,7 +1607,7 @@ zfsctl_snapshot_vptocnp(struct vop_vptoc return (error); } -/* +/** * These VP's should never see the light of day. They should always * be covered. */ @@ -1675,7 +1679,7 @@ zfsctl_lookup_objset(vfs_t *vfsp, uint64 return (error); } -/* +/** * Unmount any snapshots for the given filesystem. This is called from * zfs_umount() - if we have a ctldir, then go through and unmount all the * snapshots. 
diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_debug.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_debug.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_debug.c 2012-11-16 11:07:22.195456387 -0700 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_debug.c 2012-11-15 18:17:57.399456883 -0700 @@ -52,7 +52,7 @@ zfs_dbgmsg_fini(void) ASSERT0(zfs_dbgmsg_size); } -/* +/** * Print these messages by running: * echo ::zfs_dbgmsg | mdb -k * diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_dir.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_dir.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_dir.c 2012-11-16 11:07:22.196457395 -0700 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_dir.c 2012-11-15 18:17:57.404457951 -0700 @@ -56,7 +56,7 @@ #include #include -/* +/** * zfs_match_find() is used by zfs_dirent_lock() to peform zap lookups * of names after deciding which is the appropriate lookup interface. */ @@ -97,7 +97,7 @@ zfs_match_find(zfsvfs_t *zfsvfs, znode_t return (error); } -/* +/** * Lock a directory entry. A dirlock on protects that name * in dzp's directory zap object. As long as you hold a dirlock, you can * assume two things: (1) dzp cannot be reaped, and (2) no other thread @@ -328,7 +328,7 @@ zfs_dirent_lock(zfs_dirlock_t **dlpp, zn return (0); } -/* +/** * Unlock this directory entry and wake anyone who was waiting for it. */ void @@ -358,7 +358,7 @@ zfs_dirent_unlock(zfs_dirlock_t *dl) kmem_free(dl, sizeof (*dl) + dl->dl_namesize); } -/* +/** * Look up an entry in a directory. * * NOTE: '.' and '..' 
are handled as special cases because @@ -437,7 +437,7 @@ zfs_dirlook(znode_t *dzp, char *name, vn return (error); } -/* +/** * unlinked Set (formerly known as the "delete queue") Error Handling * * When dealing with the unlinked set, we dmu_tx_hold_zap(), but we @@ -515,7 +515,7 @@ zfs_unlinked_drain(zfsvfs_t *zfsvfs) zap_cursor_fini(&zc); } -/* +/** * Delete the entire contents of a directory. Return a count * of the number of entries that could not be deleted. If we encounter * an error, return a count of at least one so that the directory stays @@ -697,7 +697,7 @@ zfs_dirent(znode_t *zp, uint64_t mode) return (de); } -/* +/** * Link zp into dl. Can only fail if zp has been unlinked. */ int @@ -795,7 +795,7 @@ zfs_dropname(zfs_dirlock_t *dl, znode_t return (error); } -/* +/** * Unlink zp from dl, and mark zp for deletion if this was the last link. * Can fail if zp is a mount point (EBUSY) or a non-empty directory (EEXIST). * If 'unlinkedp' is NULL, we put unlinked znodes on the unlinked list. @@ -905,7 +905,7 @@ zfs_link_destroy(zfs_dirlock_t *dl, znod return (0); } -/* +/** * Indicate whether the directory is empty. Works with or without z_lock * held, but can only be consider a hint in the latter case. Returns true * if only "." and ".." remain and there's no work in progress. @@ -991,7 +991,7 @@ top: return (0); } -/* +/** * Return a znode for the extended attribute directory for zp. * ** If the directory does not already exist, it is created ** * @@ -1066,7 +1066,7 @@ top: return (error); } -/* +/** * Decide whether it is okay to remove within a sticky directory. 
* * In sticky directories, write access is not sufficient; diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_fm.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_fm.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_fm.c 2012-10-17 17:00:59.884591459 -0600 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_fm.c 2012-11-15 12:38:38.318455830 -0700 @@ -39,16 +39,16 @@ #include #include -/* +/** * This general routine is responsible for generating all the different ZFS * ereports. The payload is dependent on the class, and which arguments are * supplied to the function: * - * EREPORT POOL VDEV IO - * block X X X - * data X X - * device X X - * pool X + * EREPORT POOL VDEV IO + * block X X X + * data X X + * device X X + * pool X * * If we are in a loading state, all errors are chained together by the same * SPA-wide ENA (Error Numeric Association). @@ -60,18 +60,18 @@ * layered diagram: * * +---------------+ - * | Aggregate I/O | No associated logical data or device - * +---------------+ + * | Aggregate I/O | No associated logical data or device + * +---------------+ * | * V - * +---------------+ Reads associated with a piece of logical data. - * | Read I/O | This includes reads on behalf of RAID-Z, - * +---------------+ mirrors, gang blocks, retries, etc. + * +---------------+ Reads associated with a piece of logical data. + * | Read I/O | This includes reads on behalf of RAID-Z, + * +---------------+ mirrors, gang blocks, retries, etc. * | * V - * +---------------+ Reads associated with a particular device, but - * | Physical I/O | no logical data. Issued as part of vdev caching - * +---------------+ and I/O aggregation. + * +---------------+ Reads associated with a particular device, but + * | Physical I/O | no logical data. Issued as part of vdev caching + * +---------------+ and I/O aggregation. 
* * Note that 'physical I/O' here is not the same terminology as used in the rest * of ZIO. Typically, 'physical I/O' simply means that there is no attached @@ -403,7 +403,7 @@ update_histogram(uint64_t value_arg, uin *count += bits; } -/* +/** * We've now filled up the range array, and need to increase "mingap" and * shrink the range list accordingly. zei_mingap is always the smallest * distance between array entries, so we set the new_allowed_gap to be @@ -835,7 +835,7 @@ zfs_post_common(spa_t *spa, vdev_t *vd, #endif } -/* +/** * The 'resource.fs.zfs.removed' event is an internal signal that the given vdev * has been removed from the system. This will cause the DE to ignore any * recent I/O errors, inferring that they are due to the asynchronous device @@ -847,7 +847,7 @@ zfs_post_remove(spa_t *spa, vdev_t *vd) zfs_post_common(spa, vd, FM_RESOURCE_REMOVED); } -/* +/** * The 'resource.fs.zfs.autoreplace' event is an internal signal that the pool * has the 'autoreplace' property set, and therefore any broken vdevs will be * handled by higher level logic, and no vdev fault should be generated. @@ -858,7 +858,7 @@ zfs_post_autoreplace(spa_t *spa, vdev_t zfs_post_common(spa, vd, FM_RESOURCE_AUTOREPLACE); } -/* +/** * The 'resource.fs.zfs.statechange' event is an internal signal that the * given vdev has transitioned its state to DEGRADED or HEALTHY. This will * cause the retire agent to repair any outstanding fault management cases diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_fuid.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_fuid.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_fuid.c 2012-10-17 17:00:59.885591762 -0600 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_fuid.c 2012-11-15 12:40:34.553457351 -0700 @@ -36,7 +36,8 @@ #endif #include -/* +/** + * \file zfs_fuid.c * FUID Domain table(s). 
* * The FUID table is stored as a packed nvlist of an array @@ -65,7 +66,7 @@ typedef struct fuid_domain { static char *nulldomain = ""; -/* +/** * Compare two indexes. */ static int @@ -81,7 +82,7 @@ idx_compare(const void *arg1, const void return (0); } -/* +/** * Compare two domain strings. */ static int @@ -106,7 +107,7 @@ zfs_fuid_avl_tree_create(avl_tree_t *idx sizeof (fuid_domain_t), offsetof(fuid_domain_t, f_domnode)); } -/* +/** * load initial fuid domain and idx trees. This function is used by * both the kernel and zdb. */ @@ -192,7 +193,7 @@ zfs_fuid_idx_domain(avl_tree_t *idx_tree } #ifdef _KERNEL -/* +/** * Load the fuid table(s) into memory. */ static void @@ -219,7 +220,7 @@ zfs_fuid_init(zfsvfs_t *zfsvfs) rw_exit(&zfsvfs->z_fuid_lock); } -/* +/** * sync out AVL trees to persistent storage. */ void @@ -289,7 +290,7 @@ zfs_fuid_sync(zfsvfs_t *zfsvfs, dmu_tx_t rw_exit(&zfsvfs->z_fuid_lock); } -/* +/** * Query domain table for a given domain. * * If domain isn't found and addok is set, it is added to AVL trees and @@ -356,10 +357,10 @@ retry: } } -/* +/** * Query domain table by index, returning domain string * - * Returns a pointer from an avl node of the domain string. + * \return A pointer from an avl node of the domain string. * */ const char * @@ -420,7 +421,7 @@ zfs_fuid_map_id(zfsvfs_t *zfsvfs, uint64 return (id); } -/* +/** * Add a FUID node to the list of fuid's being created for this * ACL * @@ -486,7 +487,7 @@ zfs_fuid_node_add(zfs_fuid_info_t **fuid } } -/* +/** * Create a file system FUID, based on information in the users cred * * If cred contains KSID_OWNER then it should be used to determine @@ -539,7 +540,7 @@ zfs_fuid_create_cred(zfsvfs_t *zfsvfs, z return (FUID_ENCODE(idx, rid)); } -/* +/** * Create a file system FUID for an ACL ace * or a chown/chgrp of the file. 
* This is similar to zfs_fuid_create_cred, except that @@ -647,7 +648,7 @@ zfs_fuid_destroy(zfsvfs_t *zfsvfs) rw_exit(&zfsvfs->z_fuid_lock); } -/* +/** * Allocate zfs_fuid_info for tracking FUIDs created during * zfs_mknode, VOP_SETATTR() or VOP_SETSECATTR() */ @@ -664,7 +665,7 @@ zfs_fuid_info_alloc(void) return (fuidp); } -/* +/** * Release all memory associated with zfs_fuid_info_t */ void @@ -690,7 +691,7 @@ zfs_fuid_info_free(zfs_fuid_info_t *fuid kmem_free(fuidp, sizeof (zfs_fuid_info_t)); } -/* +/** * Check to see if id is a groupmember. If cred * has ksid info then sidlist is checked first * and if still not found then POSIX groups are checked diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_ioctl.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_ioctl.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_ioctl.c 2012-11-16 11:07:22.199456836 -0700 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_ioctl.c 2012-11-16 22:22:26.858456321 -0700 @@ -112,7 +112,7 @@ typedef struct zfs_ioc_vec { boolean_t zvec_pool_check; } zfs_ioc_vec_t; -/* This array is indexed by zfs_userquota_prop_t */ +/** This array is indexed by zfs_userquota_prop_t */ static const char *userquota_perms[] = { ZFS_DELEG_PERM_USERUSED, ZFS_DELEG_PERM_USERQUOTA, @@ -193,7 +193,7 @@ history_str_get(zfs_cmd_t *zc) return (buf); } -/* +/** * Check to see if the named dataset is currently defined as bootable */ static boolean_t @@ -210,10 +210,8 @@ zfs_is_bootfs(const char *name) return (B_FALSE); } -/* - * zfs_earlier_version - * - * Return non-zero if the spa version is less than requested version. +/** + * \return non-zero if the spa version is less than requested version. 
*/ static int zfs_earlier_version(const char *name, int version) @@ -230,10 +228,8 @@ zfs_earlier_version(const char *name, in return (0); } -/* - * zpl_earlier_version - * - * Return TRUE if the ZPL version is less than requested version. +/** + * \return TRUE if the ZPL version is less than requested version. */ static boolean_t zpl_earlier_version(const char *name, int version) @@ -273,7 +269,7 @@ zfs_log_history(zfs_cmd_t *zc) history_str_free(buf); } -/* +/** * Policy for top-level read operations (list pools). Requires no privileges, * and can be used in the local zone, as there is no associated dataset. */ @@ -284,7 +280,7 @@ zfs_secpolicy_none(zfs_cmd_t *zc, cred_t return (0); } -/* +/** * Policy for dataset read operations (list children, get statistics). Requires * no privileges, but must be visible in the local zone. */ @@ -409,10 +405,10 @@ zfs_secpolicy_write_perms_ds(const char } #ifdef SECLABEL -/* +/** * Policy for setting the security label property. * - * Returns 0 for success, non-zero for access and other errors. + * \return 0 for success, non-zero for access and other errors. */ static int zfs_set_slabel_policy(const char *name, char *strval, cred_t *cr) @@ -713,7 +709,7 @@ zfs_secpolicy_destroy(zfs_cmd_t *zc, cre return (zfs_secpolicy_destroy_perms(zc->zc_name, cr)); } -/* +/** * Destroying snapshots with delegated permissions requires * descendent mount and destroy permissions. */ @@ -898,7 +894,7 @@ zfs_secpolicy_umount(zfs_cmd_t *zc, cred return (error); } -/* +/** * Policy for pool operations - create/destroy pools, add vdevs, etc. Requires * SYS_CONFIG privilege, which is not available in a local zone. */ @@ -912,7 +908,7 @@ zfs_secpolicy_config(zfs_cmd_t *zc, cred return (0); } -/* +/** * Policy for object to name lookups. */ /* ARGSUSED */ @@ -928,7 +924,7 @@ zfs_secpolicy_diff(zfs_cmd_t *zc, cred_t return (error); } -/* +/** * Policy for fault injection. Requires all privileges. 
*/ /* ARGSUSED */ @@ -1018,7 +1014,7 @@ zfs_secpolicy_release(zfs_cmd_t *zc, cre ZFS_DELEG_PERM_RELEASE, cr)); } -/* +/** * Policy for allowing temporary snapshots to be taken or released */ static int @@ -1045,7 +1041,7 @@ zfs_secpolicy_tmp_snapshot(zfs_cmd_t *zc return (error); } -/* +/** * Returns the nvlist as specified by the user in the zfs_cmd_t. */ static int @@ -1174,7 +1170,7 @@ getzfsvfs(const char *dsname, zfsvfs_t * return (error); } -/* +/** * Find a zfsvfs_t for a mounted filesystem, or create our own, in which * case its z_vfs will be NULL, and it will be opened as the owner. * If 'writer' is set, the z_teardown_lock will be held for RW_WRITER, @@ -1367,13 +1363,10 @@ zfs_ioc_pool_configs(zfs_cmd_t *zc) } -/* - * inputs: - * zc_name name of the pool - * - * outputs: - * zc_cookie real errno - * zc_nvlist_dst config nvlist - * zc_nvlist_dst_size size of config nvlist +/** + * \param[in] zc_name name of the pool + * \param[out] zc_cookie real errno + * \param[out] zc_nvlist_dst config nvlist + * \param[out] zc_nvlist_dst_size size of config nvlist */ static int zfs_ioc_pool_stats(zfs_cmd_t *zc) @@ -1402,7 +1395,7 @@ zfs_ioc_pool_stats(zfs_cmd_t *zc) return (ret); } -/* +/** * Try to import the given pool, returning pool stats as appropriate so that * user land knows which devices are available and overall pool health. 
*/ @@ -1429,10 +1422,10 @@ zfs_ioc_pool_tryimport(zfs_cmd_t *zc) return (error); } -/* +/** * inputs: - * zc_name name of the pool - * zc_cookie scan func (pool_scan_func_t) + * - zc_name name of the pool + * - zc_cookie scan func (pool_scan_func_t) */ static int zfs_ioc_pool_scan(zfs_cmd_t *zc) @@ -1545,13 +1538,13 @@ zfs_ioc_dsobj_to_dsname(zfs_cmd_t *zc) return (0); } -/* +/** * inputs: - * zc_name name of filesystem - * zc_obj object to find + * - zc_name name of filesystem + * - zc_obj object to find * * outputs: - * zc_value name of object + * - zc_value name of object */ static int zfs_ioc_obj_to_path(zfs_cmd_t *zc) @@ -1573,14 +1566,14 @@ zfs_ioc_obj_to_path(zfs_cmd_t *zc) return (error); } -/* +/** * inputs: - * zc_name name of filesystem - * zc_obj object to find + * - zc_name name of filesystem + * - zc_obj object to find * * outputs: - * zc_stat stats on object - * zc_value path to object + * - zc_stat stats on object + * - zc_value path to object */ static int zfs_ioc_obj_to_stats(zfs_cmd_t *zc) @@ -1646,11 +1639,11 @@ zfs_ioc_vdev_add(zfs_cmd_t *zc) return (error); } -/* +/** * inputs: - * zc_name name of the pool - * zc_nvlist_conf nvlist of devices to remove - * zc_cookie to stop the remove? + * - zc_name name of the pool + * - zc_nvlist_conf nvlist of devices to remove + * - zc_cookie to stop the remove? 
*/ static int zfs_ioc_vdev_remove(zfs_cmd_t *zc) @@ -1845,15 +1838,15 @@ zfs_ioc_objset_stats_impl(zfs_cmd_t *zc, return (error); } -/* +/** * inputs: - * zc_name name of filesystem - * zc_nvlist_dst_size size of buffer for property nvlist + * - zc_name name of filesystem + * - zc_nvlist_dst_size size of buffer for property nvlist * * outputs: - * zc_objset_stats stats - * zc_nvlist_dst property nvlist - * zc_nvlist_dst_size size of property nvlist + * - zc_objset_stats stats + * - zc_nvlist_dst property nvlist + * - zc_nvlist_dst_size size of property nvlist */ static int zfs_ioc_objset_stats(zfs_cmd_t *zc) @@ -1873,18 +1866,19 @@ zfs_ioc_objset_stats(zfs_cmd_t *zc) return (error); } -/* +/** + * Gets received properties (distinct from local properties on or after + * SPA_VERSION_RECVD_PROPS) for callers who want to differentiate received from + * local property values. + * * inputs: - * zc_name name of filesystem - * zc_nvlist_dst_size size of buffer for property nvlist + * - zc_name name of filesystem + * - zc_nvlist_dst_size size of buffer for property nvlist * * outputs: - * zc_nvlist_dst received property nvlist - * zc_nvlist_dst_size size of received property nvlist + * - zc_nvlist_dst received property nvlist + * - zc_nvlist_dst_size size of received property nvlist * - * Gets received properties (distinct from local properties on or after - * SPA_VERSION_RECVD_PROPS) for callers who want to differentiate received from - * local property values. 
*/ static int zfs_ioc_objset_recvd_props(zfs_cmd_t *zc) @@ -1932,14 +1926,14 @@ nvl_add_zplprop(objset_t *os, nvlist_t * return (0); } -/* +/** * inputs: - * zc_name name of filesystem - * zc_nvlist_dst_size size of buffer for zpl property nvlist + * - zc_name name of filesystem + * - zc_nvlist_dst_size size of buffer for zpl property nvlist * * outputs: - * zc_nvlist_dst zpl property nvlist - * zc_nvlist_dst_size size of zpl property nvlist + * - zc_nvlist_dst zpl property nvlist + * - zc_nvlist_dst_size size of zpl property nvlist */ static int zfs_ioc_objset_zplprops(zfs_cmd_t *zc) @@ -1994,18 +1988,18 @@ dataset_name_hidden(const char *name) return (B_FALSE); } -/* +/** * inputs: - * zc_name name of filesystem - * zc_cookie zap cursor - * zc_nvlist_dst_size size of buffer for property nvlist + * - zc_name name of filesystem + * - zc_cookie zap cursor + * - zc_nvlist_dst_size size of buffer for property nvlist * * outputs: - * zc_name name of next filesystem - * zc_cookie zap cursor - * zc_objset_stats stats - * zc_nvlist_dst property nvlist - * zc_nvlist_dst_size size of property nvlist + * - zc_name name of next filesystem + * - zc_cookie zap cursor + * - zc_objset_stats stats + * - zc_nvlist_dst property nvlist + * - zc_nvlist_dst_size size of property nvlist */ static int zfs_ioc_dataset_list_next(zfs_cmd_t *zc) @@ -2065,7 +2059,7 @@ top: return (error); } -/* +/** * inputs: * zc_name name of filesystem * zc_cookie zap cursor @@ -2073,10 +2067,10 @@ top: * zc_simple when set, only name is requested * * outputs: - * zc_name name of next snapshot - * zc_objset_stats stats - * zc_nvlist_dst property nvlist - * zc_nvlist_dst_size size of property nvlist + * - zc_name name of next snapshot + * - zc_objset_stats stats + * - zc_nvlist_dst property nvlist + * - zc_nvlist_dst_size size of property nvlist */ static int zfs_ioc_snapshot_list_next(zfs_cmd_t *zc) @@ -2192,7 +2186,7 @@ zfs_prop_set_userquota(const char *dsnam return (err); } -/* +/** * If the named 
property is one that has a special function to set its value, * return 0 on success and a positive error code on failure; otherwise if it is * not one of the special properties handled by this function, return -1. @@ -2272,7 +2266,7 @@ zfs_prop_set_special(const char *dsname, return (err); } -/* +/** * This function is best effort. If it fails to set any of the given properties, * it continues to set as many as it can and returns the first error * encountered. If the caller provides a non-NULL errlist, it also gives the @@ -2445,7 +2439,7 @@ retry: return (rv); } -/* +/** * Check that all the properties are valid user properties. */ static int @@ -2512,15 +2506,15 @@ clear_received_props(objset_t *os, const return (err); } -/* +/** * inputs: - * zc_name name of filesystem - * zc_value name of property to set - * zc_nvlist_src{_size} nvlist of properties to apply - * zc_cookie received properties flag + * - zc_name name of filesystem + * - zc_value name of property to set + * - zc_nvlist_src{_size} nvlist of properties to apply + * - zc_cookie received properties flag * * outputs: - * zc_nvlist_dst{_size} error for each unapplied received property + * - zc_nvlist_dst{_size} error for each unapplied received property */ static int zfs_ioc_set_prop(zfs_cmd_t *zc) @@ -2563,11 +2557,11 @@ zfs_ioc_set_prop(zfs_cmd_t *zc) return (error); } -/* +/** * inputs: - * zc_name name of filesystem - * zc_value name of property to inherit - * zc_cookie revert to received value if TRUE + * - zc_name name of filesystem + * - zc_value name of property to inherit + * - zc_cookie revert to received value if TRUE * * outputs: none */ @@ -2714,11 +2708,11 @@ zfs_ioc_pool_get_props(zfs_cmd_t *zc) return (error); } -/* +/** * inputs: - * zc_name name of filesystem - * zc_nvlist_src{_size} nvlist of delegated permissions - * zc_perm_action allow/unallow flag + * - zc_name name of filesystem + * - zc_nvlist_src{_size} nvlist of delegated permissions + * - zc_perm_action allow/unallow flag * * 
outputs: none */ @@ -2764,12 +2758,12 @@ zfs_ioc_set_fsacl(zfs_cmd_t *zc) return (error); } -/* +/** * inputs: - * zc_name name of filesystem + * - zc_name name of filesystem * * outputs: - * zc_nvlist_src{_size} nvlist of delegated permissions + * - zc_nvlist_src{_size} nvlist of delegated permissions */ static int zfs_ioc_get_fsacl(zfs_cmd_t *zc) @@ -2785,7 +2779,7 @@ zfs_ioc_get_fsacl(zfs_cmd_t *zc) return (error); } -/* +/** * Search the vfs list for a specified resource. Returns a pointer to it * or NULL if no suitable entry is found. The caller of this routine * is responsible for releasing the returned vfs pointer. @@ -2817,17 +2811,7 @@ zfs_create_cb(objset_t *os, void *arg, c #define ZFS_PROP_UNDEFINED ((uint64_t)-1) -/* - * inputs: - * createprops list of properties requested by creator - * default_zplver zpl version to use if unspecified in createprops - * fuids_ok fuids allowed in this version of the spa? - * os parent objset pointer (NULL if root fs) - * - * outputs: - * zplprops values for the zplprops we attach to the master node object - * is_ci true if requested file system will be purely case-insensitive - * +/** * Determine the settings for utf8only, normalization and * casesensitivity. Specific values may have been requested by the * creator and/or we can inherit values from the parent dataset. If @@ -2836,6 +2820,15 @@ zfs_create_cb(objset_t *os, void *arg, c * setting is the default value. We don't actually want to create dsl * properties for these, so remove them from the source nvlist after * processing. + * + * \param[in] os parent objset pointer (NULL if root fs) + * \param[in] zplver zpl version to use if unspecified in createprops + * \param[in] fuids_ok fuids allowed in this version of the spa? 
+ * \param[in] createprops list of properties requested by creator + * \param[out] zplprops values for the zplprops we attach to the + * master node object + * \param[out] is_ci true if requested file system will be purely + * case-insensitive */ static int zfs_fill_zplprops_impl(objset_t *os, uint64_t zplver, @@ -2972,12 +2965,12 @@ zfs_fill_zplprops_root(uint64_t spa_vers return (error); } -/* +/** * inputs: - * zc_objset_type type of objset to create (fs vs zvol) - * zc_name name of new objset - * zc_value name of snapshot to clone from (may be empty) - * zc_nvlist_src{_size} nvlist of properties to apply + * - zc_objset_type type of objset to create (fs vs zvol) + * - zc_name name of new objset + * - zc_value name of snapshot to clone from (may be empty) + * - zc_nvlist_src{_size} nvlist of properties to apply * * outputs: none */ @@ -3117,15 +3110,15 @@ zfs_ioc_create(zfs_cmd_t *zc) return (error); } -/* +/** * inputs: - * zc_name name of filesystem - * zc_value short name of snapshot - * zc_cookie recursive flag - * zc_nvlist_src[_size] property list + * - zc_name name of filesystem + * - zc_value short name of snapshot + * - zc_cookie recursive flag + * - zc_nvlist_src[_size] property list * * outputs: - * zc_value short snapname (i.e. 
part after the '@') */ static int zfs_ioc_snapshot(zfs_cmd_t *zc) @@ -3193,14 +3186,14 @@ zfs_unmount_snap(const char *name, void return (0); } -/* +/** * inputs: - * zc_name name of filesystem, snaps must be under it - * zc_nvlist_src[_size] full names of snapshots to destroy - * zc_defer_destroy mark for deferred destroy + * - zc_name name of filesystem, snaps must be under it + * - zc_nvlist_src[_size] full names of snapshots to destroy + * - zc_defer_destroy mark for deferred destroy * * outputs: - * zc_name on failure, name of failed snapshot + * - zc_name on failure, name of failed snapshot */ static int zfs_ioc_destroy_snaps_nvl(zfs_cmd_t *zc) @@ -3254,11 +3247,11 @@ zfs_ioc_destroy_snaps_nvl(zfs_cmd_t *zc) return (err); } -/* +/** * inputs: - * zc_name name of dataset to destroy - * zc_objset_type type of objset - * zc_defer_destroy mark for deferred destroy + * - zc_name name of dataset to destroy + * - zc_objset_type type of objset + * - zc_defer_destroy mark for deferred destroy * * outputs: none */ @@ -3278,9 +3271,9 @@ zfs_ioc_destroy(zfs_cmd_t *zc) return (err); } -/* +/** * inputs: - * zc_name name of dataset to rollback (to most recent snapshot) + * - zc_name name of dataset to rollback (to most recent snapshot) * * outputs: none */ @@ -3362,11 +3355,11 @@ out: return (error); } -/* +/** * inputs: - * zc_name old name of dataset - * zc_value new name of dataset - * zc_cookie recursive flag (only valid for snapshots) + * - zc_name old name of dataset + * - zc_value new name of dataset + * - zc_cookie recursive flag (only valid for snapshots) * * outputs: none */ @@ -3524,7 +3517,7 @@ zfs_check_settable(const char *dsname, n return (zfs_secpolicy_setprop(dsname, prop, pair, CRED())); } -/* +/** * Removes properties from the given props list that fail permission checks * needed to clear them and to restore them in case of a receive error. For each * property, make sure we have both set and inherit permissions. 
@@ -3619,7 +3612,7 @@ propval_equals(nvpair_t *p1, nvpair_t *p } } -/* +/** * Remove properties from props if they are not going to change (as determined * by comparison with origprops). Remove them from origprops as well, since we * do not need to clear or restore properties that won't change. @@ -3656,23 +3649,23 @@ next: static boolean_t zfs_ioc_recv_inject_err; #endif -/* +/** * inputs: - * zc_name name of containing filesystem - * zc_nvlist_src{_size} nvlist of properties to apply - * zc_value name of snapshot to create - * zc_string name of clone origin (if DRR_FLAG_CLONE) - * zc_cookie file descriptor to recv from - * zc_begin_record the BEGIN record of the stream (not byteswapped) - * zc_guid force flag - * zc_cleanup_fd cleanup-on-exit file descriptor - * zc_action_handle handle for this guid/ds mapping (or zero on first call) + * - zc_name name of containing filesystem + * - zc_nvlist_src{_size} nvlist of properties to apply + * - zc_value name of snapshot to create + * - zc_string name of clone origin (if DRR_FLAG_CLONE) + * - zc_cookie file descriptor to recv from + * - zc_begin_record the BEGIN record of the stream (not byteswapped) + * - zc_guid force flag + * - zc_cleanup_fd cleanup-on-exit file descriptor + * - zc_action_handle handle for this guid/ds mapping (or zero on first call) * * outputs: - * zc_cookie number of bytes read - * zc_nvlist_dst{_size} error for each unapplied received property - * zc_obj zprop_errflags_t - * zc_action_handle handle for this guid/ds mapping + * - zc_cookie number of bytes read + * - zc_nvlist_dst{_size} error for each unapplied received property + * - zc_obj zprop_errflags_t + * - zc_action_handle handle for this guid/ds mapping */ static int zfs_ioc_recv(zfs_cmd_t *zc) @@ -3891,14 +3884,14 @@ out: return (error); } -/* +/** * inputs: - * zc_name name of snapshot to send - * zc_cookie file descriptor to send stream to - * zc_obj fromorigin flag (mutually exclusive with zc_fromobj) - * zc_sendobj objsetid of 
snapshot to send - * zc_fromobj objsetid of incremental fromsnap (may be zero) - * zc_guid if set, estimate size of stream only. zc_cookie is ignored. + * - zc_name name of snapshot to send + * - zc_cookie file descriptor to send stream to + * - zc_obj fromorigin flag (mutually exclusive with zc_fromobj) + * - zc_sendobj objsetid of snapshot to send + * - zc_fromobj objsetid of incremental fromsnap (may be zero) + * - zc_guid if set, estimate size of stream only. zc_cookie is ignored. * output size in zc_objset_type. * * outputs: none @@ -4184,13 +4177,14 @@ zfs_ioc_pool_reopen(zfs_cmd_t *zc) spa_close(spa, FTAG); return (0); } -/* + +/** * inputs: - * zc_name name of filesystem - * zc_value name of origin snapshot + * - zc_name name of filesystem + * - zc_value name of origin snapshot * * outputs: - * zc_string name of conflicting snapshot, if there is one + * - zc_string name of conflicting snapshot, if there is one */ static int zfs_ioc_promote(zfs_cmd_t *zc) @@ -4209,17 +4203,17 @@ zfs_ioc_promote(zfs_cmd_t *zc) return (dsl_dataset_promote(zc->zc_name, zc->zc_string)); } -/* +/** * Retrieve a single {user|group}{used|quota}@... property. * * inputs: - * zc_name name of filesystem - * zc_objset_type zfs_userquota_prop_t - * zc_value domain name (eg. "S-1-234-567-89") - * zc_guid RID/UID/GID + * - zc_name name of filesystem + * - zc_objset_type zfs_userquota_prop_t + * - zc_value domain name (eg. 
"S-1-234-567-89") + * - zc_guid RID/UID/GID * * outputs: - * zc_cookie property value + * - zc_cookie property value */ static int zfs_ioc_userspace_one(zfs_cmd_t *zc) @@ -4241,16 +4235,16 @@ zfs_ioc_userspace_one(zfs_cmd_t *zc) return (error); } -/* +/** * inputs: - * zc_name name of filesystem - * zc_cookie zap cursor - * zc_objset_type zfs_userquota_prop_t - * zc_nvlist_dst[_size] buffer to fill (not really an nvlist) + * - zc_name name of filesystem + * - zc_cookie zap cursor + * - zc_objset_type zfs_userquota_prop_t + * - zc_nvlist_dst[_size] buffer to fill (not really an nvlist) * * outputs: - * zc_nvlist_dst[_size] data buffer (array of zfs_useracct_t) - * zc_cookie zap cursor + * - zc_nvlist_dst[_size] data buffer (array of zfs_useracct_t) + * - zc_cookie zap cursor */ static int zfs_ioc_userspace_many(zfs_cmd_t *zc) @@ -4281,9 +4275,9 @@ zfs_ioc_userspace_many(zfs_cmd_t *zc) return (error); } -/* +/** * inputs: - * zc_name name of filesystem + * - zc_name name of filesystem * * outputs: * none @@ -4468,13 +4462,13 @@ ace_t full_access[] = { {(uid_t)-1, ACE_ALL_PERMS, ACE_EVERYONE, 0} }; -/* +/** * inputs: - * zc_name name of containing filesystem - * zc_obj object # beyond which we want next in-use object # + * - zc_name name of containing filesystem + * - zc_obj object # beyond which we want next in-use object # * * outputs: - * zc_obj next in-use object # + * - zc_obj next in-use object # */ static int zfs_ioc_next_obj(zfs_cmd_t *zc) @@ -4493,11 +4487,11 @@ zfs_ioc_next_obj(zfs_cmd_t *zc) return (error); } -/* +/** * inputs: - * zc_name name of filesystem - * zc_value prefix name for snapshot - * zc_cleanup_fd cleanup-on-exit file descriptor for calling process + * - zc_name name of filesystem + * - zc_value prefix name for snapshot + * - zc_cleanup_fd cleanup-on-exit file descriptor for calling process * * outputs: */ @@ -4527,14 +4521,14 @@ zfs_ioc_tmp_snapshot(zfs_cmd_t *zc) return (0); } -/* +/** * inputs: - * zc_name name of "to" snapshot - * 
zc_value name of "from" snapshot - * zc_cookie file descriptor to write diff data on + * - zc_name name of "to" snapshot + * - zc_value name of "from" snapshot + * - zc_cookie file descriptor to write diff data on * * outputs: - * dmu_diff_record_t's to the file descriptor + * - dmu_diff_record_t's to the file descriptor */ static int zfs_ioc_diff(zfs_cmd_t *zc) @@ -4576,7 +4570,7 @@ zfs_ioc_diff(zfs_cmd_t *zc) } #ifdef sun -/* +/** * Remove all ACL files in shares dir */ static int @@ -4730,16 +4724,16 @@ zfs_ioc_smb_acl(zfs_cmd_t *zc) #endif /* !sun */ } -/* +/** * inputs: - * zc_name name of filesystem - * zc_value short name of snap - * zc_string user-supplied tag for this hold - * zc_cookie recursive flag - * zc_temphold set if hold is temporary - * zc_cleanup_fd cleanup-on-exit file descriptor for calling process - * zc_sendobj if non-zero, the objid for zc_name@zc_value - * zc_createtxg if zc_sendobj is non-zero, snap must have zc_createtxg + * - zc_name name of filesystem + * - zc_value short name of snap + * - zc_string user-supplied tag for this hold + * - zc_cookie recursive flag + * - zc_temphold set if hold is temporary + * - zc_cleanup_fd cleanup-on-exit file descriptor for calling process + * - zc_sendobj if non-zero, the objid for zc_name@zc_value + * - zc_createtxg if zc_sendobj is non-zero, snap must have zc_createtxg * * outputs: none */ @@ -4809,12 +4803,12 @@ zfs_ioc_hold(zfs_cmd_t *zc) return (error); } -/* +/** * inputs: - * zc_name name of dataset from which we're releasing a user hold - * zc_value short name of snap - * zc_string user-supplied tag for this hold - * zc_cookie recursive flag + * - zc_name name of dataset from which we're releasing a user hold + * - zc_value short name of snap + * - zc_string user-supplied tag for this hold + * - zc_cookie recursive flag * * outputs: none */ @@ -4830,12 +4824,12 @@ zfs_ioc_release(zfs_cmd_t *zc) zc->zc_string, recursive)); } -/* +/** * inputs: - * zc_name name of filesystem + * - zc_name name 
of filesystem * * outputs: - * zc_nvlist_src{_size} nvlist of snapshot holds + * - zc_nvlist_src{_size} nvlist of snapshot holds */ static int zfs_ioc_get_holds(zfs_cmd_t *zc) @@ -4851,15 +4845,15 @@ zfs_ioc_get_holds(zfs_cmd_t *zc) return (error); } -/* +/** * inputs: - * zc_name name of new filesystem or snapshot - * zc_value full name of old snapshot + * - zc_name name of new filesystem or snapshot + * - zc_value full name of old snapshot * * outputs: - * zc_cookie space in bytes - * zc_objset_type compressed space in bytes - * zc_perm_action uncompressed space in bytes + * - zc_cookie space in bytes + * - zc_objset_type compressed space in bytes + * - zc_perm_action uncompressed space in bytes */ static int zfs_ioc_space_written(zfs_cmd_t *zc) @@ -4915,7 +4909,7 @@ zfs_ioc_space_snaps(zfs_cmd_t *zc) return (error); } -/* +/** * pool create, destroy, and export don't log the history as part of * zfsdev_ioctl, but rather zfs_ioc_pool_create, and zfs_ioc_pool_export * do the logging of those commands. @@ -5075,7 +5069,7 @@ pool_status_check(const char *name, zfs_ return (error); } -/* +/** * Find a free minor number. 
*/ minor_t @@ -5338,39 +5332,39 @@ zfs_info(dev_info_t *dip, ddi_info_cmd_t */ #ifdef sun static struct cb_ops zfs_cb_ops = { - zfsdev_open, /* open */ - zfsdev_close, /* close */ - zvol_strategy, /* strategy */ - nodev, /* print */ - zvol_dump, /* dump */ - zvol_read, /* read */ - zvol_write, /* write */ - zfsdev_ioctl, /* ioctl */ - nodev, /* devmap */ - nodev, /* mmap */ - nodev, /* segmap */ - nochpoll, /* poll */ - ddi_prop_op, /* prop_op */ - NULL, /* streamtab */ - D_NEW | D_MP | D_64BIT, /* Driver compatibility flag */ - CB_REV, /* version */ - nodev, /* async read */ - nodev, /* async write */ + zfsdev_open, /**< open */ + zfsdev_close, /**< close */ + zvol_strategy, /**< strategy */ + nodev, /**< print */ + zvol_dump, /**< dump */ + zvol_read, /**< read */ + zvol_write, /**< write */ + zfsdev_ioctl, /**< ioctl */ + nodev, /**< devmap */ + nodev, /**< mmap */ + nodev, /**< segmap */ + nochpoll, /**< poll */ + ddi_prop_op, /**< prop_op */ + NULL, /**< streamtab */ + D_NEW | D_MP | D_64BIT, /**< Driver compatibility flag */ + CB_REV, /**< version */ + nodev, /**< async read */ + nodev, /**< async write */ }; static struct dev_ops zfs_dev_ops = { - DEVO_REV, /* version */ - 0, /* refcnt */ - zfs_info, /* info */ - nulldev, /* identify */ - nulldev, /* probe */ - zfs_attach, /* attach */ - zfs_detach, /* detach */ - nodev, /* reset */ - &zfs_cb_ops, /* driver operations */ - NULL, /* no bus operations */ - NULL, /* power */ - ddi_quiesce_not_needed, /* quiesce */ + DEVO_REV, /**< version */ + 0, /**< refcnt */ + zfs_info, /**< info */ + nulldev, /**< identify */ + nulldev, /**< probe */ + zfs_attach, /**< attach */ + zfs_detach, /**< detach */ + nodev, /**< reset */ + &zfs_cb_ops, /**< driver operations */ + NULL, /**< no bus operations */ + NULL, /**< power */ + ddi_quiesce_not_needed, /**< quiesce */ }; static struct modldrv zfs_modldrv = { diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_log.c 
SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_log.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_log.c 2012-10-17 17:00:59.889592912 -0600 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_log.c 2012-11-15 17:32:21.821456952 -0700 @@ -43,7 +43,10 @@ #include #include -/* +/** + * \file zfs_log.c + * Routines to manage intent log entries + * * These zfs_log_* functions must be called within a dmu tx, in one * of 2 contexts depending on zilog->z_replay: * @@ -101,7 +104,7 @@ zfs_log_create_txtype(zil_create_t type, return (TX_MAX_TYPE); } -/* +/** * build up the log data necessary for logging xvattr_t * First lr_attr_t is initialized. following the lr_attr_t * is the mapsize and attribute bitmap copied from the xvattr_t. @@ -215,10 +218,9 @@ zfs_log_fuid_domains(zfs_fuid_info_t *fu return (start); } -/* - * zfs_log_create() is used to handle TX_CREATE, TX_CREATE_ATTR, TX_MKDIR, - * TX_MKDIR_ATTR and TX_MKXATTR - * transactions. +/** + * Handles TX_CREATE, TX_CREATE_ATTR, TX_MKDIR, TX_MKDIR_ATTR and + * TX_MKXATTR transactions. * * TX_CREATE and TX_MKDIR are standard creates, but they may have FUID * domain information appended prior to the name. In this case the @@ -345,8 +347,8 @@ zfs_log_create(zilog_t *zilog, dmu_tx_t zil_itx_assign(zilog, itx, tx); } -/* - * zfs_log_remove() handles both TX_REMOVE and TX_RMDIR transactions. +/** + * handles both TX_REMOVE and TX_RMDIR transactions. */ void zfs_log_remove(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype, @@ -369,8 +371,8 @@ zfs_log_remove(zilog_t *zilog, dmu_tx_t zil_itx_assign(zilog, itx, tx); } -/* - * zfs_log_link() handles TX_LINK transactions. +/** + * handles TX_LINK transactions. */ void zfs_log_link(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype, @@ -392,8 +394,8 @@ zfs_log_link(zilog_t *zilog, dmu_tx_t *t zil_itx_assign(zilog, itx, tx); } -/* - * zfs_log_symlink() handles TX_SYMLINK transactions. 
+/** + * handles TX_SYMLINK transactions. */ void zfs_log_symlink(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype, @@ -424,8 +426,8 @@ zfs_log_symlink(zilog_t *zilog, dmu_tx_t zil_itx_assign(zilog, itx, tx); } -/* - * zfs_log_rename() handles TX_RENAME transactions. +/** + * handles TX_RENAME transactions. */ void zfs_log_rename(zilog_t *zilog, dmu_tx_t *tx, uint64_t txtype, @@ -450,8 +452,8 @@ zfs_log_rename(zilog_t *zilog, dmu_tx_t zil_itx_assign(zilog, itx, tx); } -/* - * zfs_log_write() handles TX_WRITE transactions. +/** + * handles TX_WRITE transactions. */ ssize_t zfs_immediate_write_sz = 32768; @@ -529,8 +531,8 @@ zfs_log_write(zilog_t *zilog, dmu_tx_t * } } -/* - * zfs_log_truncate() handles TX_TRUNCATE transactions. +/** + * handles TX_TRUNCATE transactions. */ void zfs_log_truncate(zilog_t *zilog, dmu_tx_t *tx, int txtype, @@ -552,8 +554,8 @@ zfs_log_truncate(zilog_t *zilog, dmu_tx_ zil_itx_assign(zilog, itx, tx); } -/* - * zfs_log_setattr() handles TX_SETATTR transactions. +/** + * handles TX_SETATTR transactions. */ void zfs_log_setattr(zilog_t *zilog, dmu_tx_t *tx, int txtype, @@ -614,8 +616,8 @@ zfs_log_setattr(zilog_t *zilog, dmu_tx_t zil_itx_assign(zilog, itx, tx); } -/* - * zfs_log_acl() handles TX_ACL transactions. +/** + * handles TX_ACL transactions. 
*/ void zfs_log_acl(zilog_t *zilog, dmu_tx_t *tx, znode_t *zp, diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_onexit.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_onexit.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_onexit.c 2012-10-17 17:00:59.889592912 -0600 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_onexit.c 2012-11-15 14:25:09.745456161 -0700 @@ -32,7 +32,10 @@ #include #include -/* +/** + * \file zfs_onexit.c + * ZFS callback routines on process exit + * * ZFS kernel routines may add/delete callback routines to be invoked * upon process exit (triggered via the close operation from the /dev/zfs * driver). @@ -110,7 +113,7 @@ zfs_onexit_minor_to_state(minor_t minor, return (0); } -/* +/** * Consumers might need to operate by minor number instead of fd, since * they might be running in another thread (e.g. txg_sync_thread). Callers * of this function must call zfs_onexit_fd_rele() when they're finished @@ -145,7 +148,7 @@ zfs_onexit_fd_rele(int fd) releasef(fd); } -/* +/** * Add a callback to be invoked when the calling process exits. */ int @@ -192,8 +195,10 @@ zfs_onexit_find_cb(zfs_onexit_t *zo, uin return (ap); } -/* - * Delete the callback, triggering it first if 'fire' is set. +/** + * Delete the callback. + * + * \param[in] fire if set, trigger the callback before deleting it */ int zfs_onexit_del_cb(minor_t minor, uint64_t action_handle, boolean_t fire) @@ -222,7 +227,7 @@ zfs_onexit_del_cb(minor_t minor, uint64_ return (error); } -/* +/** * Return the data associated with this callback. This allows consumers * of the cleanup-on-exit interfaces to stash kernel data across system * calls, knowing that it will be cleaned up if the calling process exits. 
diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_replay.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_replay.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_replay.c 2012-10-17 17:00:59.890593791 -0600 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_replay.c 2012-11-15 17:32:59.035458810 -0700 @@ -45,7 +45,8 @@ #include #include -/* +/** + * \file zfs_replay.c * Functions to replay ZFS intent log (ZIL) records * The functions are called through a function vector (zfs_replay_vector) * which is indexed by the transaction type. @@ -168,7 +169,7 @@ zfs_replay_fuid_domain_common(zfs_fuid_i return (start); } -/* +/** * Set the uid/gid in the fuid_info structure. */ static void @@ -185,7 +186,7 @@ zfs_replay_fuid_ugid(zfs_fuid_info_t *fu fuid_infop->z_fuid_group = gid; } -/* +/** * Load fuid domains into fuid_info_t */ static zfs_fuid_info_t * @@ -212,7 +213,7 @@ zfs_replay_fuid_domain(void *buf, void * return (fuid_infop); } -/* +/** * load zfs_fuid_t's and fuid_domains into fuid_info_t */ static zfs_fuid_info_t * @@ -259,7 +260,7 @@ zfs_replay_swap_attrs(lr_attr_t *lrattr) (lrattr->lr_attr_masksize - 1)), 3 * sizeof (uint64_t)); } -/* +/** * Replay file create with optional ACL, xvattr information as well * as option FUID information. */ @@ -765,7 +766,7 @@ zfs_replay_write(zfsvfs_t *zfsvfs, lr_wr return (error); } -/* +/** * TX_WRITE2 are only generated when dmu_sync() returns EALREADY * meaning the pool block is already being synced. So now that we always write * out full blocks, all we have to do is expand the eof if @@ -941,7 +942,7 @@ zfs_replay_acl_v0(zfsvfs_t *zfsvfs, lr_a return (error); } -/* +/** * Replaying ACLs is complicated by FUID support. * The log record may contain some optional data * to be used for replaying FUID's. 
These pieces @@ -1007,7 +1008,7 @@ zfs_replay_acl(zfsvfs_t *zfsvfs, lr_acl_ return (error); } -/* +/** * Callback vectors for replaying records */ zil_replay_func_t *zfs_replay_vector[TX_MAX_TYPE] = { diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_rlock.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_rlock.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_rlock.c 2012-11-16 11:07:22.200457077 -0700 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_rlock.c 2012-11-15 18:17:57.440456199 -0700 @@ -26,9 +26,12 @@ * Copyright (c) 2012 by Delphix. All rights reserved. */ -/* +/** + * \file zfs_rlock.c + * File Range Locking for ZFS + * * This file contains the code to implement file range locking in - * ZFS, although there isn't much specific to ZFS (all that comes to mind + * ZFS, although there isn't much specific to ZFS (all that comes to mind is * support for growing the blocksize). * * Interface @@ -97,7 +100,7 @@ #include -/* +/** * Check if a write lock can be grabbed, or wait and recheck until available. */ static void @@ -183,7 +186,7 @@ wait: } } -/* +/** * If this is an original (non-proxy) lock then replace it by * a proxy and return the proxy. */ @@ -215,7 +218,7 @@ zfs_range_proxify(avl_tree_t *tree, rl_t return (proxy); } -/* +/** * Split the range lock at the supplied offset * returning the *front* proxy. */ @@ -247,7 +250,7 @@ zfs_range_split(avl_tree_t *tree, rl_t * return (front); } -/* +/** * Create and add a new proxy range lock for the supplied range. */ static void @@ -349,7 +352,7 @@ zfs_range_add_reader(avl_tree_t *tree, r (off + len) - (prev->r_off + prev->r_len)); } -/* +/** * Check if a reader lock can be grabbed, or wait and recheck until available. */ static void @@ -416,12 +419,6 @@ got_lock: zfs_range_add_reader(tree, new, prev, where); } -/* - * Lock a range (offset, length) as either shared (RL_READER) - * or exclusive (RL_WRITER). 
Returns the range lock structure - * for later unlocking or reduce range (if entire file - * previously locked as RL_WRITER). - */ rl_t * zfs_range_lock(znode_t *zp, uint64_t off, uint64_t len, rl_type_t type) { @@ -456,7 +453,7 @@ zfs_range_lock(znode_t *zp, uint64_t off return (new); } -/* +/** * Unlock a reader lock */ static void @@ -523,9 +520,6 @@ zfs_range_unlock_reader(znode_t *zp, rl_ kmem_free(remove, sizeof (rl_t)); } -/* - * Unlock range and destroy range lock structure. - */ void zfs_range_unlock(rl_t *rl) { @@ -559,7 +553,7 @@ zfs_range_unlock(rl_t *rl) } } -/* +/** * Reduce range locked as RL_WRITER from whole file to specified range. * Asserts the whole file is exclusivly locked and so there's only one * entry in the tree. @@ -587,10 +581,6 @@ zfs_range_reduce(rl_t *rl, uint64_t off, cv_broadcast(&rl->r_rd_cv); } -/* - * AVL comparison function used to order range locks - * Locks are ordered on the start offset of the range. - */ int zfs_range_compare(const void *arg1, const void *arg2) { diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_sa.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_sa.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_sa.c 2012-10-17 17:00:59.891591215 -0600 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_sa.c 2012-11-15 14:28:41.233457180 -0700 @@ -29,7 +29,7 @@ #include #include -/* +/** * ZPL attribute registration table. * Order of attributes doesn't matter * a unique value will be assigned for each @@ -184,10 +184,10 @@ zfs_sa_set_scanstamp(znode_t *zp, xvattr } } -/* +/** * I'm not convinced we should do any of this upgrade. * since the SA code can read both old/new znode formats - * with probably little to know performance difference. + * with probably little to no performance difference. * * All new files will be created with the new format. 
*/ diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vfsops.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vfsops.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vfsops.c 2012-11-16 11:07:22.201457253 -0700 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vfsops.c 2012-11-15 18:17:57.456457596 -0700 @@ -72,6 +72,9 @@ int zfs_super_owner; SYSCTL_INT(_vfs_zfs, OID_AUTO, super_owner, CTLFLAG_RW, &zfs_super_owner, 0, "File system owner can perform privileged operation on his file systems"); +/** + * \ingroup tunables + */ int zfs_debug_level; TUNABLE_INT("vfs.zfs.debug", &zfs_debug_level); SYSCTL_INT(_vfs_zfs, OID_AUTO, debug, CTLFLAG_RW, &zfs_debug_level, 0, @@ -113,7 +116,7 @@ static struct vfsops zfs_vfsops = { VFS_SET(zfs_vfsops, zfs, VFCF_JAIL | VFCF_DELEGADMIN); -/* +/** * We need to keep a count of active fs's. * This is necessary to prevent our module * from being unloaded after a umount -f @@ -329,7 +332,7 @@ exec_changed_cb(void *arg, uint64_t newv } } -/* +/** * The nbmand mount option can be changed at mount time. * We can't allow it to be toggled on live file systems or incorrect * behavior may be seen from cifs clients @@ -686,7 +689,7 @@ zfs_userspace_many(zfsvfs_t *zfsvfs, zfs return (error); } -/* +/** * buf must be big enough (eg, 32 bytes) */ static int @@ -1244,7 +1247,7 @@ zfs_unregister_callbacks(zfsvfs_t *zfsvf } #ifdef SECLABEL -/* +/** * Convert a decimal digit string to a uint64_t integer. */ static int @@ -1263,7 +1266,7 @@ str_to_uint64(char *str, uint64_t *objnu return (0); } -/* +/** * The boot path passed from the boot loader is in the form of * "rootpool-name/root-filesystem-object-number'. Convert this * string to a dataset name: "rootpool-name/root-filesystem-name". 
@@ -1299,10 +1302,9 @@ zfs_parse_bootfs(char *bpath, char *outp return (error); } -/* - * zfs_check_global_label: - * Check that the hex label string is appropriate for the dataset - * being mounted into the global_zone proper. +/** + * Check that the hex label string is appropriate for the dataset being + * mounted into the global_zone proper. * * Return an error if the hex label string is not default or * admin_low/admin_high. For admin_low labels, the corresponding @@ -1327,16 +1329,13 @@ zfs_check_global_label(const char *dsnam return (EACCES); } -/* - * zfs_mount_label_policy: - * Determine whether the mount is allowed according to MAC check. - * by comparing (where appropriate) label of the dataset against - * the label of the zone being mounted into. If the dataset has - * no label, create one. +/** + * Determine whether the mount is allowed according to MAC check, + * by comparing (where appropriate) label of the dataset against + * the label of the zone being mounted into. If the dataset has + * no label, create one. * - * Returns: - * 0 : access allowed - * >0 : error code, such as EACCES + * \returns 0 on success, or an errno on failure */ static int zfs_mount_label_policy(vfs_t *vfsp, char *osname) @@ -1774,11 +1773,11 @@ zfs_root(vfs_t *vfsp, int flags, vnode_t return (error); } -/* +/** * Teardown the zfsvfs::z_os. * - * Note, if 'unmounting' if FALSE, we return with the 'z_teardown_lock' - * and 'z_teardown_inactive_lock' held. + * \note If 'unmounting' is FALSE, we return with the 'z_teardown_lock' + * and 'z_teardown_inactive_lock' held. */ static int zfsvfs_teardown(zfsvfs_t *zfsvfs, boolean_t unmounting) @@ -2158,11 +2157,11 @@ zfs_fhtovp(vfs_t *vfsp, fid_t *fidp, int return (err); } -/* +/** * Block out VOPs and close zfsvfs_t::z_os * - * Note, if successful, then we return with the 'z_teardown_lock' and - * 'z_teardown_inactive_lock' write held. 
+ * \note If successful, then we return with the 'z_teardown_lock' and + * 'z_teardown_inactive_lock' write held. */ int zfs_suspend_fs(zfsvfs_t *zfsvfs) @@ -2176,7 +2175,7 @@ zfs_suspend_fs(zfsvfs_t *zfsvfs) return (0); } -/* +/** * Reopen zfsvfs_t::z_os and release VOPs. */ int @@ -2415,7 +2414,7 @@ zfs_set_version(zfsvfs_t *zfsvfs, uint64 return (0); } -/* +/** * Read a property stored within the master node. */ int diff -Nurp SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c --- SpectraBSD_head/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c 2012-11-16 11:07:22.205458854 -0700 +++ SpectraBSD_doxygen/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c 2012-11-15 18:17:57.483457402 -0700 @@ -76,7 +76,10 @@ #include #include -/* +/** + * \file zfs_vnops.c + * ZFS VNode operations. Entry points to the ZFS module + * * Programming rules. * * Each vnode op performs some logical unit of work. To do this, the ZPL must @@ -87,13 +90,12 @@ * The ordering of events is important to avoid deadlocks and references * to freed memory. The example below illustrates the following Big Rules: * - * (1) A check must be made in each zfs thread for a mounted file system. + * -# A check must be made in each zfs thread for a mounted file system. * This is done avoiding races using ZFS_ENTER(zfsvfs). - * A ZFS_EXIT(zfsvfs) is needed before all returns. Any znodes - * must be checked with ZFS_VERIFY_ZP(zp). Both of these macros - * can return EIO from the calling function. - * - * (2) VN_RELE() should always be the last thing except for zil_commit() + * A ZFS_EXIT(zfsvfs) is needed before all returns. Any znodes + * must be checked with ZFS_VERIFY_ZP(zp). Both of these macros + * can return EIO from the calling function. + * -# VN_RELE() should always be the last thing except for zil_commit() * (if necessary) and ZFS_EXIT(). 
This is for 3 reasons: * First, if it's the last reference, the vnode/znode * can be freed, so the zp may point to freed memory. Second, the last @@ -102,68 +104,65 @@ * cached atime changes. Third, zfs_zinactive() may require a new tx, * which could deadlock the system if you were already holding one. * If you must call VN_RELE() within a tx then use VN_RELE_ASYNC(). - * - * (3) All range locks must be grabbed before calling dmu_tx_assign(), + * -# All range locks must be grabbed before calling dmu_tx_assign(), * as they can span dmu_tx_assign() calls. - * - * (4) Always pass TXG_NOWAIT as the second argument to dmu_tx_assign(). + * -# Always pass TXG_NOWAIT as the second argument to dmu_tx_assign(). * This is critical because we don't want to block while holding locks. * Note, in particular, that if a lock is sometimes acquired before * the tx assigns, and sometimes after (e.g. z_lock), then failing to * use a non-blocking assign can deadlock the system. The scenario: - * - * Thread A has grabbed a lock before calling dmu_tx_assign(). - * Thread B is in an already-assigned tx, and blocks for this lock. - * Thread A calls dmu_tx_assign(TXG_WAIT) and blocks in txg_wait_open() - * forever, because the previous txg can't quiesce until B's tx commits. - * - * If dmu_tx_assign() returns ERESTART and zfsvfs->z_assign is TXG_NOWAIT, - * then drop all locks, call dmu_tx_wait(), and try again. - * - * (5) If the operation succeeded, generate the intent log entry for it + * - Thread A has grabbed a lock before calling dmu_tx_assign(). + * - Thread B is in an already-assigned tx, and blocks for this lock. + * - Thread A calls dmu_tx_assign(TXG_WAIT) and blocks in + * txg_wait_open() forever, because the previous txg can't + * quiesce until B's tx commits. + * . + * If dmu_tx_assign() returns ERESTART and zfsvfs->z_assign is + * TXG_NOWAIT, then drop all locks, call dmu_tx_wait(), and try again. 
+ * -# If the operation succeeded, generate the intent log entry for it * before dropping locks. This ensures that the ordering of events * in the intent log matches the order in which they actually occurred. - * During ZIL replay the zfs_log_* functions will update the sequence + * During ZIL replay the zfs_log_* functions will update the sequence * number to indicate the zil transaction has replayed. - * - * (6) At the end of each vnode op, the DMU tx must always commit, + * -# At the end of each vnode op, the DMU tx must always commit, * regardless of whether there were any errors. - * - * (7) After dropping all locks, invoke zil_commit(zilog, foid) + * -# After dropping all locks, invoke zil_commit(zilog, foid) * to ensure that synchronous semantics are provided when necessary. * * In general, this is how things should be ordered in each vnode op: * - * ZFS_ENTER(zfsvfs); // exit if unmounted - * top: - * zfs_dirent_lock(&dl, ...) // lock directory entry (may VN_HOLD()) - * rw_enter(...); // grab any other locks you need - * tx = dmu_tx_create(...); // get DMU tx - * dmu_tx_hold_*(); // hold each object you might modify - * error = dmu_tx_assign(tx, TXG_NOWAIT); // try to assign - * if (error) { - * rw_exit(...); // drop locks - * zfs_dirent_unlock(dl); // unlock directory entry - * VN_RELE(...); // release held vnodes - * if (error == ERESTART) { - * dmu_tx_wait(tx); - * dmu_tx_abort(tx); - * goto top; - * } - * dmu_tx_abort(tx); // abort DMU tx - * ZFS_EXIT(zfsvfs); // finished in zfs - * return (error); // really out of space - * } - * error = do_real_work(); // do whatever this VOP does - * if (error == 0) - * zfs_log_*(...); // on success, make ZIL entry - * dmu_tx_commit(tx); // commit DMU tx -- error or not - * rw_exit(...); // drop locks - * zfs_dirent_unlock(dl); // unlock directory entry - * VN_RELE(...); // release held vnodes - * zil_commit(zilog, foid); // synchronous when necessary - * ZFS_EXIT(zfsvfs); // finished in zfs - * return (error); // 
done, report error + \code + ZFS_ENTER(zfsvfs); // exit if unmounted + top: + zfs_dirent_lock(&dl, ...) // lock directory entry (may VN_HOLD()) + rw_enter(...); // grab any other locks you need + tx = dmu_tx_create(...); // get DMU tx + dmu_tx_hold_*(); // hold each object you might modify + error = dmu_tx_assign(tx, TXG_NOWAIT); // try to assign + if (error) { + rw_exit(...); // drop locks + zfs_dirent_unlock(dl); // unlock directory entry + VN_RELE(...); // release held vnodes + if (error == ERESTART) { + dmu_tx_wait(tx); + dmu_tx_abort(tx); + goto top; + } + dmu_tx_abort(tx); // abort DMU tx + ZFS_EXIT(zfsvfs); // finished in zfs + return (error); // really out of space + } + error = do_real_work(); // do whatever this VOP does + if (error == 0) + zfs_log_*(...); // on success, make ZIL entry + dmu_tx_commit(tx); // commit DMU tx -- error or not + rw_exit(...); // drop locks + zfs_dirent_unlock(dl); // unlock directory entry + VN_RELE(...); // release held vnodes + zil_commit(zilog, foid); // synchronous when necessary + ZFS_EXIT(zfsvfs); // finished in zfs + return (error); // done, report error + \endcode */ /* ARGSUSED */ @@ -229,7 +228,7 @@ zfs_close(vnode_t *vp, int flag, int cou return (0); } -/* +/** * Lseek support for finding holes (cmd == _FIO_SEEK_HOLE) and * data (cmd == _FIO_SEEK_DATA). "off" is an in/out parameter. */ @@ -380,7 +379,7 @@ zfs_unmap_page(struct sf_buf *sf) sf_buf_free(sf); } -/* +/** * When a file is memory mapped, we must keep the IO data synchronized * between the DMU cache and the memory mapped pages. What this means: * @@ -427,14 +426,14 @@ update_pages(vnode_t *vp, int64_t start, VM_OBJECT_UNLOCK(obj); } -/* +/** * Read with UIO_NOCOPY flag means that sendfile(2) requests * ZFS to populate a range of page cache pages with data. 
* - * NOTE: this function could be optimized to pre-allocate - * all pages in advance, drain VPO_BUSY on all of them, - * map them into contiguous KVA region and populate them - * in one single dmu_read() call. + * \note This function could be optimized to pre-allocate + * all pages in advance, drain VPO_BUSY on all of them, + * map them into contiguous KVA region and populate them + * in one single dmu_read() call. */ static int mappedread_sf(vnode_t *vp, int nbytes, uio_t *uio) @@ -492,15 +491,15 @@ mappedread_sf(vnode_t *vp, int nbytes, u return (error); } -/* +/** * When a file is memory mapped, we must keep the IO data synchronized * between the DMU cache and the memory mapped pages. What this means: * * On Read: We "read" preferentially from memory mapped pages, * else we default from the dmu buffer. * - * NOTE: We will always "break up" the IO into PAGESIZE uiomoves when - * the file is memory mapped. + * \note We will always "break up" the IO into PAGESIZE uiomoves when + * the file is memory mapped. */ static int mappedread(vnode_t *vp, int nbytes, uio_t *uio) @@ -551,23 +550,21 @@ mappedread(vnode_t *vp, int nbytes, uio_ offset_t zfs_read_chunk_size = 1024 * 1024; /* Tunable */ -/* +/** * Read bytes from specified file into supplied buffer. * - * IN: vp - vnode of file to be read from. - * uio - structure supplying read location, range info, - * and return buffer. - * ioflag - SYNC flags; used to provide FRSYNC semantics. - * cr - credentials of caller. - * ct - caller context - * - * OUT: uio - updated offset and range, buffer filled. - * - * RETURN: 0 if success - * error code if failure + * \param[in,out] vp vnode of file to be read from. On return, + * atime is updated if byte count > 0 + * \param[in,out] uio structure supplying read location, range info, + * and return buffer. 
On return, the range and + * offset is updated, and the buffer is filled + * \param[in] ioflag SYNC flags; used to provide FRSYNC semantics + * \param[in] cr credentials of caller + * \param[in] ct caller context * + * \return 0 if success, or an error code on failure * Side Effects: - * vp - atime updated if byte count > 0 + * - vp - atime updated if byte count > 0 */ /* ARGSUSED */ static int @@ -702,25 +699,23 @@ out: return (error); } -/* +/** * Write the bytes to a file. * - * IN: vp - vnode of file to be written to. - * uio - structure supplying write location, range info, - * and data buffer. - * ioflag - FAPPEND flag set if in append mode. - * cr - credentials of caller. - * ct - caller context (NFS/CIFS fem monitor only) - * - * OUT: uio - updated offset and range. + * \param[in,out] vp vnode of file to be written to. On return, + * ctime and/or mtime is updated if byte count > 0 + * \param[in,out] uio structure supplying write location, range + * info, and data buffer. On return, offset and + * range is updated + * \param[in] ioflag FAPPEND, FSYNC, and/or FDSYNC + * \param[in] cr credentials of caller + * \param[in] ct caller context (NFS/CIFS fem monitor only) * - * RETURN: 0 if success - * error code if failure + * \return 0 on success, error code on failure * * Timestamps: - * vp - ctime|mtime updated if byte count > 0 + * - vp - ctime|mtime updated if byte count > 0 */ - /* ARGSUSED */ static int zfs_write(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct) @@ -1115,7 +1110,7 @@ zfs_get_done(zgd_t *zgd, int error) static int zil_fault_io = 0; #endif -/* +/** * Get data to generate a TX_WRITE intent log record. */ int @@ -1256,7 +1251,7 @@ zfs_access(vnode_t *vp, int mode, int fl return (error); } -/* +/** * If vnode is for a device return a specfs vnode instead. */ static int @@ -1277,24 +1272,17 @@ specvp_check(vnode_t **vpp, cred_t *cr) } -/* +/** * Lookup an entry in a directory, or an extended attribute directory. 
* If it exists, return a held vnode reference for it. * - * IN: dvp - vnode of directory to search. - * nm - name of entry to lookup. - * pnp - full pathname to lookup [UNUSED]. - * flags - LOOKUP_XATTR set if looking for an attribute. - * rdir - root directory vnode [UNUSED]. - * cr - credentials of caller. - * ct - caller context - * direntflags - directory lookup flags - * realpnp - returned pathname. - * - * OUT: vpp - vnode of located entry, NULL if not found. + * \param[in] dvp vnode of directory to search + * \param[in] nm name of entry to lookup + * \param[out] vpp vnode of located entry, NULL if not found. + * \param[in] cr credentials of caller + * \param[in] flags LOOKUP_XATTR set if looking for an attribute. * - * RETURN: 0 if success - * error code if failure + * \return 0 on success, error code on failure * * Timestamps: * NA @@ -1307,8 +1295,8 @@ zfs_lookup(vnode_t *dvp, char *nm, vnode znode_t *zdp = VTOZ(dvp); zfsvfs_t *zfsvfs = zdp->z_zfsvfs; int error = 0; - int *direntflags = NULL; - void *realpnp = NULL; + int *direntflags = NULL; /* directory lookup flags */ + void *realpnp = NULL; /* returned pathname */ /* fast path */ if (!(flags & (LOOKUP_XATTR | FIGNORECASE))) { @@ -1474,31 +1462,31 @@ zfs_lookup(vnode_t *dvp, char *nm, vnode return (error); } -/* +/** * Attempt to create a new entry in a directory. If the entry * already exists, truncate the file if permissible, else return * an error. Return the vp of the created or trunc'd file. * - * IN: dvp - vnode of directory to put new file entry in. - * name - name of new file entry. - * vap - attributes of new file. - * excl - flag indicating exclusive or non-exclusive mode. - * mode - mode to open file with. - * cr - credentials of caller. - * flag - large file flag [UNUSED]. - * ct - caller context - * vsecp - ACL to be set - * - * OUT: vpp - vnode of created or trunc'd entry. + * \param[in,out] dvp vnode of directory to put new file entry in. 
+ * On return, ctime|mtime are updated if new + * entry created + * \param[in] name name of new file entry. + * \param[in] vap attributes of new file. + * \param[in] excl flag indicating exclusive or non-exclusive + * mode. + * \param[in] mode mode to open file with. + * \param[out] vpp storage for the returned vnode of created or + * trunc'd entry. On return, ctime|mtime are + * updated, and atime is updated if the file is + * new + * \param[in] cr credentials of caller * - * RETURN: 0 if success - * error code if failure + * \return 0 on success, error code on failure. * * Timestamps: - * dvp - ctime|mtime updated if new entry created - * vp - ctime|mtime always, atime if new + * - dvp - ctime|mtime updated if new entry created + * - vp - ctime|mtime always, atime if new */ - /* ARGSUSED */ static int zfs_create(vnode_t *dvp, char *name, vattr_t *vap, int excl, int mode, @@ -1517,8 +1505,8 @@ zfs_create(vnode_t *dvp, char *name, vat zfs_acl_ids_t acl_ids; boolean_t fuid_dirtied; boolean_t have_acl = B_FALSE; - void *vsecp = NULL; - int flag = 0; + void *vsecp = NULL; /* ACL to be set */ + int flag = 0; /* Large file flag */ /* * If we have an ephemeral id, ACL, or XVATTR then @@ -1733,25 +1721,24 @@ out: return (error); } -/* +uint64_t null_xattr = 0; + +/** * Remove an entry from a directory. * - * IN: dvp - vnode of directory to remove entry from. - * name - name of entry to remove. - * cr - credentials of caller. - * ct - caller context - * flags - case flags + * \param[in,out] dvp vnode of directory to remove entry from. On + * return, ctime and mtime are updated + * \param[in] name name of entry to remove. + * \param[in] cr credentials of caller. 
+ * \param[in] ct caller context + * \param[in] flags case flags * - * RETURN: 0 if success - * error code if failure + * \return 0 on success, or error code on failure * * Timestamps: - * dvp - ctime|mtime - * vp - ctime (if nlink > 0) + * - dvp - ctime|mtime + * - vp - ctime (if nlink > 0) */ - -uint64_t null_xattr = 0; - /*ARGSUSED*/ static int zfs_remove(vnode_t *dvp, char *name, cred_t *cr, caller_context_t *ct, @@ -1964,25 +1951,25 @@ out: return (error); } -/* +/** * Create a new directory and insert it into dvp using the name * provided. Return a pointer to the inserted directory. * - * IN: dvp - vnode of directory to add subdir to. - * dirname - name of new directory. - * vap - attributes of new directory. - * cr - credentials of caller. - * ct - caller context - * vsecp - ACL to be set + * \param[in,out] dvp vnode of directory to add subdir to. On + * return, ctime and mtime are updated + * \param[in] dirname name of new directory. + * \param[in] vap attributes of new directory. + * \param[out] vpp storage for the returned vnode of the created + * directory. Ctime, mtime, and atime are updated + * \param[in] cr credentials of caller. + * \param[in] ct caller context + * \param[in] vsecp ACL to be set * - * OUT: vpp - vnode of created directory. - * - * RETURN: 0 if success - * error code if failure + * \return 0 on success, error code on failure * * Timestamps: - * dvp - ctime|mtime updated - * vp - ctime|mtime|atime updated + * - dvp - ctime|mtime updated + * - vp - ctime|mtime|atime updated */ /*ARGSUSED*/ static int @@ -2146,23 +2133,23 @@ top: return (0); } -/* +/** * Remove a directory subdir entry. If the current working * directory is the same as the subdir to be removed, the * remove will fail. * - * IN: dvp - vnode of directory to remove from. - * name - name of directory to be removed. - * cwd - vnode of current working directory. - * cr - credentials of caller. 
- * ct - caller context - * flags - case flags + * \param[in,out] dvp vnode of directory to remove from. On return, + * ctime and mtime are updated + * \param[in] name name of directory to be removed. + * \param[in] cwd vnode of current working directory. + * \param[in] cr credentials of caller. + * \param[in] ct caller context + * \param[in] flags case flags * - * RETURN: 0 if success - * error code if failure + * \return 0 on success, error code on failure * * Timestamps: - * dvp - ctime|mtime updated + * - dvp - ctime|mtime updated */ /*ARGSUSED*/ static int @@ -2281,35 +2268,37 @@ out: return (error); } -/* - * Read as many directory entries as will fit into the provided - * buffer from the given directory cursor position (specified in - * the uio structure. - * - * IN: vp - vnode of directory to read. - * uio - structure supplying read location, range info, - * and return buffer. - * cr - credentials of caller. - * ct - caller context - * flags - case flags +/** + * Read multiple directory entries. * - * OUT: uio - updated offset and range, buffer filled. - * eofp - set to true if end-of-file detected. + * Read as many directory entries as will fit into the provided buffer from + * the given directory cursor position (specified in the uio structure. * - * RETURN: 0 if success - * error code if failure + * \param[in,out] vp vnode of directory to read. On return, atime + * is updated + * \param[in,out] uio structure supplying read location, range info, + * and return buffer. On return, offset and + * range are updated and buffer is filled + * \param[in] cr credentials of caller + * \param[out] eofp set to true if end-of-file detected + * \param[out] ncookies number of returned cookies + * \param[out] cookies array of cookies returned by ZAP. Note that + * the low 4 bits of the cookie returned by zap + * is always zero. This allows us to use the low + * range for "special" directory entries: We use + * 0 for '.', and 1 for '..'. 
If this is the + * root of the filesystem, we use the offset 2 + * for the '.zfs' directory. * - * Timestamps: - * vp - atime updated + * \return 0 on success, error code on failure * - * Note that the low 4 bits of the cookie returned by zap is always zero. - * This allows us to use the low range for "special" directory entries: - * We use 0 for '.', and 1 for '..'. If this is the root of the filesystem, - * we use the offset 2 for the '.zfs' directory. + * Timestamps: + * - vp - atime updated */ /* ARGSUSED */ static int -zfs_readdir(vnode_t *vp, uio_t *uio, cred_t *cr, int *eofp, int *ncookies, u_long **cookies) +zfs_readdir(vnode_t *vp, uio_t *uio, cred_t *cr, int *eofp, int *ncookies, + u_long **cookies) { znode_t *zp = VTOZ(vp); iovec_t *iovp; @@ -2332,7 +2321,7 @@ zfs_readdir(vnode_t *vp, uio_t *uio, cre uint8_t type; int ncooks; u_long *cooks = NULL; - int flags = 0; + int flags = 0; /* case flags */ ZFS_ENTER(zfsvfs); ZFS_VERIFY_ZP(zp); @@ -2628,20 +2617,19 @@ zfs_fsync(vnode_t *vp, int syncflag, cre } -/* +/** * Get the requested file attributes and place them in the provided * vattr structure. * - * IN: vp - vnode of file. - * vap - va_mask identifies requested attributes. - * If AT_XVATTR set, then optional attrs are requested - * flags - ATTR_NOACLCHECK (CIFS server context) - * cr - credentials of caller. - * ct - caller context - * - * OUT: vap - attribute values. + * \param[in] vp vnode of file. + * \param[in,out] vap va_mask identifies requested attributes. If + * AT_XVATTR set, then optional attrs are + * requested. Attribute values are returned here + * \param[in] flags ATTR_NOACLCHECK (CIFS server context) + * \param[in] cr credentials of caller. + * \param[in] ct caller context * - * RETURN: 0 (always succeeds) + * \return 0 (always succeeds) */ /* ARGSUSED */ static int @@ -2850,28 +2838,30 @@ zfs_getattr(vnode_t *vp, vattr_t *vap, i return (0); } -/* +/** * Set the file attributes to the values contained in the * vattr structure. 
* - * IN: vp - vnode of file to be modified. - * vap - new attribute values. - * If AT_XVATTR set, then optional attrs are being set - * flags - ATTR_UTIME set if non-default time values provided. - * - ATTR_NOACLCHECK (CIFS context only). - * cr - credentials of caller. - * ct - caller context + * \param[in,out] vp vnode of file to be modified. On return, + * ctime is updated. mtime is updated if the size + * changed + * \param[in] vap new attribute values. If AT_XVATTR set, then + * optional attrs are being set + * \param[in] flags flags - ATTR_UTIME set if non-default time + * values provided - ATTR_NOACLCHECK (CIFS context + * only) + * \param[in] cr credentials of caller + * \param[in] ct caller context * - * RETURN: 0 if success - * error code if failure + * \return 0 on success, error code on failure * * Timestamps: - * vp - ctime updated, mtime updated if size changed. + * - vp - ctime updated, mtime updated if size changed. */ /* ARGSUSED */ static int zfs_setattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr, - caller_context_t *ct) + caller_context_t *ct) { znode_t *zp = VTOZ(vp); zfsvfs_t *zfsvfs = zp->z_zfsvfs; @@ -3495,12 +3485,12 @@ out2: } typedef struct zfs_zlock { - krwlock_t *zl_rwlock; /* lock we acquired */ - znode_t *zl_znode; /* znode we held */ - struct zfs_zlock *zl_next; /* next in list */ + krwlock_t *zl_rwlock; /**< lock we acquired */ + znode_t *zl_znode; /**< znode we held */ + struct zfs_zlock *zl_next; /**< next in list */ } zfs_zlock_t; -/* +/** * Drop locks and release vnodes that were held by zfs_rename_lock(). */ static void @@ -3517,7 +3507,7 @@ zfs_rename_unlock(zfs_zlock_t **zlpp) } } -/* +/** * Search back through the directory tree, using the ".." entries. * Lock each directory in the chain to prevent concurrent renames. * Fail any attempt to move a directory into one of its own descendants. 
@@ -3591,23 +3581,24 @@ zfs_rename_lock(znode_t *szp, znode_t *t return (0); } -/* +/** * Move an entry from the provided source directory to the target * directory. Change the entry name as indicated. * - * IN: sdvp - Source directory containing the "old entry". - * snm - Old entry name. - * tdvp - Target directory to contain the "new entry". - * tnm - New entry name. - * cr - credentials of caller. - * ct - caller context - * flags - case flags + * \param[in,out] sdvp Source directory containing the "old entry". + * ctime|mtime are updated on return + * \param[in] snm Old entry name. + * \param[in,out] tdvp Target directory to contain the "new entry". + * ctime|mtime are updated on return + * \param[in] tnm New entry name. + * \param[in] cr credentials of caller. + * \param[in] ct caller context + * \param[in] flags case flags * - * RETURN: 0 if success - * error code if failure + * \return 0 on success, error code on failure * * Timestamps: - * sdvp,tdvp - ctime|mtime updated + * - sdvp,tdvp - ctime|mtime updated */ /*ARGSUSED*/ static int @@ -3960,22 +3951,20 @@ out: return (error); } -/* +/** * Insert the indicated symbolic reference entry into the directory. * - * IN: dvp - Directory to contain new symbolic link. - * link - Name for new symlink entry. - * vap - Attributes of new entry. - * target - Target path of new symlink. - * cr - credentials of caller. - * ct - caller context - * flags - case flags + * \param[in,out] dvp Directory to contain new symbolic link. 
On
+ * return, ctime and mtime are updated
+ * \param[in] name Name for new symlink entry
+ * \param[in] vap Attributes of new entry
+ * \param[in] link target path of new symlink
+ * \param[in] cr credentials of caller
 *
- * RETURN: 0 if success
- * error code if failure
+ * \return 0 on success, error code on failure
 *
 * Timestamps:
- * dvp - ctime|mtime updated
+ * - dvp - ctime|mtime updated
 */
/*ARGSUSED*/
static int
@@ -3993,7 +3982,7 @@ zfs_symlink(vnode_t *dvp, vnode_t **vpp,
 zfs_acl_ids_t acl_ids;
 boolean_t fuid_dirtied;
 uint64_t txtype = TX_SYMLINK;
- int flags = 0;
+ int flags = 0; /* Case flags */
 
 ASSERT(vap->va_type == VLNK);
 
@@ -4113,22 +4102,20 @@ top:
 return (error);
 }
 
-/*
+/**
 * Return, in the buffer contained in the provided uio structure,
 * the symbolic path referred to by vp.
 *
- * IN: vp - vnode of symbolic link.
- * uoip - structure to contain the link path.
- * cr - credentials of caller.
- * ct - caller context
- *
- * OUT: uio - structure to contain the link path.
+ * \param[in,out] vp vnode of symbolic link. On return, atime is
+ * updated
+ * \param[in,out] uio structure to contain the link path.
+ * \param[in] cr credentials of caller.
+ * \param[in] ct caller context
 *
- * RETURN: 0 if success
- * error code if failure
+ * \return 0 on success, error code on failure
 *
 * Timestamps:
- * vp - atime updated
+ * - vp - atime updated
 */
/* ARGSUSED */
static int
@@ -4155,21 +4142,21 @@ zfs_readlink(vnode_t *vp, uio_t *uio, cr
 return (error);
}

-/*
+/**
 * Insert a new entry into directory tdvp referencing svp.
 *
- * IN: tdvp - Directory to contain new entry.
- * svp - vnode of new entry.
- * name - name of new entry.
- * cr - credentials of caller.
- * ct - caller context
+ * \param[in,out] tdvp Directory to contain new entry. On return,
+ * ctime and mtime are updated
+ * \param[in,out] svp vnode of new entry. On return, ctime is updated
+ * \param[in] name name of new entry. 
+ * \param[in] cr credentials of caller + * \param[in] ct caller context * - * RETURN: 0 if success - * error code if failure + * \return 0 on success, error code on failure * * Timestamps: - * tdvp - ctime|mtime updated - * svp - ctime updated + * - tdvp - ctime|mtime updated + * - svp - ctime updated */ /* ARGSUSED */ static int @@ -4310,7 +4297,7 @@ top: } #ifdef sun -/* +/** * zfs_null_putapage() is used when the file system has been force * unmounted. It just drops the pages. */ @@ -4323,28 +4310,26 @@ zfs_null_putapage(vnode_t *vp, page_t *p return (0); } -/* +/** * Push a page out to disk, klustering if possible. * - * IN: vp - file to push page to. - * pp - page to push. - * flags - additional flags. - * cr - credentials of caller. - * - * OUT: offp - start of range pushed. - * lenp - len of range pushed. - * - * RETURN: 0 if success - * error code if failure - * - * NOTE: callers must have locked the page to be pushed. On - * exit, the page (and all other pages in the kluster) must be - * unlocked. + * \param[in] vp file to push page to. + * \param[in] pp page to push. + * \param[out] offp start of range pushed. + * \param[out] lenp len of range pushed. + * \param[in] flags additional flags. + * \param[in] cr credentials of caller. + * + * \return 0 on success, error code on failure + * + * \note Callers must have locked the page to be pushed. On + * exit, the page (and all other pages in the kluster) must be + * unlocked. */ /* ARGSUSED */ static int zfs_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp, - size_t *lenp, int flags, cred_t *cr) + size_t *lenp, int flags, cred_t *cr) { znode_t *zp = VTOZ(vp); zfsvfs_t *zfsvfs = zp->z_zfsvfs; @@ -4446,22 +4431,22 @@ out: return (err); } -/* +/** * Copy the portion of the file indicated from pages into the file. * The pages are stored in a page list attached to the files vnode. * - * IN: vp - vnode of file to push page data to. - * off - position in file to put data. - * len - amount of data to write. 
- * flags - flags to control the operation. - * cr - credentials of caller. - * ct - caller context. + * \param[in,out] vp vnode of file to push page data to. On + * return, ctime and mtime are updated + * \param[in] off position in file to put data. + * \param[in] len amount of data to write. + * \param[in] flags flags to control the operation. + * \param[in] cr credentials of caller. + * \param[in] ct caller context * - * RETURN: 0 if success - * error code if failure + * \return 0 on success, error code on failure * * Timestamps: - * vp - ctime|mtime updated + * - vp - ctime|mtime updated */ /*ARGSUSED*/ static int @@ -4593,16 +4578,16 @@ zfs_inactive(vnode_t *vp, cred_t *cr, ca } #ifdef sun -/* +/** * Bounds-check the seek operation. * - * IN: vp - vnode seeking within - * ooff - old file offset - * noffp - pointer to new file offset - * ct - caller context + * \param[in] vp vnode seeking within + * \param[in] ooff old file offset + * \param[in] noffp pointer to new file offset + * \param[in] ct caller context * - * RETURN: 0 if success - * EINVAL if new offset invalid + * \retval 0 success + * \retval EINVAL new offset is invalid */ /* ARGSUSED */ static int @@ -4614,7 +4599,7 @@ zfs_seek(vnode_t *vp, offset_t ooff, off return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0); } -/* +/** * Pre-filter the generic locking function to trap attempts to place * a mandatory lock on a memory mapped file. */ @@ -4642,7 +4627,7 @@ zfs_frlock(vnode_t *vp, int cmd, flock64 return (fs_frlock(vp, cmd, bfp, flag, offset, flk_cbp, cr, ct)); } -/* +/** * If we can't find a page in the cache, we will create a new page * and fill it with file data. For efficiency, we may try to fill * multiple pages at once (klustering) to fill up the supplied page @@ -4717,7 +4702,7 @@ zfs_fillpage(vnode_t *vp, u_offset_t off return (0); } -/* +/** * Return pointers to the pages for the file region [off, off + len] * in the pl array. 
If plsz is greater than len, this function may
 * also return page pointers from after the specified region
@@ -4725,30 +4710,29 @@ zfs_fillpage(vnode_t *vp, u_offset_t off
 * only returned if they are already in the cache, or were created as
 * part of a klustered read.
 *
- * IN: vp - vnode of file to get data from.
- * off - position in file to get data from.
- * len - amount of data to retrieve.
- * plsz - length of provided page list.
- * seg - segment to obtain pages for.
- * addr - virtual address of fault.
- * rw - mode of created pages.
- * cr - credentials of caller.
- * ct - caller context.
- *
- * OUT: protp - protection mode of created pages.
- * pl - list of pages created.
+ * \param[in,out] vp vnode of file to get data from. On return,
+ * atime is updated
+ * \param[in] off position in file to get data from.
+ * \param[in] len amount of data to retrieve.
+ * \param[out] protp protection mode of created pages.
+ * \param[out] pl list of pages created.
+ * \param[in] plsz length of provided page list.
+ * \param[in] seg segment to obtain pages for.
+ * \param[in] addr virtual address of fault.
+ * \param[in] rw mode of created pages.
+ * \param[in] cr credentials of caller.
+ * \param[in] ct caller context.
 *
- * RETURN: 0 if success
- * error code if failure
+ * \return 0 on success, error code on failure
 *
 * Timestamps:
- * vp - atime updated
+ * - vp - atime updated
 */
/* ARGSUSED */
static int
zfs_getpage(vnode_t *vp, offset_t off, size_t len, uint_t *protp,
- page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr,
- enum seg_rw rw, cred_t *cr, caller_context_t *ct)
+ page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr,
+ enum seg_rw rw, cred_t *cr, caller_context_t *ct)
{
 znode_t *zp = VTOZ(vp);
 zfsvfs_t *zfsvfs = zp->z_zfsvfs;
@@ -4819,19 +4803,14 @@ out:
 return (err);
}

-/*
+/**
 * Request a memory map for a section of a file. 
This code interacts * with common code and the VM system as follows: - * - * common code calls mmap(), which ends up in smmap_common() - * - * this calls VOP_MAP(), which takes you into (say) zfs - * - * zfs_map() calls as_map(), passing segvn_create() as the callback - * - * segvn_create() creates the new segment and calls VOP_ADDMAP() - * - * zfs_addmap() updates z_mapcnt + * - common code calls mmap(), which ends up in smmap_common() + * - this calls VOP_MAP(), which takes you into (say) zfs + * - zfs_map() calls as_map(), passing segvn_create() as the callback + * - segvn_create() creates the new segment and calls VOP_ADDMAP() + * - zfs_addmap() updates z_mapcnt */ /*ARGSUSED*/ static int @@ -4920,21 +4899,20 @@ zfs_addmap(vnode_t *vp, offset_t off, st return (0); } -/* +/** * The reason we push dirty pages as part of zfs_delmap() is so that we get a * more accurate mtime for the associated file. Since we don't have a way of * detecting when the data was actually modified, we have to resort to * heuristics. If an explicit msync() is done, then we mark the mtime when the * last page is pushed. The problem occurs when the msync() call is omitted, * which by far the most common case: - * - * open() - * mmap() - * - * munmap() - * close() - *