zpool import speedup

          --- old/usr/src/uts/common/fs/zfs/sys/spa.h
          +++ new/usr/src/uts/common/fs/zfs/sys/spa.h
   1    1  /*
   2    2   * CDDL HEADER START
   3    3   *
   4    4   * The contents of this file are subject to the terms of the
   5    5   * Common Development and Distribution License (the "License").
   6    6   * You may not use this file except in compliance with the License.
   7    7   *
   8    8   * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9    9   * or http://www.opensolaris.org/os/licensing.
  10   10   * See the License for the specific language governing permissions
  11   11   * and limitations under the License.
  12   12   *
  13   13   * When distributing Covered Code, include this CDDL HEADER in each
  14   14   * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15   15   * If applicable, add the following below this CDDL HEADER, with the
  16   16   * fields enclosed by brackets "[]" replaced with your own identifying
  17   17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  /*
  22   22   * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
  23   23   * Copyright (c) 2013 by Delphix. All rights reserved.
  24   24   * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
  25   25   */
  26   26  
  27   27  #ifndef _SYS_SPA_H
  28   28  #define _SYS_SPA_H
  29   29  
  30   30  #include <sys/avl.h>
  31   31  #include <sys/zfs_context.h>
  32   32  #include <sys/nvpair.h>
  33   33  #include <sys/sysmacros.h>
  34   34  #include <sys/types.h>
  35   35  #include <sys/fs/zfs.h>
  36   36  
  37   37  #ifdef  __cplusplus
  38   38  extern "C" {
  39   39  #endif
  40   40  
  41   41  /*
  42   42   * Forward references that lots of things need.
  43   43   */
  44   44  typedef struct spa spa_t;
  45   45  typedef struct vdev vdev_t;
  46   46  typedef struct metaslab metaslab_t;
  47   47  typedef struct metaslab_group metaslab_group_t;
  48   48  typedef struct metaslab_class metaslab_class_t;
  49   49  typedef struct zio zio_t;
  50   50  typedef struct zilog zilog_t;
  51   51  typedef struct spa_aux_vdev spa_aux_vdev_t;
  52   52  typedef struct ddt ddt_t;
  53   53  typedef struct ddt_entry ddt_entry_t;
  54   54  struct dsl_pool;
  55   55  struct dsl_dataset;
  56   56  
  57   57  /*
  58   58   * General-purpose 32-bit and 64-bit bitfield encodings.
  59   59   */
  60   60  #define BF32_DECODE(x, low, len)        P2PHASE((x) >> (low), 1U << (len))
  61   61  #define BF64_DECODE(x, low, len)        P2PHASE((x) >> (low), 1ULL << (len))
  62   62  #define BF32_ENCODE(x, low, len)        (P2PHASE((x), 1U << (len)) << (low))
  63   63  #define BF64_ENCODE(x, low, len)        (P2PHASE((x), 1ULL << (len)) << (low))
  64   64  
  65   65  #define BF32_GET(x, low, len)           BF32_DECODE(x, low, len)
  66   66  #define BF64_GET(x, low, len)           BF64_DECODE(x, low, len)
  67   67  
  68   68  #define BF32_SET(x, low, len, val) do { \
  69   69          ASSERT3U(val, <, 1U << (len)); \
  70   70          ASSERT3U(low + len, <=, 32); \
  71   71          (x) ^= BF32_ENCODE((x >> low) ^ (val), low, len); \
  72   72  _NOTE(CONSTCOND) } while (0)
  73   73  
  74   74  #define BF64_SET(x, low, len, val) do { \
  75   75          ASSERT3U(val, <, 1ULL << (len)); \
  76   76          ASSERT3U(low + len, <=, 64); \
  77   77          ((x) ^= BF64_ENCODE((x >> low) ^ (val), low, len)); \
  78   78  _NOTE(CONSTCOND) } while (0)
  79   79  
  80   80  #define BF32_GET_SB(x, low, len, shift, bias)   \
  81   81          ((BF32_GET(x, low, len) + (bias)) << (shift))
  82   82  #define BF64_GET_SB(x, low, len, shift, bias)   \
  83   83          ((BF64_GET(x, low, len) + (bias)) << (shift))
  84   84  
  85   85  #define BF32_SET_SB(x, low, len, shift, bias, val) do { \
  86   86          ASSERT(IS_P2ALIGNED(val, 1U << shift)); \
  87   87          ASSERT3S((val) >> (shift), >=, bias); \
  88   88          BF32_SET(x, low, len, ((val) >> (shift)) - (bias)); \
  89   89  _NOTE(CONSTCOND) } while (0)
  90   90  #define BF64_SET_SB(x, low, len, shift, bias, val) do { \
  91   91          ASSERT(IS_P2ALIGNED(val, 1ULL << shift)); \
  92   92          ASSERT3S((val) >> (shift), >=, bias); \
  93   93          BF64_SET(x, low, len, ((val) >> (shift)) - (bias)); \
  94   94  _NOTE(CONSTCOND) } while (0)
  95   95  
  96   96  /*
  97   97   * We currently support nine block sizes, from 512 bytes to 128K.
  98   98   * We could go higher, but the benefits are near-zero and the cost
  99   99   * of COWing a giant block to modify one byte would become excessive.
 100  100   */
 101  101  #define SPA_MINBLOCKSHIFT       9
 102  102  #define SPA_MAXBLOCKSHIFT       17
 103  103  #define SPA_MINBLOCKSIZE        (1ULL << SPA_MINBLOCKSHIFT)
 104  104  #define SPA_MAXBLOCKSIZE        (1ULL << SPA_MAXBLOCKSHIFT)
 105  105  
 106  106  #define SPA_BLOCKSIZES          (SPA_MAXBLOCKSHIFT - SPA_MINBLOCKSHIFT + 1)
 107  107  
 108  108  /*
 109  109   * Size of block to hold the configuration data (a packed nvlist)
 110  110   */
 111  111  #define SPA_CONFIG_BLOCKSIZE    (1ULL << 14)
 112  112  
 113  113  /*
 114  114   * The DVA size encodings for LSIZE and PSIZE support blocks up to 32MB.
 115  115   * The ASIZE encoding should be at least 64 times larger (6 more bits)
 116  116   * to support up to 4-way RAID-Z mirror mode with worst-case gang block
 117  117   * overhead, three DVAs per bp, plus one more bit in case we do anything
 118  118   * else that expands the ASIZE.
 119  119   */
 120  120  #define SPA_LSIZEBITS           16      /* LSIZE up to 32M (2^16 * 512) */
 121  121  #define SPA_PSIZEBITS           16      /* PSIZE up to 32M (2^16 * 512) */
 122  122  #define SPA_ASIZEBITS           24      /* ASIZE up to 64 times larger  */
 123  123  
 124  124  /*
 125  125   * All SPA data is represented by 128-bit data virtual addresses (DVAs).
 126  126   * The members of the dva_t should be considered opaque outside the SPA.
 127  127   */
 128  128  typedef struct dva {
 129  129          uint64_t        dva_word[2];
 130  130  } dva_t;
 131  131  
 132  132  /*
 133  133   * Each block has a 256-bit checksum -- strong enough for cryptographic hashes.
 134  134   */
 135  135  typedef struct zio_cksum {
 136  136          uint64_t        zc_word[4];
 137  137  } zio_cksum_t;
 138  138  
 139  139  /*
 140  140   * Each block is described by its DVAs, time of birth, checksum, etc.
 141  141   * The word-by-word, bit-by-bit layout of the blkptr is as follows:
 142  142   *
 143  143   *      64      56      48      40      32      24      16      8       0
 144  144   *      +-------+-------+-------+-------+-------+-------+-------+-------+
 145  145   * 0    |               vdev1           | GRID  |         ASIZE         |
 146  146   *      +-------+-------+-------+-------+-------+-------+-------+-------+
 147  147   * 1    |G|                      offset1                                |
 148  148   *      +-------+-------+-------+-------+-------+-------+-------+-------+
 149  149   * 2    |               vdev2           | GRID  |         ASIZE         |
 150  150   *      +-------+-------+-------+-------+-------+-------+-------+-------+
 151  151   * 3    |G|                      offset2                                |
 152  152   *      +-------+-------+-------+-------+-------+-------+-------+-------+
 153  153   * 4    |               vdev3           | GRID  |         ASIZE         |
 154  154   *      +-------+-------+-------+-------+-------+-------+-------+-------+
 155  155   * 5    |G|                      offset3                                |
 156  156   *      +-------+-------+-------+-------+-------+-------+-------+-------+
 157  157   * 6    |BDX|lvl| type  | cksum |E| comp|    PSIZE      |     LSIZE     |
 158  158   *      +-------+-------+-------+-------+-------+-------+-------+-------+
 159  159   * 7    |                       padding                                 |
 160  160   *      +-------+-------+-------+-------+-------+-------+-------+-------+
 161  161   * 8    |                       padding                                 |
 162  162   *      +-------+-------+-------+-------+-------+-------+-------+-------+
 163  163   * 9    |                       physical birth txg                      |
 164  164   *      +-------+-------+-------+-------+-------+-------+-------+-------+
 165  165   * a    |                       logical birth txg                       |
 166  166   *      +-------+-------+-------+-------+-------+-------+-------+-------+
 167  167   * b    |                       fill count                              |
 168  168   *      +-------+-------+-------+-------+-------+-------+-------+-------+
 169  169   * c    |                       checksum[0]                             |
 170  170   *      +-------+-------+-------+-------+-------+-------+-------+-------+
 171  171   * d    |                       checksum[1]                             |
 172  172   *      +-------+-------+-------+-------+-------+-------+-------+-------+
 173  173   * e    |                       checksum[2]                             |
 174  174   *      +-------+-------+-------+-------+-------+-------+-------+-------+
 175  175   * f    |                       checksum[3]                             |
 176  176   *      +-------+-------+-------+-------+-------+-------+-------+-------+
 177  177   *
 178  178   * Legend:
 179  179   *
 180  180   * vdev         virtual device ID
 181  181   * offset       offset into virtual device
 182  182   * LSIZE        logical size
 183  183   * PSIZE        physical size (after compression)
 184  184   * ASIZE        allocated size (including RAID-Z parity and gang block headers)
 185  185   * GRID         RAID-Z layout information (reserved for future use)
 186  186   * cksum        checksum function
 187  187   * comp         compression function
 188  188   * G            gang block indicator
 189  189   * B            byteorder (endianness)
 190  190   * D            dedup
 191  191   * X            encryption (on version 30, which is not supported)
 192  192   * E            blkptr_t contains embedded data (see below)
 193  193   * lvl          level of indirection
 194  194   * type         DMU object type
 195  195   * phys birth   txg of block allocation; zero if same as logical birth txg
 196  196   * log. birth   transaction group in which the block was logically born
 197  197   * fill count   number of non-zero blocks under this bp
 198  198   * checksum[4]  256-bit checksum of the data this bp describes
 199  199   */
 200  200  
 201  201  /*
 202  202   * "Embedded" blkptr_t's don't actually point to a block, instead they
 203  203   * have a data payload embedded in the blkptr_t itself.  See the comment
 204  204   * in blkptr.c for more details.
 205  205   *
 206  206   * The blkptr_t is laid out as follows:
 207  207   *
 208  208   *      64      56      48      40      32      24      16      8       0
 209  209   *      +-------+-------+-------+-------+-------+-------+-------+-------+
 210  210   * 0    |      payload                                                  |
 211  211   * 1    |      payload                                                  |
 212  212   * 2    |      payload                                                  |
 213  213   * 3    |      payload                                                  |
 214  214   * 4    |      payload                                                  |
 215  215   * 5    |      payload                                                  |
 216  216   *      +-------+-------+-------+-------+-------+-------+-------+-------+
 217  217   * 6    |BDX|lvl| type  | etype |E| comp| PSIZE|              LSIZE     |
 218  218   *      +-------+-------+-------+-------+-------+-------+-------+-------+
 219  219   * 7    |      payload                                                  |
 220  220   * 8    |      payload                                                  |
 221  221   * 9    |      payload                                                  |
 222  222   *      +-------+-------+-------+-------+-------+-------+-------+-------+
 223  223   * a    |                       logical birth txg                       |
 224  224   *      +-------+-------+-------+-------+-------+-------+-------+-------+
 225  225   * b    |      payload                                                  |
 226  226   * c    |      payload                                                  |
 227  227   * d    |      payload                                                  |
 228  228   * e    |      payload                                                  |
 229  229   * f    |      payload                                                  |
 230  230   *      +-------+-------+-------+-------+-------+-------+-------+-------+
 231  231   *
 232  232   * Legend:
 233  233   *
 234  234   * payload              contains the embedded data
 235  235   * B (byteorder)        byteorder (endianness)
 236  236   * D (dedup)            padding (set to zero)
 237  237   * X                    encryption (set to zero; see above)
 238  238   * E (embedded)         set to one
 239  239   * lvl                  indirection level
 240  240   * type                 DMU object type
 241  241   * etype                how to interpret embedded data (BP_EMBEDDED_TYPE_*)
 242  242   * comp                 compression function of payload
 243  243   * PSIZE                size of payload after compression, in bytes
 244  244   * LSIZE                logical size of payload, in bytes
 245  245   *                      note that 25 bits is enough to store the largest
 246  246   *                      "normal" BP's LSIZE (2^16 * 2^9) in bytes
 247  247   * log. birth           transaction group in which the block was logically born
 248  248   *
 249  249   * Note that LSIZE and PSIZE are stored in bytes, whereas for non-embedded
 250  250   * bp's they are stored in units of SPA_MINBLOCKSHIFT.
 251  251   * Generally, the generic BP_GET_*() macros can be used on embedded BP's.
 252  252   * The B, D, X, lvl, type, and comp fields are stored the same as with normal
 253  253   * BP's so the BP_SET_* macros can be used with them.  etype, PSIZE, LSIZE must
 254  254   * be set with the BPE_SET_* macros.  BP_SET_EMBEDDED() should be called before
 255  255   * other macros, as they assert that they are only used on BP's of the correct
 256  256   * "embedded-ness".
 257  257   */
 258  258  
 259  259  #define BPE_GET_ETYPE(bp)       \
 260  260          (ASSERT(BP_IS_EMBEDDED(bp)), \
 261  261          BF64_GET((bp)->blk_prop, 40, 8))
 262  262  #define BPE_SET_ETYPE(bp, t)    do { \
 263  263          ASSERT(BP_IS_EMBEDDED(bp)); \
 264  264          BF64_SET((bp)->blk_prop, 40, 8, t); \
 265  265  _NOTE(CONSTCOND) } while (0)
 266  266  
 267  267  #define BPE_GET_LSIZE(bp)       \
 268  268          (ASSERT(BP_IS_EMBEDDED(bp)), \
 269  269          BF64_GET_SB((bp)->blk_prop, 0, 25, 0, 1))
 270  270  #define BPE_SET_LSIZE(bp, x)    do { \
 271  271          ASSERT(BP_IS_EMBEDDED(bp)); \
 272  272          BF64_SET_SB((bp)->blk_prop, 0, 25, 0, 1, x); \
 273  273  _NOTE(CONSTCOND) } while (0)
 274  274  
 275  275  #define BPE_GET_PSIZE(bp)       \
 276  276          (ASSERT(BP_IS_EMBEDDED(bp)), \
 277  277          BF64_GET_SB((bp)->blk_prop, 25, 7, 0, 1))
 278  278  #define BPE_SET_PSIZE(bp, x)    do { \
 279  279          ASSERT(BP_IS_EMBEDDED(bp)); \
 280  280          BF64_SET_SB((bp)->blk_prop, 25, 7, 0, 1, x); \
 281  281  _NOTE(CONSTCOND) } while (0)
 282  282  
 283  283  typedef enum bp_embedded_type {
 284  284          BP_EMBEDDED_TYPE_DATA,
 285  285          BP_EMBEDDED_TYPE_RESERVED, /* Reserved for an unintegrated feature. */
 286  286          NUM_BP_EMBEDDED_TYPES = BP_EMBEDDED_TYPE_RESERVED
 287  287  } bp_embedded_type_t;
 288  288  
 289  289  #define BPE_NUM_WORDS 14
 290  290  #define BPE_PAYLOAD_SIZE (BPE_NUM_WORDS * sizeof (uint64_t))
 291  291  #define BPE_IS_PAYLOADWORD(bp, wp) \
 292  292          ((wp) != &(bp)->blk_prop && (wp) != &(bp)->blk_birth)
 293  293  
 294  294  #define SPA_BLKPTRSHIFT 7               /* blkptr_t is 128 bytes        */
 295  295  #define SPA_DVAS_PER_BP 3               /* Number of DVAs in a bp       */
 296  296  
 297  297  /*
 298  298   * A block is a hole when it has either 1) never been written to, or
 299  299   * 2) is zero-filled. In both cases, ZFS can return all zeroes for all reads
 300  300   * without physically allocating disk space. Holes are represented in the
 301  301   * blkptr_t structure by zeroed blk_dva. Correct checking for holes is
 302  302   * done through the BP_IS_HOLE macro. For holes, the logical size, level,
 303  303   * DMU object type, and birth times are all also stored for holes that
 304  304   * were written to at some point (i.e. were punched after having been filled).
 305  305   */
 306  306  typedef struct blkptr {
 307  307          dva_t           blk_dva[SPA_DVAS_PER_BP]; /* Data Virtual Addresses */
 308  308          uint64_t        blk_prop;       /* size, compression, type, etc     */
 309  309          uint64_t        blk_pad[2];     /* Extra space for the future       */
 310  310          uint64_t        blk_phys_birth; /* txg when block was allocated     */
 311  311          uint64_t        blk_birth;      /* transaction group at birth       */
 312  312          uint64_t        blk_fill;       /* fill count                       */
 313  313          zio_cksum_t     blk_cksum;      /* 256-bit checksum                 */
 314  314  } blkptr_t;
 315  315  
 316  316  /*
 317  317   * Macros to get and set fields in a bp or DVA.
 318  318   */
 319  319  #define DVA_GET_ASIZE(dva)      \
 320  320          BF64_GET_SB((dva)->dva_word[0], 0, SPA_ASIZEBITS, SPA_MINBLOCKSHIFT, 0)
 321  321  #define DVA_SET_ASIZE(dva, x)   \
 322  322          BF64_SET_SB((dva)->dva_word[0], 0, SPA_ASIZEBITS, \
 323  323          SPA_MINBLOCKSHIFT, 0, x)
 324  324  
 325  325  #define DVA_GET_GRID(dva)       BF64_GET((dva)->dva_word[0], 24, 8)
 326  326  #define DVA_SET_GRID(dva, x)    BF64_SET((dva)->dva_word[0], 24, 8, x)
 327  327  
 328  328  #define DVA_GET_VDEV(dva)       BF64_GET((dva)->dva_word[0], 32, 32)
 329  329  #define DVA_SET_VDEV(dva, x)    BF64_SET((dva)->dva_word[0], 32, 32, x)
 330  330  
 331  331  #define DVA_GET_OFFSET(dva)     \
 332  332          BF64_GET_SB((dva)->dva_word[1], 0, 63, SPA_MINBLOCKSHIFT, 0)
 333  333  #define DVA_SET_OFFSET(dva, x)  \
 334  334          BF64_SET_SB((dva)->dva_word[1], 0, 63, SPA_MINBLOCKSHIFT, 0, x)
 335  335  
 336  336  #define DVA_GET_GANG(dva)       BF64_GET((dva)->dva_word[1], 63, 1)
 337  337  #define DVA_SET_GANG(dva, x)    BF64_SET((dva)->dva_word[1], 63, 1, x)
 338  338  
 339  339  #define BP_GET_LSIZE(bp)        \
 340  340          (BP_IS_EMBEDDED(bp) ?   \
 341  341          (BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA ? BPE_GET_LSIZE(bp) : 0): \
 342  342          BF64_GET_SB((bp)->blk_prop, 0, SPA_LSIZEBITS, SPA_MINBLOCKSHIFT, 1))
 343  343  #define BP_SET_LSIZE(bp, x)     do { \
 344  344          ASSERT(!BP_IS_EMBEDDED(bp)); \
 345  345          BF64_SET_SB((bp)->blk_prop, \
 346  346              0, SPA_LSIZEBITS, SPA_MINBLOCKSHIFT, 1, x); \
 347  347  _NOTE(CONSTCOND) } while (0)
 348  348  
 349  349  #define BP_GET_PSIZE(bp)        \
 350  350          (BP_IS_EMBEDDED(bp) ? 0 : \
 351  351          BF64_GET_SB((bp)->blk_prop, 16, SPA_PSIZEBITS, SPA_MINBLOCKSHIFT, 1))
 352  352  #define BP_SET_PSIZE(bp, x)     do { \
 353  353          ASSERT(!BP_IS_EMBEDDED(bp)); \
 354  354          BF64_SET_SB((bp)->blk_prop, \
 355  355              16, SPA_PSIZEBITS, SPA_MINBLOCKSHIFT, 1, x); \
 356  356  _NOTE(CONSTCOND) } while (0)
 357  357  
 358  358  #define BP_GET_COMPRESS(bp)             BF64_GET((bp)->blk_prop, 32, 7)
 359  359  #define BP_SET_COMPRESS(bp, x)          BF64_SET((bp)->blk_prop, 32, 7, x)
 360  360  
 361  361  #define BP_IS_EMBEDDED(bp)              BF64_GET((bp)->blk_prop, 39, 1)
 362  362  #define BP_SET_EMBEDDED(bp, x)          BF64_SET((bp)->blk_prop, 39, 1, x)
 363  363  
 364  364  #define BP_GET_CHECKSUM(bp)             \
 365  365          (BP_IS_EMBEDDED(bp) ? ZIO_CHECKSUM_OFF : \
 366  366          BF64_GET((bp)->blk_prop, 40, 8))
 367  367  #define BP_SET_CHECKSUM(bp, x)          do { \
 368  368          ASSERT(!BP_IS_EMBEDDED(bp)); \
 369  369          BF64_SET((bp)->blk_prop, 40, 8, x); \
 370  370  _NOTE(CONSTCOND) } while (0)
 371  371  
 372  372  #define BP_GET_TYPE(bp)                 BF64_GET((bp)->blk_prop, 48, 8)
 373  373  #define BP_SET_TYPE(bp, x)              BF64_SET((bp)->blk_prop, 48, 8, x)
 374  374  
 375  375  #define BP_GET_LEVEL(bp)                BF64_GET((bp)->blk_prop, 56, 5)
 376  376  #define BP_SET_LEVEL(bp, x)             BF64_SET((bp)->blk_prop, 56, 5, x)
 377  377  
 378  378  #define BP_GET_DEDUP(bp)                BF64_GET((bp)->blk_prop, 62, 1)
 379  379  #define BP_SET_DEDUP(bp, x)             BF64_SET((bp)->blk_prop, 62, 1, x)
 380  380  
 381  381  #define BP_GET_BYTEORDER(bp)            BF64_GET((bp)->blk_prop, 63, 1)
 382  382  #define BP_SET_BYTEORDER(bp, x)         BF64_SET((bp)->blk_prop, 63, 1, x)
 383  383  
 384  384  #define BP_PHYSICAL_BIRTH(bp)           \
 385  385          (BP_IS_EMBEDDED(bp) ? 0 : \
 386  386          (bp)->blk_phys_birth ? (bp)->blk_phys_birth : (bp)->blk_birth)
 387  387  
 388  388  #define BP_SET_BIRTH(bp, logical, physical)     \
 389  389  {                                               \
 390  390          ASSERT(!BP_IS_EMBEDDED(bp));            \
 391  391          (bp)->blk_birth = (logical);            \
 392  392          (bp)->blk_phys_birth = ((logical) == (physical) ? 0 : (physical)); \
 393  393  }
 394  394  
 395  395  #define BP_GET_FILL(bp) (BP_IS_EMBEDDED(bp) ? 1 : (bp)->blk_fill)
 396  396  
 397  397  #define BP_GET_ASIZE(bp)        \
 398  398          (BP_IS_EMBEDDED(bp) ? 0 : \
 399  399          DVA_GET_ASIZE(&(bp)->blk_dva[0]) + \
 400  400          DVA_GET_ASIZE(&(bp)->blk_dva[1]) + \
 401  401          DVA_GET_ASIZE(&(bp)->blk_dva[2]))
 402  402  
 403  403  #define BP_GET_UCSIZE(bp) \
 404  404          ((BP_GET_LEVEL(bp) > 0 || DMU_OT_IS_METADATA(BP_GET_TYPE(bp))) ? \
 405  405          BP_GET_PSIZE(bp) : BP_GET_LSIZE(bp))
 406  406  
 407  407  #define BP_GET_NDVAS(bp)        \
 408  408          (BP_IS_EMBEDDED(bp) ? 0 : \
 409  409          !!DVA_GET_ASIZE(&(bp)->blk_dva[0]) + \
 410  410          !!DVA_GET_ASIZE(&(bp)->blk_dva[1]) + \
 411  411          !!DVA_GET_ASIZE(&(bp)->blk_dva[2]))
 412  412  
 413  413  #define BP_COUNT_GANG(bp)       \
 414  414          (BP_IS_EMBEDDED(bp) ? 0 : \
 415  415          (DVA_GET_GANG(&(bp)->blk_dva[0]) + \
 416  416          DVA_GET_GANG(&(bp)->blk_dva[1]) + \
 417  417          DVA_GET_GANG(&(bp)->blk_dva[2])))
 418  418  
 419  419  #define DVA_EQUAL(dva1, dva2)   \
 420  420          ((dva1)->dva_word[1] == (dva2)->dva_word[1] && \
 421  421          (dva1)->dva_word[0] == (dva2)->dva_word[0])
 422  422  
 423  423  #define BP_EQUAL(bp1, bp2)      \
 424  424          (BP_PHYSICAL_BIRTH(bp1) == BP_PHYSICAL_BIRTH(bp2) &&    \
 425  425          (bp1)->blk_birth == (bp2)->blk_birth &&                 \
 426  426          DVA_EQUAL(&(bp1)->blk_dva[0], &(bp2)->blk_dva[0]) &&    \
 427  427          DVA_EQUAL(&(bp1)->blk_dva[1], &(bp2)->blk_dva[1]) &&    \
 428  428          DVA_EQUAL(&(bp1)->blk_dva[2], &(bp2)->blk_dva[2]))
 429  429  
 430  430  #define ZIO_CHECKSUM_EQUAL(zc1, zc2) \
 431  431          (0 == (((zc1).zc_word[0] - (zc2).zc_word[0]) | \
 432  432          ((zc1).zc_word[1] - (zc2).zc_word[1]) | \
 433  433          ((zc1).zc_word[2] - (zc2).zc_word[2]) | \
 434  434          ((zc1).zc_word[3] - (zc2).zc_word[3])))
 435  435  
 436  436  #define DVA_IS_VALID(dva)       (DVA_GET_ASIZE(dva) != 0)
 437  437  
 438  438  #define ZIO_SET_CHECKSUM(zcp, w0, w1, w2, w3)   \
 439  439  {                                               \
 440  440          (zcp)->zc_word[0] = w0;                 \
 441  441          (zcp)->zc_word[1] = w1;                 \
 442  442          (zcp)->zc_word[2] = w2;                 \
 443  443          (zcp)->zc_word[3] = w3;                 \
 444  444  }
 445  445  
 446  446  #define BP_IDENTITY(bp)         (ASSERT(!BP_IS_EMBEDDED(bp)), &(bp)->blk_dva[0])
 447  447  #define BP_IS_GANG(bp)          \
 448  448          (BP_IS_EMBEDDED(bp) ? B_FALSE : DVA_GET_GANG(BP_IDENTITY(bp)))
 449  449  #define DVA_IS_EMPTY(dva)       ((dva)->dva_word[0] == 0ULL &&  \
 450  450                                  (dva)->dva_word[1] == 0ULL)
 451  451  #define BP_IS_HOLE(bp) \
 452  452          (!BP_IS_EMBEDDED(bp) && DVA_IS_EMPTY(BP_IDENTITY(bp)))
 453  453  
 454  454  /* BP_IS_RAIDZ(bp) assumes no block compression */
 455  455  #define BP_IS_RAIDZ(bp)         (DVA_GET_ASIZE(&(bp)->blk_dva[0]) > \
 456  456                                  BP_GET_PSIZE(bp))
 457  457  
 458  458  #define BP_ZERO(bp)                             \
 459  459  {                                               \
 460  460          (bp)->blk_dva[0].dva_word[0] = 0;       \
 461  461          (bp)->blk_dva[0].dva_word[1] = 0;       \
 462  462          (bp)->blk_dva[1].dva_word[0] = 0;       \
 463  463          (bp)->blk_dva[1].dva_word[1] = 0;       \
 464  464          (bp)->blk_dva[2].dva_word[0] = 0;       \
 465  465          (bp)->blk_dva[2].dva_word[1] = 0;       \
 466  466          (bp)->blk_prop = 0;                     \
 467  467          (bp)->blk_pad[0] = 0;                   \
 468  468          (bp)->blk_pad[1] = 0;                   \
 469  469          (bp)->blk_phys_birth = 0;               \
 470  470          (bp)->blk_birth = 0;                    \
 471  471          (bp)->blk_fill = 0;                     \
 472  472          ZIO_SET_CHECKSUM(&(bp)->blk_cksum, 0, 0, 0, 0); \
 473  473  }
 474  474  
 475  475  #ifdef _BIG_ENDIAN
 476  476  #define ZFS_HOST_BYTEORDER      (0ULL)
 477  477  #else
 478  478  #define ZFS_HOST_BYTEORDER      (1ULL)
 479  479  #endif
 480  480  
 481  481  #define BP_SHOULD_BYTESWAP(bp)  (BP_GET_BYTEORDER(bp) != ZFS_HOST_BYTEORDER)
 482  482  
 483  483  #define BP_SPRINTF_LEN  320
 484  484  
 485  485  /*
 486  486   * This macro allows code sharing between zfs, libzpool, and mdb.
 487  487   * 'func' is either snprintf() or mdb_snprintf().
 488  488   * 'ws' (whitespace) can be ' ' for single-line format, '\n' for multi-line.
 489  489   */
 490  490  #define SNPRINTF_BLKPTR(func, ws, buf, size, bp, type, checksum, compress) \
 491  491  {                                                                       \
 492  492          static const char *copyname[] =                                 \
 493  493              { "zero", "single", "double", "triple" };                   \
 494  494          int len = 0;                                                    \
 495  495          int copies = 0;                                                 \
 496  496                                                                          \
 497  497          if (bp == NULL) {                                               \
 498  498                  len += func(buf + len, size - len, "<NULL>");           \
 499  499          } else if (BP_IS_HOLE(bp)) {                                    \
 500  500                  len += func(buf + len, size - len, "<hole>");           \
 501  501                  if (bp->blk_birth > 0) {                                \
 502  502                          len += func(buf + len, size - len,              \
 503  503                              " birth=%lluL",                             \
 504  504                              (u_longlong_t)bp->blk_birth);               \
 505  505                  }                                                       \
 506  506          } else if (BP_IS_EMBEDDED(bp)) {                                \
 507  507                  len = func(buf + len, size - len,                       \
 508  508                      "EMBEDDED [L%llu %s] et=%u %s "                     \
 509  509                      "size=%llxL/%llxP birth=%lluL",                     \
 510  510                      (u_longlong_t)BP_GET_LEVEL(bp),                     \
 511  511                      type,                                               \
 512  512                      (int)BPE_GET_ETYPE(bp),                             \
 513  513                      compress,                                           \
 514  514                      (u_longlong_t)BPE_GET_LSIZE(bp),                    \
 515  515                      (u_longlong_t)BPE_GET_PSIZE(bp),                    \
 516  516                      (u_longlong_t)bp->blk_birth);                       \
 517  517          } else {                                                        \
 518  518                  for (int d = 0; d < BP_GET_NDVAS(bp); d++) {            \
 519  519                          const dva_t *dva = &bp->blk_dva[d];             \
 520  520                          if (DVA_IS_VALID(dva))                          \
 521  521                                  copies++;                               \
 522  522                          len += func(buf + len, size - len,              \
 523  523                              "DVA[%d]=<%llu:%llx:%llx>%c", d,            \
 524  524                              (u_longlong_t)DVA_GET_VDEV(dva),            \
 525  525                              (u_longlong_t)DVA_GET_OFFSET(dva),          \
 526  526                              (u_longlong_t)DVA_GET_ASIZE(dva),           \
 527  527                              ws);                                        \
 528  528                  }                                                       \
 529  529                  if (BP_IS_GANG(bp) &&                                   \
 530  530                      DVA_GET_ASIZE(&bp->blk_dva[2]) <=                   \
 531  531                      DVA_GET_ASIZE(&bp->blk_dva[1]) / 2)                 \
 532  532                          copies--;                                       \
 533  533                  len += func(buf + len, size - len,                      \
 534  534                      "[L%llu %s] %s %s %s %s %s %s%c"                    \
 535  535                      "size=%llxL/%llxP birth=%lluL/%lluP fill=%llu%c"    \
 536  536                      "cksum=%llx:%llx:%llx:%llx",                        \
 537  537                      (u_longlong_t)BP_GET_LEVEL(bp),                     \
 538  538                      type,                                               \
 539  539                      checksum,                                           \
 540  540                      compress,                                           \
 541  541                      BP_GET_BYTEORDER(bp) == 0 ? "BE" : "LE",            \
 542  542                      BP_IS_GANG(bp) ? "gang" : "contiguous",             \
 543  543                      BP_GET_DEDUP(bp) ? "dedup" : "unique",              \
 544  544                      copyname[copies],                                   \
 545  545                      ws,                                                 \
 546  546                      (u_longlong_t)BP_GET_LSIZE(bp),                     \
 547  547                      (u_longlong_t)BP_GET_PSIZE(bp),                     \
 548  548                      (u_longlong_t)bp->blk_birth,                        \
 549  549                      (u_longlong_t)BP_PHYSICAL_BIRTH(bp),                \
 550  550                      (u_longlong_t)BP_GET_FILL(bp),                      \
 551  551                      ws,                                                 \
 552  552                      (u_longlong_t)bp->blk_cksum.zc_word[0],             \
 553  553                      (u_longlong_t)bp->blk_cksum.zc_word[1],             \
 554  554                      (u_longlong_t)bp->blk_cksum.zc_word[2],             \
 555  555                      (u_longlong_t)bp->blk_cksum.zc_word[3]);            \
 556  556          }                                                               \
 557  557          ASSERT(len < size);                                             \
 558  558  }
 559  559  
 560  560  #include <sys/dmu.h>
 561  561  
 562  562  #define BP_GET_BUFC_TYPE(bp)                                            \
 563  563          (((BP_GET_LEVEL(bp) > 0) || (DMU_OT_IS_METADATA(BP_GET_TYPE(bp)))) ? \
 564  564          ARC_BUFC_METADATA : ARC_BUFC_DATA)
 565  565  
 566  566  typedef enum spa_import_type {
 567  567          SPA_IMPORT_EXISTING,
 568  568          SPA_IMPORT_ASSEMBLE
 569  569  } spa_import_type_t;
 570  570  
 571  571  /* state manipulation functions */
 572  572  extern int spa_open(const char *pool, spa_t **, void *tag);
      573 +extern int spa_open_lock(const char *pool, spa_t **, void *tag, int lock);
 573  574  extern int spa_open_rewind(const char *pool, spa_t **, void *tag,
 574  575      nvlist_t *policy, nvlist_t **config);
 575  576  extern int spa_get_stats(const char *pool, nvlist_t **config, char *altroot,
 576  577      size_t buflen);
 577  578  extern int spa_create(const char *pool, nvlist_t *config, nvlist_t *props,
 578  579      nvlist_t *zplprops);
 579  580  extern int spa_import_rootpool(char *devpath, char *devid);
 580  581  extern int spa_import(const char *pool, nvlist_t *config, nvlist_t *props,
 581  582      uint64_t flags);
 582  583  extern nvlist_t *spa_tryimport(nvlist_t *tryconfig);
 583  584  extern int spa_destroy(char *pool);
 584  585  extern int spa_export(char *pool, nvlist_t **oldconfig, boolean_t force,
 585  586      boolean_t hardforce);
 586  587  extern int spa_reset(char *pool);
 587  588  extern void spa_async_request(spa_t *spa, int flag);
 588  589  extern void spa_async_unrequest(spa_t *spa, int flag);
 589  590  extern void spa_async_suspend(spa_t *spa);
 590  591  extern void spa_async_resume(spa_t *spa);
 591  592  extern spa_t *spa_inject_addref(char *pool);
 592  593  extern void spa_inject_delref(spa_t *spa);
 593  594  extern void spa_scan_stat_init(spa_t *spa);
 594  595  extern int spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps);
 595  596  
 596  597  #define SPA_ASYNC_CONFIG_UPDATE 0x01
 597  598  #define SPA_ASYNC_REMOVE        0x02
 598  599  #define SPA_ASYNC_PROBE         0x04
 599  600  #define SPA_ASYNC_RESILVER_DONE 0x08
 600  601  #define SPA_ASYNC_RESILVER      0x10
 601  602  #define SPA_ASYNC_AUTOEXPAND    0x20
 602  603  #define SPA_ASYNC_REMOVE_DONE   0x40
 603  604  #define SPA_ASYNC_REMOVE_STOP   0x80
 604  605  
 605  606  /*
 606  607   * Controls the behavior of spa_vdev_remove().
 607  608   */
 608  609  #define SPA_REMOVE_UNSPARE      0x01
 609  610  #define SPA_REMOVE_DONE         0x02
 610  611  
 611  612  /* device manipulation */
 612  613  extern int spa_vdev_add(spa_t *spa, nvlist_t *nvroot);
 613  614  extern int spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot,
 614  615      int replacing);
 615  616  extern int spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid,
 616  617      int replace_done);
 617  618  extern int spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare);
 618  619  extern boolean_t spa_vdev_remove_active(spa_t *spa);
 619  620  extern int spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath);
 620  621  extern int spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru);
 621  622  extern int spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config,
 622  623      nvlist_t *props, boolean_t exp);
 623  624  
 624  625  /* spare state (which is global across all pools) */
 625  626  extern void spa_spare_add(vdev_t *vd);
 626  627  extern void spa_spare_remove(vdev_t *vd);
 627  628  extern boolean_t spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt);
 628  629  extern void spa_spare_activate(vdev_t *vd);
 629  630  
 630  631  /* L2ARC state (which is global across all pools) */
 631  632  extern void spa_l2cache_add(vdev_t *vd);
 632  633  extern void spa_l2cache_remove(vdev_t *vd);
 633  634  extern boolean_t spa_l2cache_exists(uint64_t guid, uint64_t *pool);
 634  635  extern void spa_l2cache_activate(vdev_t *vd);
 635  636  extern void spa_l2cache_drop(spa_t *spa);
 636  637  
 637  638  /* scanning */
 638  639  extern int spa_scan(spa_t *spa, pool_scan_func_t func);
 639  640  extern int spa_scan_stop(spa_t *spa);
 640  641  
 641  642  /* spa syncing */
 642  643  extern void spa_sync(spa_t *spa, uint64_t txg); /* only for DMU use */
 643  644  extern void spa_sync_allpools(void);
 644  645  
 645  646  /* spa namespace global mutex */
 646  647  extern kmutex_t spa_namespace_lock;
 647  648  
 648  649  /*
 649  650   * SPA configuration functions in spa_config.c
 650  651   */
 651  652  
 652  653  #define SPA_CONFIG_UPDATE_POOL  0
 653  654  #define SPA_CONFIG_UPDATE_VDEVS 1
 654  655  
 655  656  extern void spa_config_sync(spa_t *, boolean_t, boolean_t);
 656  657  extern void spa_config_load(void);
 657  658  extern nvlist_t *spa_all_configs(uint64_t *);
 658  659  extern void spa_config_set(spa_t *spa, nvlist_t *config);
 659  660  extern nvlist_t *spa_config_generate(spa_t *spa, vdev_t *vd, uint64_t txg,
 660  661      int getstats);
 661  662  extern void spa_config_update(spa_t *spa, int what);
 662  663  
 663  664  /*
 664  665   * Miscellaneous SPA routines in spa_misc.c
 665  666   */
 666  667  
 667  668  /* Namespace manipulation */
 668  669  extern spa_t *spa_lookup(const char *name);
 669  670  extern spa_t *spa_add(const char *name, nvlist_t *config, const char *altroot);
 670  671  extern void spa_remove(spa_t *spa);
 671  672  extern spa_t *spa_next(spa_t *prev);
 672  673  
 673  674  /* Refcount functions */
 674  675  extern void spa_open_ref(spa_t *spa, void *tag);
 675  676  extern void spa_close(spa_t *spa, void *tag);
 676  677  extern boolean_t spa_refcount_zero(spa_t *spa);
 677  678  
 678  679  #define SCL_NONE        0x00
 679  680  #define SCL_CONFIG      0x01
 680  681  #define SCL_STATE       0x02
 681  682  #define SCL_L2ARC       0x04            /* hack until L2ARC 2.0 */
 682  683  #define SCL_ALLOC       0x08
 683  684  #define SCL_ZIO         0x10
 684  685  #define SCL_FREE        0x20
 685  686  #define SCL_VDEV        0x40
 686  687  #define SCL_LOCKS       7
 687  688  #define SCL_ALL         ((1 << SCL_LOCKS) - 1)
 688  689  #define SCL_STATE_ALL   (SCL_STATE | SCL_L2ARC | SCL_ZIO)
 689  690  
 690  691  /* Pool configuration locks */
 691  692  extern int spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw);
 692  693  extern void spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw);
 693  694  extern void spa_config_exit(spa_t *spa, int locks, void *tag);
 694  695  extern int spa_config_held(spa_t *spa, int locks, krw_t rw);
 695  696  
 696  697  /* Pool vdev add/remove lock */
 697  698  extern uint64_t spa_vdev_enter(spa_t *spa);
 698  699  extern uint64_t spa_vdev_config_enter(spa_t *spa);
 699  700  extern void spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg,
 700  701      int error, char *tag);
 701  702  extern int spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error);
 702  703  
 703  704  /* Pool vdev state change lock */
 704  705  extern void spa_vdev_state_enter(spa_t *spa, int oplock);
 705  706  extern int spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error);
 706  707  
 707  708  /* Log state */
 708  709  typedef enum spa_log_state {
 709  710          SPA_LOG_UNKNOWN = 0,    /* unknown log state */
 710  711          SPA_LOG_MISSING,        /* missing log(s) */
 711  712          SPA_LOG_CLEAR,          /* clear the log(s) */
 712  713          SPA_LOG_GOOD,           /* log(s) are good */
 713  714  } spa_log_state_t;
 714  715  
 715  716  extern spa_log_state_t spa_get_log_state(spa_t *spa);
 716  717  extern void spa_set_log_state(spa_t *spa, spa_log_state_t state);
 717  718  extern int spa_offline_log(spa_t *spa);
 718  719  
 719  720  /* Log claim callback */
 720  721  extern void spa_claim_notify(zio_t *zio);
 721  722  
 722  723  /* Accessor functions */
 723  724  extern boolean_t spa_shutting_down(spa_t *spa);
 724  725  extern struct dsl_pool *spa_get_dsl(spa_t *spa);
 725  726  extern boolean_t spa_is_initializing(spa_t *spa);
 726  727  extern blkptr_t *spa_get_rootblkptr(spa_t *spa);
 727  728  extern void spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp);
 728  729  extern void spa_altroot(spa_t *, char *, size_t);
 729  730  extern int spa_sync_pass(spa_t *spa);
 730  731  extern char *spa_name(spa_t *spa);
 731  732  extern uint64_t spa_guid(spa_t *spa);
 732  733  extern uint64_t spa_load_guid(spa_t *spa);
 733  734  extern uint64_t spa_last_synced_txg(spa_t *spa);
 734  735  extern uint64_t spa_first_txg(spa_t *spa);
 735  736  extern uint64_t spa_syncing_txg(spa_t *spa);
 736  737  extern uint64_t spa_version(spa_t *spa);
 737  738  extern pool_state_t spa_state(spa_t *spa);
 738  739  extern spa_load_state_t spa_load_state(spa_t *spa);
 739  740  extern uint64_t spa_freeze_txg(spa_t *spa);
 740  741  extern uint64_t spa_get_asize(spa_t *spa, uint64_t lsize);
 741  742  extern uint64_t spa_get_dspace(spa_t *spa);
 742  743  extern void spa_update_dspace(spa_t *spa);
 743  744  extern uint64_t spa_version(spa_t *spa);
 744  745  extern boolean_t spa_deflate(spa_t *spa);
 745  746  extern metaslab_class_t *spa_normal_class(spa_t *spa);
 746  747  extern metaslab_class_t *spa_log_class(spa_t *spa);
 747  748  extern int spa_max_replication(spa_t *spa);
 748  749  extern int spa_prev_software_version(spa_t *spa);
 749  750  extern int spa_busy(void);
 750  751  extern uint8_t spa_get_failmode(spa_t *spa);
 751  752  extern boolean_t spa_suspended(spa_t *spa);
 752  753  extern uint64_t spa_bootfs(spa_t *spa);
 753  754  extern uint64_t spa_delegation(spa_t *spa);
 754  755  extern objset_t *spa_meta_objset(spa_t *spa);
 755  756  extern uint64_t spa_deadman_synctime(spa_t *spa);
 756  757  
 757  758  /* Miscellaneous support routines */
 758  759  extern void spa_activate_mos_feature(spa_t *spa, const char *feature,
 759  760      dmu_tx_t *tx);
 760  761  extern void spa_deactivate_mos_feature(spa_t *spa, const char *feature);
 761  762  extern int spa_rename(const char *oldname, const char *newname);
 762  763  extern spa_t *spa_by_guid(uint64_t pool_guid, uint64_t device_guid);
 763  764  extern boolean_t spa_guid_exists(uint64_t pool_guid, uint64_t device_guid);
 764  765  extern char *spa_strdup(const char *);
 765  766  extern void spa_strfree(char *);
 766  767  extern uint64_t spa_get_random(uint64_t range);
 767  768  extern uint64_t spa_generate_guid(spa_t *spa);
 768  769  extern void snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp);
 769  770  extern void spa_freeze(spa_t *spa);
 770  771  extern int spa_change_guid(spa_t *spa);
 771  772  extern void spa_upgrade(spa_t *spa, uint64_t version);
 772  773  extern void spa_evict_all(void);
 773  774  extern vdev_t *spa_lookup_by_guid(spa_t *spa, uint64_t guid,
 774  775      boolean_t l2cache);
 775  776  extern boolean_t spa_has_spare(spa_t *, uint64_t guid);
 776  777  extern uint64_t dva_get_dsize_sync(spa_t *spa, const dva_t *dva);
 777  778  extern uint64_t bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp);
 778  779  extern uint64_t bp_get_dsize(spa_t *spa, const blkptr_t *bp);
 779  780  extern boolean_t spa_has_slogs(spa_t *spa);
 780  781  extern boolean_t spa_is_root(spa_t *spa);
 781  782  extern boolean_t spa_writeable(spa_t *spa);
 782  783  
 783  784  extern int spa_mode(spa_t *spa);
 784  785  extern uint64_t strtonum(const char *str, char **nptr);
 785  786  
 786  787  extern char *spa_his_ievent_table[];
 787  788  
 788  789  extern void spa_history_create_obj(spa_t *spa, dmu_tx_t *tx);
 789  790  extern int spa_history_get(spa_t *spa, uint64_t *offset, uint64_t *len_read,
 790  791      char *his_buf);
 791  792  extern int spa_history_log(spa_t *spa, const char *his_buf);
 792  793  extern int spa_history_log_nvl(spa_t *spa, nvlist_t *nvl);
 793  794  extern void spa_history_log_version(spa_t *spa, const char *operation);
 794  795  extern void spa_history_log_internal(spa_t *spa, const char *operation,
 795  796      dmu_tx_t *tx, const char *fmt, ...);
 796  797  extern void spa_history_log_internal_ds(struct dsl_dataset *ds, const char *op,
 797  798      dmu_tx_t *tx, const char *fmt, ...);
 798  799  extern void spa_history_log_internal_dd(dsl_dir_t *dd, const char *operation,
 799  800      dmu_tx_t *tx, const char *fmt, ...);
 800  801  
 801  802  /* error handling */
 802  803  struct zbookmark;
 803  804  extern void spa_log_error(spa_t *spa, zio_t *zio);
 804  805  extern void zfs_ereport_post(const char *class, spa_t *spa, vdev_t *vd,
 805  806      zio_t *zio, uint64_t stateoroffset, uint64_t length);
 806  807  extern void zfs_post_remove(spa_t *spa, vdev_t *vd);
 807  808  extern void zfs_post_state_change(spa_t *spa, vdev_t *vd);
 808  809  extern void zfs_post_autoreplace(spa_t *spa, vdev_t *vd);
 809  810  extern uint64_t spa_get_errlog_size(spa_t *spa);
 810  811  extern int spa_get_errlog(spa_t *spa, void *uaddr, size_t *count);
 811  812  extern void spa_errlog_rotate(spa_t *spa);
 812  813  extern void spa_errlog_drain(spa_t *spa);
 813  814  extern void spa_errlog_sync(spa_t *spa, uint64_t txg);
 814  815  extern void spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub);
 815  816  
 816  817  /* vdev cache */
 817  818  extern void vdev_cache_stat_init(void);
 818  819  extern void vdev_cache_stat_fini(void);
 819  820  
 820  821  /* Initialization and termination */
 821  822  extern void spa_init(int flags);
 822  823  extern void spa_fini(void);
 823  824  extern void spa_boot_init();
 824  825  
 825  826  /* properties */
 826  827  extern int spa_prop_set(spa_t *spa, nvlist_t *nvp);
 827  828  extern int spa_prop_get(spa_t *spa, nvlist_t **nvp);
 828  829  extern void spa_prop_clear_bootfs(spa_t *spa, uint64_t obj, dmu_tx_t *tx);
 829  830  extern void spa_configfile_set(spa_t *, nvlist_t *, boolean_t);
 830  831  
 831  832  /* asynchronous event notification */
 832  833  extern void spa_event_notify(spa_t *spa, vdev_t *vdev, const char *name);
 833  834  
 834  835  #ifdef ZFS_DEBUG
 835  836  #define dprintf_bp(bp, fmt, ...) do {                           \
 836  837          if (zfs_flags & ZFS_DEBUG_DPRINTF) {                    \
 837  838          char *__blkbuf = kmem_alloc(BP_SPRINTF_LEN, KM_SLEEP);  \
 838  839          snprintf_blkptr(__blkbuf, BP_SPRINTF_LEN, (bp));        \
 839  840          dprintf(fmt " %s\n", __VA_ARGS__, __blkbuf);            \
 840  841          kmem_free(__blkbuf, BP_SPRINTF_LEN);                    \
 841  842          } \
 842  843  _NOTE(CONSTCOND) } while (0)
 843  844  #else
 844  845  #define dprintf_bp(bp, fmt, ...)
 845  846  #endif
 846  847  
 847  848  extern boolean_t spa_debug_enabled(spa_t *spa);
 848  849  #define spa_dbgmsg(spa, ...)                    \
 849  850  {                                               \
 850  851          if (spa_debug_enabled(spa))             \
 851  852                  zfs_dbgmsg(__VA_ARGS__);        \
 852  853  }
 853  854  
 854  855  extern int spa_mode_global;                     /* mode, e.g. FREAD | FWRITE */
 855  856  
 856  857  #ifdef  __cplusplus
 857  858  }
 858  859  #endif
 859  860  
 860  861  #endif  /* _SYS_SPA_H */
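
The only functional change in this header is the new spa_open_lock() prototype (new line 573), which extends spa_open() with an extra lock argument. The sketch below is illustrative only: it assumes the flag tells spa_open_lock() whether to acquire spa_namespace_lock itself (nonzero) or to rely on the caller already holding it (zero), which is one plausible reading for an import-speedup change; the actual semantics are defined on the spa.c side and are not visible in this file.

        /*
         * Illustrative caller sketch only -- the meaning of the 'lock'
         * flag is an assumption, not something this hunk establishes.
         */
        static int
        example_open_pool(const char *name, spa_t **spap, void *tag)
        {
                int error;

                if (MUTEX_HELD(&spa_namespace_lock)) {
                        /* Caller is already serialized on the namespace. */
                        error = spa_open_lock(name, spap, tag, 0);
                } else {
                        /* Presumably equivalent to the existing spa_open() path. */
                        error = spa_open_lock(name, spap, tag, 1);
                }

                if (error == 0) {
                        /* ... operate on *spap ... */
                        spa_close(*spap, tag);
                }
                return (error);
        }

If that reading is right, import-path callers that already hold spa_namespace_lock can avoid a second trip through it, which would be where the speedup comes from.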
  