3742 zfs comments need cleaner, more consistent style
Submitted by:   Will Andrews <willa@spectralogic.com>
Submitted by:   Alan Somers <alans@spectralogic.com>
Reviewed by:    Matthew Ahrens <mahrens@delphix.com>
Reviewed by:    George Wilson <george.wilson@delphix.com>
Reviewed by:    Eric Schrock <eric.schrock@delphix.com>

          --- old/usr/src/uts/common/fs/zfs/arc.c
          +++ new/usr/src/uts/common/fs/zfs/arc.c
... 49 lines elided ...
  50   50   * into the cache until we can make space available.
  51   51   *
  52   52   * 2. The Megiddo and Modha model assumes a fixed cache size.
  53   53   * Pages are evicted when the cache is full and there is a cache
  54   54   * miss.  Our model has a variable sized cache.  It grows with
  55   55   * high use, but also tries to react to memory pressure from the
  56   56   * operating system: decreasing its size when system memory is
  57   57   * tight.
  58   58   *
  59   59   * 3. The Megiddo and Modha model assumes a fixed page size. All
  60      - * elements of the cache are therefor exactly the same size.  So
       60 + * elements of the cache are therefore exactly the same size.  So
  61   61   * when adjusting the cache size following a cache miss, its simply
  62   62   * a matter of choosing a single page to evict.  In our model, we
  63   63   * have variable sized cache blocks (rangeing from 512 bytes to
  64      - * 128K bytes).  We therefor choose a set of blocks to evict to make
       64 + * 128K bytes).  We therefore choose a set of blocks to evict to make
  65   65   * space for a cache miss that approximates as closely as possible
  66   66   * the space used by the new block.
  67   67   *
  68   68   * See also:  "ARC: A Self-Tuning, Low Overhead Replacement Cache"
  69   69   * by N. Megiddo & D. Modha, FAST 2003
  70   70   */
  71   71  
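The variable-block-size eviction described in the comment above boils down to walking an eviction list and freeing blocks until roughly the requested amount of space is recovered. A minimal, self-contained sketch of that idea (types and names are illustrative, not the ARC's actual eviction path):

#include <stddef.h>

typedef struct cached_block {
	size_t			size;	/* anywhere from 512 bytes to 128K */
	struct cached_block	*next;	/* next-oldest block on the LRU list */
} cached_block_t;

/*
 * Evict blocks from the cold end of an LRU list until at least `needed`
 * bytes have been freed; returns the number of bytes actually freed.
 */
static size_t
evict_for_space(cached_block_t **coldest, size_t needed,
    void (*free_block)(cached_block_t *))
{
	size_t freed = 0;

	while (freed < needed && *coldest != NULL) {
		cached_block_t *victim = *coldest;

		*coldest = victim->next;
		freed += victim->size;
		free_block(victim);
	}
	return (freed);
}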
  72   72  /*
  73   73   * The locking model:
  74   74   *
  75   75   * A new reference to a cache buffer can be obtained in two
  76   76   * ways: 1) via a hash table lookup using the DVA as a key,
  77   77   * or 2) via one of the ARC lists.  The arc_read() interface
  78   78   * uses method 1, while the internal arc algorithms for
  79      - * adjusting the cache use method 2.  We therefor provide two
       79 + * adjusting the cache use method 2.  We therefore provide two
  80   80   * types of locks: 1) the hash table lock array, and 2) the
  81   81   * arc list locks.
  82   82   *
  83   83   * Buffers do not have their own mutexes, rather they rely on the
  84   84   * hash table mutexes for the bulk of their protection (i.e. most
  85   85   * fields in the arc_buf_hdr_t are protected by these mutexes).
  86   86   *
  87   87   * buf_hash_find() returns the appropriate mutex (held) when it
  88   88   * locates the requested buffer in the hash table.  It returns
  89   89   * NULL for the mutex if the buffer was not in the table.
... 278 lines elided ...
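buf_hash_find(), as described in the locking comment above, follows the common hash-table lock-array pattern: hash the key, take the mutex that covers that chain, and hand the held mutex back to the caller on a hit. A minimal sketch of that pattern using pthreads (sizes and names are illustrative, not the ZFS implementation):

#include <pthread.h>
#include <stdint.h>
#include <stddef.h>

#define	TABLE_SIZE	4096
#define	LOCK_COUNT	256	/* each mutex covers many hash chains */

typedef struct entry {
	uint64_t	key;	/* stands in for the DVA */
	struct entry	*next;	/* hash chain link */
} entry_t;

static entry_t		*table[TABLE_SIZE];
static pthread_mutex_t	locks[LOCK_COUNT];	/* pthread_mutex_init() at startup */

/*
 * Return the entry for `key` with its chain mutex held (caller unlocks),
 * or NULL with *lockp cleared and no mutex held -- mirroring the contract
 * described for buf_hash_find() above.
 */
static entry_t *
table_find(uint64_t key, pthread_mutex_t **lockp)
{
	uint64_t idx = key % TABLE_SIZE;
	pthread_mutex_t *lock = &locks[idx % LOCK_COUNT];
	entry_t *e;

	pthread_mutex_lock(lock);
	for (e = table[idx]; e != NULL; e = e->next) {
		if (e->key == key) {
			*lockp = lock;
			return (e);
		}
	}
	pthread_mutex_unlock(lock);
	*lockp = NULL;
	return (NULL);
}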
 368  368          { "duplicate_buffers_size",     KSTAT_DATA_UINT64 },
 369  369          { "duplicate_reads",            KSTAT_DATA_UINT64 },
 370  370          { "arc_meta_used",              KSTAT_DATA_UINT64 },
 371  371          { "arc_meta_limit",             KSTAT_DATA_UINT64 },
 372  372          { "arc_meta_max",               KSTAT_DATA_UINT64 }
 373  373  };
 374  374  
 375  375  #define ARCSTAT(stat)   (arc_stats.stat.value.ui64)
 376  376  
 377  377  #define ARCSTAT_INCR(stat, val) \
 378      -        atomic_add_64(&arc_stats.stat.value.ui64, (val));
      378 +        atomic_add_64(&arc_stats.stat.value.ui64, (val))
 379  379  
 380  380  #define ARCSTAT_BUMP(stat)      ARCSTAT_INCR(stat, 1)
 381  381  #define ARCSTAT_BUMPDOWN(stat)  ARCSTAT_INCR(stat, -1)
 382  382  
 383  383  #define ARCSTAT_MAX(stat, val) {                                        \
 384  384          uint64_t m;                                                     \
 385  385          while ((val) > (m = arc_stats.stat.value.ui64) &&               \
 386  386              (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val)))) \
 387  387                  continue;                                               \
 388  388  }
... 199 lines elided ...
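The change to ARCSTAT_INCR above drops a trailing semicolon from the macro body, the usual hygiene fix for statement-like macros: with the extra semicolon, the caller's own semicolon produces an empty statement that detaches a following else. A small illustration of the usage the fixed form permits (stat names as used elsewhere in this file):

/*
 * With the old definition (trailing ';' inside ARCSTAT_INCR) this would not
 * compile: the expansion ends in ";;", and the stray empty statement leaves
 * the else without a matching if.
 */
if (hit)
	ARCSTAT_BUMP(arcstat_hits);
else
	ARCSTAT_BUMP(arcstat_misses);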
 588  588   */
 589  589  
 590  590  #define L2ARC_WRITE_SIZE        (8 * 1024 * 1024)       /* initial write max */
 591  591  #define L2ARC_HEADROOM          2               /* num of writes */
 592  592  #define L2ARC_FEED_SECS         1               /* caching interval secs */
 593  593  #define L2ARC_FEED_MIN_MS       200             /* min caching interval ms */
 594  594  
 595  595  #define l2arc_writes_sent       ARCSTAT(arcstat_l2_writes_sent)
 596  596  #define l2arc_writes_done       ARCSTAT(arcstat_l2_writes_done)
 597  597  
 598      -/*
 599      - * L2ARC Performance Tunables
 600      - */
      598 +/* L2ARC Performance Tunables */
 601  599  uint64_t l2arc_write_max = L2ARC_WRITE_SIZE;    /* default max write size */
 602  600  uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE;  /* extra write during warmup */
 603  601  uint64_t l2arc_headroom = L2ARC_HEADROOM;       /* number of dev writes */
 604  602  uint64_t l2arc_feed_secs = L2ARC_FEED_SECS;     /* interval seconds */
 605  603  uint64_t l2arc_feed_min_ms = L2ARC_FEED_MIN_MS; /* min interval milliseconds */
 606  604  boolean_t l2arc_noprefetch = B_TRUE;            /* don't cache prefetch bufs */
 607  605  boolean_t l2arc_feed_again = B_TRUE;            /* turbo warmup */
 608  606  boolean_t l2arc_norw = B_TRUE;                  /* no reads during writes */
 609  607  
 610  608  /*
... 2927 lines elided ...
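Per the tunables above, l2arc_write_boost only supplements l2arc_write_max while the main ARC is still warming up. A hedged sketch of how a feed pass could size its writes under that policy (the helper and its warm-up flag are assumptions for illustration, not necessarily the exact logic elided below):

/*
 * Sketch: bytes to aim for in one L2ARC feed pass.  The boost applies only
 * until the primary ARC has warmed up ("turbo warmup"); `warm` is an
 * illustrative parameter, not the variable the real feed thread consults.
 */
static uint64_t
l2arc_pass_size(boolean_t warm)
{
	uint64_t size = l2arc_write_max;

	if (!warm)
		size += l2arc_write_boost;

	return (size);
}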
3538 3536  
3539 3537          /*
3540 3538           * Don't count loaned bufs as in flight dirty data to prevent long
3541 3539           * network delays from blocking transactions that are ready to be
3542 3540           * assigned to a txg.
3543 3541           */
3544 3542          anon_size = MAX((int64_t)(arc_anon->arcs_size - arc_loaned_bytes), 0);
3545 3543  
3546 3544          /*
3547 3545           * Writes will, almost always, require additional memory allocations
3548      -         * in order to compress/encrypt/etc the data.  We therefor need to
     3546 +         * in order to compress/encrypt/etc the data.  We therefore need to
3549 3547           * make sure that there is sufficient available memory for this.
3550 3548           */
3551 3549          if (error = arc_memory_throttle(reserve, anon_size, txg))
3552 3550                  return (error);
3553 3551  
3554 3552          /*
3555 3553           * Throttle writes when the amount of dirty data in the cache
3556 3554           * gets too large.  We try to keep the cache less than half full
3557 3555           * of dirty blocks so that our sync times don't grow too large.
3558 3556           * Note: if two requests come in concurrently, we might let them
... 1240 lines elided ...
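The "less than half full of dirty blocks" policy described above reduces to comparing the requested reservation plus outstanding dirty bytes against half of the target cache size, and asking the caller to retry when it would overflow. A rough sketch of that comparison (arc_c as the target size and ERESTART as the retry error are assumptions; the actual check lives in the code elided here):

/*
 * Rough sketch of the dirty-data throttle: refuse the reservation when
 * reserved plus anonymous (dirty) bytes would exceed half the target ARC
 * size, so the caller backs off and retries.  Not the exact elided check.
 */
if (reserve + anon_size > arc_c / 2)
	return (ERESTART);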