arc_get_data_buf should be more aggressive in eviction when memory is unavailable

          --- old/usr/src/uts/common/fs/zfs/zio.c
          +++ new/usr/src/uts/common/fs/zfs/zio.c
(213 lines elided)
 214  214  zio_buf_alloc(size_t size)
 215  215  {
 216  216          size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
 217  217  
 218  218          ASSERT3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
 219  219  
 220  220          return (kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE));
 221  221  }
 222  222  
 223  223  /*
      224 + * Same as zio_buf_alloc, but won't sleep in case memory cannot be allocated
      225 + * and will instead return immediately with a failure.
      226 + */
      227 +void *
      228 +zio_buf_alloc_canfail(size_t size)
      229 +{
      230 +        size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
      231 +
      232 +        ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
      233 +
      234 +        return (kmem_cache_alloc(zio_buf_cache[c], KM_NOSLEEP | KM_NORMALPRI));
      235 +}
      236 +
      237 +/*
 224  238   * Use zio_data_buf_alloc to allocate data.  The data will not appear in a
 225  239   * crashdump if the kernel panics.  This exists so that we will limit the amount
 226  240   * of ZFS data that shows up in a kernel crashdump.  (Thus reducing the amount
 227  241   * of kernel heap dumped to disk when the kernel panics)
 228  242   */
 229  243  void *
 230  244  zio_data_buf_alloc(size_t size)
 231  245  {
 232  246          size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
 233  247  
 234  248          ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
 235  249  
 236  250          return (kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE));
 237  251  }
 238  252  
      253 +/*
      254 + * Same as zio_data_buf_alloc, but won't sleep in case memory cannot be
      255 + * allocated and will instead return immediately with a failure.
      256 + */
      257 +void *
      258 +zio_data_buf_alloc_canfail(size_t size)
      259 +{
      260 +        size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
      261 +
      262 +        ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
      263 +
      264 +        return (kmem_cache_alloc(zio_data_buf_cache[c],
      265 +            KM_NOSLEEP | KM_NORMALPRI));
      266 +}
      267 +
 239  268  void
 240  269  zio_buf_free(void *buf, size_t size)
 241  270  {
 242  271          size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
 243  272  
 244  273          ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
 245  274  
 246  275          kmem_cache_free(zio_buf_cache[c], buf);
 247  276  }
 248  277  
(3016 lines elided)
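
For context, a caller of the new *_canfail variants would attempt the non-blocking allocation first and, only if it fails, trigger ARC eviction before falling back to the sleeping allocator. The sketch below is illustrative only and is not part of this webrev; arc_alloc_data_buf() and arc_kick_eviction() are hypothetical names standing in for the ARC's actual allocation and reclaim paths.

    /*
     * Illustrative caller pattern only (not from this change).
     * arc_kick_eviction() is a hypothetical stand-in for whatever
     * eviction/reclaim work the ARC performs when memory is tight.
     */
    static void *
    arc_alloc_data_buf(size_t size)
    {
            void *buf;

            /* Try the non-blocking allocator; it may return NULL. */
            buf = zio_data_buf_alloc_canfail(size);
            if (buf != NULL)
                    return (buf);

            /*
             * Memory is unavailable: evict from the ARC before falling
             * back to the sleeping allocator, rather than blocking in
             * kmem while the ARC still holds reclaimable buffers.
             */
            arc_kick_eviction(size);

            return (zio_data_buf_alloc(size));
    }

The point of the pattern is that KM_NOSLEEP | KM_NORMALPRI turns allocation pressure into an immediate, visible failure, which lets the ARC react (evict) instead of stalling inside the allocator.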