3748 zfs headers should be C++ compatible
Submitted by:   Justin Gibbs <justing@spectralogic.com>
Submitted by:   Will Andrews <willa@spectralogic.com>
Reviewed by:    Matthew Ahrens <mahrens@delphix.com>

------- old version -------


2769 /*
2770  * "Read" the block at the specified DVA (in bp) via the
2771  * cache.  If the block is found in the cache, invoke the provided
2772  * callback immediately and return.  Note that the `zio' parameter
2773  * in the callback will be NULL in this case, since no IO was
2774  * required.  If the block is not in the cache, pass the read request
2775  * on to the spa with a substitute callback function, so that the
2776  * requested block will be added to the cache.
2777  *
2778  * If a read request arrives for a block that has a read in-progress,
2779  * either wait for the in-progress read to complete (and return the
2780  * results); or, if this is a read with a "done" func, add a record
2781  * to the read to invoke the "done" func when the read completes,
2782  * and return; or just return.
2783  *
2784  * arc_read_done() will invoke all the requested "done" functions
2785  * for readers of this block.
2786  */
2787 int
2788 arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, arc_done_func_t *done,
2789     void *private, int priority, int zio_flags, uint32_t *arc_flags,
2790     const zbookmark_t *zb)
2791 {
2792         arc_buf_hdr_t *hdr;
2793         arc_buf_t *buf = NULL;
2794         kmutex_t *hash_lock;
2795         zio_t *rzio;
2796         uint64_t guid = spa_load_guid(spa);
2797 
2798 top:
2799         hdr = buf_hash_find(guid, BP_IDENTITY(bp), BP_PHYSICAL_BIRTH(bp),
2800             &hash_lock);
2801         if (hdr && hdr->b_datacnt > 0) {
2802 
2803                 *arc_flags |= ARC_CACHED;
2804 
2805                 if (HDR_IO_IN_PROGRESS(hdr)) {
2806 
2807                         if (*arc_flags & ARC_WAIT) {
2808                                 cv_wait(&hdr->b_cv, hash_lock);
2809                                 mutex_exit(hash_lock);
2810                                 goto top;
2811                         }
2812                         ASSERT(*arc_flags & ARC_NOWAIT);
2813 
2814                         if (done) {
2815                                 arc_callback_t  *acb = NULL;
2816 
2817                                 acb = kmem_zalloc(sizeof (arc_callback_t),
2818                                     KM_SLEEP);
2819                                 acb->acb_done = done;
2820                                 acb->acb_private = private;
2821                                 if (pio != NULL)
2822                                         acb->acb_zio_dummy = zio_null(pio,
2823                                             spa, NULL, NULL, NULL, zio_flags);
2824 
2825                                 ASSERT(acb->acb_done != NULL);
2826                                 acb->acb_next = hdr->b_acb;
2827                                 hdr->b_acb = acb;
2828                                 add_reference(hdr, hash_lock, private);
2829                                 mutex_exit(hash_lock);
2830                                 return (0);
2831                         }
2832                         mutex_exit(hash_lock);
2833                         return (0);
2834                 }
2835 
2836                 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
2837 
2838                 if (done) {
2839                         add_reference(hdr, hash_lock, private);
2840                         /*
2841                          * If this block is already in use, create a new
2842                          * copy of the data so that we will be guaranteed
2843                          * that arc_release() will always succeed.
2844                          */
2845                         buf = hdr->b_buf;
2846                         ASSERT(buf);
2847                         ASSERT(buf->b_data);
2848                         if (HDR_BUF_AVAILABLE(hdr)) {
2849                                 ASSERT(buf->b_efunc == NULL);
2850                                 hdr->b_flags &= ~ARC_BUF_AVAILABLE;
2851                         } else {
2852                                 buf = arc_buf_clone(buf);
2853                         }
2854 
2855                 } else if (*arc_flags & ARC_PREFETCH &&
2856                     refcount_count(&hdr->b_refcnt) == 0) {
2857                         hdr->b_flags |= ARC_PREFETCH;
2858                 }
2859                 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
2860                 arc_access(hdr, hash_lock);
2861                 if (*arc_flags & ARC_L2CACHE)
2862                         hdr->b_flags |= ARC_L2CACHE;
2863                 mutex_exit(hash_lock);
2864                 ARCSTAT_BUMP(arcstat_hits);
2865                 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
2866                     demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
2867                     data, metadata, hits);
2868 
2869                 if (done)
2870                         done(NULL, buf, private);
2871         } else {
2872                 uint64_t size = BP_GET_LSIZE(bp);
2873                 arc_callback_t  *acb;
2874                 vdev_t *vd = NULL;
2875                 uint64_t addr = 0;
2876                 boolean_t devw = B_FALSE;
2877 
2878                 if (hdr == NULL) {
2879                         /* this block is not in the cache */
2880                         arc_buf_hdr_t   *exists;
2881                         arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp);
2882                         buf = arc_buf_alloc(spa, size, private, type);
2883                         hdr = buf->b_hdr;
2884                         hdr->b_dva = *BP_IDENTITY(bp);
2885                         hdr->b_birth = BP_PHYSICAL_BIRTH(bp);
2886                         hdr->b_cksum0 = bp->blk_cksum.zc_word[0];
2887                         exists = buf_hash_insert(hdr, &hash_lock);
2888                         if (exists) {
2889                                 /* somebody beat us to the hash insert */
2890                                 mutex_exit(hash_lock);
2891                                 buf_discard_identity(hdr);
2892                                 (void) arc_buf_remove_ref(buf, private);
2893                                 goto top; /* restart the IO request */
2894                         }
2895                         /* if this is a prefetch, we don't have a reference */
2896                         if (*arc_flags & ARC_PREFETCH) {
2897                                 (void) remove_reference(hdr, hash_lock,
2898                                     private);
2899                                 hdr->b_flags |= ARC_PREFETCH;
2900                         }
2901                         if (*arc_flags & ARC_L2CACHE)
2902                                 hdr->b_flags |= ARC_L2CACHE;
2903                         if (BP_GET_LEVEL(bp) > 0)
2904                                 hdr->b_flags |= ARC_INDIRECT;
2905                 } else {
2906                         /* this block is in the ghost cache */
2907                         ASSERT(GHOST_STATE(hdr->b_state));
2908                         ASSERT(!HDR_IO_IN_PROGRESS(hdr));
2909                         ASSERT0(refcount_count(&hdr->b_refcnt));
2910                         ASSERT(hdr->b_buf == NULL);
2911 
2912                         /* if this is a prefetch, we don't have a reference */
2913                         if (*arc_flags & ARC_PREFETCH)
2914                                 hdr->b_flags |= ARC_PREFETCH;
2915                         else
2916                                 add_reference(hdr, hash_lock, private);
2917                         if (*arc_flags & ARC_L2CACHE)
2918                                 hdr->b_flags |= ARC_L2CACHE;
2919                         buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
2920                         buf->b_hdr = hdr;
2921                         buf->b_data = NULL;
2922                         buf->b_efunc = NULL;
2923                         buf->b_private = NULL;
2924                         buf->b_next = NULL;
2925                         hdr->b_buf = buf;
2926                         ASSERT(hdr->b_datacnt == 0);
2927                         hdr->b_datacnt = 1;
2928                         arc_get_data_buf(buf);
2929                         arc_access(hdr, hash_lock);
2930                 }
2931 
2932                 ASSERT(!GHOST_STATE(hdr->b_state));
2933 
2934                 acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
2935                 acb->acb_done = done;
2936                 acb->acb_private = private;
2937 
2938                 ASSERT(hdr->b_acb == NULL);
2939                 hdr->b_acb = acb;
2940                 hdr->b_flags |= ARC_IO_IN_PROGRESS;
2941 
2942                 if (HDR_L2CACHE(hdr) && hdr->b_l2hdr != NULL &&
2943                     (vd = hdr->b_l2hdr->b_dev->l2ad_vdev) != NULL) {
2944                         devw = hdr->b_l2hdr->b_dev->l2ad_writing;
2945                         addr = hdr->b_l2hdr->b_daddr;
2946                         /*
2947                          * Lock out device removal.
2948                          */
2949                         if (vdev_is_dead(vd) ||
2950                             !spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER))
2951                                 vd = NULL;
2952                 }
2953 
2954                 mutex_exit(hash_lock);
2955 
2956                 ASSERT3U(hdr->b_size, ==, size);


3029                         if (l2arc_ndev != 0) {
3030                                 DTRACE_PROBE1(l2arc__miss,
3031                                     arc_buf_hdr_t *, hdr);
3032                                 ARCSTAT_BUMP(arcstat_l2_misses);
3033                         }
3034                 }
3035 
3036                 rzio = zio_read(pio, spa, bp, buf->b_data, size,
3037                     arc_read_done, buf, priority, zio_flags, zb);
3038 
3039                 if (*arc_flags & ARC_WAIT)
3040                         return (zio_wait(rzio));
3041 
3042                 ASSERT(*arc_flags & ARC_NOWAIT);
3043                 zio_nowait(rzio);
3044         }
3045         return (0);
3046 }
3047 
3048 void
3049 arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private)
3050 {
3051         ASSERT(buf->b_hdr != NULL);
3052         ASSERT(buf->b_hdr->b_state != arc_anon);
3053         ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL);
3054         ASSERT(buf->b_efunc == NULL);
3055         ASSERT(!HDR_BUF_AVAILABLE(buf->b_hdr));
3056 
3057         buf->b_efunc = func;
3058         buf->b_private = private;
3059 }
3060 
3061 /*
3062  * This is used by the DMU to let the ARC know that a buffer is
3063  * being evicted, so the ARC should clean up.  If this arc buf
3064  * is not yet in the evicted state, it will be put there.
3065  */
3066 int
3067 arc_buf_evict(arc_buf_t *buf)
3068 {
3069         arc_buf_hdr_t *hdr;
3070         kmutex_t *hash_lock;
3071         arc_buf_t **bufp;
3072 
3073         mutex_enter(&buf->b_evict_lock);
3074         hdr = buf->b_hdr;
3075         if (hdr == NULL) {
3076                 /*
3077                  * We are in arc_do_user_evicts().
3078                  */


3397                         }
3398                 }
3399                 hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
3400                 /* if it's not anon, we are doing a scrub */
3401                 if (!exists && hdr->b_state == arc_anon)
3402                         arc_access(hdr, hash_lock);
3403                 mutex_exit(hash_lock);
3404         } else {
3405                 hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
3406         }
3407 
3408         ASSERT(!refcount_is_zero(&hdr->b_refcnt));
3409         callback->awcb_done(zio, buf, callback->awcb_private);
3410 
3411         kmem_free(callback, sizeof (arc_write_callback_t));
3412 }
3413 
3414 zio_t *
3415 arc_write(zio_t *pio, spa_t *spa, uint64_t txg,
3416     blkptr_t *bp, arc_buf_t *buf, boolean_t l2arc, const zio_prop_t *zp,
3417     arc_done_func_t *ready, arc_done_func_t *done, void *private,
3418     int priority, int zio_flags, const zbookmark_t *zb)
3419 {
3420         arc_buf_hdr_t *hdr = buf->b_hdr;
3421         arc_write_callback_t *callback;
3422         zio_t *zio;
3423 
3424         ASSERT(ready != NULL);
3425         ASSERT(done != NULL);
3426         ASSERT(!HDR_IO_ERROR(hdr));
3427         ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0);
3428         ASSERT(hdr->b_acb == NULL);
3429         if (l2arc)
3430                 hdr->b_flags |= ARC_L2CACHE;
3431         callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP);
3432         callback->awcb_ready = ready;
3433         callback->awcb_done = done;
3434         callback->awcb_private = private;
3435         callback->awcb_buf = buf;
3436 
3437         zio = zio_write(pio, spa, txg, bp, buf->b_data, hdr->b_size, zp,
3438             arc_write_ready, arc_write_done, callback, priority, zio_flags, zb);
3439 
3440         return (zio);
3441 }
3442 
3443 static int
3444 arc_memory_throttle(uint64_t reserve, uint64_t inflight_data, uint64_t txg)
3445 {
3446 #ifdef _KERNEL
3447         uint64_t available_memory = ptob(freemem);
3448         static uint64_t page_load = 0;
3449         static uint64_t last_txg = 0;
3450 
3451 #if defined(__i386)
3452         available_memory =
3453             MIN(available_memory, vmem_size(heap_arena, VMEM_FREE));
3454 #endif

------- new version -------
(the `private' parameters are renamed to `cb_private': `private' is a reserved
word in C++, so parameter declarations using it make the zfs headers unusable
from C++)




2769 /*
2770  * "Read" the block at the specified DVA (in bp) via the
2771  * cache.  If the block is found in the cache, invoke the provided
2772  * callback immediately and return.  Note that the `zio' parameter
2773  * in the callback will be NULL in this case, since no IO was
2774  * required.  If the block is not in the cache, pass the read request
2775  * on to the spa with a substitute callback function, so that the
2776  * requested block will be added to the cache.
2777  *
2778  * If a read request arrives for a block that has a read in-progress,
2779  * either wait for the in-progress read to complete (and return the
2780  * results); or, if this is a read with a "done" func, add a record
2781  * to the read to invoke the "done" func when the read completes,
2782  * and return; or just return.
2783  *
2784  * arc_read_done() will invoke all the requested "done" functions
2785  * for readers of this block.
2786  */
2787 int
2788 arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, arc_done_func_t *done,
2789     void *cb_private, int priority, int zio_flags, uint32_t *arc_flags,
2790     const zbookmark_t *zb)
2791 {
2792         arc_buf_hdr_t *hdr;
2793         arc_buf_t *buf = NULL;
2794         kmutex_t *hash_lock;
2795         zio_t *rzio;
2796         uint64_t guid = spa_load_guid(spa);
2797 
2798 top:
2799         hdr = buf_hash_find(guid, BP_IDENTITY(bp), BP_PHYSICAL_BIRTH(bp),
2800             &hash_lock);
2801         if (hdr && hdr->b_datacnt > 0) {
2802 
2803                 *arc_flags |= ARC_CACHED;
2804 
2805                 if (HDR_IO_IN_PROGRESS(hdr)) {
2806 
2807                         if (*arc_flags & ARC_WAIT) {
2808                                 cv_wait(&hdr->b_cv, hash_lock);
2809                                 mutex_exit(hash_lock);
2810                                 goto top;
2811                         }
2812                         ASSERT(*arc_flags & ARC_NOWAIT);
2813 
2814                         if (done) {
2815                                 arc_callback_t  *acb = NULL;
2816 
2817                                 acb = kmem_zalloc(sizeof (arc_callback_t),
2818                                     KM_SLEEP);
2819                                 acb->acb_done = done;
2820                                 acb->acb_private = cb_private;
2821                                 if (pio != NULL)
2822                                         acb->acb_zio_dummy = zio_null(pio,
2823                                             spa, NULL, NULL, NULL, zio_flags);
2824 
2825                                 ASSERT(acb->acb_done != NULL);
2826                                 acb->acb_next = hdr->b_acb;
2827                                 hdr->b_acb = acb;
2828                                 add_reference(hdr, hash_lock, cb_private);
2829                                 mutex_exit(hash_lock);
2830                                 return (0);
2831                         }
2832                         mutex_exit(hash_lock);
2833                         return (0);
2834                 }
2835 
2836                 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
2837 
2838                 if (done) {
2839                         add_reference(hdr, hash_lock, cb_private);
2840                         /*
2841                          * If this block is already in use, create a new
2842                          * copy of the data so that we will be guaranteed
2843                          * that arc_release() will always succeed.
2844                          */
2845                         buf = hdr->b_buf;
2846                         ASSERT(buf);
2847                         ASSERT(buf->b_data);
2848                         if (HDR_BUF_AVAILABLE(hdr)) {
2849                                 ASSERT(buf->b_efunc == NULL);
2850                                 hdr->b_flags &= ~ARC_BUF_AVAILABLE;
2851                         } else {
2852                                 buf = arc_buf_clone(buf);
2853                         }
2854 
2855                 } else if (*arc_flags & ARC_PREFETCH &&
2856                     refcount_count(&hdr->b_refcnt) == 0) {
2857                         hdr->b_flags |= ARC_PREFETCH;
2858                 }
2859                 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
2860                 arc_access(hdr, hash_lock);
2861                 if (*arc_flags & ARC_L2CACHE)
2862                         hdr->b_flags |= ARC_L2CACHE;
2863                 mutex_exit(hash_lock);
2864                 ARCSTAT_BUMP(arcstat_hits);
2865                 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
2866                     demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
2867                     data, metadata, hits);
2868 
2869                 if (done)
2870                         done(NULL, buf, cb_private);
2871         } else {
2872                 uint64_t size = BP_GET_LSIZE(bp);
2873                 arc_callback_t  *acb;
2874                 vdev_t *vd = NULL;
2875                 uint64_t addr = 0;
2876                 boolean_t devw = B_FALSE;
2877 
2878                 if (hdr == NULL) {
2879                         /* this block is not in the cache */
2880                         arc_buf_hdr_t   *exists;
2881                         arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp);
2882                         buf = arc_buf_alloc(spa, size, cb_private, type);
2883                         hdr = buf->b_hdr;
2884                         hdr->b_dva = *BP_IDENTITY(bp);
2885                         hdr->b_birth = BP_PHYSICAL_BIRTH(bp);
2886                         hdr->b_cksum0 = bp->blk_cksum.zc_word[0];
2887                         exists = buf_hash_insert(hdr, &hash_lock);
2888                         if (exists) {
2889                                 /* somebody beat us to the hash insert */
2890                                 mutex_exit(hash_lock);
2891                                 buf_discard_identity(hdr);
2892                                 (void) arc_buf_remove_ref(buf, cb_private);
2893                                 goto top; /* restart the IO request */
2894                         }
2895                         /* if this is a prefetch, we don't have a reference */
2896                         if (*arc_flags & ARC_PREFETCH) {
2897                                 (void) remove_reference(hdr, hash_lock,
2898                                     cb_private);
2899                                 hdr->b_flags |= ARC_PREFETCH;
2900                         }
2901                         if (*arc_flags & ARC_L2CACHE)
2902                                 hdr->b_flags |= ARC_L2CACHE;
2903                         if (BP_GET_LEVEL(bp) > 0)
2904                                 hdr->b_flags |= ARC_INDIRECT;
2905                 } else {
2906                         /* this block is in the ghost cache */
2907                         ASSERT(GHOST_STATE(hdr->b_state));
2908                         ASSERT(!HDR_IO_IN_PROGRESS(hdr));
2909                         ASSERT0(refcount_count(&hdr->b_refcnt));
2910                         ASSERT(hdr->b_buf == NULL);
2911 
2912                         /* if this is a prefetch, we don't have a reference */
2913                         if (*arc_flags & ARC_PREFETCH)
2914                                 hdr->b_flags |= ARC_PREFETCH;
2915                         else
2916                                 add_reference(hdr, hash_lock, cb_private);
2917                         if (*arc_flags & ARC_L2CACHE)
2918                                 hdr->b_flags |= ARC_L2CACHE;
2919                         buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
2920                         buf->b_hdr = hdr;
2921                         buf->b_data = NULL;
2922                         buf->b_efunc = NULL;
2923                         buf->b_private = NULL;
2924                         buf->b_next = NULL;
2925                         hdr->b_buf = buf;
2926                         ASSERT(hdr->b_datacnt == 0);
2927                         hdr->b_datacnt = 1;
2928                         arc_get_data_buf(buf);
2929                         arc_access(hdr, hash_lock);
2930                 }
2931 
2932                 ASSERT(!GHOST_STATE(hdr->b_state));
2933 
2934                 acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
2935                 acb->acb_done = done;
2936                 acb->acb_private = cb_private;
2937 
2938                 ASSERT(hdr->b_acb == NULL);
2939                 hdr->b_acb = acb;
2940                 hdr->b_flags |= ARC_IO_IN_PROGRESS;
2941 
2942                 if (HDR_L2CACHE(hdr) && hdr->b_l2hdr != NULL &&
2943                     (vd = hdr->b_l2hdr->b_dev->l2ad_vdev) != NULL) {
2944                         devw = hdr->b_l2hdr->b_dev->l2ad_writing;
2945                         addr = hdr->b_l2hdr->b_daddr;
2946                         /*
2947                          * Lock out device removal.
2948                          */
2949                         if (vdev_is_dead(vd) ||
2950                             !spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER))
2951                                 vd = NULL;
2952                 }
2953 
2954                 mutex_exit(hash_lock);
2955 
2956                 ASSERT3U(hdr->b_size, ==, size);


3029                         if (l2arc_ndev != 0) {
3030                                 DTRACE_PROBE1(l2arc__miss,
3031                                     arc_buf_hdr_t *, hdr);
3032                                 ARCSTAT_BUMP(arcstat_l2_misses);
3033                         }
3034                 }
3035 
3036                 rzio = zio_read(pio, spa, bp, buf->b_data, size,
3037                     arc_read_done, buf, priority, zio_flags, zb);
3038 
3039                 if (*arc_flags & ARC_WAIT)
3040                         return (zio_wait(rzio));
3041 
3042                 ASSERT(*arc_flags & ARC_NOWAIT);
3043                 zio_nowait(rzio);
3044         }
3045         return (0);
3046 }
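
For review context, a minimal caller sketch of the renamed interface follows.
This is an editorial illustration, not part of this change; example_read_done,
example_issue_read, and the tag argument are hypothetical names.  It follows
the contract in the block comment above arc_read(): on a cache hit the "done"
func fires immediately with a NULL zio, and on a miss arc_read_done() invokes
it when the I/O completes.

/* Hypothetical done callback; `zio' is NULL when the block was cached. */
static void
example_read_done(zio_t *zio, arc_buf_t *buf, void *cb_private)
{
        if (zio == NULL || zio->io_error == 0) {
                /* ... consume buf->b_data ... */
        }
        /* drop the hold that arc_read() associated with this tag */
        (void) arc_buf_remove_ref(buf, cb_private);
}

/* Issue the read without blocking the caller. */
static void
example_issue_read(spa_t *spa, const blkptr_t *bp, const zbookmark_t *zb,
    void *tag)
{
        uint32_t aflags = ARC_NOWAIT;

        (void) arc_read(NULL, spa, bp, example_read_done, tag,
            ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &aflags, zb);
}

Passing ARC_WAIT in aflags instead makes arc_read() block and return the
result of zio_wait(rzio), as the tail of the function above shows.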
3047 
3048 void
3049 arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *cb_private)
3050 {
3051         ASSERT(buf->b_hdr != NULL);
3052         ASSERT(buf->b_hdr->b_state != arc_anon);
3053         ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL);
3054         ASSERT(buf->b_efunc == NULL);
3055         ASSERT(!HDR_BUF_AVAILABLE(buf->b_hdr));
3056 
3057         buf->b_efunc = func;
3058         buf->b_private = cb_private;
3059 }
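
A sketch of the eviction hook this installs (editorial; example_evict_func and
example_owner_t are hypothetical, and the single-argument callback shape is an
assumption based on this era's arc_evict_func_t, which receives the arc_buf_t
being evicted).  The cb_private pointer registered here comes back to the
callback through buf->b_private; dbuf_do_evict() in dbuf.c is the in-tree user
of this pattern.

typedef struct example_owner {
        arc_buf_t *eo_buf;      /* owner's cached pointer to the ARC buffer */
} example_owner_t;

/*
 * Hypothetical evict callback; assumed shape: one void * argument
 * carrying the buffer being evicted.
 */
static int
example_evict_func(void *arg)
{
        arc_buf_t *buf = arg;
        example_owner_t *owner = buf->b_private; /* set by arc_set_callback() */

        owner->eo_buf = NULL;   /* forget any pointers into buf->b_data */
        return (0);
}

static void
example_register(arc_buf_t *buf, example_owner_t *owner)
{
        /* buf must be referenced and non-anonymous, per the ASSERTs above */
        arc_set_callback(buf, example_evict_func, owner);
}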
3060 
3061 /*
3062  * This is used by the DMU to let the ARC know that a buffer is
3063  * being evicted, so the ARC should clean up.  If this arc buf
3064  * is not yet in the evicted state, it will be put there.
3065  */
3066 int
3067 arc_buf_evict(arc_buf_t *buf)
3068 {
3069         arc_buf_hdr_t *hdr;
3070         kmutex_t *hash_lock;
3071         arc_buf_t **bufp;
3072 
3073         mutex_enter(&buf->b_evict_lock);
3074         hdr = buf->b_hdr;
3075         if (hdr == NULL) {
3076                 /*
3077                  * We are in arc_do_user_evicts().
3078                  */


3397                         }
3398                 }
3399                 hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
3400                 /* if it's not anon, we are doing a scrub */
3401                 if (!exists && hdr->b_state == arc_anon)
3402                         arc_access(hdr, hash_lock);
3403                 mutex_exit(hash_lock);
3404         } else {
3405                 hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
3406         }
3407 
3408         ASSERT(!refcount_is_zero(&hdr->b_refcnt));
3409         callback->awcb_done(zio, buf, callback->awcb_private);
3410 
3411         kmem_free(callback, sizeof (arc_write_callback_t));
3412 }
3413 
3414 zio_t *
3415 arc_write(zio_t *pio, spa_t *spa, uint64_t txg,
3416     blkptr_t *bp, arc_buf_t *buf, boolean_t l2arc, const zio_prop_t *zp,
3417     arc_done_func_t *ready, arc_done_func_t *done, void *cb_private,
3418     int priority, int zio_flags, const zbookmark_t *zb)
3419 {
3420         arc_buf_hdr_t *hdr = buf->b_hdr;
3421         arc_write_callback_t *callback;
3422         zio_t *zio;
3423 
3424         ASSERT(ready != NULL);
3425         ASSERT(done != NULL);
3426         ASSERT(!HDR_IO_ERROR(hdr));
3427         ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0);
3428         ASSERT(hdr->b_acb == NULL);
3429         if (l2arc)
3430                 hdr->b_flags |= ARC_L2CACHE;
3431         callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP);
3432         callback->awcb_ready = ready;
3433         callback->awcb_done = done;
3434         callback->awcb_private = cb_private;
3435         callback->awcb_buf = buf;
3436 
3437         zio = zio_write(pio, spa, txg, bp, buf->b_data, hdr->b_size, zp,
3438             arc_write_ready, arc_write_done, callback, priority, zio_flags, zb);
3439 
3440         return (zio);
3441 }
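
arc_write() only assembles the write zio; the caller chooses how to run it.
A corresponding editorial sketch, with hypothetical names (example_write_ready,
example_write_done, example_write); both callbacks are required, per the
ASSERTs above:

/* Hypothetical ready callback: the buffer is about to be written. */
static void
example_write_ready(zio_t *zio, arc_buf_t *buf, void *cb_private)
{
}

/* Hypothetical done callback: zio->io_error carries the result. */
static void
example_write_done(zio_t *zio, arc_buf_t *buf, void *cb_private)
{
}

static int
example_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
    arc_buf_t *buf, const zio_prop_t *zp, const zbookmark_t *zb, void *tag)
{
        zio_t *wzio;

        wzio = arc_write(pio, spa, txg, bp, buf, B_FALSE /* no l2arc */, zp,
            example_write_ready, example_write_done, tag,
            ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_CANFAIL, zb);
        return (zio_wait(wzio));
}

Calling zio_nowait(wzio) instead would run the write asynchronously, matching
the ARC_NOWAIT path in arc_read().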
3442 
3443 static int
3444 arc_memory_throttle(uint64_t reserve, uint64_t inflight_data, uint64_t txg)
3445 {
3446 #ifdef _KERNEL
3447         uint64_t available_memory = ptob(freemem);
3448         static uint64_t page_load = 0;
3449         static uint64_t last_txg = 0;
3450 
3451 #if defined(__i386)
3452         available_memory =
3453             MIN(available_memory, vmem_size(heap_arena, VMEM_FREE));
3454 #endif