Print this page
3006 VERIFY[S,U,P] and ASSERT[S,U,P] frequently check if first argument is zero (fix: convert `ASSERT3U(x, ==, 0)` call sites to the new `ASSERT0(x)` form)


 993         arc_cksum_compute(buf, B_FALSE);
 994         mutex_exit(hash_lock);
 995 }
 996 
 997 static void
 998 add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
 999 {
1000         ASSERT(MUTEX_HELD(hash_lock));
1001 
1002         if ((refcount_add(&ab->b_refcnt, tag) == 1) &&
1003             (ab->b_state != arc_anon)) {
1004                 uint64_t delta = ab->b_size * ab->b_datacnt;
1005                 list_t *list = &ab->b_state->arcs_list[ab->b_type];
1006                 uint64_t *size = &ab->b_state->arcs_lsize[ab->b_type];
1007 
1008                 ASSERT(!MUTEX_HELD(&ab->b_state->arcs_mtx));
1009                 mutex_enter(&ab->b_state->arcs_mtx);
1010                 ASSERT(list_link_active(&ab->b_arc_node));
1011                 list_remove(list, ab);
1012                 if (GHOST_STATE(ab->b_state)) {
1013                         ASSERT3U(ab->b_datacnt, ==, 0);
1014                         ASSERT3P(ab->b_buf, ==, NULL);
1015                         delta = ab->b_size;
1016                 }
1017                 ASSERT(delta > 0);
1018                 ASSERT3U(*size, >=, delta);
1019                 atomic_add_64(size, -delta);
1020                 mutex_exit(&ab->b_state->arcs_mtx);
1021                 /* remove the prefetch flag if we get a reference */
1022                 if (ab->b_flags & ARC_PREFETCH)
1023                         ab->b_flags &= ~ARC_PREFETCH;
1024         }
1025 }
1026 
1027 static int
1028 remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
1029 {
1030         int cnt;
1031         arc_state_t *state = ab->b_state;
1032 
1033         ASSERT(state == arc_anon || MUTEX_HELD(hash_lock));


1620         mutex_enter(&evicted_state->arcs_mtx);
1621 
1622         for (ab = list_tail(list); ab; ab = ab_prev) {
1623                 ab_prev = list_prev(list, ab);
1624                 /* prefetch buffers have a minimum lifespan */
1625                 if (HDR_IO_IN_PROGRESS(ab) ||
1626                     (spa && ab->b_spa != spa) ||
1627                     (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) &&
1628                     ddi_get_lbolt() - ab->b_arc_access <
1629                     arc_min_prefetch_lifespan)) {
1630                         skipped++;
1631                         continue;
1632                 }
1633                 /* "lookahead" for better eviction candidate */
1634                 if (recycle && ab->b_size != bytes &&
1635                     ab_prev && ab_prev->b_size == bytes)
1636                         continue;
1637                 hash_lock = HDR_LOCK(ab);
1638                 have_lock = MUTEX_HELD(hash_lock);
1639                 if (have_lock || mutex_tryenter(hash_lock)) {
1640                         ASSERT3U(refcount_count(&ab->b_refcnt), ==, 0);
1641                         ASSERT(ab->b_datacnt > 0);
1642                         while (ab->b_buf) {
1643                                 arc_buf_t *buf = ab->b_buf;
1644                                 if (!mutex_tryenter(&buf->b_evict_lock)) {
1645                                         missed += 1;
1646                                         break;
1647                                 }
1648                                 if (buf->b_data) {
1649                                         bytes_evicted += ab->b_size;
1650                                         if (recycle && ab->b_type == type &&
1651                                             ab->b_size == bytes &&
1652                                             !HDR_L2_WRITING(ab)) {
1653                                                 stolen = buf->b_data;
1654                                                 recycle = FALSE;
1655                                         }
1656                                 }
1657                                 if (buf->b_efunc) {
1658                                         mutex_enter(&arc_eviction_mtx);
1659                                         arc_buf_destroy(buf,
1660                                             buf->b_data == stolen, FALSE);


2448                  */
2449                 if ((buf->b_flags & ARC_PREFETCH) != 0) {
2450                         ASSERT(refcount_count(&buf->b_refcnt) == 0);
2451                         ASSERT(list_link_active(&buf->b_arc_node));
2452                 }
2453                 ARCSTAT_BUMP(arcstat_mfu_hits);
2454                 buf->b_arc_access = ddi_get_lbolt();
2455         } else if (buf->b_state == arc_mfu_ghost) {
2456                 arc_state_t     *new_state = arc_mfu;
2457                 /*
2458                  * This buffer has been accessed more than once but has
2459                  * been evicted from the cache.  Move it back to the
2460                  * MFU state.
2461                  */
2462 
2463                 if (buf->b_flags & ARC_PREFETCH) {
2464                         /*
2465                          * This is a prefetch access...
2466                          * move this block back to the MRU state.
2467                          */
2468                         ASSERT3U(refcount_count(&buf->b_refcnt), ==, 0);
2469                         new_state = arc_mru;
2470                 }
2471 
2472                 buf->b_arc_access = ddi_get_lbolt();
2473                 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2474                 arc_change_state(new_state, buf, hash_lock);
2475 
2476                 ARCSTAT_BUMP(arcstat_mfu_ghost_hits);
2477         } else if (buf->b_state == arc_l2c_only) {
2478                 /*
2479                  * This buffer is on the 2nd Level ARC.
2480                  */
2481 
2482                 buf->b_arc_access = ddi_get_lbolt();
2483                 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2484                 arc_change_state(arc_mfu, buf, hash_lock);
2485         } else {
2486                 ASSERT(!"invalid arc state");
2487         }
2488 }


2789                                 /* somebody beat us to the hash insert */
2790                                 mutex_exit(hash_lock);
2791                                 buf_discard_identity(hdr);
2792                                 (void) arc_buf_remove_ref(buf, private);
2793                                 goto top; /* restart the IO request */
2794                         }
2795                         /* if this is a prefetch, we don't have a reference */
2796                         if (*arc_flags & ARC_PREFETCH) {
2797                                 (void) remove_reference(hdr, hash_lock,
2798                                     private);
2799                                 hdr->b_flags |= ARC_PREFETCH;
2800                         }
2801                         if (*arc_flags & ARC_L2CACHE)
2802                                 hdr->b_flags |= ARC_L2CACHE;
2803                         if (BP_GET_LEVEL(bp) > 0)
2804                                 hdr->b_flags |= ARC_INDIRECT;
2805                 } else {
2806                         /* this block is in the ghost cache */
2807                         ASSERT(GHOST_STATE(hdr->b_state));
2808                         ASSERT(!HDR_IO_IN_PROGRESS(hdr));
2809                         ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 0);
2810                         ASSERT(hdr->b_buf == NULL);
2811 
2812                         /* if this is a prefetch, we don't have a reference */
2813                         if (*arc_flags & ARC_PREFETCH)
2814                                 hdr->b_flags |= ARC_PREFETCH;
2815                         else
2816                                 add_reference(hdr, hash_lock, private);
2817                         if (*arc_flags & ARC_L2CACHE)
2818                                 hdr->b_flags |= ARC_L2CACHE;
2819                         buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
2820                         buf->b_hdr = hdr;
2821                         buf->b_data = NULL;
2822                         buf->b_efunc = NULL;
2823                         buf->b_private = NULL;
2824                         buf->b_next = NULL;
2825                         hdr->b_buf = buf;
2826                         ASSERT(hdr->b_datacnt == 0);
2827                         hdr->b_datacnt = 1;
2828                         arc_get_data_buf(buf);
2829                         arc_access(hdr, hash_lock);


4363                             zio_t *, wzio);
4364                         (void) zio_nowait(wzio);
4365 
4366                         /*
4367                          * Keep the clock hand suitably device-aligned.
4368                          */
4369                         buf_sz = vdev_psize_to_asize(dev->l2ad_vdev, buf_sz);
4370 
4371                         write_sz += buf_sz;
4372                         dev->l2ad_hand += buf_sz;
4373                 }
4374 
4375                 mutex_exit(list_lock);
4376 
4377                 if (full == B_TRUE)
4378                         break;
4379         }
4380         mutex_exit(&l2arc_buflist_mtx);
4381 
4382         if (pio == NULL) {
4383                 ASSERT3U(write_sz, ==, 0);
4384                 kmem_cache_free(hdr_cache, head);
4385                 return (0);
4386         }
4387 
4388         ASSERT3U(write_sz, <=, target_sz);
4389         ARCSTAT_BUMP(arcstat_l2_writes_sent);
4390         ARCSTAT_INCR(arcstat_l2_write_bytes, write_sz);
4391         ARCSTAT_INCR(arcstat_l2_size, write_sz);
4392         vdev_space_update(dev->l2ad_vdev, write_sz, 0, 0);
4393 
4394         /*
4395          * Bump device hand to the device start if it is approaching the end.
4396          * l2arc_evict() will already have evicted ahead for this case.
4397          */
4398         if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) {
4399                 vdev_space_update(dev->l2ad_vdev,
4400                     dev->l2ad_end - dev->l2ad_hand, 0, 0);
4401                 dev->l2ad_hand = dev->l2ad_start;
4402                 dev->l2ad_evict = dev->l2ad_start;
4403                 dev->l2ad_first = B_FALSE;


(Below: the same excerpts after the fix — `ASSERT3U(x, ==, 0)` call sites converted to `ASSERT0(x)`.)

 993         arc_cksum_compute(buf, B_FALSE);
 994         mutex_exit(hash_lock);
 995 }
 996 
 997 static void
 998 add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
 999 {
1000         ASSERT(MUTEX_HELD(hash_lock));
1001 
1002         if ((refcount_add(&ab->b_refcnt, tag) == 1) &&
1003             (ab->b_state != arc_anon)) {
1004                 uint64_t delta = ab->b_size * ab->b_datacnt;
1005                 list_t *list = &ab->b_state->arcs_list[ab->b_type];
1006                 uint64_t *size = &ab->b_state->arcs_lsize[ab->b_type];
1007 
1008                 ASSERT(!MUTEX_HELD(&ab->b_state->arcs_mtx));
1009                 mutex_enter(&ab->b_state->arcs_mtx);
1010                 ASSERT(list_link_active(&ab->b_arc_node));
1011                 list_remove(list, ab);
1012                 if (GHOST_STATE(ab->b_state)) {
1013                         ASSERT0(ab->b_datacnt);
1014                         ASSERT3P(ab->b_buf, ==, NULL);
1015                         delta = ab->b_size;
1016                 }
1017                 ASSERT(delta > 0);
1018                 ASSERT3U(*size, >=, delta);
1019                 atomic_add_64(size, -delta);
1020                 mutex_exit(&ab->b_state->arcs_mtx);
1021                 /* remove the prefetch flag if we get a reference */
1022                 if (ab->b_flags & ARC_PREFETCH)
1023                         ab->b_flags &= ~ARC_PREFETCH;
1024         }
1025 }
1026 
1027 static int
1028 remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
1029 {
1030         int cnt;
1031         arc_state_t *state = ab->b_state;
1032 
1033         ASSERT(state == arc_anon || MUTEX_HELD(hash_lock));


1620         mutex_enter(&evicted_state->arcs_mtx);
1621 
1622         for (ab = list_tail(list); ab; ab = ab_prev) {
1623                 ab_prev = list_prev(list, ab);
1624                 /* prefetch buffers have a minimum lifespan */
1625                 if (HDR_IO_IN_PROGRESS(ab) ||
1626                     (spa && ab->b_spa != spa) ||
1627                     (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) &&
1628                     ddi_get_lbolt() - ab->b_arc_access <
1629                     arc_min_prefetch_lifespan)) {
1630                         skipped++;
1631                         continue;
1632                 }
1633                 /* "lookahead" for better eviction candidate */
1634                 if (recycle && ab->b_size != bytes &&
1635                     ab_prev && ab_prev->b_size == bytes)
1636                         continue;
1637                 hash_lock = HDR_LOCK(ab);
1638                 have_lock = MUTEX_HELD(hash_lock);
1639                 if (have_lock || mutex_tryenter(hash_lock)) {
1640                         ASSERT0(refcount_count(&ab->b_refcnt));
1641                         ASSERT(ab->b_datacnt > 0);
1642                         while (ab->b_buf) {
1643                                 arc_buf_t *buf = ab->b_buf;
1644                                 if (!mutex_tryenter(&buf->b_evict_lock)) {
1645                                         missed += 1;
1646                                         break;
1647                                 }
1648                                 if (buf->b_data) {
1649                                         bytes_evicted += ab->b_size;
1650                                         if (recycle && ab->b_type == type &&
1651                                             ab->b_size == bytes &&
1652                                             !HDR_L2_WRITING(ab)) {
1653                                                 stolen = buf->b_data;
1654                                                 recycle = FALSE;
1655                                         }
1656                                 }
1657                                 if (buf->b_efunc) {
1658                                         mutex_enter(&arc_eviction_mtx);
1659                                         arc_buf_destroy(buf,
1660                                             buf->b_data == stolen, FALSE);


2448                  */
2449                 if ((buf->b_flags & ARC_PREFETCH) != 0) {
2450                         ASSERT(refcount_count(&buf->b_refcnt) == 0);
2451                         ASSERT(list_link_active(&buf->b_arc_node));
2452                 }
2453                 ARCSTAT_BUMP(arcstat_mfu_hits);
2454                 buf->b_arc_access = ddi_get_lbolt();
2455         } else if (buf->b_state == arc_mfu_ghost) {
2456                 arc_state_t     *new_state = arc_mfu;
2457                 /*
2458                  * This buffer has been accessed more than once but has
2459                  * been evicted from the cache.  Move it back to the
2460                  * MFU state.
2461                  */
2462 
2463                 if (buf->b_flags & ARC_PREFETCH) {
2464                         /*
2465                          * This is a prefetch access...
2466                          * move this block back to the MRU state.
2467                          */
2468                         ASSERT0(refcount_count(&buf->b_refcnt));
2469                         new_state = arc_mru;
2470                 }
2471 
2472                 buf->b_arc_access = ddi_get_lbolt();
2473                 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2474                 arc_change_state(new_state, buf, hash_lock);
2475 
2476                 ARCSTAT_BUMP(arcstat_mfu_ghost_hits);
2477         } else if (buf->b_state == arc_l2c_only) {
2478                 /*
2479                  * This buffer is on the 2nd Level ARC.
2480                  */
2481 
2482                 buf->b_arc_access = ddi_get_lbolt();
2483                 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2484                 arc_change_state(arc_mfu, buf, hash_lock);
2485         } else {
2486                 ASSERT(!"invalid arc state");
2487         }
2488 }


2789                                 /* somebody beat us to the hash insert */
2790                                 mutex_exit(hash_lock);
2791                                 buf_discard_identity(hdr);
2792                                 (void) arc_buf_remove_ref(buf, private);
2793                                 goto top; /* restart the IO request */
2794                         }
2795                         /* if this is a prefetch, we don't have a reference */
2796                         if (*arc_flags & ARC_PREFETCH) {
2797                                 (void) remove_reference(hdr, hash_lock,
2798                                     private);
2799                                 hdr->b_flags |= ARC_PREFETCH;
2800                         }
2801                         if (*arc_flags & ARC_L2CACHE)
2802                                 hdr->b_flags |= ARC_L2CACHE;
2803                         if (BP_GET_LEVEL(bp) > 0)
2804                                 hdr->b_flags |= ARC_INDIRECT;
2805                 } else {
2806                         /* this block is in the ghost cache */
2807                         ASSERT(GHOST_STATE(hdr->b_state));
2808                         ASSERT(!HDR_IO_IN_PROGRESS(hdr));
2809                         ASSERT0(refcount_count(&hdr->b_refcnt));
2810                         ASSERT(hdr->b_buf == NULL);
2811 
2812                         /* if this is a prefetch, we don't have a reference */
2813                         if (*arc_flags & ARC_PREFETCH)
2814                                 hdr->b_flags |= ARC_PREFETCH;
2815                         else
2816                                 add_reference(hdr, hash_lock, private);
2817                         if (*arc_flags & ARC_L2CACHE)
2818                                 hdr->b_flags |= ARC_L2CACHE;
2819                         buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
2820                         buf->b_hdr = hdr;
2821                         buf->b_data = NULL;
2822                         buf->b_efunc = NULL;
2823                         buf->b_private = NULL;
2824                         buf->b_next = NULL;
2825                         hdr->b_buf = buf;
2826                         ASSERT(hdr->b_datacnt == 0);
2827                         hdr->b_datacnt = 1;
2828                         arc_get_data_buf(buf);
2829                         arc_access(hdr, hash_lock);


4363                             zio_t *, wzio);
4364                         (void) zio_nowait(wzio);
4365 
4366                         /*
4367                          * Keep the clock hand suitably device-aligned.
4368                          */
4369                         buf_sz = vdev_psize_to_asize(dev->l2ad_vdev, buf_sz);
4370 
4371                         write_sz += buf_sz;
4372                         dev->l2ad_hand += buf_sz;
4373                 }
4374 
4375                 mutex_exit(list_lock);
4376 
4377                 if (full == B_TRUE)
4378                         break;
4379         }
4380         mutex_exit(&l2arc_buflist_mtx);
4381 
4382         if (pio == NULL) {
4383                 ASSERT0(write_sz);
4384                 kmem_cache_free(hdr_cache, head);
4385                 return (0);
4386         }
4387 
4388         ASSERT3U(write_sz, <=, target_sz);
4389         ARCSTAT_BUMP(arcstat_l2_writes_sent);
4390         ARCSTAT_INCR(arcstat_l2_write_bytes, write_sz);
4391         ARCSTAT_INCR(arcstat_l2_size, write_sz);
4392         vdev_space_update(dev->l2ad_vdev, write_sz, 0, 0);
4393 
4394         /*
4395          * Bump device hand to the device start if it is approaching the end.
4396          * l2arc_evict() will already have evicted ahead for this case.
4397          */
4398         if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) {
4399                 vdev_space_update(dev->l2ad_vdev,
4400                     dev->l2ad_end - dev->l2ad_hand, 0, 0);
4401                 dev->l2ad_hand = dev->l2ad_start;
4402                 dev->l2ad_evict = dev->l2ad_start;
4403                 dev->l2ad_first = B_FALSE;