Print this page
9525 kmem_dump_size is a corrupting influence


2194  * changing kmem state while memory is being saved to the dump device.
2195  * Otherwise, ::kmem_verify will report "corrupt buffers".  Note that
2196  * there are no locks because only one CPU calls kmem during a crash
2197  * dump. To enable this feature, first create the associated vmem
2198  * arena with VMC_DUMPSAFE.
2199  */
/* State for the pre-reserved crash-dump heap described above. */
static void *kmem_dump_start;   /* start of pre-reserved heap */
static void *kmem_dump_end;     /* end of heap area */
static void *kmem_dump_curr;    /* current free heap pointer */
static size_t kmem_dump_size;   /* size of heap area */
2204 
2205 /* append to each buf created in the pre-reserved heap */
/*
 * Control word appended to each buf created in the pre-reserved heap;
 * it links constructed buffers on a per-cache dump free list.
 */
typedef struct kmem_dumpctl {
        void    *kdc_next;      /* cache dump free list linkage */
} kmem_dumpctl_t;
2209 
/*
 * Locate the kmem_dumpctl_t that trails a buffer: it sits immediately
 * after the object's cache_bufsize bytes, rounded up to pointer alignment.
 */
#define KMEM_DUMPCTL(cp, buf)   \
        ((kmem_dumpctl_t *)P2ROUNDUP((uintptr_t)(buf) + (cp)->cache_bufsize, \
            sizeof (void *)))
2213 
/* Keep some simple stats; at most this many caches are tracked. */
#define KMEM_DUMP_LOGS  (100)
2216 
/*
 * Per-cache statistics gathered while allocations are diverted to the
 * dump heap; reported by kmem_dump_finish().
 */
typedef struct kmem_dump_log {
        kmem_cache_t    *kdl_cache;             /* cache this slot tracks */
        uint_t          kdl_allocs;             /* # of dump allocations */
        uint_t          kdl_frees;              /* # of dump frees */
        uint_t          kdl_alloc_fails;        /* # of allocation failures */
        uint_t          kdl_free_nondump;       /* # of non-dump frees */
        uint_t          kdl_unsafe;             /* cache was used, but unsafe */
} kmem_dump_log_t;
2225 
static kmem_dump_log_t *kmem_dump_log;  /* array of KMEM_DUMP_LOGS slots */
static int kmem_dump_log_idx;           /* next unclaimed slot */
2228 
/*
 * Bump dump statistic 'stat' for cache cp.  The first time a cache is
 * seen, a slot in kmem_dump_log[] is claimed for it and cached in
 * cp->cache_dumplog; once KMEM_DUMP_LOGS caches have been seen, further
 * caches are silently untracked.  No locking: only one CPU calls kmem
 * during a crash dump.
 *
 * Wrapped in do { } while (0) so that "KDI_LOG(cp, s);" is a single
 * statement and composes safely with unbraced if/else (the original
 * bare-brace form would leave a stray ';' after the block).
 */
#define KDI_LOG(cp, stat) do {                                          \
        kmem_dump_log_t *kdl;                                           \
        if ((kdl = (kmem_dump_log_t *)((cp)->cache_dumplog)) != NULL) { \
                kdl->stat++;                                            \
        } else if (kmem_dump_log_idx < KMEM_DUMP_LOGS) {                \
                kdl = &kmem_dump_log[kmem_dump_log_idx++];              \
                kdl->stat++;                                            \
                kdl->kdl_cache = (cp);                                  \
                (cp)->cache_dumplog = kdl;                              \
        }                                                               \
} while (0)
2240 
/* set non zero for full report */
uint_t kmem_dump_verbose = 0;

/* stats for oversize heap allocations */
uint_t kmem_dump_oversize_allocs = 0;   /* # of oversize allocs during dump */
uint_t kmem_dump_oversize_max = 0;      /* largest oversize alloc seen */
2247 
/*
 * vsnprintf() into the window between *pp and e, advancing *pp by the
 * length the full output would require.  Once *pp reaches or passes e,
 * further calls become no-ops, so a sequence of kmem_dumppr() calls
 * degrades gracefully when the caller's buffer fills up.
 */
static void
kmem_dumppr(char **pp, char *e, const char *format, ...)
{
        char *cursor = *pp;
        va_list args;
        int written;

        if (cursor >= e)
                return;

        va_start(args, format);
        written = vsnprintf(cursor, e - cursor, format, args);
        va_end(args);
        *pp = cursor + written;
}
2263 
/*
 * Called when dumpadm(1M) configures dump parameters.
 * Reserves a heap of 'size' bytes that kmem will carve allocations
 * from during a crash dump, so the main heap is not mutated mid-dump.
 */
void
kmem_dump_init(size_t size)
{
        /* Re-configuration: release any previously reserved heap. */
        if (kmem_dump_start != NULL)
                kmem_free(kmem_dump_start, kmem_dump_size);

        /* The stats array is allocated once and persists. */
        if (kmem_dump_log == NULL)
                kmem_dump_log = (kmem_dump_log_t *)kmem_zalloc(KMEM_DUMP_LOGS *
                    sizeof (kmem_dump_log_t), KM_SLEEP);

        kmem_dump_start = kmem_alloc(size, KM_SLEEP);

        if (kmem_dump_start != NULL) {
                kmem_dump_size = size;
                kmem_dump_curr = kmem_dump_start;
                kmem_dump_end = (void *)((char *)kmem_dump_start + size);
                /* Pre-fill so the area looks untouched to ::kmem_verify. */
                copy_pattern(KMEM_UNINITIALIZED_PATTERN, kmem_dump_start, size);
        } else {
                /*
                 * NOTE(review): kmem_alloc(..., KM_SLEEP) cannot fail for a
                 * non-zero size, so this branch is presumably reachable only
                 * when size == 0 (feature disabled) -- confirm with caller.
                 */
                kmem_dump_size = 0;
                kmem_dump_curr = NULL;
                kmem_dump_end = NULL;
        }
}
2290 
/*
 * Set flag for each kmem_cache_t if is safe to use alternate dump
 * memory. Called just before panic crash dump starts. Set the flag
 * for the calling CPU.
 */
void
kmem_dump_begin(void)
{
        ASSERT(panicstr != NULL);
        /* Only divert if kmem_dump_init() actually reserved a heap. */
        if (kmem_dump_start != NULL) {
                kmem_cache_t *cp;

                for (cp = list_head(&kmem_caches); cp != NULL;
                    cp = list_next(&kmem_caches, cp)) {
                        kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);

                        if (cp->cache_arena->vm_cflags & VMC_DUMPSAFE) {
                                cp->cache_flags |= KMF_DUMPDIVERT;
                                ccp->cc_flags |= KMF_DUMPDIVERT;
                                /*
                                 * Stash the magazine round counts and set
                                 * them to -1, which disables the magazine
                                 * fast path and forces allocations through
                                 * the dump-divert code in kmem_cache_alloc().
                                 */
                                ccp->cc_dump_rounds = ccp->cc_rounds;
                                ccp->cc_dump_prounds = ccp->cc_prounds;
                                ccp->cc_rounds = ccp->cc_prounds = -1;
                        } else {
                                /* Using this cache will taint the dump. */
                                cp->cache_flags |= KMF_DUMPUNSAFE;
                                ccp->cc_flags |= KMF_DUMPUNSAFE;
                        }
                }
        }
}
2320 
/*
 * finished dump intercept
 * print any warnings on the console
 * return verbose information to dumpsys() in the given buffer
 *
 * Returns the number of bytes of 'buf' used (0 when reporting is off).
 */
size_t
kmem_dump_finish(char *buf, size_t size)
{
        int kdi_idx;
        int kdi_end = kmem_dump_log_idx;        /* snapshot of slots in use */
        int percent = 0;
        int header = 0;                         /* CSV header emitted yet? */
        int warn = 0;
        size_t used;
        kmem_cache_t *cp;
        kmem_dump_log_t *kdl;
        char *e = buf + size;
        char *p = buf;

        if (kmem_dump_size == 0 || kmem_dump_verbose == 0)
                return (0);

        /* Summarize dump-heap usage in CSV form for dumpsys(). */
        used = (char *)kmem_dump_curr - (char *)kmem_dump_start;
        percent = (used * 100) / kmem_dump_size;

        kmem_dumppr(&p, e, "%% heap used,%d\n", percent);
        kmem_dumppr(&p, e, "used bytes,%ld\n", used);
        kmem_dumppr(&p, e, "heap size,%ld\n", kmem_dump_size);
        kmem_dumppr(&p, e, "Oversize allocs,%d\n",
            kmem_dump_oversize_allocs);
        kmem_dumppr(&p, e, "Oversize max size,%ld\n",
            kmem_dump_oversize_max);

        /* One CSV row per cache that touched the dump heap. */
        for (kdi_idx = 0; kdi_idx < kdi_end; kdi_idx++) {
                kdl = &kmem_dump_log[kdi_idx];
                cp = kdl->kdl_cache;
                if (cp == NULL)
                        break;
                /*
                 * NOTE(review): 'warn' is counted but not consulted in this
                 * function -- presumably for a debugger or future use.
                 */
                if (kdl->kdl_alloc_fails)
                        ++warn;
                if (header == 0) {
                        kmem_dumppr(&p, e,
                            "Cache Name,Allocs,Frees,Alloc Fails,"
                            "Nondump Frees,Unsafe Allocs/Frees\n");
                        header = 1;
                }
                kmem_dumppr(&p, e, "%s,%d,%d,%d,%d,%d\n",
                    cp->cache_name, kdl->kdl_allocs, kdl->kdl_frees,
                    kdl->kdl_alloc_fails, kdl->kdl_free_nondump,
                    kdl->kdl_unsafe);
        }

        /* return buffer size used */
        if (p < e)
                bzero(p, e - p);
        return (p - buf);
}
2378 
/*
 * Allocate a constructed object from alternate dump memory.
 * Returns NULL if the reserved area is exhausted or the constructor
 * fails; the caller then falls back to the normal allocation path.
 */
void *
kmem_cache_alloc_dump(kmem_cache_t *cp, int kmflag)
{
        void *buf;
        void *curr;
        char *bufend;

        /* return a constructed object from the per-cache dump free list */
        if ((buf = cp->cache_dumpfreelist) != NULL) {
                cp->cache_dumpfreelist = KMEM_DUMPCTL(cp, buf)->kdc_next;
                KDI_LOG(cp, kdl_allocs);
                return (buf);
        }

        /* create a new constructed object by bumping kmem_dump_curr */
        curr = kmem_dump_curr;
        buf = (void *)P2ROUNDUP((uintptr_t)curr, cp->cache_align);
        /* object data plus its trailing kmem_dumpctl_t */
        bufend = (char *)KMEM_DUMPCTL(cp, buf) + sizeof (kmem_dumpctl_t);

        /* hat layer objects cannot cross a page boundary */
        if (cp->cache_align < PAGESIZE) {
                char *page = (char *)P2ROUNDUP((uintptr_t)buf, PAGESIZE);
                if (bufend > page) {
                        /* straddles a page: shift object up to the boundary */
                        bufend += page - (char *)buf;
                        buf = (void *)page;
                }
        }

        /* fall back to normal alloc if reserved area is used up */
        if (bufend > (char *)kmem_dump_end) {
                kmem_dump_curr = kmem_dump_end;
                KDI_LOG(cp, kdl_alloc_fails);
                return (NULL);
        }

        /*
         * Must advance curr pointer before calling a constructor that
         * may also allocate memory.
         */
        kmem_dump_curr = bufend;

        /* run constructor */
        if (cp->cache_constructor != NULL &&
            cp->cache_constructor(buf, cp->cache_private, kmflag)
            != 0) {
#ifdef DEBUG
                printf("name='%s' cache=0x%p: kmem cache constructor failed\n",
                    cp->cache_name, (void *)cp);
#endif
                /*
                 * reset curr pointer iff no allocs were done: if the
                 * constructor itself allocated from the dump heap we
                 * cannot rewind past its allocations.
                 */
                if (kmem_dump_curr == bufend)
                        kmem_dump_curr = curr;

                /* fall back to normal alloc if the constructor fails */
                KDI_LOG(cp, kdl_alloc_fails);
                return (NULL);
        }

        KDI_LOG(cp, kdl_allocs);
        return (buf);
}
2443 
/*
 * Free a constructed object in alternate dump memory.
 * Returns 0 if the free was handled here, 1 if the caller must fall
 * back to the normal free path.
 */
int
kmem_cache_free_dump(kmem_cache_t *cp, void *buf)
{
        /* save constructed buffers for next time */
        if ((char *)buf >= (char *)kmem_dump_start &&
            (char *)buf < (char *)kmem_dump_end) {
                KMEM_DUMPCTL(cp, buf)->kdc_next = cp->cache_dumpfreelist;
                cp->cache_dumpfreelist = buf;
                KDI_LOG(cp, kdl_frees);
                return (0);
        }

        /* count all non-dump buf frees */
        KDI_LOG(cp, kdl_free_nondump);

        /* just drop buffers that were allocated before dump started */
        if (kmem_dump_curr < kmem_dump_end)
                return (0);

        /* fall back to normal free if reserved area is used up */
        return (1);
}
2469 
2470 /*
2471  * Allocate a constructed object from cache cp.
2472  */
2473 void *
2474 kmem_cache_alloc(kmem_cache_t *cp, int kmflag)
2475 {
2476         kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
2477         kmem_magazine_t *fmp;
2478         void *buf;
2479 
2480         mutex_enter(&ccp->cc_lock);
2481         for (;;) {
2482                 /*
2483                  * If there's an object available in the current CPU's
2484                  * loaded magazine, just take it and return.
2485                  */
2486                 if (ccp->cc_rounds > 0) {
2487                         buf = ccp->cc_loaded->mag_round[--ccp->cc_rounds];
2488                         ccp->cc_alloc++;
2489                         mutex_exit(&ccp->cc_lock);
2490                         if (ccp->cc_flags & (KMF_BUFTAG | KMF_DUMPUNSAFE)) {
2491                                 if (ccp->cc_flags & KMF_DUMPUNSAFE) {
2492                                         ASSERT(!(ccp->cc_flags &
2493                                             KMF_DUMPDIVERT));
2494                                         KDI_LOG(cp, kdl_unsafe);
2495                                 }
2496                                 if ((ccp->cc_flags & KMF_BUFTAG) &&
2497                                     kmem_cache_alloc_debug(cp, buf, kmflag, 0,
2498                                     caller()) != 0) {
2499                                         if (kmflag & KM_NOSLEEP)
2500                                                 return (NULL);
2501                                         mutex_enter(&ccp->cc_lock);
2502                                         continue;
2503                                 }
2504                         }
2505                         return (buf);
2506                 }
2507 
2508                 /*
2509                  * The loaded magazine is empty.  If the previously loaded
2510                  * magazine was full, exchange them and try again.
2511                  */
2512                 if (ccp->cc_prounds > 0) {
2513                         kmem_cpu_reload(ccp, ccp->cc_ploaded, ccp->cc_prounds);
2514                         continue;
2515                 }
2516 
2517                 /*
2518                  * Return an alternate buffer at dump time to preserve
2519                  * the heap.
2520                  */
2521                 if (ccp->cc_flags & (KMF_DUMPDIVERT | KMF_DUMPUNSAFE)) {
2522                         if (ccp->cc_flags & KMF_DUMPUNSAFE) {
2523                                 ASSERT(!(ccp->cc_flags & KMF_DUMPDIVERT));
2524                                 /* log it so that we can warn about it */
2525                                 KDI_LOG(cp, kdl_unsafe);
2526                         } else {
2527                                 if ((buf = kmem_cache_alloc_dump(cp, kmflag)) !=
2528                                     NULL) {
2529                                         mutex_exit(&ccp->cc_lock);
2530                                         return (buf);
2531                                 }
2532                                 break;          /* fall back to slab layer */
2533                         }
2534                 }
2535 
2536                 /*
2537                  * If the magazine layer is disabled, break out now.
2538                  */
2539                 if (ccp->cc_magsize == 0)
2540                         break;
2541 
2542                 /*
2543                  * Try to get a full magazine from the depot.
2544                  */
2545                 fmp = kmem_depot_alloc(cp, &cp->cache_full);


2701  * Free a constructed object to cache cp.
2702  */
2703 void
2704 kmem_cache_free(kmem_cache_t *cp, void *buf)
2705 {
2706         kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
2707 
2708         /*
2709          * The client must not free either of the buffers passed to the move
2710          * callback function.
2711          */
2712         ASSERT(cp->cache_defrag == NULL ||
2713             cp->cache_defrag->kmd_thread != curthread ||
2714             (buf != cp->cache_defrag->kmd_from_buf &&
2715             buf != cp->cache_defrag->kmd_to_buf));
2716 
2717         if (ccp->cc_flags & (KMF_BUFTAG | KMF_DUMPDIVERT | KMF_DUMPUNSAFE)) {
2718                 if (ccp->cc_flags & KMF_DUMPUNSAFE) {
2719                         ASSERT(!(ccp->cc_flags & KMF_DUMPDIVERT));
2720                         /* log it so that we can warn about it */
2721                         KDI_LOG(cp, kdl_unsafe);
2722                 } else if (KMEM_DUMPCC(ccp) && !kmem_cache_free_dump(cp, buf)) {
2723                         return;
2724                 }
2725                 if (ccp->cc_flags & KMF_BUFTAG) {
2726                         if (kmem_cache_free_debug(cp, buf, caller()) == -1)
2727                                 return;
2728                 }
2729         }
2730 
2731         mutex_enter(&ccp->cc_lock);
2732         /*
2733          * Any changes to this logic should be reflected in kmem_slab_prefill()
2734          */
2735         for (;;) {
2736                 /*
2737                  * If there's a slot available in the current CPU's
2738                  * loaded magazine, just put the object there and return.
2739                  */
2740                 if ((uint_t)ccp->cc_rounds < ccp->cc_magsize) {
2741                         ccp->cc_loaded->mag_round[ccp->cc_rounds++] = buf;




2194  * changing kmem state while memory is being saved to the dump device.
2195  * Otherwise, ::kmem_verify will report "corrupt buffers".  Note that
2196  * there are no locks because only one CPU calls kmem during a crash
2197  * dump. To enable this feature, first create the associated vmem
2198  * arena with VMC_DUMPSAFE.
2199  */
/* State for the pre-reserved crash-dump heap described above. */
static void *kmem_dump_start;   /* start of pre-reserved heap */
static void *kmem_dump_end;     /* end of heap area */
static void *kmem_dump_curr;    /* current free heap pointer */
static size_t kmem_dump_size;   /* size of heap area */
2204 
2205 /* append to each buf created in the pre-reserved heap */
/*
 * Control word appended to each buf created in the pre-reserved heap;
 * it links constructed buffers on a per-cache dump free list.
 */
typedef struct kmem_dumpctl {
        void    *kdc_next;      /* cache dump free list linkage */
} kmem_dumpctl_t;
2209 
/*
 * Locate the kmem_dumpctl_t that trails a buffer: it sits immediately
 * after the object's cache_bufsize bytes, rounded up to pointer alignment.
 */
#define KMEM_DUMPCTL(cp, buf)   \
        ((kmem_dumpctl_t *)P2ROUNDUP((uintptr_t)(buf) + (cp)->cache_bufsize, \
            sizeof (void *)))
2213 



























/* set non zero for full report */
uint_t kmem_dump_verbose = 0;

/* stats for oversize heap allocations */
uint_t kmem_dump_oversize_allocs = 0;   /* # of oversize allocs during dump */
uint_t kmem_dump_oversize_max = 0;      /* largest oversize alloc seen */
2220 
/*
 * Append formatted text at *pp, never writing at or beyond e.  *pp is
 * advanced by the length vsnprintf() reports (the length the complete
 * output would need), so once the window is exhausted subsequent calls
 * do nothing.
 */
static void
kmem_dumppr(char **pp, char *e, const char *format, ...)
{
        char *out = *pp;

        if (out < e) {
                va_list ap;
                int len;

                va_start(ap, format);
                len = vsnprintf(out, e - out, format, ap);
                va_end(ap);
                *pp = out + len;
        }
}
2236 
/*
 * Called when dumpadm(1M) configures dump parameters.
 * Reserves a heap of 'size' bytes that kmem will carve allocations
 * from during a crash dump, so the main heap is not mutated mid-dump.
 */
void
kmem_dump_init(size_t size)
{
        /* Our caller ensures size is always set. */
        ASSERT3U(size, >, 0);

        /* Re-configuration: release any previously reserved heap. */
        if (kmem_dump_start != NULL)
                kmem_free(kmem_dump_start, kmem_dump_size);

        /* KM_SLEEP with size > 0 cannot fail, so no NULL check is needed. */
        kmem_dump_start = kmem_alloc(size, KM_SLEEP);

        kmem_dump_size = size;
        kmem_dump_curr = kmem_dump_start;
        kmem_dump_end = (void *)((char *)kmem_dump_start + size);
        /* Pre-fill so the area looks untouched to ::kmem_verify. */
        copy_pattern(KMEM_UNINITIALIZED_PATTERN, kmem_dump_start, size);
}
2255 
/*
 * Set flag for each kmem_cache_t if is safe to use alternate dump
 * memory. Called just before panic crash dump starts. Set the flag
 * for the calling CPU.
 */
void
kmem_dump_begin(void)
{
        kmem_cache_t *cp;

        ASSERT(panicstr != NULL);

        for (cp = list_head(&kmem_caches); cp != NULL;
            cp = list_next(&kmem_caches, cp)) {
                kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);

                if (cp->cache_arena->vm_cflags & VMC_DUMPSAFE) {
                        cp->cache_flags |= KMF_DUMPDIVERT;
                        ccp->cc_flags |= KMF_DUMPDIVERT;
                        /*
                         * Stash the magazine round counts and set them to
                         * -1, disabling the magazine fast path so that
                         * allocations take the dump-divert code in
                         * kmem_cache_alloc().
                         */
                        ccp->cc_dump_rounds = ccp->cc_rounds;
                        ccp->cc_dump_prounds = ccp->cc_prounds;
                        ccp->cc_rounds = ccp->cc_prounds = -1;
                } else {
                        /* Using this cache will taint the dump. */
                        cp->cache_flags |= KMF_DUMPUNSAFE;
                        ccp->cc_flags |= KMF_DUMPUNSAFE;
                }
        }
}
2284 
/*
 * finished dump intercept
 * print any warnings on the console
 * return verbose information to dumpsys() in the given buffer
 *
 * Returns the number of bytes of 'buf' used (0 when reporting is off).
 */
size_t
kmem_dump_finish(char *buf, size_t size)
{
        int percent = 0;
        size_t used;
        char *e = buf + size;
        char *p = buf;

        /*
         * kmem_cache_alloc_dump() pins curr to end when the reserve runs
         * out, so equality means at least one allocation was diverted
         * back to the (now inconsistent) regular heap.
         */
        if (kmem_dump_curr == kmem_dump_end) {
                cmn_err(CE_WARN, "exceeded kmem_dump space of %lu "
                    "bytes: kmem state in dump may be inconsistent",
                    kmem_dump_size);
        }

        if (kmem_dump_verbose == 0)
                return (0);

        /* Summarize dump-heap usage in CSV form for dumpsys(). */
        used = (char *)kmem_dump_curr - (char *)kmem_dump_start;
        percent = (used * 100) / kmem_dump_size;

        kmem_dumppr(&p, e, "%% heap used,%d\n", percent);
        kmem_dumppr(&p, e, "used bytes,%ld\n", used);
        kmem_dumppr(&p, e, "heap size,%ld\n", kmem_dump_size);
        kmem_dumppr(&p, e, "Oversize allocs,%d\n",
            kmem_dump_oversize_allocs);
        kmem_dumppr(&p, e, "Oversize max size,%ld\n",
            kmem_dump_oversize_max);

        /* return buffer size used */
        if (p < e)
                bzero(p, e - p);
        return (p - buf);
}
2323 
/*
 * Allocate a constructed object from alternate dump memory.
 * Returns NULL if the reserved area is exhausted or the constructor
 * fails; the caller then falls back to the normal allocation path.
 */
void *
kmem_cache_alloc_dump(kmem_cache_t *cp, int kmflag)
{
        void *buf;
        void *curr;
        char *bufend;

        /* return a constructed object from the per-cache dump free list */
        if ((buf = cp->cache_dump.kd_freelist) != NULL) {
                cp->cache_dump.kd_freelist = KMEM_DUMPCTL(cp, buf)->kdc_next;
                return (buf);
        }

        /* create a new constructed object by bumping kmem_dump_curr */
        curr = kmem_dump_curr;
        buf = (void *)P2ROUNDUP((uintptr_t)curr, cp->cache_align);
        /* object data plus its trailing kmem_dumpctl_t */
        bufend = (char *)KMEM_DUMPCTL(cp, buf) + sizeof (kmem_dumpctl_t);

        /* hat layer objects cannot cross a page boundary */
        if (cp->cache_align < PAGESIZE) {
                char *page = (char *)P2ROUNDUP((uintptr_t)buf, PAGESIZE);
                if (bufend > page) {
                        /* straddles a page: shift object up to the boundary */
                        bufend += page - (char *)buf;
                        buf = (void *)page;
                }
        }

        /* fall back to normal alloc if reserved area is used up */
        if (bufend > (char *)kmem_dump_end) {
                /* pin curr to end so kmem_dump_finish() can warn */
                kmem_dump_curr = kmem_dump_end;
                cp->cache_dump.kd_alloc_fails++;
                return (NULL);
        }

        /*
         * Must advance curr pointer before calling a constructor that
         * may also allocate memory.
         */
        kmem_dump_curr = bufend;

        /* run constructor */
        if (cp->cache_constructor != NULL &&
            cp->cache_constructor(buf, cp->cache_private, kmflag)
            != 0) {
#ifdef DEBUG
                printf("name='%s' cache=0x%p: kmem cache constructor failed\n",
                    cp->cache_name, (void *)cp);
#endif
                /*
                 * reset curr pointer iff no allocs were done: if the
                 * constructor itself allocated from the dump heap we
                 * cannot rewind past its allocations.
                 */
                if (kmem_dump_curr == bufend)
                        kmem_dump_curr = curr;

                cp->cache_dump.kd_alloc_fails++;
                /* fall back to normal alloc if the constructor fails */
                return (NULL);
        }

        return (buf);
}
2386 
2387 /*
2388  * Free a constructed object in alternate dump memory.
2389  */
2390 int
2391 kmem_cache_free_dump(kmem_cache_t *cp, void *buf)
2392 {
2393         /* save constructed buffers for next time */
2394         if ((char *)buf >= (char *)kmem_dump_start &&
2395             (char *)buf < (char *)kmem_dump_end) {
2396                 KMEM_DUMPCTL(cp, buf)->kdc_next = cp->cache_dump.kd_freelist;
2397                 cp->cache_dump.kd_freelist = buf;

2398                 return (0);
2399         }
2400 



2401         /* just drop buffers that were allocated before dump started */
2402         if (kmem_dump_curr < kmem_dump_end)
2403                 return (0);
2404 
2405         /* fall back to normal free if reserved area is used up */
2406         return (1);
2407 }
2408 
2409 /*
2410  * Allocate a constructed object from cache cp.
2411  */
2412 void *
2413 kmem_cache_alloc(kmem_cache_t *cp, int kmflag)
2414 {
2415         kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
2416         kmem_magazine_t *fmp;
2417         void *buf;
2418 
2419         mutex_enter(&ccp->cc_lock);
2420         for (;;) {
2421                 /*
2422                  * If there's an object available in the current CPU's
2423                  * loaded magazine, just take it and return.
2424                  */
2425                 if (ccp->cc_rounds > 0) {
2426                         buf = ccp->cc_loaded->mag_round[--ccp->cc_rounds];
2427                         ccp->cc_alloc++;
2428                         mutex_exit(&ccp->cc_lock);
2429                         if (ccp->cc_flags & (KMF_BUFTAG | KMF_DUMPUNSAFE)) {
2430                                 if (ccp->cc_flags & KMF_DUMPUNSAFE) {
2431                                         ASSERT(!(ccp->cc_flags &
2432                                             KMF_DUMPDIVERT));
2433                                         cp->cache_dump.kd_unsafe++;
2434                                 }
2435                                 if ((ccp->cc_flags & KMF_BUFTAG) &&
2436                                     kmem_cache_alloc_debug(cp, buf, kmflag, 0,
2437                                     caller()) != 0) {
2438                                         if (kmflag & KM_NOSLEEP)
2439                                                 return (NULL);
2440                                         mutex_enter(&ccp->cc_lock);
2441                                         continue;
2442                                 }
2443                         }
2444                         return (buf);
2445                 }
2446 
2447                 /*
2448                  * The loaded magazine is empty.  If the previously loaded
2449                  * magazine was full, exchange them and try again.
2450                  */
2451                 if (ccp->cc_prounds > 0) {
2452                         kmem_cpu_reload(ccp, ccp->cc_ploaded, ccp->cc_prounds);
2453                         continue;
2454                 }
2455 
2456                 /*
2457                  * Return an alternate buffer at dump time to preserve
2458                  * the heap.
2459                  */
2460                 if (ccp->cc_flags & (KMF_DUMPDIVERT | KMF_DUMPUNSAFE)) {
2461                         if (ccp->cc_flags & KMF_DUMPUNSAFE) {
2462                                 ASSERT(!(ccp->cc_flags & KMF_DUMPDIVERT));
2463                                 /* log it so that we can warn about it */
2464                                 cp->cache_dump.kd_unsafe++;
2465                         } else {
2466                                 if ((buf = kmem_cache_alloc_dump(cp, kmflag)) !=
2467                                     NULL) {
2468                                         mutex_exit(&ccp->cc_lock);
2469                                         return (buf);
2470                                 }
2471                                 break;          /* fall back to slab layer */
2472                         }
2473                 }
2474 
2475                 /*
2476                  * If the magazine layer is disabled, break out now.
2477                  */
2478                 if (ccp->cc_magsize == 0)
2479                         break;
2480 
2481                 /*
2482                  * Try to get a full magazine from the depot.
2483                  */
2484                 fmp = kmem_depot_alloc(cp, &cp->cache_full);


2640  * Free a constructed object to cache cp.
2641  */
2642 void
2643 kmem_cache_free(kmem_cache_t *cp, void *buf)
2644 {
2645         kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
2646 
2647         /*
2648          * The client must not free either of the buffers passed to the move
2649          * callback function.
2650          */
2651         ASSERT(cp->cache_defrag == NULL ||
2652             cp->cache_defrag->kmd_thread != curthread ||
2653             (buf != cp->cache_defrag->kmd_from_buf &&
2654             buf != cp->cache_defrag->kmd_to_buf));
2655 
2656         if (ccp->cc_flags & (KMF_BUFTAG | KMF_DUMPDIVERT | KMF_DUMPUNSAFE)) {
2657                 if (ccp->cc_flags & KMF_DUMPUNSAFE) {
2658                         ASSERT(!(ccp->cc_flags & KMF_DUMPDIVERT));
2659                         /* log it so that we can warn about it */
2660                         cp->cache_dump.kd_unsafe++;
2661                 } else if (KMEM_DUMPCC(ccp) && !kmem_cache_free_dump(cp, buf)) {
2662                         return;
2663                 }
2664                 if (ccp->cc_flags & KMF_BUFTAG) {
2665                         if (kmem_cache_free_debug(cp, buf, caller()) == -1)
2666                                 return;
2667                 }
2668         }
2669 
2670         mutex_enter(&ccp->cc_lock);
2671         /*
2672          * Any changes to this logic should be reflected in kmem_slab_prefill()
2673          */
2674         for (;;) {
2675                 /*
2676                  * If there's a slot available in the current CPU's
2677                  * loaded magazine, just put the object there and return.
2678                  */
2679                 if ((uint_t)ccp->cc_rounds < ccp->cc_magsize) {
2680                         ccp->cc_loaded->mag_round[ccp->cc_rounds++] = buf;