9525 kmem_dump_size is a corrupting influence

          --- old/usr/src/uts/common/os/kmem.c
          +++ new/usr/src/uts/common/os/kmem.c
[ 2203 lines elided ]
2204 2204  
2205 2205  /* append to each buf created in the pre-reserved heap */
2206 2206  typedef struct kmem_dumpctl {
2207 2207          void    *kdc_next;      /* cache dump free list linkage */
2208 2208  } kmem_dumpctl_t;
2209 2209  
2210 2210  #define KMEM_DUMPCTL(cp, buf)   \
2211 2211          ((kmem_dumpctl_t *)P2ROUNDUP((uintptr_t)(buf) + (cp)->cache_bufsize, \
2212 2212              sizeof (void *)))
2213 2213  
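For reference, KMEM_DUMPCTL() only has to find the control word appended to each buffer carved from the pre-reserved heap: it rounds the end of the buffer up to pointer alignment. A minimal userland sketch of the same computation, using the usual P2ROUNDUP() from sys/sysmacros.h and a made-up buffer address and size:

#include <stdio.h>
#include <stdint.h>

#define	P2ROUNDUP(x, align)	(-(-(x) & -(align)))

int
main(void)
{
	uintptr_t buf = 0x1000;		/* hypothetical buffer address */
	size_t bufsize = 53;		/* hypothetical cache_bufsize */

	/* first pointer-aligned address past the end of the buffer */
	uintptr_t kdc = P2ROUNDUP(buf + bufsize, (uintptr_t)sizeof (void *));

	printf("buf ends at %p, dumpctl at %p\n",
	    (void *)(buf + bufsize), (void *)kdc);
	return (0);
}
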
2214      -/* Keep some simple stats. */
2215      -#define KMEM_DUMP_LOGS  (100)
2216      -
2217      -typedef struct kmem_dump_log {
2218      -        kmem_cache_t    *kdl_cache;
2219      -        uint_t          kdl_allocs;             /* # of dump allocations */
2220      -        uint_t          kdl_frees;              /* # of dump frees */
2221      -        uint_t          kdl_alloc_fails;        /* # of allocation failures */
2222      -        uint_t          kdl_free_nondump;       /* # of non-dump frees */
2223      -        uint_t          kdl_unsafe;             /* cache was used, but unsafe */
2224      -} kmem_dump_log_t;
2225      -
2226      -static kmem_dump_log_t *kmem_dump_log;
2227      -static int kmem_dump_log_idx;
2228      -
2229      -#define KDI_LOG(cp, stat) {                                             \
2230      -        kmem_dump_log_t *kdl;                                           \
2231      -        if ((kdl = (kmem_dump_log_t *)((cp)->cache_dumplog)) != NULL) { \
2232      -                kdl->stat++;                                            \
2233      -        } else if (kmem_dump_log_idx < KMEM_DUMP_LOGS) {                \
2234      -                kdl = &kmem_dump_log[kmem_dump_log_idx++];              \
2235      -                kdl->stat++;                                            \
2236      -                kdl->kdl_cache = (cp);                                  \
2237      -                (cp)->cache_dumplog = kdl;                              \
2238      -        }                                                               \
2239      -}
2240      -
2241 2214  /* set non zero for full report */
2242 2215  uint_t kmem_dump_verbose = 0;
2243 2216  
2244 2217  /* stats for oversize heap */
2245 2218  uint_t kmem_dump_oversize_allocs = 0;
2246 2219  uint_t kmem_dump_oversize_max = 0;
2247 2220  
2248 2221  static void
2249 2222  kmem_dumppr(char **pp, char *e, const char *format, ...)
2250 2223  {
[ 9 lines elided ]
2260 2233                  *pp = p + n;
2261 2234          }
2262 2235  }
2263 2236  
2264 2237  /*
2265 2238   * Called when dumpadm(1M) configures dump parameters.
2266 2239   */
2267 2240  void
2268 2241  kmem_dump_init(size_t size)
2269 2242  {
     2243 +        /* Our caller ensures size is always set. */
     2244 +        ASSERT3U(size, >, 0);
     2245 +
2270 2246          if (kmem_dump_start != NULL)
2271 2247                  kmem_free(kmem_dump_start, kmem_dump_size);
2272 2248  
2273      -        if (kmem_dump_log == NULL)
2274      -                kmem_dump_log = (kmem_dump_log_t *)kmem_zalloc(KMEM_DUMP_LOGS *
2275      -                    sizeof (kmem_dump_log_t), KM_SLEEP);
2276      -
2277 2249          kmem_dump_start = kmem_alloc(size, KM_SLEEP);
2278      -
2279      -        if (kmem_dump_start != NULL) {
2280      -                kmem_dump_size = size;
2281      -                kmem_dump_curr = kmem_dump_start;
2282      -                kmem_dump_end = (void *)((char *)kmem_dump_start + size);
2283      -                copy_pattern(KMEM_UNINITIALIZED_PATTERN, kmem_dump_start, size);
2284      -        } else {
2285      -                kmem_dump_size = 0;
2286      -                kmem_dump_curr = NULL;
2287      -                kmem_dump_end = NULL;
2288      -        }
     2250 +        kmem_dump_size = size;
     2251 +        kmem_dump_curr = kmem_dump_start;
     2252 +        kmem_dump_end = (void *)((char *)kmem_dump_start + size);
     2253 +        copy_pattern(KMEM_UNINITIALIZED_PATTERN, kmem_dump_start, size);
2289 2254  }
2290 2255  
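copy_pattern() above stamps the whole reserve with the kmem debug pattern so that untouched dump memory is recognizable later. A minimal sketch of that style of fill, assuming copy_pattern()'s usual word-at-a-time semantics (the real routine lives elsewhere in kmem.c):

#include <stdint.h>
#include <stddef.h>

/*
 * Fill buf with a repeating 64-bit debug pattern; assumes size is a
 * multiple of sizeof (uint64_t), as the dump reserve is.
 */
static void
fill_pattern(uint64_t pattern, void *buf, size_t size)
{
	uint64_t *p = buf;
	uint64_t *end = (uint64_t *)((char *)buf + size);

	while (p < end)
		*p++ = pattern;
}
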
2291 2256  /*
2292 2257   * Set flag for each kmem_cache_t if it is safe to use alternate dump
2293 2258   * memory. Called just before panic crash dump starts. Set the flag
2294 2259   * for the calling CPU.
2295 2260   */
2296 2261  void
2297 2262  kmem_dump_begin(void)
2298 2263  {
     2264 +        kmem_cache_t *cp;
     2265 +
2299 2266          ASSERT(panicstr != NULL);
2300      -        if (kmem_dump_start != NULL) {
2301      -                kmem_cache_t *cp;
2302 2267  
2303      -                for (cp = list_head(&kmem_caches); cp != NULL;
2304      -                    cp = list_next(&kmem_caches, cp)) {
2305      -                        kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
     2268 +        for (cp = list_head(&kmem_caches); cp != NULL;
     2269 +            cp = list_next(&kmem_caches, cp)) {
     2270 +                kmem_cpu_cache_t *ccp = KMEM_CPU_CACHE(cp);
2306 2271  
2307      -                        if (cp->cache_arena->vm_cflags & VMC_DUMPSAFE) {
2308      -                                cp->cache_flags |= KMF_DUMPDIVERT;
2309      -                                ccp->cc_flags |= KMF_DUMPDIVERT;
2310      -                                ccp->cc_dump_rounds = ccp->cc_rounds;
2311      -                                ccp->cc_dump_prounds = ccp->cc_prounds;
2312      -                                ccp->cc_rounds = ccp->cc_prounds = -1;
2313      -                        } else {
2314      -                                cp->cache_flags |= KMF_DUMPUNSAFE;
2315      -                                ccp->cc_flags |= KMF_DUMPUNSAFE;
2316      -                        }
     2272 +                if (cp->cache_arena->vm_cflags & VMC_DUMPSAFE) {
     2273 +                        cp->cache_flags |= KMF_DUMPDIVERT;
     2274 +                        ccp->cc_flags |= KMF_DUMPDIVERT;
     2275 +                        ccp->cc_dump_rounds = ccp->cc_rounds;
     2276 +                        ccp->cc_dump_prounds = ccp->cc_prounds;
     2277 +                        ccp->cc_rounds = ccp->cc_prounds = -1;
     2278 +                } else {
     2279 +                        cp->cache_flags |= KMF_DUMPUNSAFE;
     2280 +                        ccp->cc_flags |= KMF_DUMPUNSAFE;
2317 2281                  }
2318 2282          }
2319 2283  }
2320 2284  
2321 2285  /*
2322 2286   * finished dump intercept
2323 2287   * print any warnings on the console
2324 2288   * return verbose information to dumpsys() in the given buffer
2325 2289   */
2326 2290  size_t
2327 2291  kmem_dump_finish(char *buf, size_t size)
2328 2292  {
2329      -        int kdi_idx;
2330      -        int kdi_end = kmem_dump_log_idx;
2331 2293          int percent = 0;
2332      -        int header = 0;
2333      -        int warn = 0;
2334 2294          size_t used;
2335      -        kmem_cache_t *cp;
2336      -        kmem_dump_log_t *kdl;
2337 2295          char *e = buf + size;
2338 2296          char *p = buf;
2339 2297  
2340      -        if (kmem_dump_size == 0 || kmem_dump_verbose == 0)
     2298 +        if (kmem_dump_curr == kmem_dump_end) {
     2299 +                cmn_err(CE_WARN, "exceeded kmem_dump space of %lu "
     2300 +                    "bytes: kmem state in dump may be inconsistent",
     2301 +                    kmem_dump_size);
     2302 +        }
     2303 +
     2304 +        if (kmem_dump_verbose == 0)
2341 2305                  return (0);
2342 2306  
2343 2307          used = (char *)kmem_dump_curr - (char *)kmem_dump_start;
2344 2308          percent = (used * 100) / kmem_dump_size;
2345 2309  
2346 2310          kmem_dumppr(&p, e, "%% heap used,%d\n", percent);
2347 2311          kmem_dumppr(&p, e, "used bytes,%ld\n", used);
2348 2312          kmem_dumppr(&p, e, "heap size,%ld\n", kmem_dump_size);
2349 2313          kmem_dumppr(&p, e, "Oversize allocs,%d\n",
2350 2314              kmem_dump_oversize_allocs);
2351 2315          kmem_dumppr(&p, e, "Oversize max size,%ld\n",
2352 2316              kmem_dump_oversize_max);
2353 2317  
2354      -        for (kdi_idx = 0; kdi_idx < kdi_end; kdi_idx++) {
2355      -                kdl = &kmem_dump_log[kdi_idx];
2356      -                cp = kdl->kdl_cache;
2357      -                if (cp == NULL)
2358      -                        break;
2359      -                if (kdl->kdl_alloc_fails)
2360      -                        ++warn;
2361      -                if (header == 0) {
2362      -                        kmem_dumppr(&p, e,
2363      -                            "Cache Name,Allocs,Frees,Alloc Fails,"
2364      -                            "Nondump Frees,Unsafe Allocs/Frees\n");
2365      -                        header = 1;
2366      -                }
2367      -                kmem_dumppr(&p, e, "%s,%d,%d,%d,%d,%d\n",
2368      -                    cp->cache_name, kdl->kdl_allocs, kdl->kdl_frees,
2369      -                    kdl->kdl_alloc_fails, kdl->kdl_free_nondump,
2370      -                    kdl->kdl_unsafe);
2371      -        }
2372      -
2373 2318          /* return buffer size used */
2374 2319          if (p < e)
2375 2320                  bzero(p, e - p);
2376 2321          return (p - buf);
2377 2322  }
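
When kmem_dump_verbose is set, the kmem_dumppr() calls above hand dumpsys() a small comma-separated summary; with the per-cache log table gone, that is all the report contains. Illustratively (numbers made up), the returned buffer would read:

	% heap used,37
	used bytes,1179648
	heap size,3145728
	Oversize allocs,0
	Oversize max size,0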
2378 2323  
2379 2324  /*
2380 2325   * Allocate a constructed object from alternate dump memory.
2381 2326   */
2382 2327  void *
2383 2328  kmem_cache_alloc_dump(kmem_cache_t *cp, int kmflag)
2384 2329  {
2385 2330          void *buf;
2386 2331          void *curr;
2387 2332          char *bufend;
2388 2333  
2389 2334          /* return a constructed object */
2390      -        if ((buf = cp->cache_dumpfreelist) != NULL) {
2391      -                cp->cache_dumpfreelist = KMEM_DUMPCTL(cp, buf)->kdc_next;
2392      -                KDI_LOG(cp, kdl_allocs);
     2335 +        if ((buf = cp->cache_dump.kd_freelist) != NULL) {
     2336 +                cp->cache_dump.kd_freelist = KMEM_DUMPCTL(cp, buf)->kdc_next;
2393 2337                  return (buf);
2394 2338          }
2395 2339  
2396 2340          /* create a new constructed object */
2397 2341          curr = kmem_dump_curr;
2398 2342          buf = (void *)P2ROUNDUP((uintptr_t)curr, cp->cache_align);
2399 2343          bufend = (char *)KMEM_DUMPCTL(cp, buf) + sizeof (kmem_dumpctl_t);
2400 2344  
2401 2345          /* hat layer objects cannot cross a page boundary */
2402 2346          if (cp->cache_align < PAGESIZE) {
2403 2347                  char *page = (char *)P2ROUNDUP((uintptr_t)buf, PAGESIZE);
2404 2348                  if (bufend > page) {
2405 2349                          bufend += page - (char *)buf;
2406 2350                          buf = (void *)page;
2407 2351                  }
2408 2352          }
2409 2353  
2410 2354          /* fall back to normal alloc if reserved area is used up */
2411 2355          if (bufend > (char *)kmem_dump_end) {
2412 2356                  kmem_dump_curr = kmem_dump_end;
2413      -                KDI_LOG(cp, kdl_alloc_fails);
     2357 +                cp->cache_dump.kd_alloc_fails++;
2414 2358                  return (NULL);
2415 2359          }
2416 2360  
2417 2361          /*
2418 2362           * Must advance curr pointer before calling a constructor that
2419 2363           * may also allocate memory.
2420 2364           */
2421 2365          kmem_dump_curr = bufend;
2422 2366  
2423 2367          /* run constructor */
[ 1 line elided ]
2425 2369              cp->cache_constructor(buf, cp->cache_private, kmflag)
2426 2370              != 0) {
2427 2371  #ifdef DEBUG
2428 2372                  printf("name='%s' cache=0x%p: kmem cache constructor failed\n",
2429 2373                      cp->cache_name, (void *)cp);
2430 2374  #endif
2431 2375                  /* reset curr pointer iff no allocs were done */
2432 2376                  if (kmem_dump_curr == bufend)
2433 2377                          kmem_dump_curr = curr;
2434 2378  
     2379 +                cp->cache_dump.kd_alloc_fails++;
2435 2380                  /* fall back to normal alloc if the constructor fails */
2436      -                KDI_LOG(cp, kdl_alloc_fails);
2437 2381                  return (NULL);
2438 2382          }
2439 2383  
2440      -        KDI_LOG(cp, kdl_allocs);
2441 2384          return (buf);
2442 2385  }
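
The allocation path above is essentially a bump allocator over the reserved region: align the cursor for the cache, leave room for the trailing kmem_dumpctl_t, and give up (falling back to the normal allocator) if the carve would run past the end. A stripped-down sketch of that carve, omitting the page-boundary fixup and the constructor call (names are illustrative):

#include <stdint.h>
#include <stddef.h>

#define	P2ROUNDUP(x, align)	(-(-(x) & -(align)))

static char *dump_curr, *dump_end;	/* cursor and limit of the reserve */

static void *
dump_carve(size_t bufsize, size_t align, size_t ctlsize)
{
	char *buf = (char *)P2ROUNDUP((uintptr_t)dump_curr, (uintptr_t)align);
	char *bufend = (char *)P2ROUNDUP((uintptr_t)buf + bufsize,
	    (uintptr_t)sizeof (void *)) + ctlsize;

	if (bufend > dump_end)
		return (NULL);	/* reserve exhausted; caller falls back */

	/* advance before the constructor runs, since it may allocate too */
	dump_curr = bufend;
	return (buf);
}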
2443 2386  
2444 2387  /*
2445 2388   * Free a constructed object in alternate dump memory.
2446 2389   */
2447 2390  int
2448 2391  kmem_cache_free_dump(kmem_cache_t *cp, void *buf)
2449 2392  {
2450 2393          /* save constructed buffers for next time */
2451 2394          if ((char *)buf >= (char *)kmem_dump_start &&
2452 2395              (char *)buf < (char *)kmem_dump_end) {
2453      -                KMEM_DUMPCTL(cp, buf)->kdc_next = cp->cache_dumpfreelist;
2454      -                cp->cache_dumpfreelist = buf;
2455      -                KDI_LOG(cp, kdl_frees);
     2396 +                KMEM_DUMPCTL(cp, buf)->kdc_next = cp->cache_dump.kd_freelist;
     2397 +                cp->cache_dump.kd_freelist = buf;
2456 2398                  return (0);
2457 2399          }
2458 2400  
2459      -        /* count all non-dump buf frees */
2460      -        KDI_LOG(cp, kdl_free_nondump);
2461      -
2462 2401          /* just drop buffers that were allocated before dump started */
2463 2402          if (kmem_dump_curr < kmem_dump_end)
2464 2403                  return (0);
2465 2404  
2466 2405          /* fall back to normal free if reserved area is used up */
2467 2406          return (1);
2468 2407  }
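
Dump-heap frees just push the constructed buffer onto the per-cache freelist (cache_dump.kd_freelist) threaded through the appended control word, and kmem_cache_alloc_dump() pops it from there on the next request. A minimal sketch of that push/pop pair, where kdc_for() is an illustrative stand-in for KMEM_DUMPCTL():

#include <stdint.h>
#include <stddef.h>

#define	P2ROUNDUP(x, align)	(-(-(x) & -(align)))

typedef struct dumpctl {
	void	*kdc_next;	/* freelist linkage, stored after the buffer */
} dumpctl_t;

static void *freelist;

/* illustrative stand-in for KMEM_DUMPCTL(cp, buf) */
static dumpctl_t *
kdc_for(void *buf, size_t bufsize)
{
	return ((dumpctl_t *)P2ROUNDUP((uintptr_t)buf + bufsize,
	    (uintptr_t)sizeof (void *)));
}

static void
dump_free(void *buf, size_t bufsize)
{
	kdc_for(buf, bufsize)->kdc_next = freelist;	/* push */
	freelist = buf;
}

static void *
dump_alloc(size_t bufsize)
{
	void *buf = freelist;

	if (buf != NULL)
		freelist = kdc_for(buf, bufsize)->kdc_next;	/* pop */
	return (buf);
}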
2469 2408  
2470 2409  /*
2471 2410   * Allocate a constructed object from cache cp.
[ 12 lines elided ]
2484 2423                   * loaded magazine, just take it and return.
2485 2424                   */
2486 2425                  if (ccp->cc_rounds > 0) {
2487 2426                          buf = ccp->cc_loaded->mag_round[--ccp->cc_rounds];
2488 2427                          ccp->cc_alloc++;
2489 2428                          mutex_exit(&ccp->cc_lock);
2490 2429                          if (ccp->cc_flags & (KMF_BUFTAG | KMF_DUMPUNSAFE)) {
2491 2430                                  if (ccp->cc_flags & KMF_DUMPUNSAFE) {
2492 2431                                          ASSERT(!(ccp->cc_flags &
2493 2432                                              KMF_DUMPDIVERT));
2494      -                                        KDI_LOG(cp, kdl_unsafe);
     2433 +                                        cp->cache_dump.kd_unsafe++;
2495 2434                                  }
2496 2435                                  if ((ccp->cc_flags & KMF_BUFTAG) &&
2497 2436                                      kmem_cache_alloc_debug(cp, buf, kmflag, 0,
2498 2437                                      caller()) != 0) {
2499 2438                                          if (kmflag & KM_NOSLEEP)
2500 2439                                                  return (NULL);
2501 2440                                          mutex_enter(&ccp->cc_lock);
2502 2441                                          continue;
2503 2442                                  }
2504 2443                          }
[ 10 lines elided ]
2515 2454                  }
2516 2455  
2517 2456                  /*
2518 2457                   * Return an alternate buffer at dump time to preserve
2519 2458                   * the heap.
2520 2459                   */
2521 2460                  if (ccp->cc_flags & (KMF_DUMPDIVERT | KMF_DUMPUNSAFE)) {
2522 2461                          if (ccp->cc_flags & KMF_DUMPUNSAFE) {
2523 2462                                  ASSERT(!(ccp->cc_flags & KMF_DUMPDIVERT));
2524 2463                                  /* log it so that we can warn about it */
2525      -                                KDI_LOG(cp, kdl_unsafe);
     2464 +                                cp->cache_dump.kd_unsafe++;
2526 2465                          } else {
2527 2466                                  if ((buf = kmem_cache_alloc_dump(cp, kmflag)) !=
2528 2467                                      NULL) {
2529 2468                                          mutex_exit(&ccp->cc_lock);
2530 2469                                          return (buf);
2531 2470                                  }
2532 2471                                  break;          /* fall back to slab layer */
2533 2472                          }
2534 2473                  }
2535 2474  
[ 175 lines elided ]
2711 2650           */
2712 2651          ASSERT(cp->cache_defrag == NULL ||
2713 2652              cp->cache_defrag->kmd_thread != curthread ||
2714 2653              (buf != cp->cache_defrag->kmd_from_buf &&
2715 2654              buf != cp->cache_defrag->kmd_to_buf));
2716 2655  
2717 2656          if (ccp->cc_flags & (KMF_BUFTAG | KMF_DUMPDIVERT | KMF_DUMPUNSAFE)) {
2718 2657                  if (ccp->cc_flags & KMF_DUMPUNSAFE) {
2719 2658                          ASSERT(!(ccp->cc_flags & KMF_DUMPDIVERT));
2720 2659                          /* log it so that we can warn about it */
2721      -                        KDI_LOG(cp, kdl_unsafe);
     2660 +                        cp->cache_dump.kd_unsafe++;
2722 2661                  } else if (KMEM_DUMPCC(ccp) && !kmem_cache_free_dump(cp, buf)) {
2723 2662                          return;
2724 2663                  }
2725 2664                  if (ccp->cc_flags & KMF_BUFTAG) {
2726 2665                          if (kmem_cache_free_debug(cp, buf, caller()) == -1)
2727 2666                                  return;
2728 2667                  }
2729 2668          }
2730 2669  
2731 2670          mutex_enter(&ccp->cc_lock);
[ 2707 lines elided ]