10093 kmem_log_enter() dereferences pointer before NULL check


kmem_log_enter() initializes the per-CPU log header pointer, clhp, from lhp at its
declaration, before the lhp == NULL guard runs, so a NULL log header is dereferenced
before the check can reject it. The excerpt below shows the code before the fix (the
tail of the function that constructs the log header is included for context):

        for (i = 0; i < max_ncpus; i++) {
                kmem_cpu_log_header_t *clhp = &lhp->lh_cpu[i];
                mutex_init(&clhp->clh_lock, NULL, MUTEX_DEFAULT, NULL);
                clhp->clh_chunk = i;
        }

        for (i = max_ncpus; i < nchunks; i++)
                lhp->lh_free[i] = i;

        lhp->lh_head = max_ncpus;
        lhp->lh_tail = 0;

        return (lhp);
}

static void *
kmem_log_enter(kmem_log_header_t *lhp, void *data, size_t size)
{
        void *logspace;
        kmem_cpu_log_header_t *clhp = &lhp->lh_cpu[CPU->cpu_seqid];

        if (lhp == NULL || kmem_logging == 0 || panicstr)
                return (NULL);

        mutex_enter(&clhp->clh_lock);
        clhp->clh_hits++;
        if (size > clhp->clh_avail) {
                mutex_enter(&lhp->lh_lock);
                lhp->lh_hits++;
                lhp->lh_free[lhp->lh_tail] = clhp->clh_chunk;
                lhp->lh_tail = (lhp->lh_tail + 1) % lhp->lh_nchunks;
                clhp->clh_chunk = lhp->lh_free[lhp->lh_head];
                lhp->lh_head = (lhp->lh_head + 1) % lhp->lh_nchunks;
                clhp->clh_current = lhp->lh_base +
                    clhp->clh_chunk * lhp->lh_chunksize;
                clhp->clh_avail = lhp->lh_chunksize;
                if (size > lhp->lh_chunksize)
                        size = lhp->lh_chunksize;
                mutex_exit(&lhp->lh_lock);
        }
        logspace = clhp->clh_current;
        clhp->clh_current += size;
        clhp->clh_avail -= size;
        bcopy(data, logspace, size);

With the fix, clhp is declared without an initializer and is assigned from lhp only
after the NULL check has run (the unchanged surrounding context above is omitted):

static void *
kmem_log_enter(kmem_log_header_t *lhp, void *data, size_t size)
{
        void *logspace;
        kmem_cpu_log_header_t *clhp;

        if (lhp == NULL || kmem_logging == 0 || panicstr)
                return (NULL);

        clhp = &lhp->lh_cpu[CPU->cpu_seqid];

        mutex_enter(&clhp->clh_lock);
        clhp->clh_hits++;
        if (size > clhp->clh_avail) {
                mutex_enter(&lhp->lh_lock);
                lhp->lh_hits++;
                lhp->lh_free[lhp->lh_tail] = clhp->clh_chunk;
                lhp->lh_tail = (lhp->lh_tail + 1) % lhp->lh_nchunks;
                clhp->clh_chunk = lhp->lh_free[lhp->lh_head];
                lhp->lh_head = (lhp->lh_head + 1) % lhp->lh_nchunks;
                clhp->clh_current = lhp->lh_base +
                    clhp->clh_chunk * lhp->lh_chunksize;
                clhp->clh_avail = lhp->lh_chunksize;
                if (size > lhp->lh_chunksize)
                        size = lhp->lh_chunksize;
                mutex_exit(&lhp->lh_lock);
        }
        logspace = clhp->clh_current;
        clhp->clh_current += size;
        clhp->clh_avail -= size;
        bcopy(data, logspace, size);
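
The same hazard, reduced to a self-contained sketch (the struct and function names
here are hypothetical and not part of kmem.c): deriving a member address from a NULL
pointer is undefined behavior in C, and an optimizing compiler may then assume the
pointer is non-NULL and discard the later check. Moving the derivation below the
guard, as the fix above does, removes that hazard.

#include <stddef.h>

struct logger {
        int     lg_slots[8];
};

/* Buggy shape: the member address is derived from lp before the NULL check. */
static int *
slot_buggy(struct logger *lp, int idx)
{
        int *slot = &lp->lg_slots[idx]; /* lp is used here ... */

        if (lp == NULL)                 /* ... so this check comes too late */
                return (NULL);

        return (slot);
}

/* Fixed shape: reject a NULL lp first, then derive the address from it. */
static int *
slot_fixed(struct logger *lp, int idx)
{
        if (lp == NULL)
                return (NULL);

        return (&lp->lg_slots[idx]);
}

int
main(void)
{
        struct logger lg = { { 0 } };

        /* Both shapes return the same slot for a non-NULL logger. */
        return (slot_fixed(&lg, 3) == slot_buggy(&lg, 3) ? 0 : 1);
}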