4922 all calloc() implementations should check for overflow
--- old/usr/src/lib/libmalloc/common/malloc.c
+++ new/usr/src/lib/libmalloc/common/malloc.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27 /* Copyright (c) 1988 AT&T */
28 28 /* All Rights Reserved */
29 29
30 -#pragma ident "%Z%%M% %I% %E% SMI"
31 -
32 30 #include <sys/types.h>
33 31
34 32 #ifndef debug
35 33 #define NDEBUG
36 34 #endif
37 35
38 36 #include <stdlib.h>
39 37 #include <string.h>
38 +#include <errno.h>
40 39 #include "assert.h"
41 40 #include "malloc.h"
42 41 #include "mallint.h"
43 42 #include <thread.h>
44 43 #include <pthread.h>
45 44 #include <synch.h>
46 45 #include <unistd.h>
47 46 #include <limits.h>
48 47
49 48 static mutex_t mlock = DEFAULTMUTEX;
50 49 static ssize_t freespace(struct holdblk *);
51 50 static void *malloc_unlocked(size_t, int);
52 51 static void *realloc_unlocked(void *, size_t);
53 52 static void free_unlocked(void *);
54 53 static void *morecore(size_t);
55 54
56 55 /*
57 56 * user-level memory allocator (malloc, free, realloc)
58 57 *
59 58 * -malloc, free, realloc and mallopt form a memory allocator
60 59 * similar to the original malloc, free, and realloc. The routines
61 60 * here are much faster than the original, with slightly worse
62 61 * space usage (a few percent difference on most input). They
63 62 * do not have the property that data in freed blocks is left
64 63 * untouched until the space is reallocated.
65 64 *
66 65 * -Memory is kept in the "arena", a singly linked list of blocks.
67 66 * These blocks are of 3 types.
68 67 * 1. A free block. This is a block not in use by the
69 68 * user. It has a 3 word header. (See description
70 69 * of the free queue.)
71 70 * 2. An allocated block. This is a block the user has
72 71 * requested. It has only a 1 word header, pointing
73 72 * to the next block of any sort.
74 73 * 3. A permanently allocated block. This covers space
75 74 * acquired by the user directly through sbrk(). It
76 75 * has a 1 word header, as does 2.
77 76 * Blocks of type 1 have the lower bit of the pointer to the
78 77 * nextblock = 0. Blocks of type 2 and 3 have that bit set,
79 78 * to mark them busy.
80 79 *
81 80 * -Unallocated blocks are kept on an unsorted doubly linked
82 81 * free list.
83 82 *
84 83 * -Memory is allocated in blocks, with sizes specified by the
85 84 * user. A circular first-fit strategy is used, with a roving
86 85 * head of the free queue, which prevents bunching of small
87 86 * blocks at the head of the queue.
88 87 *
89 88 * -Compaction is performed at free time on any blocks immediately
90 89 * following the freed block. The freed block will be combined
91 90 * with a preceding block during the search phase of malloc.
92 91 * Since a freed block is added at the front of the free queue,
93 92 * which is moved to the end of the queue if considered and
94 93 * rejected during the search, fragmentation only occurs if
95 94 * a block with a contiguous preceding block that is free is
96 95 * freed and reallocated on the next call to malloc. The
97 96 * time savings of this strategy is judged to be worth the
98 97 * occasional waste of memory.
99 98 *
100 99 * -Small blocks (of size < MAXSIZE) are not allocated directly.
101 100 * A large "holding" block is allocated via a recursive call to
102 101 * malloc. This block contains a header and numlblks small blocks.
103 102 * Holding blocks for a given size of small block (rounded to the
104 103 * nearest ALIGNSZ bytes) are kept on a queue with the property that any
105 104 * holding block with an unused small block is in front of any without.
106 105 * A list of free blocks is kept within the holding block.
107 106 */
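
The busy bit described above is classic pointer tagging: every block header is at least word aligned, so the low bit of nextblk is always zero and can be borrowed as a flag. Below is a minimal sketch of the idea; the names with an _X suffix are hypothetical stand-ins, not the real SETBUSY/CLRBUSY/TESTBUSY macros from mallint.h, which may differ in detail.

	#include <stdint.h>

	struct hdr_x {
		struct hdr_x *nextblk;	/* low bit doubles as the busy flag */
	};

	/* illustrative stand-ins for the mallint.h tagging macros */
	#define	BUSY_X		((uintptr_t)1)
	#define	SETBUSY_X(p)	((struct hdr_x *)((uintptr_t)(p) | BUSY_X))
	#define	CLRBUSY_X(p)	((struct hdr_x *)((uintptr_t)(p) & ~BUSY_X))
	#define	TESTBUSY_X(p)	(((uintptr_t)(p) & BUSY_X) != 0)
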
108 107
109 108 /*
110 109 * description of arena, free queue, holding blocks etc.
111 110 *
112 111 * The new compiler and linker do not guarantee the order of initialized data.
113 112 * Define freeptr as arena[2-3] to guarantee it follows arena in memory.
114 113 * Later code depends on this order.
115 114 */
116 115
117 116 static struct header arena[4] = {
118 117 {0, 0, 0},
119 118 {0, 0, 0},
120 119 {0, 0, 0},
121 120 {0, 0, 0}
122 121 };
123 122 /*
124 123 * the second word is a minimal block to
125 124 * start the arena. The first is a busy
126 125 * block to be pointed to by the last block.
127 126 */
128 127
129 128 #define freeptr (arena + 2)
130 129 /* first and last entry in free list */
131 130 static struct header *arenaend; /* ptr to block marking high end of arena */
132 131 static struct header *lastblk; /* the highest block in the arena */
133 132 static struct holdblk **holdhead; /* pointer to array of head pointers */
134 133 /* to holding block chains */
135 134 /*
136 135 * In order to save time calculating indices, the array is 1 too
137 136 * large, and the first element is unused
138 137 *
139 138 * Variables controlling algorithm, esp. how holding blocks are used
140 139 */
141 140 static int numlblks = NUMLBLKS;
142 141 static int minhead = MINHEAD;
143 142 static int change = 0; /* != 0 once param changes are no longer allowed */
144 143 static int fastct = FASTCT;
145 144 static unsigned int maxfast = MAXFAST;
146 145 /* number of small block sizes to map to one size */
147 146
148 147 static int grain = ALIGNSZ;
149 148
150 149 #ifdef debug
151 150 static int case1count = 0;
152 151
153 152 static void
154 153 checkq(void)
155 154 {
156 155 register struct header *p;
157 156
158 157 p = &freeptr[0];
159 158
160 159 /* check forward */
161 160 /*CSTYLED*/
162 161 while (p != &freeptr[1]) {
163 162 p = p->nextfree;
164 163 assert(p->prevfree->nextfree == p);
165 164 }
166 165
167 166 /* check backward */
168 167 /*CSTYLED*/
169 168 while (p != &freeptr[0]) {
170 169 p = p->prevfree;
171 170 assert(p->nextfree->prevfree == p);
172 171 }
173 172 }
174 173 #endif
175 174
176 175
177 176 /*
178 177 * malloc(nbytes) - give a user nbytes to use
179 178 */
180 179
181 180 void *
182 181 malloc(size_t nbytes)
183 182 {
184 183 void *ret;
185 184
186 185 (void) mutex_lock(&mlock);
187 186 ret = malloc_unlocked(nbytes, 0);
188 187 (void) mutex_unlock(&mlock);
189 188 return (ret);
190 189 }
191 190
192 191 /*
193 192 * Use malloc_unlocked() to get the address to start with; given this
194 193 * address, find the closest address that satisfies the requested
195 194 * alignment, and return that address after doing some housekeeping
196 195 * (refer to the ASCII art below).
197 196 */
198 197 void *
199 198 memalign(size_t alignment, size_t size)
200 199 {
201 200 void *alloc_buf;
202 201 struct header *hd;
203 202 size_t alloc_size;
204 203 uintptr_t fr;
205 204 static int realloc;
206 205
207 206 if (size == 0 || alignment == 0 ||
208 207 (alignment & (alignment - 1)) != 0) {
209 208 return (NULL);
210 209 }
211 210 if (alignment <= ALIGNSZ)
212 211 return (malloc(size));
213 212
214 213 alloc_size = size + alignment;
215 214 if (alloc_size < size) { /* overflow */
216 215 return (NULL);
217 216 }
218 217
219 218 (void) mutex_lock(&mlock);
220 219 alloc_buf = malloc_unlocked(alloc_size, 1);
221 220 (void) mutex_unlock(&mlock);
222 221
223 222 if (alloc_buf == NULL)
224 223 return (NULL);
225 224 fr = (uintptr_t)alloc_buf;
226 225
227 226 fr = (fr + alignment - 1) / alignment * alignment;
228 227
229 228 if (fr == (uintptr_t)alloc_buf)
230 229 return (alloc_buf);
231 230
232 231 if ((fr - (uintptr_t)alloc_buf) <= HEADSZ) {
233 232 /*
234 233 * we hit an edge case where the space ahead of the aligned
235 234 * address is not sufficient to hold a header, and hence we
236 235 * can't free it. So double the allocation request.
237 236 */
238 237 realloc++;
239 238 free(alloc_buf);
240 239 alloc_size = size + alignment*2;
241 240 if (alloc_size < size) {
242 241 return (NULL);
243 242 }
244 243
245 244 (void) mutex_lock(&mlock);
246 245 alloc_buf = malloc_unlocked(alloc_size, 1);
247 246 (void) mutex_unlock(&mlock);
248 247
249 248 if (alloc_buf == NULL)
250 249 return (NULL);
251 250 fr = (uintptr_t)alloc_buf;
252 251
253 252 fr = (fr + alignment - 1) / alignment * alignment;
254 253 if (fr == (uintptr_t)alloc_buf)
255 254 return (alloc_buf);
256 255 if ((fr - (uintptr_t)alloc_buf) <= HEADSZ) {
257 256 fr = fr + alignment;
258 257 }
259 258 }
260 259
261 260 /*
262 261 * +-------+ +-------+
263 262 * +---| <a> | | <a> |--+
264 263 * | +-------+<--alloc_buf-->+-------+ |
265 264 * | | | | | |
266 265 * | | | | | |
267 266 * | | | | | |
268 267 * | | | hd--> +-------+ |
269 268 * | | | +---| <b> |<-+
270 269 * | | | | +-------+<--- fr
271 270 * | | | | | |
272 271 * | | | | | |
273 272 * | | | | | |
274 273 * | | | | | |
275 274 * | | | | | |
276 275 * | | | | | |
277 276 * | +-------+ | +-------+
278 277 * +-->| next | +-->| next |
279 278 * +-------+ +-------+
280 279 *
281 280 */
282 281 hd = (struct header *)((char *)fr - minhead);
283 282 (void) mutex_lock(&mlock);
284 283 hd->nextblk = ((struct header *)((char *)alloc_buf - minhead))->nextblk;
285 284 ((struct header *)((char *)alloc_buf - minhead))->nextblk = SETBUSY(hd);
286 285 (void) mutex_unlock(&mlock);
287 286 free(alloc_buf);
288 287 CHECKQ
289 288 return ((void *)fr);
290 289 }
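
The alignment logic above boils down to rounding the malloc_unlocked() result up to the next multiple of alignment and splicing a header in front of the aligned address so the slop can be freed. A standalone sketch of just the rounding step, under a hypothetical helper name:

	#include <stdint.h>

	/* round addr up to the next multiple of align; align must be > 0 */
	static uintptr_t
	round_up_x(uintptr_t addr, uintptr_t align)
	{
		return ((addr + align - 1) / align * align);
	}

	/* e.g. round_up_x(0x1005, 0x10) == 0x1010; 0x1010 stays 0x1010 */
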
291 290
292 291 void *
293 292 valloc(size_t size)
294 293 {
295 294 static unsigned pagesize;
296 295 if (size == 0)
297 296 return (NULL);
298 297
299 298 if (!pagesize)
300 299 pagesize = sysconf(_SC_PAGESIZE);
301 300
302 301 return (memalign(pagesize, size));
303 302 }
304 303
305 304 /*
306 305 * malloc_unlocked(nbytes, nosmall) - Do the real work for malloc
307 306 */
308 307
309 308 static void *
310 309 malloc_unlocked(size_t nbytes, int nosmall)
311 310 {
312 311 struct header *blk;
313 312 size_t nb; /* size of entire block we need */
314 313
315 314 /* on first call, initialize */
316 315 if (freeptr[0].nextfree == GROUND) {
317 316 /* initialize arena */
318 317 arena[1].nextblk = (struct header *)BUSY;
319 318 arena[0].nextblk = (struct header *)BUSY;
320 319 lastblk = arenaend = &(arena[1]);
321 320 /* initialize free queue */
322 321 freeptr[0].nextfree = &(freeptr[1]);
323 322 freeptr[1].nextblk = &(arena[0]);
324 323 freeptr[1].prevfree = &(freeptr[0]);
325 324 /* mark that small blocks not init yet */
326 325 }
327 326 if (nbytes == 0)
328 327 return (NULL);
329 328
330 329 if (nbytes <= maxfast && !nosmall) {
331 330 /*
332 331 * We can allocate out of a holding block
333 332 */
334 333 struct holdblk *holdblk; /* head of right sized queue */
335 334 struct lblk *lblk; /* pointer to a little block */
336 335 struct holdblk *newhold;
337 336
338 337 if (!change) {
339 338 int i;
340 339 /*
341 340 * This allocates space for hold block
342 341 * pointers by calling malloc recursively.
343 342 * Maxfast is temporarily set to 0, to
344 343 * avoid infinite recursion. Allocate
345 344 * space for an extra ptr so that an index
346 345 * is just ->blksz/grain, with the first
347 346 * ptr unused.
348 347 */
349 348 change = 1; /* change to algorithm params */
350 349 /* no longer allowed */
351 350 /*
352 351 * temporarily alter maxfast, to avoid
353 352 * infinite recursion
354 353 */
355 354 maxfast = 0;
356 355 holdhead = (struct holdblk **)
357 356 malloc_unlocked(sizeof (struct holdblk *) *
358 357 (fastct + 1), 0);
359 358 if (holdhead == NULL)
360 359 return (malloc_unlocked(nbytes, 0));
361 360 for (i = 1; i <= fastct; i++) {
362 361 holdhead[i] = HGROUND;
363 362 }
364 363 maxfast = fastct * grain;
365 364 }
366 365 /*
367 366 * Note that this uses the absolute min header size (MINHEAD)
368 367 * unlike the large block case which uses minhead
369 368 *
370 369 * round up to nearest multiple of grain
371 370 * code assumes grain is a multiple of MINHEAD
372 371 */
373 372 /* round up to grain */
374 373 nb = (nbytes + grain - 1) / grain * grain;
375 374 holdblk = holdhead[nb / grain];
376 375 nb = nb + MINHEAD;
377 376 /*
378 377 * look for space in the holding block. Blocks with
379 378 * space will be in front of those without
380 379 */
381 380 if ((holdblk != HGROUND) && (holdblk->lfreeq != LGROUND)) {
382 381 /* there is space */
383 382 lblk = holdblk->lfreeq;
384 383
385 384 /*
386 385 * Now make lfreeq point to a free block.
387 386 * If lblk has been previously allocated and
388 387 * freed, it has a valid pointer to use.
389 388 * Otherwise, lblk is at the beginning of
390 389 * the unallocated blocks at the end of
391 390 * the holding block, so, if there is room, take
392 391 * the next space. If not, mark holdblk full,
393 392 * and move holdblk to the end of the queue
394 393 */
395 394 if (lblk < holdblk->unused) {
396 395 /* move to next holdblk, if this one full */
397 396 if ((holdblk->lfreeq =
398 397 CLRSMAL(lblk->header.nextfree)) ==
399 398 LGROUND) {
400 399 holdhead[(nb-MINHEAD) / grain] =
401 400 holdblk->nexthblk;
402 401 }
403 402 } else if (((char *)holdblk->unused + nb) <
404 403 ((char *)holdblk + HOLDSZ(nb))) {
405 404 holdblk->unused = (struct lblk *)
406 405 ((char *)holdblk->unused+nb);
407 406 holdblk->lfreeq = holdblk->unused;
408 407 } else {
409 408 holdblk->unused = (struct lblk *)
410 409 ((char *)holdblk->unused+nb);
411 410 holdblk->lfreeq = LGROUND;
412 411 holdhead[(nb-MINHEAD)/grain] =
413 412 holdblk->nexthblk;
414 413 }
415 414 /* mark as busy and small */
416 415 lblk->header.holder = (struct holdblk *)SETALL(holdblk);
417 416 } else {
418 417 /* we need a new holding block */
419 418 newhold = (struct holdblk *)
420 419 malloc_unlocked(HOLDSZ(nb), 0);
421 420 if ((char *)newhold == NULL) {
422 421 return (NULL);
423 422 }
424 423 /* add to head of free queue */
425 424 if (holdblk != HGROUND) {
426 425 newhold->nexthblk = holdblk;
427 426 newhold->prevhblk = holdblk->prevhblk;
428 427 holdblk->prevhblk = newhold;
429 428 newhold->prevhblk->nexthblk = newhold;
430 429 } else {
431 430 newhold->nexthblk = newhold->prevhblk = newhold;
432 431 }
433 432 holdhead[(nb-MINHEAD)/grain] = newhold;
434 433 /* set up newhold */
435 434 lblk = (struct lblk *)(newhold->space);
436 435 newhold->lfreeq = newhold->unused =
437 436 (struct lblk *)((char *)newhold->space+nb);
438 437 lblk->header.holder = (struct holdblk *)SETALL(newhold);
439 438 newhold->blksz = nb-MINHEAD;
440 439 }
441 440 #ifdef debug
442 441 assert(((struct holdblk *)CLRALL(lblk->header.holder))->blksz >=
443 442 nbytes);
444 443 #endif /* debug */
445 444 return ((char *)lblk + MINHEAD);
446 445 } else {
447 446 /*
448 447 * We need an ordinary block
449 448 */
450 449 struct header *newblk; /* used for creating a block */
451 450
452 451 /* get number of bytes we need */
453 452 nb = nbytes + minhead;
454 453 nb = (nb + ALIGNSZ - 1) / ALIGNSZ * ALIGNSZ; /* align */
455 454 nb = (nb > MINBLKSZ) ? nb : MINBLKSZ;
456 455 /*
457 456 * see if there is a big enough block
458 457 * If none exists, you will get to freeptr[1].
459 458 * freeptr[1].next = &arena[0], so when you do the test,
460 459 * the result is a large positive number, since arena[0]
461 460 * comes before all blocks. Arena[0] is marked busy so
462 461 * that it will not be compacted. This kludge is for the
463 462 * sake of the almighty efficiency.
464 463 */
465 464 /* check that a very large request won't cause an inf. loop */
466 465
467 466 if ((freeptr[1].nextblk-&(freeptr[1])) < nb) {
468 467 return (NULL);
469 468 } else {
470 469 struct header *next; /* following block */
471 470 struct header *nextnext; /* block after next */
472 471
473 472 blk = freeptr;
474 473 do {
475 474 blk = blk->nextfree;
476 475 /* see if we can compact */
477 476 next = blk->nextblk;
478 477 if (!TESTBUSY(nextnext = next->nextblk)) {
479 478 do {
480 479 DELFREEQ(next);
481 480 next = nextnext;
482 481 nextnext = next->nextblk;
483 482 } while (!TESTBUSY(nextnext));
484 483 /*
485 484 * next will be at most == to lastblk,
486 485 * but I think the >= test is faster
487 486 */
488 487 if (next >= arenaend)
489 488 lastblk = blk;
490 489 blk->nextblk = next;
491 490 }
492 491 } while (((char *)(next) - (char *)blk) < nb);
493 492 }
494 493 /*
495 494 * if we didn't find a block, get more memory
496 495 */
497 496 if (blk == &(freeptr[1])) {
498 497 /*
499 498 * careful coding could likely replace
500 499 * newend with arenaend
501 500 */
502 501 struct header *newend; /* new end of arena */
503 502 ssize_t nget; /* number of words to get */
504 503
505 504 /*
506 505 * Three cases - 1. There is space between arenaend
507 506 * and the break value that will become
508 507 * a permanently allocated block.
509 508 * 2. Case 1 is not true, and the last
510 509 * block is allocated.
511 510 * 3. Case 1 is not true, and the last
512 511 * block is free
513 512 */
514 513 if ((newblk = (struct header *)sbrk(0)) !=
515 514 (struct header *)((char *)arenaend + HEADSZ)) {
516 515 /* case 1 */
517 516 #ifdef debug
518 517 if (case1count++ > 0)
519 518 (void) write(2, "Case 1 hit more than once."
520 519 " brk or sbrk?\n", 41);
521 520 #endif
522 521 /* get size to fetch */
523 522 nget = nb + HEADSZ;
524 523 /* round up to a block */
525 524 nget = (nget + BLOCKSZ - 1)/BLOCKSZ * BLOCKSZ;
526 525 assert((uintptr_t)newblk % ALIGNSZ == 0);
527 526 /* get memory */
528 527 if (morecore(nget) == (void *)-1)
529 528 return (NULL);
530 529 /* add to arena */
531 530 newend = (struct header *)((char *)newblk + nget
532 531 - HEADSZ);
533 532 assert((uintptr_t)newblk % ALIGNSZ == 0);
534 533 newend->nextblk = SETBUSY(&(arena[1]));
535 534 /* ??? newblk ?? */
536 535 newblk->nextblk = newend;
537 536
538 537 /*
539 538 * space becomes a permanently allocated block.
540 539 * This is likely not mt-safe as lock is not
541 540 * shared with brk or sbrk
542 541 */
543 542 arenaend->nextblk = SETBUSY(newblk);
544 543 /* adjust other pointers */
545 544 arenaend = newend;
546 545 lastblk = newblk;
547 546 blk = newblk;
548 547 } else if (TESTBUSY(lastblk->nextblk)) {
549 548 /* case 2 */
550 549 nget = (nb + BLOCKSZ - 1) / BLOCKSZ * BLOCKSZ;
551 550 if (morecore(nget) == (void *)-1)
552 551 return (NULL);
553 552 /* block must be word aligned */
554 553 assert(((uintptr_t)newblk%ALIGNSZ) == 0);
555 554 /*
556 555 * stub at old arenaend becomes first word
557 556 * in blk
558 557 */
559 558 /* ??? newblk = arenaend; */
560 559
561 560 newend =
562 561 (struct header *)((char *)arenaend+nget);
563 562 newend->nextblk = SETBUSY(&(arena[1]));
564 563 arenaend->nextblk = newend;
565 564 lastblk = blk = arenaend;
566 565 arenaend = newend;
567 566 } else {
568 567 /* case 3 */
569 568 /*
570 569 * last block in arena is at end of memory and
571 570 * is free
572 571 */
573 572 /* 1.7 had this backward without cast */
574 573 nget = nb -
575 574 ((char *)arenaend - (char *)lastblk);
576 575 nget = (nget + (BLOCKSZ - 1)) /
577 576 BLOCKSZ * BLOCKSZ;
578 577 assert(((uintptr_t)newblk % ALIGNSZ) == 0);
579 578 if (morecore(nget) == (void *)-1)
580 579 return (NULL);
581 580 /* combine with last block, put in arena */
582 581 newend = (struct header *)
583 582 ((char *)arenaend + nget);
584 583 arenaend = lastblk->nextblk = newend;
585 584 newend->nextblk = SETBUSY(&(arena[1]));
586 585 /* set which block to use */
587 586 blk = lastblk;
588 587 DELFREEQ(blk);
589 588 }
590 589 } else {
591 590 struct header *nblk; /* next block */
592 591
593 592 /* take block found of free queue */
594 593 DELFREEQ(blk);
595 594 /*
596 595 * make head of free queue immediately follow blk,
597 596 * unless blk was at the end of the queue
598 597 */
599 598 nblk = blk->nextfree;
600 599 if (nblk != &(freeptr[1])) {
601 600 MOVEHEAD(nblk);
602 601 }
603 602 }
604 603 /* blk now points to an adequate block */
605 604 if (((char *)blk->nextblk - (char *)blk) - nb >= MINBLKSZ) {
606 605 /* carve out the right size block */
607 606 /* newblk will be the remainder */
608 607 newblk = (struct header *)((char *)blk + nb);
609 608 newblk->nextblk = blk->nextblk;
610 609 /* mark the block busy */
611 610 blk->nextblk = SETBUSY(newblk);
612 611 ADDFREEQ(newblk);
613 612 /* if blk was lastblk, make newblk lastblk */
614 613 if (blk == lastblk)
615 614 lastblk = newblk;
616 615 } else {
617 616 /* just mark the block busy */
618 617 blk->nextblk = SETBUSY(blk->nextblk);
619 618 }
620 619 }
621 620 CHECKQ
622 621 assert((char *)CLRALL(blk->nextblk) -
623 622 ((char *)blk + minhead) >= nbytes);
624 623 assert((char *)CLRALL(blk->nextblk) -
625 624 ((char *)blk + minhead) < nbytes + MINBLKSZ);
626 625 return ((char *)blk + minhead);
627 626 }
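
The search loop above is the circular first-fit described at the top of the file: walk the free queue from the roving head, coalescing forward as it goes, until a block at least nb bytes long turns up. A stripped-down sketch of plain first-fit over a free list, with invented names and no coalescing:

	#include <stddef.h>

	struct fhdr_x {
		struct fhdr_x *nextfree;
		size_t size;
	};

	static struct fhdr_x *
	first_fit_x(struct fhdr_x *head, size_t nb)
	{
		struct fhdr_x *blk;

		for (blk = head->nextfree; blk != head; blk = blk->nextfree)
			if (blk->size >= nb)
				return (blk);	/* first block big enough */
		return (NULL);		/* caller must grow the arena */
	}
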
628 627
629 628 /*
630 629 * free(ptr) - free block that user thinks starts at ptr
631 630 *
632 631 * input - ptr-1 contains the block header.
633 632 * If the header points forward, we have a normal
634 633 * block pointing to the next block
635 634 * if the header points backward, we have a small
636 635 * block from a holding block.
637 636 * In both cases, the busy bit must be set
638 637 */
639 638
640 639 void
641 640 free(void *ptr)
642 641 {
643 642 (void) mutex_lock(&mlock);
644 643 free_unlocked(ptr);
645 644 (void) mutex_unlock(&mlock);
646 645 }
647 646
648 647 /*
649 648 * free_unlocked(ptr) - Do the real work for free()
650 649 */
651 650
652 651 void
653 652 free_unlocked(void *ptr)
654 653 {
655 654 struct holdblk *holdblk; /* block holding blk */
656 655 struct holdblk *oldhead; /* former head of the hold block */
657 656 /* queue containing blk's holder */
658 657
659 658 if (ptr == NULL)
660 659 return;
661 660 if (TESTSMAL(((struct header *)((char *)ptr - MINHEAD))->nextblk)) {
662 661 struct lblk *lblk; /* pointer to freed block */
663 662 ssize_t offset; /* choice of header lists */
664 663
665 664 lblk = (struct lblk *)CLRBUSY((char *)ptr - MINHEAD);
666 665 assert((struct header *)lblk < arenaend);
667 666 assert((struct header *)lblk > arena);
668 667 /* allow twits (e.g. awk) to free a block twice */
669 668 holdblk = lblk->header.holder;
670 669 if (!TESTBUSY(holdblk))
671 670 return;
672 671 holdblk = (struct holdblk *)CLRALL(holdblk);
673 672 /* put lblk on its hold block's free list */
674 673 lblk->header.nextfree = SETSMAL(holdblk->lfreeq);
675 674 holdblk->lfreeq = lblk;
676 675 /* move holdblk to head of queue, if its not already there */
677 676 offset = holdblk->blksz / grain;
678 677 oldhead = holdhead[offset];
679 678 if (oldhead != holdblk) {
680 679 /* first take out of current spot */
681 680 holdhead[offset] = holdblk;
682 681 holdblk->nexthblk->prevhblk = holdblk->prevhblk;
683 682 holdblk->prevhblk->nexthblk = holdblk->nexthblk;
684 683 /* now add at front */
685 684 holdblk->nexthblk = oldhead;
686 685 holdblk->prevhblk = oldhead->prevhblk;
687 686 oldhead->prevhblk = holdblk;
688 687 holdblk->prevhblk->nexthblk = holdblk;
689 688 }
690 689 } else {
691 690 struct header *blk; /* real start of block */
692 691 struct header *next; /* next = blk->nextblk */
693 692 struct header *nextnext; /* block after next */
694 693
695 694 blk = (struct header *)((char *)ptr - minhead);
696 695 next = blk->nextblk;
697 696 /* take care of twits (e.g. awk) who return blocks twice */
698 697 if (!TESTBUSY(next))
699 698 return;
700 699 blk->nextblk = next = CLRBUSY(next);
701 700 ADDFREEQ(blk);
702 701 /* see if we can compact */
703 702 if (!TESTBUSY(nextnext = next->nextblk)) {
704 703 do {
705 704 DELFREEQ(next);
706 705 next = nextnext;
707 706 } while (!TESTBUSY(nextnext = next->nextblk));
708 707 if (next == arenaend) lastblk = blk;
709 708 blk->nextblk = next;
710 709 }
711 710 }
712 711 CHECKQ
713 712 }
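
Note that the compaction above only merges forward: the freed block absorbs any free blocks that follow it, and a merge with a free predecessor is deferred to the next malloc() search, as the comment at the top of the file explains. A simplified, self-contained sketch of the forward merge; the names are invented here, and the real code also unlinks each absorbed block from the free queue:

	struct mhdr_x {
		struct mhdr_x *next;
		int busy;		/* stands in for the tagged low bit */
	};

	static void
	coalesce_forward_x(struct mhdr_x *blk)
	{
		struct mhdr_x *next = blk->next;

		while (!next->busy)	/* successor is free: absorb it */
			next = next->next;
		blk->next = next;
	}
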
714 713
715 714
716 715 /*
717 716 * realloc(ptr, size) - give the user a block of size "size", with
718 717 * the contents pointed to by ptr. Free ptr.
719 718 */
720 719
721 720 void *
722 721 realloc(void *ptr, size_t size)
723 722 {
724 723 void *retval;
725 724
726 725 (void) mutex_lock(&mlock);
727 726 retval = realloc_unlocked(ptr, size);
728 727 (void) mutex_unlock(&mlock);
729 728 return (retval);
730 729 }
731 730
732 731
733 732 /*
734 733 * realloc_unlocked(ptr, size) - Do the real work for realloc()
735 734 */
736 735
737 736 static void *
738 737 realloc_unlocked(void *ptr, size_t size)
739 738 {
740 739 struct header *blk; /* block ptr is contained in */
741 740 size_t trusize; /* block size as allocator sees it */
742 741 char *newptr; /* pointer to user's new block */
743 742 size_t cpysize; /* amount to copy */
744 743 struct header *next; /* block after blk */
745 744
746 745 if (ptr == NULL)
747 746 return (malloc_unlocked(size, 0));
748 747
749 748 if (size == 0) {
750 749 free_unlocked(ptr);
751 750 return (NULL);
752 751 }
753 752
754 753 if (TESTSMAL(((struct lblk *)((char *)ptr - MINHEAD))->
755 754 header.holder)) {
756 755 /*
757 756 * we have a special small block which can't be expanded
758 757 *
759 758 * This makes the assumption that even if the user is
760 759 * reallocating a free block, malloc doesn't alter the contents
761 760 * of small blocks
762 761 */
763 762 newptr = malloc_unlocked(size, 0);
764 763 if (newptr == NULL)
765 764 return (NULL);
766 765 /* this isn't to save time--it's to protect the twits */
767 766 if ((char *)ptr != newptr) {
768 767 struct lblk *lblk;
769 768 lblk = (struct lblk *)((char *)ptr - MINHEAD);
770 769 cpysize = ((struct holdblk *)
771 770 CLRALL(lblk->header.holder))->blksz;
772 771 cpysize = (size > cpysize) ? cpysize : size;
773 772 (void) memcpy(newptr, ptr, cpysize);
774 773 free_unlocked(ptr);
775 774 }
776 775 } else {
777 776 blk = (struct header *)((char *)ptr - minhead);
778 777 next = blk->nextblk;
779 778 /*
780 779 * deal with twits who reallocate free blocks
781 780 *
782 781 * if they haven't reset minblk via mallopt, that's
783 782 * their problem
784 783 */
785 784 if (!TESTBUSY(next)) {
786 785 DELFREEQ(blk);
787 786 blk->nextblk = SETBUSY(next);
788 787 }
789 788 next = CLRBUSY(next);
790 789 /* make blk as big as possible */
791 790 if (!TESTBUSY(next->nextblk)) {
792 791 do {
793 792 DELFREEQ(next);
794 793 next = next->nextblk;
795 794 } while (!TESTBUSY(next->nextblk));
796 795 blk->nextblk = SETBUSY(next);
797 796 if (next >= arenaend) lastblk = blk;
798 797 }
799 798 /* get size we really need */
800 799 trusize = size+minhead;
801 800 trusize = (trusize + ALIGNSZ - 1)/ALIGNSZ*ALIGNSZ;
802 801 trusize = (trusize >= MINBLKSZ) ? trusize : MINBLKSZ;
803 802 /* see if we have enough */
804 803 /* this isn't really the copy size, but I need a register */
805 804 cpysize = (char *)next - (char *)blk;
806 805 if (cpysize >= trusize) {
807 806 /* carve out the size we need */
808 807 struct header *newblk; /* remainder */
809 808
810 809 if (cpysize - trusize >= MINBLKSZ) {
811 810 /*
812 811 * carve out the right size block
813 812 * newblk will be the remainder
814 813 */
815 814 newblk = (struct header *)((char *)blk +
816 815 trusize);
817 816 newblk->nextblk = next;
818 817 blk->nextblk = SETBUSY(newblk);
819 818 /* at this point, next is invalid */
820 819 ADDFREEQ(newblk);
821 820 /* if blk was lastblk, make newblk lastblk */
822 821 if (blk == lastblk)
823 822 lastblk = newblk;
824 823 }
825 824 newptr = ptr;
826 825 } else {
827 826 /* bite the bullet, and call malloc */
828 827 cpysize = (size > cpysize) ? cpysize : size;
829 828 newptr = malloc_unlocked(size, 0);
830 829 if (newptr == NULL)
831 830 return (NULL);
832 831 (void) memcpy(newptr, ptr, cpysize);
833 832 free_unlocked(ptr);
834 833 }
835 834 }
836 835 return (newptr);
837 836 }
838 837
839 838
840 839 /*
841 840 * calloc - allocate and clear memory block
842 841 */
843 842
844 843 void *
845 844 calloc(size_t num, size_t size)
846 845 {
847 846 char *mp;
847 + size_t total;
848 +
849 + if (num == 0 || size == 0) {
850 + total = 0;
851 + } else {
852 + total = num * size;
853 +
854 + /* check for overflow */
855 + if ((total / num) != size) {
856 + errno = ENOMEM;
857 + return (NULL);
858 + }
859 + }
848 860
849 - num *= size;
850 - mp = malloc(num);
861 + mp = malloc(total);
851 862 if (mp == NULL)
852 863 return (NULL);
853 - (void) memset(mp, 0, num);
864 + (void) memset(mp, 0, total);
854 865 return (mp);
855 866 }
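
This hunk is the heart of the fix for 4922: num * size wraps silently modulo SIZE_MAX + 1, so without a wider integer type the portable way to detect overflow is to divide the product back out and compare. A standalone sketch of the same test, under a hypothetical helper name:

	#include <stddef.h>

	/* return 1 if num * size would overflow a size_t, else 0 */
	static int
	mul_overflows_x(size_t num, size_t size)
	{
		size_t total = num * size;	/* wraps silently on overflow */

		return (num != 0 && total / num != size);
	}

	/* e.g. with a 32-bit size_t, mul_overflows_x(0x10000, 0x10000) == 1 */
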
856 867
857 868
858 869 /*
859 870 * Mallopt - set options for allocation
860 871 *
861 872 * Mallopt provides for control over the allocation algorithm.
862 873 * The cmds available are:
863 874 *
864 875 * M_MXFAST Set maxfast to value. Maxfast is the size of the
865 876 * largest small, quickly allocated block. Maxfast
866 877 * may be set to 0 to disable fast allocation entirely.
867 878 *
868 879 * M_NLBLKS Set numlblks to value. Numlblks is the number of
869 880 * small blocks per holding block. Value must be
870 881 * greater than 0.
871 882 *
872 883 * M_GRAIN Set grain to value. The sizes of all blocks
873 884 * smaller than maxfast are considered to be rounded
874 885 * up to the nearest multiple of grain. The default
875 886 * value of grain is the smallest number of bytes
876 887 * which will allow alignment of any data type. Grain
877 888 * will be rounded up to a multiple of its default,
878 889 * and maxsize will be rounded up to a multiple of
879 890 * grain. Value must be greater than 0.
880 891 *
881 892 * M_KEEP Retain data in freed block until the next malloc,
882 893 * realloc, or calloc. Value is ignored.
883 894 * This option is provided only for compatibility with
884 895 * the old version of malloc, and is not recommended.
885 896 *
886 897 * returns - 0, upon successful completion
887 898 * 1, if malloc has previously been called or
888 899 * if value or cmd have illegal values
889 900 */
890 901
891 902 int
892 903 mallopt(int cmd, int value)
893 904 {
894 905 /* disallow changes once a small block is allocated */
895 906 (void) mutex_lock(&mlock);
896 907 if (change) {
897 908 (void) mutex_unlock(&mlock);
898 909 return (1);
899 910 }
900 911 switch (cmd) {
901 912 case M_MXFAST:
902 913 if (value < 0) {
903 914 (void) mutex_unlock(&mlock);
904 915 return (1);
905 916 }
906 917 fastct = (value + grain - 1) / grain;
907 918 maxfast = grain*fastct;
908 919 break;
909 920 case M_NLBLKS:
910 921 if (value <= 1) {
911 922 (void) mutex_unlock(&mlock);
912 923 return (1);
913 924 }
914 925 numlblks = value;
915 926 break;
916 927 case M_GRAIN:
917 928 if (value <= 0) {
918 929 (void) mutex_unlock(&mlock);
919 930 return (1);
920 931 }
921 932
922 933 /* round grain up to a multiple of ALIGNSZ */
923 934 grain = (value + ALIGNSZ - 1)/ALIGNSZ*ALIGNSZ;
924 935
925 936 /* reduce fastct appropriately */
926 937 fastct = (maxfast + grain - 1) / grain;
927 938 maxfast = grain * fastct;
928 939 break;
929 940 case M_KEEP:
930 941 if (change && holdhead != NULL) {
931 942 (void) mutex_unlock(&mlock);
932 943 return (1);
933 944 }
934 945 minhead = HEADSZ;
935 946 break;
936 947 default:
937 948 (void) mutex_unlock(&mlock);
938 949 return (1);
939 950 }
940 951 (void) mutex_unlock(&mlock);
941 952 return (0);
942 953 }
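
Since mallopt() fails with 1 once change is set, all tuning must happen before the first small-block allocation. A hypothetical usage sketch, assuming the library's malloc.h supplies M_MXFAST and M_GRAIN as documented above; the values are arbitrary:

	#include <stdlib.h>
	#include <malloc.h>

	int
	main(void)
	{
		void *p;

		/* tune before any allocation: 64-byte fast blocks, 16-byte grain */
		if (mallopt(M_MXFAST, 64) != 0 || mallopt(M_GRAIN, 16) != 0)
			return (1);

		p = malloc(32);		/* small enough for a holding block */
		free(p);
		return (0);
	}
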
943 954
944 955 /*
945 956 * mallinfo-provide information about space usage
946 957 *
947 958 * input - max; mallinfo will return the size of the
948 959 * largest block < max.
949 960 *
950 961 * output - a structure containing a description
951 962 * of space usage, defined in malloc.h
952 963 */
953 964
954 965 struct mallinfo
955 966 mallinfo(void)
956 967 {
957 968 struct header *blk, *next; /* ptr to ordinary blocks */
958 969 struct holdblk *hblk; /* ptr to holding blocks */
959 970 struct mallinfo inf; /* return value */
960 971 int i; /* the ubiquitous counter */
961 972 ssize_t size; /* size of a block */
962 973 ssize_t fsp; /* free space in 1 hold block */
963 974
964 975 (void) mutex_lock(&mlock);
965 976 (void) memset(&inf, 0, sizeof (struct mallinfo));
966 977 if (freeptr[0].nextfree == GROUND) {
967 978 (void) mutex_unlock(&mlock);
968 979 return (inf);
969 980 }
970 981 blk = CLRBUSY(arena[1].nextblk);
971 982 /* return total space used */
972 983 inf.arena = (char *)arenaend - (char *)blk;
973 984
974 985 /*
975 986 * loop through arena, counting # of blocks, and
976 987 * and space used by blocks
977 988 */
978 989 next = CLRBUSY(blk->nextblk);
979 990 while (next != &(arena[1])) {
980 991 inf.ordblks++;
981 992 size = (char *)next - (char *)blk;
982 993 if (TESTBUSY(blk->nextblk)) {
983 994 inf.uordblks += size;
984 995 inf.keepcost += HEADSZ-MINHEAD;
985 996 } else {
986 997 inf.fordblks += size;
987 998 }
988 999 blk = next;
989 1000 next = CLRBUSY(blk->nextblk);
990 1001 }
991 1002
992 1003 /*
993 1004 * if any holding blocks have been allocated,
994 1005 * then examine space in holding blocks
995 1006 */
996 1007 if (change && holdhead != NULL) {
997 1008 for (i = fastct; i > 0; i--) { /* loop thru ea. chain */
998 1009 hblk = holdhead[i];
999 1010 /* do only if chain not empty */
1000 1011 if (hblk != HGROUND) {
1001 1012 size = hblk->blksz +
1002 1013 sizeof (struct lblk) - sizeof (int);
1003 1014 do { /* loop thru 1 hold blk chain */
1004 1015 inf.hblks++;
1005 1016 fsp = freespace(hblk);
1006 1017 inf.fsmblks += fsp;
1007 1018 inf.usmblks += numlblks*size - fsp;
1008 1019 inf.smblks += numlblks;
1009 1020 hblk = hblk->nexthblk;
1010 1021 } while (hblk != holdhead[i]);
1011 1022 }
1012 1023 }
1013 1024 }
1014 1025 inf.hblkhd = (inf.smblks / numlblks) * sizeof (struct holdblk);
1015 1026 /* holding blocks were counted in ordblks, so subtract off */
1016 1027 inf.ordblks -= inf.hblks;
1017 1028 inf.uordblks -= inf.hblkhd + inf.usmblks + inf.fsmblks;
1018 1029 inf.keepcost -= inf.hblks*(HEADSZ - MINHEAD);
1019 1030 (void) mutex_unlock(&mlock);
1020 1031 return (inf);
1021 1032 }
1022 1033
1023 1034
1024 1035 /*
1025 1036 * freespace - calc. how much space is used in the free
1026 1037 * small blocks in a given holding block
1027 1038 *
1028 1039 * input - hblk = given holding block
1029 1040 *
1030 1041 * returns space used in free small blocks of hblk
1031 1042 */
1032 1043
1033 1044 static ssize_t
1034 1045 freespace(struct holdblk *holdblk)
1035 1046 {
1036 1047 struct lblk *lblk;
1037 1048 ssize_t space = 0;
1038 1049 ssize_t size;
1039 1050 struct lblk *unused;
1040 1051
1041 1052 lblk = CLRSMAL(holdblk->lfreeq);
1042 1053 size = holdblk->blksz + sizeof (struct lblk) - sizeof (int);
1043 1054 unused = CLRSMAL(holdblk->unused);
1044 1055 /* follow free chain */
1045 1056 while ((lblk != LGROUND) && (lblk != unused)) {
1046 1057 space += size;
1047 1058 lblk = CLRSMAL(lblk->header.nextfree);
1048 1059 }
1049 1060 space += ((char *)holdblk + HOLDSZ(size)) - (char *)unused;
1050 1061 return (space);
1051 1062 }
1052 1063
1053 1064 static void *
1054 1065 morecore(size_t bytes)
1055 1066 {
1056 1067 void * ret;
1057 1068
1058 1069 if (bytes > LONG_MAX) {
1059 1070 intptr_t wad;
1060 1071 /*
1061 1072 * The request size is too big. We need to do this in
1062 1073 * chunks; sbrk() takes a signed (intptr_t) increment.
1063 1074 */
1064 1075 if (bytes == ULONG_MAX)
1065 1076 return ((void *)-1);
1066 1077
1067 1078 ret = sbrk(0);
1068 1079 wad = LONG_MAX;
1069 1080 while (wad > 0) {
1070 1081 if (sbrk(wad) == (void *)-1) {
1071 1082 if (ret != sbrk(0))
1072 1083 (void) sbrk(-LONG_MAX);
1073 1084 return ((void *)-1);
1074 1085 }
1075 1086 bytes -= LONG_MAX;
1076 1087 wad = bytes;
1077 1088 }
1078 1089 } else
1079 1090 ret = sbrk(bytes);
1080 1091
1081 1092 return (ret);
1082 1093 }
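
morecore() exists because sbrk() takes a signed increment, so a single request larger than LONG_MAX must be fed to the kernel in chunks. A simplified sketch of that loop under an invented name; the unwinding of a partially grown break on failure is elided here but present above:

	#include <unistd.h>
	#include <stdint.h>
	#include <limits.h>
	#include <stddef.h>

	static void *
	grow_break_x(size_t bytes)
	{
		void *base = sbrk(0);		/* remember the old break */

		while (bytes > 0) {
			intptr_t step = (bytes > LONG_MAX) ?
			    LONG_MAX : (intptr_t)bytes;

			if (sbrk(step) == (void *)-1)
				return ((void *)-1);	/* real code also unwinds */
			bytes -= step;
		}
		return (base);
	}
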
1083 1094
1084 1095 #ifdef debug
1085 1096 int
1086 1097 check_arena(void)
1087 1098 {
1088 1099 struct header *blk, *prev, *next; /* ptr to ordinary blocks */
1089 1100
1090 1101 (void) mutex_lock(&mlock);
1091 1102 if (freeptr[0].nextfree == GROUND) {
1092 1103 (void) mutex_unlock(&mlock);
1093 1104 return (-1);
1094 1105 }
1095 1106 blk = arena + 1;
1096 1107
1097 1108 /* loop through arena, checking */
1098 1109 blk = (struct header *)CLRALL(blk->nextblk);
1099 1110 next = (struct header *)CLRALL(blk->nextblk);
1100 1111 while (next != arena + 1) {
1101 1112 assert(blk >= arena + 1);
1102 1113 assert(blk <= lastblk);
1103 1114 assert(next >= blk + 1);
1104 1115 assert(((uintptr_t)((struct header *)blk->nextblk) &
1105 1116 (4 | SMAL)) == 0);
1106 1117
1107 1118 if (TESTBUSY(blk->nextblk) == 0) {
1108 1119 assert(blk->nextfree >= freeptr);
1109 1120 assert(blk->prevfree >= freeptr);
1110 1121 assert(blk->nextfree <= lastblk);
1111 1122 assert(blk->prevfree <= lastblk);
1112 1123 assert(((uintptr_t)((struct header *)blk->nextfree) &
1113 1124 7) == 0);
1114 1125 assert(((uintptr_t)((struct header *)blk->prevfree) &
1115 1126 7) == 0 || blk->prevfree == freeptr);
1116 1127 }
1117 1128 blk = next;
1118 1129 next = CLRBUSY(blk->nextblk);
1119 1130 }
1120 1131 (void) mutex_unlock(&mlock);
1121 1132 return (0);
1122 1133 }
1123 1134
1124 1135 #define RSTALLOC 1
1125 1136 #endif
1126 1137
1127 1138 #ifdef RSTALLOC
1128 1139 /*
1129 1140 * rstalloc - reset alloc routines
1130 1141 *
1131 1142 * description - return allocated memory and reset
1132 1143 * allocation pointers.
1133 1144 *
1134 1145 * Warning - This is for debugging purposes only.
1135 1146 * It will return all memory allocated after
1136 1147 * the first call to malloc, even if some
1137 1148 * of it was fetched by a user's sbrk().
1138 1149 */
1139 1150
1140 1151 void
1141 1152 rstalloc(void)
1142 1153 {
1143 1154 (void) mutex_lock(&mlock);
1144 1155 minhead = MINHEAD;
1145 1156 grain = ALIGNSZ;
1146 1157 numlblks = NUMLBLKS;
1147 1158 fastct = FASTCT;
1148 1159 maxfast = MAXFAST;
1149 1160 change = 0;
1150 1161 if (freeptr[0].nextfree == GROUND) {
1151 1162 (void) mutex_unlock(&mlock);
1152 1163 return;
1153 1164 }
1154 1165 brk(CLRBUSY(arena[1].nextblk));
1155 1166 freeptr[0].nextfree = GROUND;
1156 1167 #ifdef debug
1157 1168 case1count = 0;
1158 1169 #endif
1159 1170 (void) mutex_unlock(&mlock);
1160 1171 }
1161 1172 #endif /* RSTALLOC */
1162 1173
1163 1174 /*
1164 1175 * cfree is an undocumented, obsolete function
1165 1176 */
1166 1177
1167 1178 /* ARGSUSED1 */
1168 1179 void
1169 1180 cfree(void *p, size_t num, size_t size)
1170 1181 {
1171 1182 free(p);
1172 1183 }
1173 1184
1174 1185 static void
1175 1186 malloc_prepare()
1176 1187 {
1177 1188 (void) mutex_lock(&mlock);
1178 1189 }
1179 1190
1180 1191 static void
1181 1192 malloc_release()
1182 1193 {
1183 1194 (void) mutex_unlock(&mlock);
1184 1195 }
1185 1196
1186 1197 #pragma init(malloc_init)
1187 1198 static void
1188 1199 malloc_init(void)
1189 1200 {
1190 1201 (void) pthread_atfork(malloc_prepare, malloc_release, malloc_release);
1191 1202 }