|
1 // mem.c - memory management |
|
2 // |
|
3 // © Portions Copyright (c) Symbian Software Ltd 2007. All rights reserved. |
|
4 // |
|
5 /* |
|
6 * This file is part of zsh, the Z shell. |
|
7 * |
|
8 * Copyright (c) 1992-1997 Paul Falstad |
|
9 * All rights reserved. |
|
10 * |
|
11 * Permission is hereby granted, without written agreement and without |
|
12 * license or royalty fees, to use, copy, modify, and distribute this |
|
13 * software and to distribute modified versions of this software for any |
|
14 * purpose, provided that the above copyright notice and the following |
|
15 * two paragraphs appear in all copies of this software. |
|
16 * |
|
17 * In no event shall Paul Falstad or the Zsh Development Group be liable |
|
18 * to any party for direct, indirect, special, incidental, or consequential |
|
19 * damages arising out of the use of this software and its documentation, |
|
20 * even if Paul Falstad and the Zsh Development Group have been advised of |
|
21 * the possibility of such damage. |
|
22 * |
|
23 * Paul Falstad and the Zsh Development Group specifically disclaim any |
|
24 * warranties, including, but not limited to, the implied warranties of |
|
25 * merchantability and fitness for a particular purpose. The software |
|
26 * provided hereunder is on an "as is" basis, and Paul Falstad and the |
|
27 * Zsh Development Group have no obligation to provide maintenance, |
|
28 * support, updates, enhancements, or modifications. |
|
29 * |
|
30 */ |
|
31 |
|
32 #include "zsh.mdh" |
|
33 #include "mem.pro" |
|
34 |
|
35 /* |
|
36 There are two ways to allocate memory in zsh. The first way is |
|
37 to call zalloc/zshcalloc, which call malloc/calloc directly. It |
|
38 is legal to call realloc() or free() on memory allocated this way. |
|
39 The second way is to call zhalloc/hcalloc, which allocates memory |
|
40 from one of the memory pools on the heap stack. Such memory pools |
|
will be automatically created when the heap allocation routines are
|
42 called. To be sure that they are freed at appropriate times |
|
43 one should call pushheap() before one starts using heaps and |
|
44 popheap() after that (when the memory allocated on the heaps since |
|
45 the last pushheap() isn't needed anymore). |
|
46 pushheap() saves the states of all currently allocated heaps and |
|
47 popheap() resets them to the last state saved and destroys the |
|
48 information about that state. If you called pushheap() and |
|
49 allocated some memory on the heaps and then come to a place where |
|
50 you don't need the allocated memory anymore but you still want |
|
51 to allocate memory on the heap, you should call freeheap(). This |
|
52 works like popheap(), only that it doesn't free the information |
|
53 about the heap states (i.e. the heaps are like after the call to |
|
54 pushheap() and you have to call popheap some time later). |
|
55 |
|
56 Memory allocated in this way does not have to be freed explicitly; |
|
57 it will all be freed when the pool is destroyed. In fact, |
|
58 attempting to free this memory may result in a core dump. |
|
59 |
|
60 If possible, the heaps are allocated using mmap() so that the |
|
61 (*real*) heap isn't filled up with empty zsh heaps. If mmap() |
|
62 is not available and zsh's own allocator is used, we use a simple trick |
|
63 to avoid that: we allocate a large block of memory before allocating |
|
64 a heap pool, this memory is freed again immediately after the pool |
|
65 is allocated. If there are only small blocks on the free list this |
|
66 guarantees that the memory for the pool is at the end of the memory |
|
67 which means that we can give it back to the system when the pool is |
|
68 freed. |
|
69 |
|
70 hrealloc(char *p, size_t old, size_t new) is an optimisation |
|
71 with a similar interface to realloc(). Typically the new size |
|
72 will be larger than the old one, since there is no gain in |
|
shrinking the allocation (indeed, that will confuse hrealloc()
|
74 since it will forget that the unused space once belonged to this |
|
75 pointer). However, new == 0 is a special case; then if we |
|
76 had to allocate a special heap for this memory it is freed at |
|
77 that point. |
|
78 */ |
|
79 |
|
80 #if defined(HAVE_SYS_MMAN_H) && defined(HAVE_MMAP) && defined(HAVE_MUNMAP) |
|
81 |
|
82 #include <sys/mman.h> |
|
83 |
|
84 #if defined(MAP_ANONYMOUS) && defined(MAP_PRIVATE) |
|
85 |
|
86 #define USE_MMAP 1 |
|
87 #define MMAP_FLAGS (MAP_ANONYMOUS | MAP_PRIVATE) |
|
88 |
|
89 #endif |
|
90 #endif |
|
91 |
|
92 #ifdef __SYMBIAN32__ |
|
93 #ifdef __WINSCW__ |
|
94 #pragma warn_possunwant off |
|
95 #endif//__WINSCW__ |
|
96 #endif//__SYMBIAN32__ |
|
97 |
|
98 #ifdef ZSH_MEM_WARNING |
|
99 # ifndef DEBUG |
|
100 # define DEBUG 1 |
|
101 # endif |
|
102 #endif |
|
103 |
|
104 #if defined(ZSH_MEM) && defined(ZSH_MEM_DEBUG) |
|
105 |
|
106 static int h_m[1025], h_push, h_pop, h_free; |
|
107 |
|
108 #endif |
|
109 |
|
/* Make sure we align to the longest fundamental type. */
union mem_align {
    zlong l;
    double d;
};

/* Size of one alignment unit; every heap allocation is rounded up to a
 * multiple of this. */
#define H_ISIZE sizeof(union mem_align)
/* Default total size of one heap pool (header plus arena).  Slightly
 * under 16K so that, with the allocator's own block header, the whole
 * request stays within a 16K chunk. */
#define HEAPSIZE (16384 - H_ISIZE)
/* Memory available for user data in default arena size */
#define HEAP_ARENA_SIZE (HEAPSIZE - sizeof(struct heap))
/* Size of the dummy block malloc'd/freed around pool creation when zsh's
 * own allocator is used -- see the "simple trick" described in the file
 * header comment. */
#define HEAPFREE (16384 - H_ISIZE)

/* Memory available for user data in heap h */
#define ARENA_SIZEOF(h) ((h)->size - sizeof(struct heap))

/* list of zsh heaps */

static Heap heaps;

/* a heap with free space, not always correct (it will be the last heap
 * if that was newly allocated but it may also be another one) */

static Heap fheap;
|
133 |
|
134 /* Use new heaps from now on. This returns the old heap-list. */ |
|
135 |
|
136 /**/ |
|
137 mod_export Heap |
|
138 new_heaps(void) |
|
139 { |
|
140 Heap h; |
|
141 |
|
142 queue_signals(); |
|
143 h = heaps; |
|
144 |
|
145 fheap = heaps = NULL; |
|
146 unqueue_signals(); |
|
147 |
|
148 return h; |
|
149 } |
|
150 |
|
151 /* Re-install the old heaps again, freeing the new ones. */ |
|
152 |
|
153 /**/ |
|
154 mod_export void |
|
155 old_heaps(Heap old) |
|
156 { |
|
157 Heap h, n; |
|
158 |
|
159 queue_signals(); |
|
160 for (h = heaps; h; h = n) { |
|
161 n = h->next; |
|
162 DPUTS(h->sp, "BUG: old_heaps() with pushed heaps"); |
|
163 #ifdef USE_MMAP |
|
164 munmap((void *) h, sizeof(*h)); |
|
165 #else |
|
166 zfree(h, sizeof(*h)); |
|
167 #endif |
|
168 } |
|
169 heaps = old; |
|
170 fheap = NULL; |
|
171 unqueue_signals(); |
|
172 } |
|
173 |
|
174 /* Temporarily switch to other heaps (or back again). */ |
|
175 |
|
176 /**/ |
|
177 mod_export Heap |
|
178 switch_heaps(Heap new) |
|
179 { |
|
180 Heap h; |
|
181 |
|
182 queue_signals(); |
|
183 h = heaps; |
|
184 |
|
185 heaps = new; |
|
186 fheap = NULL; |
|
187 unqueue_signals(); |
|
188 |
|
189 return h; |
|
190 } |
|
191 |
|
192 /* save states of zsh heaps */ |
|
193 |
|
194 /**/ |
|
195 mod_export void |
|
196 pushheap(void) |
|
197 { |
|
198 Heap h; |
|
199 Heapstack hs; |
|
200 |
|
201 queue_signals(); |
|
202 |
|
203 #if defined(ZSH_MEM) && defined(ZSH_MEM_DEBUG) |
|
204 h_push++; |
|
205 #endif |
|
206 |
|
207 for (h = heaps; h; h = h->next) { |
|
208 DPUTS(!h->used, "BUG: empty heap"); |
|
209 hs = (Heapstack) zalloc(sizeof(*hs)); |
|
210 hs->next = h->sp; |
|
211 h->sp = hs; |
|
212 hs->used = h->used; |
|
213 } |
|
214 unqueue_signals(); |
|
215 } |
|
216 |
|
217 /* reset heaps to previous state */ |
|
218 |
|
/**/
mod_export void
freeheap(void)
{
    Heap h, hn, hl = NULL;

    queue_signals();

#if defined(ZSH_MEM) && defined(ZSH_MEM_DEBUG)
    h_free++;
#endif

    /* Walk all heaps: heaps with a saved state (h->sp) are rewound to
     * that state and kept; heaps created entirely after the last
     * pushheap() have no saved state and are returned to the system.
     * (Heaps with saved state always form a prefix of the list, since
     * new heaps are appended at the end.) */
    fheap = NULL;
    for (h = heaps; h; h = hn) {
	hn = h->next;
	if (h->sp) {
#ifdef ZSH_MEM_DEBUG
	    /* Poison the released region to catch stale pointers. */
	    memset(arena(h) + h->sp->used, 0xff, h->used - h->sp->used);
#endif
	    h->used = h->sp->used;
	    /* Remember the first rewound heap with spare room so the
	     * next zhalloc() can try it first. */
	    if (!fheap && h->used < ARENA_SIZEOF(h))
		fheap = h;
	    hl = h;
	} else {
#ifdef USE_MMAP
	    munmap((void *) h, h->size);
#else
	    zfree(h, HEAPSIZE);
#endif
	}
    }
    /* hl is the last surviving heap; terminate the list there, or empty
     * the list entirely if nothing survived. */
    if (hl)
	hl->next = NULL;
    else
	heaps = NULL;

    unqueue_signals();
}
|
257 |
|
258 /* reset heap to previous state and destroy state information */ |
|
259 |
|
/**/
mod_export void
popheap(void)
{
    Heap h, hn, hl = NULL;
    Heapstack hs;

    queue_signals();

#if defined(ZSH_MEM) && defined(ZSH_MEM_DEBUG)
    h_pop++;
#endif

    /* Like freeheap(), but in addition pop and destroy the saved state
     * record on each surviving heap. */
    fheap = NULL;
    for (h = heaps; h; h = hn) {
	hn = h->next;
	if ((hs = h->sp)) {
	    /* Unlink the top state record before rewinding. */
	    h->sp = hs->next;
#ifdef ZSH_MEM_DEBUG
	    /* Poison the released region to catch stale pointers. */
	    memset(arena(h) + hs->used, 0xff, h->used - hs->used);
#endif
	    h->used = hs->used;
	    /* Remember the first rewound heap with spare room so the
	     * next zhalloc() can try it first. */
	    if (!fheap && h->used < ARENA_SIZEOF(h))
		fheap = h;
	    zfree(hs, sizeof(*hs));

	    hl = h;
	} else {
	    /* No saved state: the heap was created after the matching
	     * pushheap(), so give it back to the system. */
#ifdef USE_MMAP
	    munmap((void *) h, h->size);
#else
	    zfree(h, HEAPSIZE);
#endif
	}
    }
    /* Terminate the list after the last surviving heap. */
    if (hl)
	hl->next = NULL;
    else
	heaps = NULL;

    unqueue_signals();
}
|
302 |
|
303 #ifdef USE_MMAP |
|
/*
 * Utility function to allocate a heap area of at least *n bytes.
 * *n will be rounded up to the next page boundary.
 */
static Heap
mmap_heap_alloc(size_t *n)
{
    Heap h;
    /* Cached on first call; note it holds (pagesize - 1), i.e. an
     * alignment mask, not the page size itself. */
    static size_t pgsz = 0;

    if (!pgsz) {

#ifdef _SC_PAGESIZE
	pgsz = sysconf(_SC_PAGESIZE);     /* SVR4 */
#else
# ifdef _SC_PAGE_SIZE
	pgsz = sysconf(_SC_PAGE_SIZE);    /* HPUX */
# else
	pgsz = getpagesize();
# endif
#endif

	/* Turn the page size into a mask for the rounding below. */
	pgsz--;
    }
    /* Round the request up to a whole number of pages. */
    *n = (*n + pgsz) & ~pgsz;
    h = (Heap) mmap(NULL, *n, PROT_READ | PROT_WRITE,
		    MMAP_FLAGS, -1, 0);
    if (h == ((Heap) -1)) {
	/* (Heap) -1 is MAP_FAILED; running out of heap is fatal. */
	zerr("fatal error: out of heap memory", NULL, 0);
	exit(1);
    }

    return h;
}
|
338 #endif |
|
339 |
|
340 |
|
341 /* allocate memory from the current memory pool */ |
|
342 |
|
/**/
mod_export void *
zhalloc(size_t size)
{
    Heap h;
    size_t n;

    /* Round the request up to a multiple of the alignment unit. */
    size = (size + H_ISIZE - 1) & ~(H_ISIZE - 1);

    queue_signals();

#if defined(ZSH_MEM) && defined(ZSH_MEM_DEBUG)
    h_m[size < (1024 * H_ISIZE) ? (size / H_ISIZE) : 1024]++;
#endif

    /* find a heap with enough free space */

    /* Try the cached free-space heap first if it fits, otherwise scan
     * the whole list from the beginning. */
    for (h = ((fheap && ARENA_SIZEOF(fheap) >= (size + fheap->used))
	      ? fheap : heaps);
	 h; h = h->next) {
	if (ARENA_SIZEOF(h) >= (n = size + h->used)) {
	    void *ret;

	    /* Bump allocation: the new chunk starts at the old fill
	     * level of this arena. */
	    h->used = n;
	    ret = arena(h) + n - size;
	    unqueue_signals();
	    return ret;
	}
    }
    {
	Heap hp;
        /* not found, allocate new heap */
#if defined(ZSH_MEM) && !defined(USE_MMAP)
	static int called = 0;
        void *foo = called ? (void *)malloc(HEAPFREE) : NULL;
            /* tricky, see above */
#endif

	/* Default-sized pool if the request fits, otherwise a pool just
	 * big enough for this one oversized request. */
	n = HEAP_ARENA_SIZE > size ? HEAPSIZE : size + sizeof(*h);
	/* Find the tail of the heap list so the new heap is appended. */
	for (hp = NULL, h = heaps; h; hp = h, h = h->next);

#ifdef USE_MMAP
	h = mmap_heap_alloc(&n);
#else
	h = (Heap) zalloc(n);
#endif

#if defined(ZSH_MEM) && !defined(USE_MMAP)
	/* Release the dummy block so the pool ends up at the top of the
	 * allocator's memory (see the file header comment). */
	if (called)
	    zfree(foo, HEAPFREE);
	called = 1;
#endif

	h->size = n;
	h->used = size;
	h->next = NULL;
	h->sp = NULL;

	if (hp)
	    hp->next = h;
	else
	    heaps = h;
	fheap = h;

	unqueue_signals();
	return arena(h);
    }
}
|
411 |
|
/**/
mod_export void *
hrealloc(char *p, size_t old, size_t new)
{
    Heap h, ph;

    /* Round both sizes to the alignment unit, as zhalloc() does. */
    old = (old + H_ISIZE - 1) & ~(H_ISIZE - 1);
    new = (new + H_ISIZE - 1) & ~(H_ISIZE - 1);

    if (old == new)
	return p;
    if (!old && !p)
	return zhalloc(new);

    /* find the heap with p */

    queue_signals();
    for (h = heaps, ph = NULL; h; ph = h, h = h->next)
	if (p >= arena(h) && p < arena(h) + ARENA_SIZEOF(h))
	    break;

    DPUTS(!h, "BUG: hrealloc() called for non-heap memory.");
    DPUTS(h->sp && arena(h) + h->sp->used > p,
	  "BUG: hrealloc() wants to realloc pushed memory");

    /*
     * If the end of the old chunk is before the used pointer,
     * more memory has been zhalloc'ed afterwards.
     * We can't tell if that's still in use, obviously, since
     * that's the whole point of heap memory.
     * We have no choice other than to grab some more memory
     * somewhere else and copy in the old stuff.
     */
    if (p + old < arena(h) + h->used) {
	if (new > old) {
	    char *ptr = (char *) zhalloc(new);
	    memcpy(ptr, p, old);
#ifdef ZSH_MEM_DEBUG
	    /* Poison the abandoned copy to catch stale pointers. */
	    memset(p, 0xff, old);
#endif
	    unqueue_signals();
	    return ptr;
	} else {
	    /* Shrinking in place: the trailing space is simply
	     * forgotten (see the file header comment). */
	    unqueue_signals();
	    return new ? p : NULL;
	}
    }

    DPUTS(p + old != arena(h) + h->used, "BUG: hrealloc more than allocated");

    /*
     * We now know there's nothing afterwards in the heap, now see if
     * there's nothing before.  Then we can reallocate the whole thing.
     * Otherwise, we need to keep the stuff at the start of the heap,
     * then allocate a new one too; this is handled below.  (This will
     * guarantee we occupy a full heap next time round, provided we
     * don't use the heap for anything else.)
     */
    if (p == arena(h)) {
	/*
	 * Zero new seems to be a special case saying we've finished
	 * with the specially reallocated memory, see scanner() in glob.c.
	 */
	if (!new) {
	    /* Unlink and destroy the whole heap. */
	    if (ph)
		ph->next = h->next;
	    else
		heaps = h->next;
	    fheap = NULL;
#ifdef USE_MMAP
	    munmap((void *) h, h->size);
#else
	    zfree(h, HEAPSIZE);
#endif
	    unqueue_signals();
	    return NULL;
	}
	if (new > ARENA_SIZEOF(h)) {
	    /*
	     * Not enough memory in this heap.  Allocate a new
	     * one of sufficient size.
	     *
	     * To avoid this happening too often, allocate
	     * chunks in multiples of HEAPSIZE.
	     * (Historical note: there didn't used to be any
	     * point in this since we didn't consistently record
	     * the allocated size of the heap, but now we do.)
	     */
	    size_t n = (new + sizeof(*h) + HEAPSIZE);
	    n -= n % HEAPSIZE;
	    fheap = NULL;

#ifdef USE_MMAP
	    {
		/*
		 * I don't know any easy portable way of requesting
		 * a mmap'd segment be extended, so simply allocate
		 * a new one and copy.
		 */
		Heap hnew;

		hnew = mmap_heap_alloc(&n);
		/* Copy the entire heap, header (with next pointer) included */
		memcpy(hnew, h, h->size);
		munmap((void *)h, h->size);
		h = hnew;
	    }
#else
	    h = (Heap) realloc(h, n);
#endif

	    h->size = n;
	    /* The heap may have moved; relink it in the list. */
	    if (ph)
		ph->next = h;
	    else
		heaps = h;
	}
	h->used = new;
	unqueue_signals();
	return arena(h);
    }
#ifndef USE_MMAP
    DPUTS(h->used > ARENA_SIZEOF(h), "BUG: hrealloc at invalid address");
#endif
    /* p is the last chunk in its heap but not the first: grow or shrink
     * in place if the arena has room, otherwise copy out. */
    if (h->used + (new - old) <= ARENA_SIZEOF(h)) {
	h->used += new - old;
	unqueue_signals();
	return p;
    } else {
	char *t = zhalloc(new);
	memcpy(t, p, old > new ? new : old);
	h->used -= old;
#ifdef ZSH_MEM_DEBUG
	memset(p, 0xff, old);
#endif
	unqueue_signals();
	return t;
    }
}
|
551 |
|
552 /* allocate memory from the current memory pool and clear it */ |
|
553 |
|
554 /**/ |
|
555 mod_export void * |
|
556 hcalloc(size_t size) |
|
557 { |
|
558 void *ptr; |
|
559 |
|
560 ptr = zhalloc(size); |
|
561 memset(ptr, 0, size); |
|
562 return ptr; |
|
563 } |
|
564 |
|
565 /* allocate permanent memory */ |
|
566 |
|
567 /**/ |
|
568 mod_export void * |
|
569 zalloc(size_t size) |
|
570 { |
|
571 void *ptr; |
|
572 |
|
573 if (!size) |
|
574 size = 1; |
|
575 queue_signals(); |
|
576 if (!(ptr = (void *) malloc(size))) { |
|
577 zerr("fatal error: out of memory", NULL, 0); |
|
578 exit(1); |
|
579 } |
|
580 unqueue_signals(); |
|
581 |
|
582 return ptr; |
|
583 } |
|
584 |
|
585 /**/ |
|
586 mod_export void * |
|
587 zshcalloc(size_t size) |
|
588 { |
|
589 void *ptr; |
|
590 |
|
591 if (!size) |
|
592 size = 1; |
|
593 queue_signals(); |
|
594 if (!(ptr = (void *) malloc(size))) { |
|
595 zerr("fatal error: out of memory", NULL, 0); |
|
596 exit(1); |
|
597 } |
|
598 unqueue_signals(); |
|
599 memset(ptr, 0, size); |
|
600 |
|
601 return ptr; |
|
602 } |
|
603 |
|
604 /* This front-end to realloc is used to make sure we have a realloc * |
|
605 * that conforms to POSIX realloc. Older realloc's can fail if * |
|
606 * passed a NULL pointer, but POSIX realloc should handle this. A * |
|
607 * better solution would be for configure to check if realloc is * |
|
608 * POSIX compliant, but I'm not sure how to do that. */ |
|
609 |
|
610 /**/ |
|
611 mod_export void * |
|
612 zrealloc(void *ptr, size_t size) |
|
613 { |
|
614 queue_signals(); |
|
615 if (ptr) { |
|
616 if (size) { |
|
617 /* Do normal realloc */ |
|
618 if (!(ptr = (void *) realloc(ptr, size))) { |
|
619 zerr("fatal error: out of memory", NULL, 0); |
|
620 exit(1); |
|
621 } |
|
622 unqueue_signals(); |
|
623 return ptr; |
|
624 } |
|
625 else |
|
626 /* If ptr is not NULL, but size is zero, * |
|
627 * then object pointed to is freed. */ |
|
628 free(ptr); |
|
629 |
|
630 ptr = NULL; |
|
631 } else { |
|
632 /* If ptr is NULL, then behave like malloc */ |
|
633 ptr = malloc(size); |
|
634 } |
|
635 unqueue_signals(); |
|
636 |
|
637 return ptr; |
|
638 } |
|
639 |
|
640 /**/ |
|
641 #ifdef ZSH_MEM |
|
642 |
|
643 /* |
|
644 Below is a simple segment oriented memory allocator for systems on |
|
645 which it is better than the system's one. Memory is given in blocks |
|
646 aligned to an integer multiple of sizeof(union mem_align), which will |
|
647 probably be 64-bit as it is the longer of zlong or double. Each block is |
|
648 preceded by a header which contains the length of the data part (in |
|
649 bytes). In allocated blocks only this field of the structure m_hdr is |
|
650 senseful. In free blocks the second field (next) is a pointer to the next |
|
651 free segment on the free list. |
|
652 |
|
653 On top of this simple allocator there is a second allocator for small |
|
654 chunks of data. It should be both faster and less space-consuming than |
|
655 using the normal segment mechanism for such blocks. |
|
656 For the first M_NSMALL-1 possible sizes memory is allocated in arrays |
|
657 that can hold M_SNUM blocks. Each array is stored in one segment of the |
|
658 main allocator. In these segments the third field of the header structure |
|
659 (free) contains a pointer to the first free block in the array. The |
|
660 last field (used) gives the number of already used blocks in the array. |
|
661 |
|
662 If the macro name ZSH_MEM_DEBUG is defined, some information about the memory |
|
usage is stored. This information can then be viewed by calling the
|
664 builtin `mem' (which is only available if ZSH_MEM_DEBUG is set). |
|
665 |
|
666 If ZSH_MEM_WARNING is defined, error messages are printed in case of errors. |
|
667 |
|
668 If ZSH_SECURE_FREE is defined, free() checks if the given address is really |
|
669 one that was returned by malloc(), it ignores it if it wasn't (printing |
|
670 an error message if ZSH_MEM_WARNING is also defined). |
|
671 */ |
|
672 #if !defined(__hpux) && !defined(DGUX) && !defined(__osf__) |
|
673 # if defined(_BSD) |
|
674 # ifndef HAVE_BRK_PROTO |
|
675 extern int brk _((caddr_t)); |
|
676 # endif |
|
677 # ifndef HAVE_SBRK_PROTO |
|
678 extern caddr_t sbrk _((int)); |
|
679 # endif |
|
680 # else |
|
681 # ifndef HAVE_BRK_PROTO |
|
682 extern int brk _((void *)); |
|
683 # endif |
|
684 # ifndef HAVE_SBRK_PROTO |
|
685 extern void *sbrk _((int)); |
|
686 # endif |
|
687 # endif |
|
688 #endif |
|
689 |
|
690 #if defined(_BSD) && !defined(STDC_HEADERS) |
|
691 # define FREE_RET_T int |
|
692 # define FREE_ARG_T char * |
|
693 # define FREE_DO_RET |
|
694 # define MALLOC_RET_T char * |
|
695 # define MALLOC_ARG_T size_t |
|
696 #else |
|
697 # define FREE_RET_T void |
|
698 # define FREE_ARG_T void * |
|
699 # define MALLOC_RET_T void * |
|
700 # define MALLOC_ARG_T size_t |
|
701 #endif |
|
702 |
|
/* structure for building free list in blocks holding small blocks */

struct m_shdr {
    struct m_shdr *next;	/* next one on free list */
#ifdef PAD_64_BIT
    /* dummy to make this 64-bit aligned */
    struct m_shdr *dummy;
#endif
};

/* Header preceding every block handed out by this allocator.  For an
 * allocated block only `len' is meaningful; the other fields are used
 * only while the block is on the free list or holds an array of small
 * blocks (see the overview comment above). */
struct m_hdr {
    zlong len;			/* length of memory block */
#if defined(PAD_64_BIT) && !defined(ZSH_64_BIT_TYPE)
    /* either 1 or 2 zlong's, whichever makes up 64 bits. */
    zlong dummy1;
#endif
    struct m_hdr *next;		/* if free: next on free list
				   if block of small blocks: next one with
				   small blocks of same size */
    struct m_shdr *free;	/* if block of small blocks: free list */
    zlong used;			/* if block of small blocks: number of used
				   blocks */
#if defined(PAD_64_BIT) && !defined(ZSH_64_BIT_TYPE)
    zlong dummy2;
#endif
};
|
729 |
|
730 |
|
731 /* alignment for memory blocks */ |
|
732 |
|
733 #define M_ALIGN (sizeof(union mem_align)) |
|
734 |
|
735 /* length of memory header, length of first field of memory header and |
|
736 minimal size of a block left free (if we allocate memory and take a |
|
737 block from the free list that is larger than needed, it must have at |
|
least M_MIN extra bytes to be split; if it has, the rest is put on
|
739 the free list) */ |
|
740 |
|
741 #define M_HSIZE (sizeof(struct m_hdr)) |
|
742 #if defined(PAD_64_BIT) && !defined(ZSH_64_BIT_TYPE) |
|
743 # define M_ISIZE (2*sizeof(zlong)) |
|
744 #else |
|
745 # define M_ISIZE (sizeof(zlong)) |
|
746 #endif |
|
747 #define M_MIN (2 * M_ISIZE) |
|
748 |
|
749 /* M_FREE is the number of bytes that have to be free before memory is |
|
750 * given back to the system |
|
751 * M_KEEP is the number of bytes that will be kept when memory is given |
|
752 * back; note that this has to be less than M_FREE |
|
753 * M_ALLOC is the number of extra bytes to request from the system */ |
|
754 |
|
755 #define M_FREE 32768 |
|
756 #define M_KEEP 16384 |
|
757 #define M_ALLOC M_KEEP |
|
758 |
|
759 /* a pointer to the last free block, a pointer to the free list (the blocks |
|
760 on this list are kept in order - lowest address first) */ |
|
761 |
|
762 static struct m_hdr *m_lfree, *m_free; |
|
763 |
|
764 /* system's pagesize */ |
|
765 |
|
766 static long m_pgsz = 0; |
|
767 |
|
768 /* the highest and the lowest valid memory addresses, kept for fast validity |
|
769 checks in free() and to find out if and when we can give memory back to |
|
770 the system */ |
|
771 |
|
772 static char *m_high, *m_low; |
|
773 |
|
774 /* Management of blocks for small blocks: |
|
775 Such blocks are kept in lists (one list for each of the sizes that are |
|
776 allocated in such blocks). The lists are stored in the m_small array. |
|
777 M_SIDX() calculates the index into this array for a given size. M_SNUM |
|
778 is the size (in small blocks) of such blocks. M_SLEN() calculates the |
|
779 size of the small blocks held in a memory block, given a pointer to the |
|
780 header of it. M_SBLEN() gives the size of a memory block that can hold |
|
781 an array of small blocks, given the size of these small blocks. M_BSLEN() |
|
782 calculates the size of the small blocks held in a memory block, given the |
|
length of that block (including the header of the memory block). M_NSMALL
|
784 is the number of possible block sizes that small blocks should be used |
|
785 for. */ |
|
786 |
|
787 |
|
788 #define M_SIDX(S) ((S) / M_ISIZE) |
|
789 #define M_SNUM 128 |
|
790 #define M_SLEN(M) ((M)->len / M_SNUM) |
|
791 #if defined(PAD_64_BIT) && !defined(ZSH_64_BIT_TYPE) |
|
792 /* Include the dummy in the alignment */ |
|
793 #define M_SBLEN(S) ((S) * M_SNUM + sizeof(struct m_shdr *) + \ |
|
794 2*sizeof(zlong) + sizeof(struct m_hdr *)) |
|
795 #define M_BSLEN(S) (((S) - sizeof(struct m_shdr *) - \ |
|
796 2*sizeof(zlong) - sizeof(struct m_hdr *)) / M_SNUM) |
|
797 #else |
|
798 #define M_SBLEN(S) ((S) * M_SNUM + sizeof(struct m_shdr *) + \ |
|
799 sizeof(zlong) + sizeof(struct m_hdr *)) |
|
800 #define M_BSLEN(S) (((S) - sizeof(struct m_shdr *) - \ |
|
801 sizeof(zlong) - sizeof(struct m_hdr *)) / M_SNUM) |
|
802 #endif |
|
803 #define M_NSMALL 8 |
|
804 |
|
805 static struct m_hdr *m_small[M_NSMALL]; |
|
806 |
|
807 #ifdef ZSH_MEM_DEBUG |
|
808 |
|
809 static int m_s = 0, m_b = 0; |
|
810 static int m_m[1025], m_f[1025]; |
|
811 |
|
812 static struct m_hdr *m_l; |
|
813 |
|
814 #endif /* ZSH_MEM_DEBUG */ |
|
815 |
|
MALLOC_RET_T
malloc(MALLOC_ARG_T size)
{
    struct m_hdr *m, *mp, *mt;
    long n, s, os = 0;
#ifndef USE_MMAP
    struct heap *h, *hp, *hf = NULL, *hfp = NULL;
#endif

    /* some systems want malloc to return the highest valid address plus one
       if it is called with an argument of zero */

    if (!size)
	return (MALLOC_RET_T) m_high;

    queue_signals();  /* just queue signals rather than handling them */

    /* first call, get page size */

    if (!m_pgsz) {

#ifdef _SC_PAGESIZE
	m_pgsz = sysconf(_SC_PAGESIZE);     /* SVR4 */
#else
# ifdef _SC_PAGE_SIZE
	m_pgsz = sysconf(_SC_PAGE_SIZE);    /* HPUX */
# else
	m_pgsz = getpagesize();
# endif
#endif

	m_free = m_lfree = NULL;
    }
    /* Round the request up to the block alignment. */
    size = (size + M_ALIGN - 1) & ~(M_ALIGN - 1);

    /* Do we need a small block? */

    if ((s = M_SIDX(size)) && s < M_NSMALL) {
	/* yep, find a memory block with free small blocks of the
	   appropriate size (if we find it in this list, this means that
	   it has room for at least one more small block) */
	for (mp = NULL, m = m_small[s]; m && !m->free; mp = m, m = m->next);

	if (m) {
	    /* we found one */
	    struct m_shdr *sh = m->free;

	    m->free = sh->next;
	    m->used++;

	    /* if all small blocks in this block are allocated, the block is
	       put at the end of the list of blocks with small blocks of this
	       size (i.e., we try to keep blocks with free blocks at the
	       beginning of the list, to make the search faster) */

	    if (m->used == M_SNUM && m->next) {
		for (mt = m; mt->next; mt = mt->next);

		mt->next = m;
		if (mp)
		    mp->next = m->next;
		else
		    m_small[s] = m->next;
		m->next = NULL;
	    }
#ifdef ZSH_MEM_DEBUG
	    m_m[size / M_ISIZE]++;
#endif

	    unqueue_signals();
	    return (MALLOC_RET_T) sh;
	}
	/* we still want a small block but there was no block with a free
	   small block of the requested size; so we use the real allocation
	   routine to allocate a block for small blocks of this size */
	os = size;
	size = M_SBLEN(size);
    } else
	s = 0;

    /* search the free list for a block of at least the requested size */
    for (mp = NULL, m = m_free; m && m->len < size; mp = m, m = m->next);

#ifndef USE_MMAP

    /* if there is an empty zsh heap at a lower address we steal it and take
       the memory from it, putting the rest on the free list (remember
       that the blocks on the free list are kept in order) */

    for (hp = NULL, h = heaps; h; hp = h, h = h->next)
	if (!h->used &&
	    (!hf || h < hf) &&
	    (!m || ((char *)m) > ((char *)h)))
	    hf = h, hfp = hp;

    if (hf) {
	/* we found such a heap */
	Heapstack hso, hsn;

	/* delete structures on the list holding the heap states */
	for (hso = hf->sp; hso; hso = hsn) {
	    hsn = hso->next;
	    zfree(hso, sizeof(*hso));
	}
	/* take it from the list of heaps */
	if (hfp)
	    hfp->next = hf->next;
	else
	    heaps = hf->next;
	/* now we simply free it and then search the free list again */
	zfree(hf, HEAPSIZE);

	for (mp = NULL, m = m_free; m && m->len < size; mp = m, m = m->next);
    }
#endif
    if (!m) {
	long nal;
	/* no matching free block was found, we have to request new
	   memory from the system */
	n = (size + M_HSIZE + M_ALLOC + m_pgsz - 1) & ~(m_pgsz - 1);

	if (((char *)(m = (struct m_hdr *)sbrk(n))) == ((char *)-1)) {
	    DPUTS(1, "MEM: allocation error at sbrk.");
	    unqueue_signals();
	    return NULL;
	}
	/* If the break isn't aligned, extend the segment just enough to
	   realign the block start. */
	if ((nal = ((long)(char *)m) & (M_ALIGN-1))) {
	    if ((char *)sbrk(M_ALIGN - nal) == (char *)-1) {
		DPUTS(1, "MEM: allocation error at sbrk.");
		unqueue_signals();
		return NULL;
	    }
	    m = (struct m_hdr *) ((char *)m + (M_ALIGN - nal));
	}
	/* set m_low, for the check in free() */
	if (!m_low)
	    m_low = (char *)m;

#ifdef ZSH_MEM_DEBUG
	m_s += n;

	if (!m_l)
	    m_l = m;
#endif

	/* save new highest address */
	m_high = ((char *)m) + n;

	/* initialize header */
	m->len = n - M_ISIZE;
	m->next = NULL;

	/* put it on the free list and set m_lfree pointing to it */
	if ((mp = m_lfree))
	    m_lfree->next = m;
	m_lfree = m;
    }
    if ((n = m->len - size) > M_MIN) {
	/* the block we want to use has more than M_MIN bytes plus the
	   number of bytes that were requested; we split it in two and
	   leave the rest on the free list */
	struct m_hdr *mtt = (struct m_hdr *)(((char *)m) + M_ISIZE + size);

	mtt->len = n - M_ISIZE;
	mtt->next = m->next;

	m->len = size;

	/* put the rest on the list */
	if (m_lfree == m)
	    m_lfree = mtt;

	if (mp)
	    mp->next = mtt;
	else
	    m_free = mtt;
    } else if (mp) {
	/* the block we found wasn't the first one on the free list */
	if (m == m_lfree)
	    m_lfree = mp;
	mp->next = m->next;
    } else {
	/* it was the first one */
	m_free = m->next;
	if (m == m_lfree)
	    m_lfree = m_free;
    }

    if (s) {
	/* we are allocating a block that should hold small blocks */
	struct m_shdr *sh, *shn;

	/* build the free list in this block and set `used' field */
	m->free = sh = (struct m_shdr *)(((char *)m) +
					 sizeof(struct m_hdr) + os);

	/* NOTE(review): `sh + s' steps by s * sizeof(struct m_shdr),
	   which appears to equal the small-block size os; the first
	   small block (just handed out below) lives before m->free. */
	for (n = M_SNUM - 2; n--; sh = shn)
	    shn = sh->next = sh + s;
	sh->next = NULL;

	m->used = 1;

	/* put the block on the list of blocks holding small blocks of
	   this size */
	m->next = m_small[s];
	m_small[s] = m;

#ifdef ZSH_MEM_DEBUG
	m_m[os / M_ISIZE]++;
#endif

	unqueue_signals();
	return (MALLOC_RET_T) (((char *)m) + sizeof(struct m_hdr));
    }
#ifdef ZSH_MEM_DEBUG
    m_m[m->len < (1024 * M_ISIZE) ? (m->len / M_ISIZE) : 1024]++;
#endif

    unqueue_signals();
    /* Normal (non-small) block: user data starts right after `len',
       i.e. at the `next' field of the header. */
    return (MALLOC_RET_T) & m->next;
}
|
1037 |
|
/* this is an internal free(); the second argument may, but need not,
   hold the size of the block the first argument points to; if it is
   the right size for that block, freeing will be faster; the value
   0 for this parameter means: `size unknown' */
|
1042 |
|
1043 /**/ |
|
1044 mod_export void |
|
1045 zfree(void *p, int sz) |
|
1046 { |
|
1047 struct m_hdr *m = (struct m_hdr *)(((char *)p) - M_ISIZE), *mp, *mt = NULL; |
|
1048 int i; |
|
1049 # ifdef DEBUG |
|
1050 int osz = sz; |
|
1051 # endif |
|
1052 |
|
1053 #ifdef ZSH_SECURE_FREE |
|
1054 sz = 0; |
|
1055 #else |
|
1056 sz = (sz + M_ALIGN - 1) & ~(M_ALIGN - 1); |
|
1057 #endif |
|
1058 |
|
1059 if (!p) |
|
1060 return; |
|
1061 |
|
1062 /* first a simple check if the given address is valid */ |
|
1063 if (((char *)p) < m_low || ((char *)p) > m_high || |
|
1064 ((long)p) & (M_ALIGN - 1)) { |
|
1065 DPUTS(1, "BUG: attempt to free storage at invalid address"); |
|
1066 return; |
|
1067 } |
|
1068 |
|
1069 queue_signals(); |
|
1070 |
|
1071 fr_rec: |
|
1072 |
|
1073 if ((i = sz / M_ISIZE) < M_NSMALL || !sz) |
|
1074 /* if the given sizes says that it is a small block, find the |
|
1075 memory block holding it; we search all blocks with blocks |
|
1076 of at least the given size; if the size parameter is zero, |
|
1077 this means, that all blocks are searched */ |
|
1078 for (; i < M_NSMALL; i++) { |
|
1079 for (mp = NULL, mt = m_small[i]; |
|
1080 mt && (((char *)mt) > ((char *)p) || |
|
1081 (((char *)mt) + mt->len) < ((char *)p)); |
|
1082 mp = mt, mt = mt->next); |
|
1083 |
|
1084 if (mt) { |
|
1085 /* we found the block holding the small block */ |
|
1086 struct m_shdr *sh = (struct m_shdr *)p; |
|
1087 |
|
1088 #ifdef ZSH_SECURE_FREE |
|
1089 struct m_shdr *sh2; |
|
1090 |
|
1091 /* check if the given address is equal to the address of |
|
1092 the first small block plus an integer multiple of the |
|
1093 block size */ |
|
1094 if ((((char *)p) - (((char *)mt) + sizeof(struct m_hdr))) % |
|
1095 M_BSLEN(mt->len)) { |
|
1096 |
|
1097 DPUTS(1, "BUG: attempt to free storage at invalid address"); |
|
1098 unqueue_signals(); |
|
1099 return; |
|
1100 } |
|
1101 /* check, if the address is on the (block-intern) free list */ |
|
1102 for (sh2 = mt->free; sh2; sh2 = sh2->next) |
|
1103 if (((char *)p) == ((char *)sh2)) { |
|
1104 |
|
1105 DPUTS(1, "BUG: attempt to free already free storage"); |
|
1106 unqueue_signals(); |
|
1107 return; |
|
1108 } |
|
1109 #endif |
|
1110 DPUTS(M_BSLEN(mt->len) < osz, |
|
1111 "BUG: attempt to free more than allocated."); |
|
1112 |
|
1113 #ifdef ZSH_MEM_DEBUG |
|
1114 m_f[M_BSLEN(mt->len) / M_ISIZE]++; |
|
1115 memset(sh, 0xff, M_BSLEN(mt->len)); |
|
1116 #endif |
|
1117 |
|
1118 /* put the block onto the free list */ |
|
1119 sh->next = mt->free; |
|
1120 mt->free = sh; |
|
1121 |
|
1122 if (--mt->used) { |
|
1123 /* if there are still used blocks in this block, we |
|
1124 put it at the beginning of the list with blocks |
|
1125 holding small blocks of the same size (since we |
|
1126 know that there is at least one free block in it, |
|
1127 this will make allocation of small blocks faster; |
|
1128 it also guarantees that long living memory blocks |
|
1129 are preferred over younger ones */ |
|
1130 if (mp) { |
|
1131 mp->next = mt->next; |
|
1132 mt->next = m_small[i]; |
|
1133 m_small[i] = mt; |
|
1134 } |
|
1135 unqueue_signals(); |
|
1136 return; |
|
1137 } |
|
1138 /* if there are no more used small blocks in this |
|
1139 block, we free the whole block */ |
|
1140 if (mp) |
|
1141 mp->next = mt->next; |
|
1142 else |
|
1143 m_small[i] = mt->next; |
|
1144 |
|
1145 m = mt; |
|
1146 p = (void *) & m->next; |
|
1147 |
|
1148 break; |
|
1149 } else if (sz) { |
|
1150 /* if we didn't find a block and a size was given, try it |
|
1151 again as if no size were given */ |
|
1152 sz = 0; |
|
1153 goto fr_rec; |
|
1154 } |
|
1155 } |
|
1156 #ifdef ZSH_MEM_DEBUG |
|
1157 if (!mt) |
|
1158 m_f[m->len < (1024 * M_ISIZE) ? (m->len / M_ISIZE) : 1024]++; |
|
1159 #endif |
|
1160 |
|
1161 #ifdef ZSH_SECURE_FREE |
|
1162 /* search all memory blocks, if one of them is at the given address */ |
|
1163 for (mt = (struct m_hdr *)m_low; |
|
1164 ((char *)mt) < m_high; |
|
1165 mt = (struct m_hdr *)(((char *)mt) + M_ISIZE + mt->len)) |
|
1166 if (((char *)p) == ((char *)&mt->next)) |
|
1167 break; |
|
1168 |
|
1169 /* no block was found at the given address */ |
|
1170 if (((char *)mt) >= m_high) { |
|
1171 DPUTS(1, "BUG: attempt to free storage at invalid address"); |
|
1172 unqueue_signals(); |
|
1173 return; |
|
1174 } |
|
1175 #endif |
|
1176 |
|
1177 /* see if the block is on the free list */ |
|
1178 for (mp = NULL, mt = m_free; mt && mt < m; mp = mt, mt = mt->next); |
|
1179 |
|
1180 if (m == mt) { |
|
1181 /* it is, ouch! */ |
|
1182 DPUTS(1, "BUG: attempt to free already free storage"); |
|
1183 unqueue_signals(); |
|
1184 return; |
|
1185 } |
|
1186 DPUTS(m->len < osz, "BUG: attempt to free more than allocated"); |
|
1187 #ifdef ZSH_MEM_DEBUG |
|
1188 memset(p, 0xff, m->len); |
|
1189 #endif |
|
1190 if (mt && ((char *)mt) == (((char *)m) + M_ISIZE + m->len)) { |
|
1191 /* the block after the one we are freeing is free, we put them |
|
1192 together */ |
|
1193 m->len += mt->len + M_ISIZE; |
|
1194 m->next = mt->next; |
|
1195 |
|
1196 if (mt == m_lfree) |
|
1197 m_lfree = m; |
|
1198 } else |
|
1199 m->next = mt; |
|
1200 |
|
1201 if (mp && ((char *)m) == (((char *)mp) + M_ISIZE + mp->len)) { |
|
1202 /* the block before the one we are freeing is free, we put them |
|
1203 together */ |
|
1204 mp->len += m->len + M_ISIZE; |
|
1205 mp->next = m->next; |
|
1206 |
|
1207 if (m == m_lfree) |
|
1208 m_lfree = mp; |
|
1209 } else if (mp) |
|
1210 /* otherwise, we just put it on the free list */ |
|
1211 mp->next = m; |
|
1212 else { |
|
1213 m_free = m; |
|
1214 if (!m_lfree) |
|
1215 m_lfree = m_free; |
|
1216 } |
|
1217 |
|
1218 /* if the block we have just freed was at the end of the process heap |
|
1219 and now there is more than one page size of memory, we can give |
|
1220 it back to the system (and we do it ;-) */ |
|
1221 if ((((char *)m_lfree) + M_ISIZE + m_lfree->len) == m_high && |
|
1222 m_lfree->len >= m_pgsz + M_MIN + M_FREE) { |
|
1223 long n = (m_lfree->len - M_MIN - M_KEEP) & ~(m_pgsz - 1); |
|
1224 |
|
1225 m_lfree->len -= n; |
|
1226 #ifdef HAVE_BRK |
|
1227 if (brk(m_high -= n) == -1) { |
|
1228 #else |
|
1229 m_high -= n; |
|
1230 if (sbrk(-n) == (void *)-1) { |
|
1231 #endif /* HAVE_BRK */ |
|
1232 DPUTS(1, "MEM: allocation error at brk."); |
|
1233 } |
|
1234 |
|
1235 #ifdef ZSH_MEM_DEBUG |
|
1236 m_b += n; |
|
1237 #endif |
|
1238 } |
|
1239 unqueue_signals(); |
|
1240 } |
|
1241 |
|
/* Public free() entry point: forwards to the internal zfree() with an
   unknown size so that all the validity checks and list searches run.
   FREE_RET_T/FREE_ARG_T/FREE_DO_RET adapt to the host's free() prototype
   (some old systems declare free() as returning int). */

FREE_RET_T
free(FREE_ARG_T p)
{
    zfree(p, 0);		/* 0 means: size is unknown */

#ifdef FREE_DO_RET
    return 0;
#endif
}
|
1251 |
|
1252 /* this one is for strings (and only strings, real strings, real C strings, |
|
1253 those that have a zero byte at the end) */ |
|
1254 |
|
1255 /**/ |
|
1256 mod_export void |
|
1257 zsfree(char *p) |
|
1258 { |
|
1259 if (p) |
|
1260 zfree(p, strlen(p) + 1); |
|
1261 } |
|
1262 |
|
1263 MALLOC_RET_T |
|
1264 realloc(MALLOC_RET_T p, MALLOC_ARG_T size) |
|
1265 { |
|
1266 struct m_hdr *m = (struct m_hdr *)(((char *)p) - M_ISIZE), *mp, *mt; |
|
1267 char *r; |
|
1268 int i, l = 0; |
|
1269 |
|
1270 /* some system..., see above */ |
|
1271 if (!p && size) |
|
1272 return (MALLOC_RET_T) malloc(size); |
|
1273 /* and some systems even do this... */ |
|
1274 if (!p || !size) |
|
1275 return (MALLOC_RET_T) p; |
|
1276 |
|
1277 queue_signals(); /* just queue signals caught rather than handling them */ |
|
1278 |
|
1279 /* check if we are reallocating a small block, if we do, we have |
|
1280 to compute the size of the block from the sort of block it is in */ |
|
1281 for (i = 0; i < M_NSMALL; i++) { |
|
1282 for (mp = NULL, mt = m_small[i]; |
|
1283 mt && (((char *)mt) > ((char *)p) || |
|
1284 (((char *)mt) + mt->len) < ((char *)p)); |
|
1285 mp = mt, mt = mt->next); |
|
1286 |
|
1287 if (mt) { |
|
1288 l = M_BSLEN(mt->len); |
|
1289 break; |
|
1290 } |
|
1291 } |
|
1292 if (!l) |
|
1293 /* otherwise the size of the block is in the memory just before |
|
1294 the given address */ |
|
1295 l = m->len; |
|
1296 |
|
1297 /* now allocate the new block, copy the old contents, and free the |
|
1298 old block */ |
|
1299 r = malloc(size); |
|
1300 memcpy(r, (char *)p, (size > l) ? l : size); |
|
1301 free(p); |
|
1302 |
|
1303 unqueue_signals(); |
|
1304 return (MALLOC_RET_T) r; |
|
1305 } |
|
1306 |
|
1307 MALLOC_RET_T |
|
1308 calloc(MALLOC_ARG_T n, MALLOC_ARG_T size) |
|
1309 { |
|
1310 long l; |
|
1311 char *r; |
|
1312 |
|
1313 if (!(l = n * size)) |
|
1314 return (MALLOC_RET_T) m_high; |
|
1315 |
|
1316 r = malloc(l); |
|
1317 |
|
1318 memset(r, 0, l); |
|
1319 |
|
1320 return (MALLOC_RET_T) r; |
|
1321 } |
|
1322 |
|
1323 #ifdef ZSH_MEM_DEBUG |
|
1324 |
|
1325 /**/ |
|
1326 int |
|
1327 bin_mem(char *name, char **argv, Options ops, int func) |
|
1328 { |
|
1329 int i, ii, fi, ui, j; |
|
1330 struct m_hdr *m, *mf, *ms; |
|
1331 char *b, *c, buf[40]; |
|
1332 long u = 0, f = 0, to, cu; |
|
1333 |
|
1334 queue_signals(); |
|
1335 if (OPT_ISSET(ops,'v')) { |
|
1336 printf("The lower and the upper addresses of the heap. Diff gives\n"); |
|
1337 printf("the difference between them, i.e. the size of the heap.\n\n"); |
|
1338 } |
|
1339 printf("low mem %ld\t high mem %ld\t diff %ld\n", |
|
1340 (long)m_l, (long)m_high, (long)(m_high - ((char *)m_l))); |
|
1341 |
|
1342 if (OPT_ISSET(ops,'v')) { |
|
1343 printf("\nThe number of bytes that were allocated using sbrk() and\n"); |
|
1344 printf("the number of bytes that were given back to the system\n"); |
|
1345 printf("via brk().\n"); |
|
1346 } |
|
1347 printf("\nsbrk %d\tbrk %d\n", m_s, m_b); |
|
1348 |
|
1349 if (OPT_ISSET(ops,'v')) { |
|
1350 printf("\nInformation about the sizes that were allocated or freed.\n"); |
|
1351 printf("For each size that were used the number of mallocs and\n"); |
|
1352 printf("frees is shown. Diff gives the difference between these\n"); |
|
1353 printf("values, i.e. the number of blocks of that size that is\n"); |
|
1354 printf("currently allocated. Total is the product of size and diff,\n"); |
|
1355 printf("i.e. the number of bytes that are allocated for blocks of\n"); |
|
1356 printf("this size. The last field gives the accumulated number of\n"); |
|
1357 printf("bytes for all sizes.\n"); |
|
1358 } |
|
1359 printf("\nsize\tmalloc\tfree\tdiff\ttotal\tcum\n"); |
|
1360 for (i = 0, cu = 0; i < 1024; i++) |
|
1361 if (m_m[i] || m_f[i]) { |
|
1362 to = (long) i * M_ISIZE * (m_m[i] - m_f[i]); |
|
1363 printf("%ld\t%d\t%d\t%d\t%ld\t%ld\n", |
|
1364 (long)i * M_ISIZE, m_m[i], m_f[i], m_m[i] - m_f[i], |
|
1365 to, (cu += to)); |
|
1366 } |
|
1367 |
|
1368 if (m_m[i] || m_f[i]) |
|
1369 printf("big\t%d\t%d\t%d\n", m_m[i], m_f[i], m_m[i] - m_f[i]); |
|
1370 |
|
1371 if (OPT_ISSET(ops,'v')) { |
|
1372 printf("\nThe list of memory blocks. For each block the following\n"); |
|
1373 printf("information is shown:\n\n"); |
|
1374 printf("num\tthe number of this block\n"); |
|
1375 printf("tnum\tlike num but counted separately for used and free\n"); |
|
1376 printf("\tblocks\n"); |
|
1377 printf("addr\tthe address of this block\n"); |
|
1378 printf("len\tthe length of the block\n"); |
|
1379 printf("state\tthe state of this block, this can be:\n"); |
|
1380 printf("\t used\tthis block is used for one big block\n"); |
|
1381 printf("\t free\tthis block is free\n"); |
|
1382 printf("\t small\tthis block is used for an array of small blocks\n"); |
|
1383 printf("cum\tthe accumulated sizes of the blocks, counted\n"); |
|
1384 printf("\tseparately for used and free blocks\n"); |
|
1385 printf("\nFor blocks holding small blocks the number of free\n"); |
|
1386 printf("blocks, the number of used blocks and the size of the\n"); |
|
1387 printf("blocks is shown. For otherwise used blocks the first few\n"); |
|
1388 printf("bytes are shown as an ASCII dump.\n"); |
|
1389 } |
|
1390 printf("\nblock list:\nnum\ttnum\taddr\t\tlen\tstate\tcum\n"); |
|
1391 for (m = m_l, mf = m_free, ii = fi = ui = 1; ((char *)m) < m_high; |
|
1392 m = (struct m_hdr *)(((char *)m) + M_ISIZE + m->len), ii++) { |
|
1393 for (j = 0, ms = NULL; j < M_NSMALL && !ms; j++) |
|
1394 for (ms = m_small[j]; ms; ms = ms->next) |
|
1395 if (ms == m) |
|
1396 break; |
|
1397 |
|
1398 if (m == mf) |
|
1399 buf[0] = '\0'; |
|
1400 else if (m == ms) |
|
1401 sprintf(buf, "%ld %ld %ld", (long)(M_SNUM - ms->used), |
|
1402 (long)ms->used, |
|
1403 (long)(m->len - sizeof(struct m_hdr)) / M_SNUM + 1); |
|
1404 |
|
1405 else { |
|
1406 for (i = 0, b = buf, c = (char *)&m->next; i < 20 && i < m->len; |
|
1407 i++, c++) |
|
1408 *b++ = (*c >= ' ' && *c < 127) ? *c : '.'; |
|
1409 *b = '\0'; |
|
1410 } |
|
1411 |
|
1412 printf("%d\t%d\t%ld\t%ld\t%s\t%ld\t%s\n", ii, |
|
1413 (m == mf) ? fi++ : ui++, |
|
1414 (long)m, (long)m->len, |
|
1415 (m == mf) ? "free" : ((m == ms) ? "small" : "used"), |
|
1416 (m == mf) ? (f += m->len) : (u += m->len), |
|
1417 buf); |
|
1418 |
|
1419 if (m == mf) |
|
1420 mf = mf->next; |
|
1421 } |
|
1422 |
|
1423 if (OPT_ISSET(ops,'v')) { |
|
1424 printf("\nHere is some information about the small blocks used.\n"); |
|
1425 printf("For each size the arrays with the number of free and the\n"); |
|
1426 printf("number of used blocks are shown.\n"); |
|
1427 } |
|
1428 printf("\nsmall blocks:\nsize\tblocks (free/used)\n"); |
|
1429 |
|
1430 for (i = 0; i < M_NSMALL; i++) |
|
1431 if (m_small[i]) { |
|
1432 printf("%ld\t", (long)i * M_ISIZE); |
|
1433 |
|
1434 for (ii = 0, m = m_small[i]; m; m = m->next) { |
|
1435 printf("(%ld/%ld) ", (long)(M_SNUM - m->used), |
|
1436 (long)m->used); |
|
1437 if (!((++ii) & 7)) |
|
1438 printf("\n\t"); |
|
1439 } |
|
1440 putchar('\n'); |
|
1441 } |
|
1442 if (OPT_ISSET(ops,'v')) { |
|
1443 printf("\n\nBelow is some information about the allocation\n"); |
|
1444 printf("behaviour of the zsh heaps. First the number of times\n"); |
|
1445 printf("pushheap(), popheap(), and freeheap() were called.\n"); |
|
1446 } |
|
1447 printf("\nzsh heaps:\n\n"); |
|
1448 |
|
1449 printf("push %d\tpop %d\tfree %d\n\n", h_push, h_pop, h_free); |
|
1450 |
|
1451 if (OPT_ISSET(ops,'v')) { |
|
1452 printf("\nThe next list shows for several sizes the number of times\n"); |
|
1453 printf("memory of this size were taken from heaps.\n\n"); |
|
1454 } |
|
1455 printf("size\tmalloc\ttotal\n"); |
|
1456 for (i = 0; i < 1024; i++) |
|
1457 if (h_m[i]) |
|
1458 printf("%ld\t%d\t%ld\n", (long)i * H_ISIZE, h_m[i], |
|
1459 (long)i * H_ISIZE * h_m[i]); |
|
1460 if (h_m[1024]) |
|
1461 printf("big\t%d\n", h_m[1024]); |
|
1462 |
|
1463 unqueue_signals(); |
|
1464 return 0; |
|
1465 } |
|
1466 |
|
1467 #endif |
|
1468 |
|
1469 /**/ |
|
1470 #else /* not ZSH_MEM */ |
|
1471 |
|
1472 /**/ |
|
1473 mod_export void |
|
1474 zfree(void *p, UNUSED(int sz)) |
|
1475 { |
|
1476 #ifdef __SYMBIAN32__ |
|
1477 sz=sz; |
|
1478 #endif |
|
1479 |
|
1480 if (p) |
|
1481 free(p); |
|
1482 } |
|
1483 |
|
1484 /**/ |
|
1485 mod_export void |
|
1486 zsfree(char *p) |
|
1487 { |
|
1488 if (p) |
|
1489 free(p); |
|
1490 } |
|
1491 |
|
1492 /**/ |
|
1493 #endif |