annotate src/gmalloc.c @ 29861:09d316feea21
(EmacsFrameRealize): Remove SubstructureRedirectMask.
| author | Gerd Moellmann <gerd@gnu.org> |
|---|---|
| date | Thu, 22 Jun 2000 20:12:21 +0000 |
| parents | f9aeac6780a1 |
| children | 8223a86fa594 |
| rev | line source |
|---|---|
| 17130 | 1 /* This file is no longer automatically generated from libc. */ |
| 2 | |
| 3 #define _MALLOC_INTERNAL | |
| 4 | |
| 5 /* The malloc headers and source files from the C library follow here. */ | |
| 6 | |
| 7 /* Declarations for `malloc' and friends. | |
| 26088 | 8 Copyright 1990, 91, 92, 93, 95, 96, 99 Free Software Foundation, Inc. |
| 17130 | 9 Written May 1989 by Mike Haertel. |
| 10 | |
| 11 This library is free software; you can redistribute it and/or | |
| 12 modify it under the terms of the GNU Library General Public License as | |
| 13 published by the Free Software Foundation; either version 2 of the | |
| 14 License, or (at your option) any later version. | |
| 15 | |
| 16 This library is distributed in the hope that it will be useful, | |
| 17 but WITHOUT ANY WARRANTY; without even the implied warranty of | |
| 18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
| 19 Library General Public License for more details. | |
| 20 | |
| 21 You should have received a copy of the GNU Library General Public | |
| 22 License along with this library; see the file COPYING.LIB. If | |
| 23 not, write to the Free Software Foundation, Inc., 675 Mass Ave, | |
| 24 Cambridge, MA 02139, USA. | |
| 25 | |
| 26 The author may be reached (Email) at the address mike@ai.mit.edu, | |
| 27 or (US mail) as Mike Haertel c/o Free Software Foundation. */ | |
| 28 | |
| 29 #ifndef _MALLOC_H | |
| 30 | |
| 31 #define _MALLOC_H 1 | |
| 32 | |
| 33 #ifdef _MALLOC_INTERNAL | |
| 34 | |
| 35 #ifdef HAVE_CONFIG_H | |
| 36 #include <config.h> | |
| 37 #endif | |
| 38 | |
| 29837 | 39 #if defined __cplusplus || (defined (__STDC__) && __STDC__) || \ |
| 29837 | 40 defined STDC_HEADERS || defined PROTOTYPES |
| 18667 | 41 #undef PP |
| 18667 | 42 #define PP(args) args |
| 17130 | 43 #undef __ptr_t |
| 44 #define __ptr_t void * | |
| 45 #else /* Not C++ or ANSI C. */ | |
| 18667 | 46 #undef PP |
| 18667 | 47 #define PP(args) () |
| 17130 | 48 #undef __ptr_t |
| 49 #define __ptr_t char * | |
| 50 #endif /* C++ or ANSI C. */ | |
| 51 | |
| 52 #if defined(_LIBC) || defined(STDC_HEADERS) || defined(USG) | |
| 53 #include <string.h> | |
| 54 #else | |
| 55 #ifndef memset | |
| 56 #define memset(s, zero, n) bzero ((s), (n)) | |
| 57 #endif | |
| 58 #ifndef memcpy | |
| 59 #define memcpy(d, s, n) bcopy ((s), (d), (n)) | |
| 60 #endif | |
| 61 #endif | |
| 62 | |
| 26526 | 63 #ifdef HAVE_LIMITS_H |
| 17130 | 64 #include <limits.h> |
| 26526 | 65 #endif |
| 17130 | 66 #ifndef CHAR_BIT |
| 67 #define CHAR_BIT 8 | |
| 68 #endif | |
| 69 | |
| 70 #ifdef HAVE_UNISTD_H | |
| 71 #include <unistd.h> | |
| 72 #endif | |
| 73 | |
| 74 #endif /* _MALLOC_INTERNAL. */ | |
| 75 | |
| 76 | |
| 77 #ifdef __cplusplus | |
| 78 extern "C" | |
| 79 { | |
| 80 #endif | |
| 81 | |
| 26526 | 82 #ifdef STDC_HEADERS |
| 17130 | 83 #include <stddef.h> |
| 84 #define __malloc_size_t size_t | |
| 85 #define __malloc_ptrdiff_t ptrdiff_t | |
| 86 #else | |
| 87 #define __malloc_size_t unsigned int | |
| 88 #define __malloc_ptrdiff_t int | |
| 89 #endif | |
| 90 | |
| 91 #ifndef NULL | |
| 92 #define NULL 0 | |
| 93 #endif | |
| 94 | |
| 26526 | 95 #ifndef FREE_RETURN_TYPE |
| 26526 | 96 #define FREE_RETURN_TYPE void |
| 26526 | 97 #endif |
| 26526 | 98 |
| 17130 | 99 |
| 100 /* Allocate SIZE bytes of memory. */ | |
| 18667 | 101 extern __ptr_t malloc PP ((__malloc_size_t __size)); |
| 17130 | 102 /* Re-allocate the previously allocated block |
| 103 in __ptr_t, making the new block SIZE bytes long. */ | |
| 18667 | 104 extern __ptr_t realloc PP ((__ptr_t __ptr, __malloc_size_t __size)); |
| 17130 | 105 /* Allocate NMEMB elements of SIZE bytes each, all initialized to 0. */ |
| 18667 | 106 extern __ptr_t calloc PP ((__malloc_size_t __nmemb, __malloc_size_t __size)); |
| 17130 | 107 /* Free a block allocated by `malloc', `realloc' or `calloc'. */ |
| 26526 | 108 extern FREE_RETURN_TYPE free PP ((__ptr_t __ptr)); |
| 17130 | 109 |
| 110 /* Allocate SIZE bytes allocated to ALIGNMENT bytes. */ | |
| 111 #if ! (defined (_MALLOC_INTERNAL) && __DJGPP__ - 0 == 1) /* Avoid conflict. */ | |
| 18667 | 112 extern __ptr_t memalign PP ((__malloc_size_t __alignment, |
| 18667 | 113 __malloc_size_t __size)); |
| 17130 | 114 #endif |
| 115 | |
| 116 /* Allocate SIZE bytes on a page boundary. */ | |
| 117 #if ! (defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC)) | |
| 18667 | 118 extern __ptr_t valloc PP ((__malloc_size_t __size)); |
| 17130 | 119 #endif |
| 120 | |
| 121 | |
| 122 #ifdef _MALLOC_INTERNAL | |
| 123 | |
| 124 /* The allocator divides the heap into blocks of fixed size; large | |
| 125 requests receive one or more whole blocks, and small requests | |
| 126 receive a fragment of a block. Fragment sizes are powers of two, | |
| 127 and all fragments of a block are the same size. When all the | |
| 128 fragments in a block have been freed, the block itself is freed. */ | |
| 129 #define INT_BIT (CHAR_BIT * sizeof(int)) | |
| 130 #define BLOCKLOG (INT_BIT > 16 ? 12 : 9) | |
| 131 #define BLOCKSIZE (1 << BLOCKLOG) | |
| 132 #define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE) | |
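
A minimal standalone sketch (not part of the annotated file) of the sizing arithmetic above, assuming a 32-bit `int`, so INT_BIT is 32, BLOCKLOG is 12 and BLOCKSIZE comes out to 4096:

```c
#include <assert.h>
#include <limits.h>    /* CHAR_BIT */
#include <stdio.h>

/* Same definitions as in the header above. */
#define INT_BIT (CHAR_BIT * sizeof(int))
#define BLOCKLOG (INT_BIT > 16 ? 12 : 9)
#define BLOCKSIZE (1 << BLOCKLOG)
#define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)

int
main (void)
{
  /* With a 32-bit int, BLOCKSIZE is 4096 bytes. */
  printf ("BLOCKSIZE = %d\n", BLOCKSIZE);

  /* BLOCKIFY rounds a byte count up to whole blocks: a 5000-byte
     request spans 2 blocks, a 4096-byte request exactly 1.  */
  assert (BLOCKIFY (5000) == 2);
  assert (BLOCKIFY (4096) == 1);

  /* Requests of BLOCKSIZE / 2 bytes or less are instead served as
     power-of-two fragments carved out of a single block.  */
  return 0;
}
```
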
| 133 | |
| 134 /* Determine the amount of memory spanned by the initial heap table | |
| 135 (not an absolute limit). */ | |
| 136 #define HEAP (INT_BIT > 16 ? 4194304 : 65536) | |
| 137 | |
| 138 /* Number of contiguous free blocks allowed to build up at the end of | |
| 139 memory before they will be returned to the system. */ | |
| 140 #define FINAL_FREE_BLOCKS 8 | |
| 141 | |
| 142 /* Data structure giving per-block information. */ | |
| 143 typedef union | |
| 144 { | |
| 145 /* Heap information for a busy block. */ | |
| 146 struct | |
| 147 { | |
| 148 /* Zero for a large (multiblock) object, or positive giving the | |
| 149 logarithm to the base two of the fragment size. */ | |
| 150 int type; | |
| 151 union | |
| 152 { | |
| 153 struct | |
| 154 { | |
| 155 __malloc_size_t nfree; /* Free frags in a fragmented block. */ | |
| 156 __malloc_size_t first; /* First free fragment of the block. */ | |
| 157 } frag; | |
| 158 /* For a large object, in its first block, this has the number | |
| 159 of blocks in the object. In the other blocks, this has a | |
| 160 negative number which says how far back the first block is. */ | |
| 161 __malloc_ptrdiff_t size; | |
| 162 } info; | |
| 163 } busy; | |
| 164 /* Heap information for a free block | |
| 165 (that may be the first of a free cluster). */ | |
| 166 struct | |
| 167 { | |
| 168 __malloc_size_t size; /* Size (in blocks) of a free cluster. */ | |
| 169 __malloc_size_t next; /* Index of next free cluster. */ | |
| 170 __malloc_size_t prev; /* Index of previous free cluster. */ | |
| 171 } free; | |
| 172 } malloc_info; | |
| 173 | |
| 174 /* Pointer to first block of the heap. */ | |
| 175 extern char *_heapbase; | |
| 176 | |
| 177 /* Table indexed by block number giving per-block information. */ | |
| 178 extern malloc_info *_heapinfo; | |
| 179 | |
| 180 /* Address to block number and vice versa. */ | |
| 181 #define BLOCK(A) (((char *) (A) - _heapbase) / BLOCKSIZE + 1) | |
| 182 #define ADDRESS(B) ((__ptr_t) (((B) - 1) * BLOCKSIZE + _heapbase)) | |
| 183 | |
| 184 /* Current search index for the heap table. */ | |
| 185 extern __malloc_size_t _heapindex; | |
| 186 | |
| 187 /* Limit of valid info table indices. */ | |
| 188 extern __malloc_size_t _heaplimit; | |
| 189 | |
| 190 /* Doubly linked lists of free fragments. */ | |
| 191 struct list | |
| 192 { | |
| 193 struct list *next; | |
| 194 struct list *prev; | |
| 195 }; | |
| 196 | |
| 197 /* Free list headers for each fragment size. */ | |
| 198 extern struct list _fraghead[]; | |
| 199 | |
| 200 /* List of blocks allocated with `memalign' (or `valloc'). */ | |
| 201 struct alignlist | |
| 202 { | |
| 203 struct alignlist *next; | |
| 204 __ptr_t aligned; /* The address that memaligned returned. */ | |
| 205 __ptr_t exact; /* The address that malloc returned. */ | |
| 206 }; | |
| 207 extern struct alignlist *_aligned_blocks; | |
| 208 | |
| 209 /* Instrumentation. */ | |
| 210 extern __malloc_size_t _chunks_used; | |
| 211 extern __malloc_size_t _bytes_used; | |
| 212 extern __malloc_size_t _chunks_free; | |
| 213 extern __malloc_size_t _bytes_free; | |
| 214 | |
| 215 /* Internal versions of `malloc', `realloc', and `free' | |
| 216 used when these functions need to call each other. | |
| 217 They are the same but don't call the hooks. */ | |
| 18667 | 218 extern __ptr_t _malloc_internal PP ((__malloc_size_t __size)); |
| 18667 | 219 extern __ptr_t _realloc_internal PP ((__ptr_t __ptr, __malloc_size_t __size)); |
| 18667 | 220 extern void _free_internal PP ((__ptr_t __ptr)); |
| 17130 | 221 |
| 222 #endif /* _MALLOC_INTERNAL. */ | |
| 223 | |
| 224 /* Given an address in the middle of a malloc'd object, | |
| 225 return the address of the beginning of the object. */ | |
| 18667 | 226 extern __ptr_t malloc_find_object_address PP ((__ptr_t __ptr)); |
| 17130 | 227 |
| 228 /* Underlying allocation function; successive calls should | |
| 229 return contiguous pieces of memory. */ | |
| 18667 | 230 extern __ptr_t (*__morecore) PP ((__malloc_ptrdiff_t __size)); |
| 17130 | 231 |
| 232 /* Default value of `__morecore'. */ | |
| 18667 | 233 extern __ptr_t __default_morecore PP ((__malloc_ptrdiff_t __size)); |
| 17130 | 234 |
| 235 /* If not NULL, this function is called after each time | |
| 236 `__morecore' is called to increase the data size. */ | |
| 18667 | 237 extern void (*__after_morecore_hook) PP ((void)); |
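
The code further down (`align`, `get_contiguous_space`, `_free_internal`) relies on the `__morecore` contract: a positive SIZE extends the core and returns the start of the space obtained, a negative SIZE gives space back, and zero just returns the current break. A hedged sketch of a replacement core function over a static arena; `arena_morecore` and the arena are hypothetical, for illustration only:

```c
#include <stddef.h>

/* Hypothetical fixed arena standing in for the data segment. */
static char arena[1 << 20];
static size_t arena_used;

/* Same shape as __default_morecore: adjust the core by SIZE bytes and
   return the old break (the start of the space obtained); for SIZE == 0
   that is simply the current break.  Return NULL on failure.  */
static void *
arena_morecore (ptrdiff_t size)
{
  char *old_break = arena + arena_used;

  if (size > 0 && (size_t) size > sizeof arena - arena_used)
    return NULL;                    /* out of core */
  if (size < 0 && (size_t) -size > arena_used)
    return NULL;                    /* cannot shrink below the start */

  arena_used += size;
  return old_break;
}

/* Installing it before the first malloc call would redirect this
   allocator's sbrk-style requests into the arena:
     __morecore = arena_morecore;  */
```
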
| 17130 | 238 |
| 239 /* Number of extra blocks to get each time we ask for more core. | |
| 240 This reduces the frequency of calling `(*__morecore)'. */ | |
| 241 extern __malloc_size_t __malloc_extra_blocks; | |
| 242 | |
| 243 /* Nonzero if `malloc' has been called and done its initialization. */ | |
| 244 extern int __malloc_initialized; | |
| 245 /* Function called to initialize malloc data structures. */ | |
| 18667 | 246 extern int __malloc_initialize PP ((void)); |
| 17130 | 247 |
| 248 /* Hooks for debugging versions. */ | |
| 18667 | 249 extern void (*__malloc_initialize_hook) PP ((void)); |
| 18667 | 250 extern void (*__free_hook) PP ((__ptr_t __ptr)); |
| 18667 | 251 extern __ptr_t (*__malloc_hook) PP ((__malloc_size_t __size)); |
| 18667 | 252 extern __ptr_t (*__realloc_hook) PP ((__ptr_t __ptr, __malloc_size_t __size)); |
| 18667 | 253 extern __ptr_t (*__memalign_hook) PP ((__malloc_size_t __size, |
| 18667 | 254 __malloc_size_t __alignment)); |
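
These hooks replace the normal entry points outright: when set, `malloc` calls `*__malloc_hook` instead of `_malloc_internal`, and `free` does the same with `*__free_hook` (see their definitions later in this file). A sketch of a tracing client under that convention; the function names are hypothetical, and it assumes the declarations above are in scope:

```c
#include <stdio.h>
#include <stdlib.h>

static __ptr_t (*saved_malloc_hook) (__malloc_size_t);
static void (*saved_free_hook) (__ptr_t);

static __ptr_t
tracing_malloc (__malloc_size_t size)
{
  __ptr_t p;
  __malloc_hook = saved_malloc_hook;   /* let malloc take its normal path */
  p = malloc (size);
  fprintf (stderr, "malloc (%lu) -> %p\n", (unsigned long) size, p);
  __malloc_hook = tracing_malloc;      /* re-arm the hook */
  return p;
}

static void
tracing_free (__ptr_t ptr)
{
  __free_hook = saved_free_hook;       /* let free take its normal path */
  free (ptr);
  fprintf (stderr, "free (%p)\n", ptr);
  __free_hook = tracing_free;
}

static void
install_tracing_hooks (void)
{
  saved_malloc_hook = __malloc_hook;   /* usually NULL */
  saved_free_hook = __free_hook;
  __malloc_hook = tracing_malloc;
  __free_hook = tracing_free;
}
```
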
| 17130 | 255 |
| 256 /* Return values for `mprobe': these are the kinds of inconsistencies that | |
| 257 `mcheck' enables detection of. */ | |
| 258 enum mcheck_status | |
| 259 { | |
| 260 MCHECK_DISABLED = -1, /* Consistency checking is not turned on. */ | |
| 261 MCHECK_OK, /* Block is fine. */ | |
| 262 MCHECK_FREE, /* Block freed twice. */ | |
| 263 MCHECK_HEAD, /* Memory before the block was clobbered. */ | |
| 264 MCHECK_TAIL /* Memory after the block was clobbered. */ | |
| 265 }; | |
| 266 | |
| 267 /* Activate a standard collection of debugging hooks. This must be called | |
| 268 before `malloc' is ever called. ABORTFUNC is called with an error code | |
| 269 (see enum above) when an inconsistency is detected. If ABORTFUNC is | |
| 270 null, the standard function prints on stderr and then calls `abort'. */ | |
| 18667 | 271 extern int mcheck PP ((void (*__abortfunc) PP ((enum mcheck_status)))); |
| 17130 | 272 |
| 273 /* Check for aberrations in a particular malloc'd block. You must have | |
| 274 called `mcheck' already. These are the same checks that `mcheck' does | |
| 275 when you free or reallocate a block. */ | |
| 18667 | 276 extern enum mcheck_status mprobe PP ((__ptr_t __ptr)); |
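
A short usage sketch for the consistency-checking interface above; `report_inconsistency` is a hypothetical handler name, and the sketch assumes `enum mcheck_status`, `mcheck` and `mprobe` as declared here are in scope:

```c
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical ABORTFUNC: report which kind of corruption was found. */
static void
report_inconsistency (enum mcheck_status status)
{
  fprintf (stderr, "heap inconsistency detected: code %d\n", (int) status);
  abort ();
}

int
main (void)
{
  char *p;

  /* Must be installed before the first call to malloc (see above). */
  mcheck (report_inconsistency);

  p = malloc (16);
  /* Writing past p[15] here would later be reported as MCHECK_TAIL. */
  if (p != NULL && mprobe (p) == MCHECK_OK)
    fputs ("block is consistent\n", stderr);
  free (p);
  return 0;
}
```
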
| 17130 | 277 |
| 278 /* Activate a standard collection of tracing hooks. */ | |
| 18667 | 279 extern void mtrace PP ((void)); |
| 18667 | 280 extern void muntrace PP ((void)); |
| 17130 | 281 |
| 282 /* Statistics available to the user. */ | |
| 283 struct mstats | |
| 284 { | |
| 285 __malloc_size_t bytes_total; /* Total size of the heap. */ | |
| 286 __malloc_size_t chunks_used; /* Chunks allocated by the user. */ | |
| 287 __malloc_size_t bytes_used; /* Byte total of user-allocated chunks. */ | |
| 288 __malloc_size_t chunks_free; /* Chunks in the free list. */ | |
| 289 __malloc_size_t bytes_free; /* Byte total of chunks in the free list. */ | |
| 290 }; | |
| 291 | |
| 292 /* Pick up the current statistics. */ | |
| 18667 | 293 extern struct mstats mstats PP ((void)); |
| 17130 | 294 |
| 295 /* Call WARNFUN with a warning message when memory usage is high. */ | |
| 18667 | 296 extern void memory_warnings PP ((__ptr_t __start, |
| 18667 | 297 void (*__warnfun) PP ((const char *)))); |
| 17130 | 298 |
| 299 | |
| 300 /* Relocating allocator. */ | |
| 301 | |
| 302 /* Allocate SIZE bytes, and store the address in *HANDLEPTR. */ | |
| 18667 | 303 extern __ptr_t r_alloc PP ((__ptr_t *__handleptr, __malloc_size_t __size)); |
| 17130 | 304 |
| 305 /* Free the storage allocated in HANDLEPTR. */ | |
| 18667 | 306 extern void r_alloc_free PP ((__ptr_t *__handleptr)); |
| 17130 | 307 |
| 308 /* Adjust the block at HANDLEPTR to be SIZE bytes long. */ | |
| 18667 | 309 extern __ptr_t r_re_alloc PP ((__ptr_t *__handleptr, __malloc_size_t __size)); |
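
The relocating allocator's calling convention, as described by the comments above: the caller owns a handle cell, the allocator stores the block's address there and may update it when the block moves, so the data should always be reached through the handle. A minimal usage sketch, assuming these declarations are in scope:

```c
#include <stdio.h>
#include <string.h>

int
main (void)
{
  __ptr_t handle = NULL;        /* cell the allocator keeps up to date */

  /* Allocate 64 bytes; the block's current address is stored in handle. */
  if (r_alloc (&handle, 64) == NULL)
    return 1;
  strcpy ((char *) handle, "hello");

  /* Grow the block to 256 bytes; it may move, so reread the handle. */
  if (r_re_alloc (&handle, 256) == NULL)
    return 1;
  printf ("%s\n", (char *) handle);

  r_alloc_free (&handle);
  return 0;
}
```
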
| 17130 | 310 |
| 311 | |
| 312 #ifdef __cplusplus | |
| 313 } | |
| 314 #endif | |
| 315 | |
| 316 #endif /* malloc.h */ | |
| 317 /* Memory allocator `malloc'. | |
| 318 Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc. | |
| 319 Written May 1989 by Mike Haertel. | |
| 320 | |
| 321 This library is free software; you can redistribute it and/or | |
| 322 modify it under the terms of the GNU Library General Public License as | |
| 323 published by the Free Software Foundation; either version 2 of the | |
| 324 License, or (at your option) any later version. | |
| 325 | |
| 326 This library is distributed in the hope that it will be useful, | |
| 327 but WITHOUT ANY WARRANTY; without even the implied warranty of | |
| 328 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
| 329 Library General Public License for more details. | |
| 330 | |
| 331 You should have received a copy of the GNU Library General Public | |
| 332 License along with this library; see the file COPYING.LIB. If | |
| 333 not, write to the Free Software Foundation, Inc., 675 Mass Ave, | |
| 334 Cambridge, MA 02139, USA. | |
| 335 | |
| 336 The author may be reached (Email) at the address mike@ai.mit.edu, | |
| 337 or (US mail) as Mike Haertel c/o Free Software Foundation. */ | |
| 338 | |
| 339 #ifndef _MALLOC_INTERNAL | |
| 340 #define _MALLOC_INTERNAL | |
| 341 #include <malloc.h> | |
| 342 #endif | |
| 343 #include <errno.h> | |
| 344 | |
| 345 /* How to really get more memory. */ | |
| 18667 | 346 __ptr_t (*__morecore) PP ((ptrdiff_t __size)) = __default_morecore; |
| 17130 | 347 |
| 348 /* Debugging hook for `malloc'. */ | |
| 18667 | 349 __ptr_t (*__malloc_hook) PP ((__malloc_size_t __size)); |
| 17130 | 350 |
| 351 /* Pointer to the base of the first block. */ | |
| 352 char *_heapbase; | |
| 353 | |
| 354 /* Block information table. Allocated with align/__free (not malloc/free). */ | |
| 355 malloc_info *_heapinfo; | |
| 356 | |
| 357 /* Number of info entries. */ | |
| 358 static __malloc_size_t heapsize; | |
| 359 | |
| 360 /* Search index in the info table. */ | |
| 361 __malloc_size_t _heapindex; | |
| 362 | |
| 363 /* Limit of valid info table indices. */ | |
| 364 __malloc_size_t _heaplimit; | |
| 365 | |
| 366 /* Free lists for each fragment size. */ | |
| 367 struct list _fraghead[BLOCKLOG]; | |
| 368 | |
| 369 /* Instrumentation. */ | |
| 370 __malloc_size_t _chunks_used; | |
| 371 __malloc_size_t _bytes_used; | |
| 372 __malloc_size_t _chunks_free; | |
| 373 __malloc_size_t _bytes_free; | |
| 374 | |
| 375 /* Are you experienced? */ | |
| 376 int __malloc_initialized; | |
| 377 | |
| 378 __malloc_size_t __malloc_extra_blocks; | |
| 379 | |
| 18667 | 380 void (*__malloc_initialize_hook) PP ((void)); |
| 18667 | 381 void (*__after_morecore_hook) PP ((void)); |
| 17130 | 382 |
| 383 | |
| 384 /* Aligned allocation. */ | |
| 18667 | 385 static __ptr_t align PP ((__malloc_size_t)); |
| 17130 | 386 static __ptr_t |
| 387 align (size) | |
| 388 __malloc_size_t size; | |
| 389 { | |
| 390 __ptr_t result; | |
| 391 unsigned long int adj; | |
| 392 | |
| 393 result = (*__morecore) (size); | |
| 394 adj = (unsigned long int) ((unsigned long int) ((char *) result - | |
| 395 (char *) NULL)) % BLOCKSIZE; | |
| 396 if (adj != 0) | |
| 397 { | |
| 398 __ptr_t new; | |
| 399 adj = BLOCKSIZE - adj; | |
| 400 new = (*__morecore) (adj); | |
| 401 result = (char *) result + adj; | |
| 402 } | |
| 403 | |
| 404 if (__after_morecore_hook) | |
| 405 (*__after_morecore_hook) (); | |
| 406 | |
| 407 return result; | |
| 408 } | |
| 409 | |
| 410 /* Get SIZE bytes, if we can get them starting at END. | |
| 411 Return the address of the space we got. | |
| 412 If we cannot get space at END, fail and return 0. */ | |
| 18667 | 413 static __ptr_t get_contiguous_space PP ((__malloc_ptrdiff_t, __ptr_t)); |
| 17130 | 414 static __ptr_t |
| 415 get_contiguous_space (size, position) | |
| 416 __malloc_ptrdiff_t size; | |
| 417 __ptr_t position; | |
| 418 { | |
| 419 __ptr_t before; | |
| 420 __ptr_t after; | |
| 421 | |
| 422 before = (*__morecore) (0); | |
| 423 /* If we can tell in advance that the break is at the wrong place, | |
| 424 fail now. */ | |
| 425 if (before != position) | |
| 426 return 0; | |
| 427 | |
| 428 /* Allocate SIZE bytes and get the address of them. */ | |
| 429 after = (*__morecore) (size); | |
| 430 if (!after) | |
| 431 return 0; | |
| 432 | |
| 433 /* It was not contiguous--reject it. */ | |
| 434 if (after != position) | |
| 435 { | |
| 436 (*__morecore) (- size); | |
| 437 return 0; | |
| 438 } | |
| 439 | |
| 440 return after; | |
| 441 } | |
| 442 | |
| 443 | |
| 444 /* This is called when `_heapinfo' and `heapsize' have just | |
| 445 been set to describe a new info table. Set up the table | |
| 446 to describe itself and account for it in the statistics. */ | |
| 18667 | 447 static void register_heapinfo PP ((void)); |
| 17130 | 448 #ifdef __GNUC__ |
| 449 __inline__ | |
| 450 #endif | |
| 451 static void | |
| 452 register_heapinfo () | |
| 453 { | |
| 454 __malloc_size_t block, blocks; | |
| 455 | |
| 456 block = BLOCK (_heapinfo); | |
| 457 blocks = BLOCKIFY (heapsize * sizeof (malloc_info)); | |
| 458 | |
| 459 /* Account for the _heapinfo block itself in the statistics. */ | |
| 460 _bytes_used += blocks * BLOCKSIZE; | |
| 461 ++_chunks_used; | |
| 462 | |
| 463 /* Describe the heapinfo block itself in the heapinfo. */ | |
| 464 _heapinfo[block].busy.type = 0; | |
| 465 _heapinfo[block].busy.info.size = blocks; | |
| 466 /* Leave back-pointers for malloc_find_address. */ | |
| 467 while (--blocks > 0) | |
| 468 _heapinfo[block + blocks].busy.info.size = -blocks; | |
| 469 } | |
| 470 | |
| 471 /* Set everything up and remember that we have. */ | |
| 472 int | |
| 473 __malloc_initialize () | |
| 474 { | |
| 475 if (__malloc_initialized) | |
| 476 return 0; | |
| 477 | |
| 478 if (__malloc_initialize_hook) | |
| 479 (*__malloc_initialize_hook) (); | |
| 480 | |
| 481 heapsize = HEAP / BLOCKSIZE; | |
| 482 _heapinfo = (malloc_info *) align (heapsize * sizeof (malloc_info)); | |
| 483 if (_heapinfo == NULL) | |
| 484 return 0; | |
| 485 memset (_heapinfo, 0, heapsize * sizeof (malloc_info)); | |
| 486 _heapinfo[0].free.size = 0; | |
| 487 _heapinfo[0].free.next = _heapinfo[0].free.prev = 0; | |
| 488 _heapindex = 0; | |
| 489 _heapbase = (char *) _heapinfo; | |
| 490 _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info)); | |
| 491 | |
| 492 register_heapinfo (); | |
| 493 | |
| 494 __malloc_initialized = 1; | |
| 495 return 1; | |
| 496 } | |
| 497 | |
| 498 static int morecore_recursing; | |
| 499 | |
| 500 /* Get neatly aligned memory, initializing or | |
| 501 growing the heap info table as necessary. */ | |
| 18667 | 502 static __ptr_t morecore PP ((__malloc_size_t)); |
| 17130 | 503 static __ptr_t |
| 504 morecore (size) | |
| 505 __malloc_size_t size; | |
| 506 { | |
| 507 __ptr_t result; | |
| 508 malloc_info *newinfo, *oldinfo; | |
| 509 __malloc_size_t newsize; | |
| 510 | |
| 511 if (morecore_recursing) | |
| 512 /* Avoid recursion. The caller will know how to handle a null return. */ | |
| 513 return NULL; | |
| 514 | |
| 515 result = align (size); | |
| 516 if (result == NULL) | |
| 517 return NULL; | |
| 518 | |
| 519 /* Check if we need to grow the info table. */ | |
| 520 if ((__malloc_size_t) BLOCK ((char *) result + size) > heapsize) | |
| 521 { | |
| 522 /* Calculate the new _heapinfo table size. We do not account for the | |
| 523 added blocks in the table itself, as we hope to place them in | |
| 524 existing free space, which is already covered by part of the | |
| 525 existing table. */ | |
| 526 newsize = heapsize; | |
| 527 do | |
| 528 newsize *= 2; | |
| 529 while ((__malloc_size_t) BLOCK ((char *) result + size) > newsize); | |
| 530 | |
| 531 /* We must not reuse existing core for the new info table when called | |
| 532 from realloc in the case of growing a large block, because the | |
| 533 block being grown is momentarily marked as free. In this case | |
| 534 _heaplimit is zero so we know not to reuse space for internal | |
| 535 allocation. */ | |
| 536 if (_heaplimit != 0) | |
| 537 { | |
| 538 /* First try to allocate the new info table in core we already | |
| 539 have, in the usual way using realloc. If realloc cannot | |
| 540 extend it in place or relocate it to existing sufficient core, | |
| 541 we will get called again, and the code above will notice the | |
| 542 `morecore_recursing' flag and return null. */ | |
| 543 int save = errno; /* Don't want to clobber errno with ENOMEM. */ | |
| 544 morecore_recursing = 1; | |
| 545 newinfo = (malloc_info *) _realloc_internal | |
| 546 (_heapinfo, newsize * sizeof (malloc_info)); | |
| 547 morecore_recursing = 0; | |
| 548 if (newinfo == NULL) | |
| 549 errno = save; | |
| 550 else | |
| 551 { | |
| 552 /* We found some space in core, and realloc has put the old | |
| 553 table's blocks on the free list. Now zero the new part | |
| 554 of the table and install the new table location. */ | |
| 555 memset (&newinfo[heapsize], 0, | |
| 556 (newsize - heapsize) * sizeof (malloc_info)); | |
| 557 _heapinfo = newinfo; | |
| 558 heapsize = newsize; | |
| 559 goto got_heap; | |
| 560 } | |
| 561 } | |
| 562 | |
| 563 /* Allocate new space for the malloc info table. */ | |
| 564 while (1) | |
| 565 { | |
| 566 newinfo = (malloc_info *) align (newsize * sizeof (malloc_info)); | |
| 567 | |
| 568 /* Did it fail? */ | |
| 569 if (newinfo == NULL) | |
| 570 { | |
| 571 (*__morecore) (-size); | |
| 572 return NULL; | |
| 573 } | |
| 574 | |
| 575 /* Is it big enough to record status for its own space? | |
| 576 If so, we win. */ | |
| 577 if ((__malloc_size_t) BLOCK ((char *) newinfo | |
| 578 + newsize * sizeof (malloc_info)) | |
| 579 < newsize) | |
| 580 break; | |
| 581 | |
| 582 /* Must try again. First give back most of what we just got. */ | |
| 583 (*__morecore) (- newsize * sizeof (malloc_info)); | |
| 584 newsize *= 2; | |
| 585 } | |
| 586 | |
| 587 /* Copy the old table to the beginning of the new, | |
| 588 and zero the rest of the new table. */ | |
| 589 memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info)); | |
| 590 memset (&newinfo[heapsize], 0, | |
| 591 (newsize - heapsize) * sizeof (malloc_info)); | |
| 592 oldinfo = _heapinfo; | |
| 593 _heapinfo = newinfo; | |
| 594 heapsize = newsize; | |
| 595 | |
| 596 register_heapinfo (); | |
| 597 | |
| 598 /* Reset _heaplimit so _free_internal never decides | |
| 599 it can relocate or resize the info table. */ | |
| 600 _heaplimit = 0; | |
| 601 _free_internal (oldinfo); | |
| 602 | |
| 603 /* The new heap limit includes the new table just allocated. */ | |
| 604 _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info)); | |
| 605 return result; | |
| 606 } | |
| 607 | |
| 608 got_heap: | |
| 609 _heaplimit = BLOCK ((char *) result + size); | |
| 610 return result; | |
| 611 } | |
| 612 | |
| 613 /* Allocate memory from the heap. */ | |
| 614 __ptr_t | |
| 615 _malloc_internal (size) | |
| 616 __malloc_size_t size; | |
| 617 { | |
| 618 __ptr_t result; | |
| 619 __malloc_size_t block, blocks, lastblocks, start; | |
| 620 register __malloc_size_t i; | |
| 621 struct list *next; | |
| 622 | |
| 623 /* ANSI C allows `malloc (0)' to either return NULL, or to return a | |
| 624 valid address you can realloc and free (though not dereference). | |
| 625 | |
| 626 It turns out that some extant code (sunrpc, at least Ultrix's version) | |
| 627 expects `malloc (0)' to return non-NULL and breaks otherwise. | |
| 628 Be compatible. */ | |
| 629 | |
| 630 #if 0 | |
| 631 if (size == 0) | |
| 632 return NULL; | |
| 633 #endif | |
| 634 | |
| 635 if (size < sizeof (struct list)) | |
| 636 size = sizeof (struct list); | |
| 637 | |
| 638 #ifdef SUNOS_LOCALTIME_BUG | |
| 639 if (size < 16) | |
| 640 size = 16; | |
| 641 #endif | |
| 642 | |
| 643 /* Determine the allocation policy based on the request size. */ | |
| 644 if (size <= BLOCKSIZE / 2) | |
| 645 { | |
| 646 /* Small allocation to receive a fragment of a block. | |
| 647 Determine the logarithm to base two of the fragment size. */ | |
| 648 register __malloc_size_t log = 1; | |
| 649 --size; | |
| 650 while ((size /= 2) != 0) | |
| 651 ++log; | |
| 652 | |
| 653 /* Look in the fragment lists for a | |
| 654 free fragment of the desired size. */ | |
| 655 next = _fraghead[log].next; | |
| 656 if (next != NULL) | |
| 657 { | |
| 658 /* There are free fragments of this size. | |
| 659 Pop a fragment out of the fragment list and return it. | |
| 660 Update the block's nfree and first counters. */ | |
| 661 result = (__ptr_t) next; | |
| 662 next->prev->next = next->next; | |
| 663 if (next->next != NULL) | |
| 664 next->next->prev = next->prev; | |
| 665 block = BLOCK (result); | |
| 666 if (--_heapinfo[block].busy.info.frag.nfree != 0) | |
| 667 _heapinfo[block].busy.info.frag.first = (unsigned long int) | |
| 668 ((unsigned long int) ((char *) next->next - (char *) NULL) | |
| 669 % BLOCKSIZE) >> log; | |
| 670 | |
| 671 /* Update the statistics. */ | |
| 672 ++_chunks_used; | |
| 673 _bytes_used += 1 << log; | |
| 674 --_chunks_free; | |
| 675 _bytes_free -= 1 << log; | |
| 676 } | |
| 677 else | |
| 678 { | |
| 679 /* No free fragments of the desired size, so get a new block | |
| 680 and break it into fragments, returning the first. */ | |
| 681 result = malloc (BLOCKSIZE); | |
| 682 if (result == NULL) | |
| 683 return NULL; | |
| 684 | |
| 685 /* Link all fragments but the first into the free list. */ | |
| 686 next = (struct list *) ((char *) result + (1 << log)); | |
| 687 next->next = NULL; | |
| 688 next->prev = &_fraghead[log]; | |
| 689 _fraghead[log].next = next; | |
| 690 | |
| 691 for (i = 2; i < (__malloc_size_t) (BLOCKSIZE >> log); ++i) | |
| 692 { | |
| 693 next = (struct list *) ((char *) result + (i << log)); | |
| 694 next->next = _fraghead[log].next; | |
| 695 next->prev = &_fraghead[log]; | |
| 696 next->prev->next = next; | |
| 697 next->next->prev = next; | |
| 698 } | |
| 699 | |
| 700 /* Initialize the nfree and first counters for this block. */ | |
| 701 block = BLOCK (result); | |
| 702 _heapinfo[block].busy.type = log; | |
| 703 _heapinfo[block].busy.info.frag.nfree = i - 1; | |
| 704 _heapinfo[block].busy.info.frag.first = i - 1; | |
| 705 | |
| 706 _chunks_free += (BLOCKSIZE >> log) - 1; | |
| 707 _bytes_free += BLOCKSIZE - (1 << log); | |
| 708 _bytes_used -= BLOCKSIZE - (1 << log); | |
| 709 } | |
| 710 } | |
| 711 else | |
| 712 { | |
| 713 /* Large allocation to receive one or more blocks. | |
| 714 Search the free list in a circle starting at the last place visited. | |
| 715 If we loop completely around without finding a large enough | |
| 716 space we will have to get more memory from the system. */ | |
| 717 blocks = BLOCKIFY (size); | |
| 718 start = block = _heapindex; | |
| 719 while (_heapinfo[block].free.size < blocks) | |
| 720 { | |
| 721 block = _heapinfo[block].free.next; | |
| 722 if (block == start) | |
| 723 { | |
| 724 /* Need to get more from the system. Get a little extra. */ | |
| 725 __malloc_size_t wantblocks = blocks + __malloc_extra_blocks; | |
| 726 block = _heapinfo[0].free.prev; | |
| 727 lastblocks = _heapinfo[block].free.size; | |
| 728 /* Check to see if the new core will be contiguous with the | |
| 729 final free block; if so we don't need to get as much. */ | |
| 730 if (_heaplimit != 0 && block + lastblocks == _heaplimit && | |
| 731 /* We can't do this if we will have to make the heap info | |
| 732 table bigger to accomodate the new space. */ | |
| 733 block + wantblocks <= heapsize && | |
| 734 get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE, | |
| 735 ADDRESS (block + lastblocks))) | |
| 736 { | |
| 737 /* We got it contiguously. Which block we are extending | |
| 738 (the `final free block' referred to above) might have | |
| 739 changed, if it got combined with a freed info table. */ | |
| 740 block = _heapinfo[0].free.prev; | |
| 741 _heapinfo[block].free.size += (wantblocks - lastblocks); | |
| 742 _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE; | |
| 743 _heaplimit += wantblocks - lastblocks; | |
| 744 continue; | |
| 745 } | |
| 746 result = morecore (wantblocks * BLOCKSIZE); | |
| 747 if (result == NULL) | |
| 748 return NULL; | |
| 749 block = BLOCK (result); | |
| 750 /* Put the new block at the end of the free list. */ | |
| 751 _heapinfo[block].free.size = wantblocks; | |
| 752 _heapinfo[block].free.prev = _heapinfo[0].free.prev; | |
| 753 _heapinfo[block].free.next = 0; | |
| 754 _heapinfo[0].free.prev = block; | |
| 755 _heapinfo[_heapinfo[block].free.prev].free.next = block; | |
| 756 ++_chunks_free; | |
| 757 /* Now loop to use some of that block for this allocation. */ | |
| 758 } | |
| 759 } | |
| 760 | |
| 761 /* At this point we have found a suitable free list entry. | |
| 762 Figure out how to remove what we need from the list. */ | |
| 763 result = ADDRESS (block); | |
| 764 if (_heapinfo[block].free.size > blocks) | |
| 765 { | |
| 766 /* The block we found has a bit left over, | |
| 767 so relink the tail end back into the free list. */ | |
| 768 _heapinfo[block + blocks].free.size | |
| 769 = _heapinfo[block].free.size - blocks; | |
| 770 _heapinfo[block + blocks].free.next | |
| 771 = _heapinfo[block].free.next; | |
| 772 _heapinfo[block + blocks].free.prev | |
| 773 = _heapinfo[block].free.prev; | |
| 774 _heapinfo[_heapinfo[block].free.prev].free.next | |
| 775 = _heapinfo[_heapinfo[block].free.next].free.prev | |
| 776 = _heapindex = block + blocks; | |
| 777 } | |
| 778 else | |
| 779 { | |
| 780 /* The block exactly matches our requirements, | |
| 781 so just remove it from the list. */ | |
| 782 _heapinfo[_heapinfo[block].free.next].free.prev | |
| 783 = _heapinfo[block].free.prev; | |
| 784 _heapinfo[_heapinfo[block].free.prev].free.next | |
| 785 = _heapindex = _heapinfo[block].free.next; | |
| 786 --_chunks_free; | |
| 787 } | |
| 788 | |
| 789 _heapinfo[block].busy.type = 0; | |
| 790 _heapinfo[block].busy.info.size = blocks; | |
| 791 ++_chunks_used; | |
| 792 _bytes_used += blocks * BLOCKSIZE; | |
| 793 _bytes_free -= blocks * BLOCKSIZE; | |
| 794 | |
| 795 /* Mark all the blocks of the object just allocated except for the | |
| 796 first with a negative number so you can find the first block by | |
| 797 adding that adjustment. */ | |
| 798 while (--blocks > 0) | |
| 799 _heapinfo[block + blocks].busy.info.size = -blocks; | |
| 800 } | |
| 801 | |
| 802 return result; | |
| 803 } | |
| 804 | |
| 805 __ptr_t | |
| 806 malloc (size) | |
| 807 __malloc_size_t size; | |
| 808 { | |
| 809 if (!__malloc_initialized && !__malloc_initialize ()) | |
| 810 return NULL; | |
| 811 | |
| 812 return (__malloc_hook != NULL ? *__malloc_hook : _malloc_internal) (size); | |
| 813 } | |
| 814 | |
| 815 #ifndef _LIBC | |
| 816 | |
| 817 /* On some ANSI C systems, some libc functions call _malloc, _free | |
| 818 and _realloc. Make them use the GNU functions. */ | |
| 819 | |
| 820 __ptr_t | |
| 821 _malloc (size) | |
| 822 __malloc_size_t size; | |
| 823 { | |
| 824 return malloc (size); | |
| 825 } | |
| 826 | |
| 827 void | |
| 828 _free (ptr) | |
| 829 __ptr_t ptr; | |
| 830 { | |
| 831 free (ptr); | |
| 832 } | |
| 833 | |
| 834 __ptr_t | |
| 835 _realloc (ptr, size) | |
| 836 __ptr_t ptr; | |
| 837 __malloc_size_t size; | |
| 838 { | |
| 839 return realloc (ptr, size); | |
| 840 } | |
| 841 | |
| 842 #endif | |
| 843 /* Free a block of memory allocated by `malloc'. | |
| 844 Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc. | |
| 845 Written May 1989 by Mike Haertel. | |
| 846 | |
| 847 This library is free software; you can redistribute it and/or | |
| 848 modify it under the terms of the GNU Library General Public License as | |
| 849 published by the Free Software Foundation; either version 2 of the | |
| 850 License, or (at your option) any later version. | |
| 851 | |
| 852 This library is distributed in the hope that it will be useful, | |
| 853 but WITHOUT ANY WARRANTY; without even the implied warranty of | |
| 854 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
| 855 Library General Public License for more details. | |
| 856 | |
| 857 You should have received a copy of the GNU Library General Public | |
| 858 License along with this library; see the file COPYING.LIB. If | |
| 859 not, write to the Free Software Foundation, Inc., 675 Mass Ave, | |
| 860 Cambridge, MA 02139, USA. | |
| 861 | |
| 862 The author may be reached (Email) at the address mike@ai.mit.edu, | |
| 863 or (US mail) as Mike Haertel c/o Free Software Foundation. */ | |
| 864 | |
| 865 #ifndef _MALLOC_INTERNAL | |
| 866 #define _MALLOC_INTERNAL | |
| 867 #include <malloc.h> | |
| 868 #endif | |
| 869 | |
| 870 | |
| 871 /* Cope with systems lacking `memmove'. */ | |
| 872 #ifndef memmove | |
| 873 #if (defined (MEMMOVE_MISSING) || \ | |
| 874 !defined(_LIBC) && !defined(STDC_HEADERS) && !defined(USG)) | |
| 875 #ifdef emacs | |
| 876 #undef __malloc_safe_bcopy | |
| 877 #define __malloc_safe_bcopy safe_bcopy | |
| 878 #endif | |
| 879 /* This function is defined in realloc.c. */ | |
| 18667 | 880 extern void __malloc_safe_bcopy PP ((__ptr_t, __ptr_t, __malloc_size_t)); |
| 17130 | 881 #define memmove(to, from, size) __malloc_safe_bcopy ((from), (to), (size)) |
| 882 #endif | |
| 883 #endif | |
| 884 | |
| 885 | |
| 886 /* Debugging hook for free. */ | |
| 18667 | 887 void (*__free_hook) PP ((__ptr_t __ptr)); |
| 17130 | 888 |
| 889 /* List of blocks allocated by memalign. */ | |
| 890 struct alignlist *_aligned_blocks = NULL; | |
| 891 | |
| 892 /* Return memory to the heap. | |
| 893 Like `free' but don't call a __free_hook if there is one. */ | |
| 894 void | |
| 895 _free_internal (ptr) | |
| 896 __ptr_t ptr; | |
| 897 { | |
| 898 int type; | |
| 899 __malloc_size_t block, blocks; | |
| 900 register __malloc_size_t i; | |
| 901 struct list *prev, *next; | |
| 902 __ptr_t curbrk; | |
| 903 const __malloc_size_t lesscore_threshold | |
| 904 /* Threshold of free space at which we will return some to the system. */ | |
| 905 = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks; | |
| 906 | |
| 907 register struct alignlist *l; | |
| 908 | |
| 909 if (ptr == NULL) | |
| 910 return; | |
| 911 | |
| 912 for (l = _aligned_blocks; l != NULL; l = l->next) | |
| 913 if (l->aligned == ptr) | |
| 914 { | |
| 915 l->aligned = NULL; /* Mark the slot in the list as free. */ | |
| 916 ptr = l->exact; | |
| 917 break; | |
| 918 } | |
| 919 | |
| 920 block = BLOCK (ptr); | |
| 921 | |
| 922 type = _heapinfo[block].busy.type; | |
| 923 switch (type) | |
| 924 { | |
| 925 case 0: | |
| 926 /* Get as many statistics as early as we can. */ | |
| 927 --_chunks_used; | |
| 928 _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE; | |
| 929 _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE; | |
| 930 | |
| 931 /* Find the free cluster previous to this one in the free list. | |
| 932 Start searching at the last block referenced; this may benefit | |
| 933 programs with locality of allocation. */ | |
| 934 i = _heapindex; | |
| 935 if (i > block) | |
| 936 while (i > block) | |
| 937 i = _heapinfo[i].free.prev; | |
| 938 else | |
| 939 { | |
| 940 do | |
| 941 i = _heapinfo[i].free.next; | |
| 942 while (i > 0 && i < block); | |
| 943 i = _heapinfo[i].free.prev; | |
| 944 } | |
| 945 | |
| 946 /* Determine how to link this block into the free list. */ | |
| 947 if (block == i + _heapinfo[i].free.size) | |
| 948 { | |
| 949 /* Coalesce this block with its predecessor. */ | |
| 950 _heapinfo[i].free.size += _heapinfo[block].busy.info.size; | |
| 951 block = i; | |
| 952 } | |
| 953 else | |
| 954 { | |
| 955 /* Really link this block back into the free list. */ | |
| 956 _heapinfo[block].free.size = _heapinfo[block].busy.info.size; | |
| 957 _heapinfo[block].free.next = _heapinfo[i].free.next; | |
| 958 _heapinfo[block].free.prev = i; | |
| 959 _heapinfo[i].free.next = block; | |
| 960 _heapinfo[_heapinfo[block].free.next].free.prev = block; | |
| 961 ++_chunks_free; | |
| 962 } | |
| 963 | |
| 964 /* Now that the block is linked in, see if we can coalesce it | |
| 965 with its successor (by deleting its successor from the list | |
| 966 and adding in its size). */ | |
| 967 if (block + _heapinfo[block].free.size == _heapinfo[block].free.next) | |
| 968 { | |
| 969 _heapinfo[block].free.size | |
| 970 += _heapinfo[_heapinfo[block].free.next].free.size; | |
| 971 _heapinfo[block].free.next | |
| 972 = _heapinfo[_heapinfo[block].free.next].free.next; | |
| 973 _heapinfo[_heapinfo[block].free.next].free.prev = block; | |
| 974 --_chunks_free; | |
| 975 } | |
| 976 | |
| 977 /* How many trailing free blocks are there now? */ | |
| 978 blocks = _heapinfo[block].free.size; | |
| 979 | |
| 980 /* Where is the current end of accessible core? */ | |
| 981 curbrk = (*__morecore) (0); | |
| 982 | |
| 983 if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit)) | |
| 984 { | |
| 985 /* The end of the malloc heap is at the end of accessible core. | |
| 986 It's possible that moving _heapinfo will allow us to | |
| 987 return some space to the system. */ | |
| 988 | |
| 989 __malloc_size_t info_block = BLOCK (_heapinfo); | |
| 990 __malloc_size_t info_blocks = _heapinfo[info_block].busy.info.size; | |
| 991 __malloc_size_t prev_block = _heapinfo[block].free.prev; | |
| 992 __malloc_size_t prev_blocks = _heapinfo[prev_block].free.size; | |
| 993 __malloc_size_t next_block = _heapinfo[block].free.next; | |
| 994 __malloc_size_t next_blocks = _heapinfo[next_block].free.size; | |
| 995 | |
| 996 if (/* Win if this block being freed is last in core, the info table | |
| 997 is just before it, the previous free block is just before the | |
| 998 info table, and the two free blocks together form a useful | |
| 999 amount to return to the system. */ | |
| 1000 (block + blocks == _heaplimit && | |
| 1001 info_block + info_blocks == block && | |
| 1002 prev_block != 0 && prev_block + prev_blocks == info_block && | |
| 1003 blocks + prev_blocks >= lesscore_threshold) || | |
| 1004 /* Nope, not the case. We can also win if this block being | |
| 1005 freed is just before the info table, and the table extends | |
| 1006 to the end of core or is followed only by a free block, | |
| 1007 and the total free space is worth returning to the system. */ | |
| 1008 (block + blocks == info_block && | |
| 1009 ((info_block + info_blocks == _heaplimit && | |
| 1010 blocks >= lesscore_threshold) || | |
| 1011 (info_block + info_blocks == next_block && | |
| 1012 next_block + next_blocks == _heaplimit && | |
| 1013 blocks + next_blocks >= lesscore_threshold))) | |
| 1014 ) | |
| 1015 { | |
| 1016 malloc_info *newinfo; | |
| 1017 __malloc_size_t oldlimit = _heaplimit; | |
| 1018 | |
| 1019 /* Free the old info table, clearing _heaplimit to avoid | |
| 1020 recursion into this code. We don't want to return the | |
| 1021 table's blocks to the system before we have copied them to | |
| 1022 the new location. */ | |
| 1023 _heaplimit = 0; | |
| 1024 _free_internal (_heapinfo); | |
| 1025 _heaplimit = oldlimit; | |
| 1026 | |
| 1027 /* Tell malloc to search from the beginning of the heap for | |
| 1028 free blocks, so it doesn't reuse the ones just freed. */ | |
| 1029 _heapindex = 0; | |
| 1030 | |
| 1031 /* Allocate new space for the info table and move its data. */ | |
| 1032 newinfo = (malloc_info *) _malloc_internal (info_blocks | |
| 1033 * BLOCKSIZE); | |
| 1034 memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE); | |
| 1035 _heapinfo = newinfo; | |
| 1036 | |
| 1037 /* We should now have coalesced the free block with the | |
| 1038 blocks freed from the old info table. Examine the entire | |
| 1039 trailing free block to decide below whether to return some | |
| 1040 to the system. */ | |
| 1041 block = _heapinfo[0].free.prev; | |
| 1042 blocks = _heapinfo[block].free.size; | |
| 1043 } | |
| 1044 | |
| 1045 /* Now see if we can return stuff to the system. */ | |
| 1046 if (block + blocks == _heaplimit && blocks >= lesscore_threshold) | |
| 1047 { | |
| 1048 register __malloc_size_t bytes = blocks * BLOCKSIZE; | |
| 1049 _heaplimit -= blocks; | |
| 1050 (*__morecore) (-bytes); | |
| 1051 _heapinfo[_heapinfo[block].free.prev].free.next | |
| 1052 = _heapinfo[block].free.next; | |
| 1053 _heapinfo[_heapinfo[block].free.next].free.prev | |
| 1054 = _heapinfo[block].free.prev; | |
| 1055 block = _heapinfo[block].free.prev; | |
| 1056 --_chunks_free; | |
| 1057 _bytes_free -= bytes; | |
| 1058 } | |
| 1059 } | |
| 1060 | |
| 1061 /* Set the next search to begin at this block. */ | |
| 1062 _heapindex = block; | |
| 1063 break; | |
| 1064 | |
| 1065 default: | |
| 1066 /* Do some of the statistics. */ | |
| 1067 --_chunks_used; | |
| 1068 _bytes_used -= 1 << type; | |
| 1069 ++_chunks_free; | |
| 1070 _bytes_free += 1 << type; | |
| 1071 | |
| 1072 /* Get the address of the first free fragment in this block. */ | |
| 1073 prev = (struct list *) ((char *) ADDRESS (block) + | |
| 1074 (_heapinfo[block].busy.info.frag.first << type)); | |
| 1075 | |
| 1076 if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1) | |
| 1077 { | |
| 1078 /* If all fragments of this block are free, remove them | |
| 1079 from the fragment list and free the whole block. */ | |
| 1080 next = prev; | |
| 1081 for (i = 1; i < (__malloc_size_t) (BLOCKSIZE >> type); ++i) | |
| 1082 next = next->next; | |
| 1083 prev->prev->next = next; | |
| 1084 if (next != NULL) | |
| 1085 next->prev = prev->prev; | |
| 1086 _heapinfo[block].busy.type = 0; | |
| 1087 _heapinfo[block].busy.info.size = 1; | |
| 1088 | |
| 1089 /* Keep the statistics accurate. */ | |
| 1090 ++_chunks_used; | |
| 1091 _bytes_used += BLOCKSIZE; | |
| 1092 _chunks_free -= BLOCKSIZE >> type; | |
| 1093 _bytes_free -= BLOCKSIZE; | |
| 1094 | |
| 1095 free (ADDRESS (block)); | |
| 1096 } | |
| 1097 else if (_heapinfo[block].busy.info.frag.nfree != 0) | |
| 1098 { | |
| 1099 /* If some fragments of this block are free, link this | |
| 1100 fragment into the fragment list after the first free | |
| 1101 fragment of this block. */ | |
| 1102 next = (struct list *) ptr; | |
| 1103 next->next = prev->next; | |
| 1104 next->prev = prev; | |
| 1105 prev->next = next; | |
| 1106 if (next->next != NULL) | |
| 1107 next->next->prev = next; | |
| 1108 ++_heapinfo[block].busy.info.frag.nfree; | |
| 1109 } | |
| 1110 else | |
| 1111 { | |
| 1112 /* No fragments of this block are free, so link this | |
| 1113 fragment into the fragment list and announce that | |
| 1114 it is the first free fragment of this block. */ | |
| 1115 prev = (struct list *) ptr; | |
| 1116 _heapinfo[block].busy.info.frag.nfree = 1; | |
| 1117 _heapinfo[block].busy.info.frag.first = (unsigned long int) | |
| 1118 ((unsigned long int) ((char *) ptr - (char *) NULL) | |
| 1119 % BLOCKSIZE >> type); | |
| 1120 prev->next = _fraghead[type].next; | |
| 1121 prev->prev = &_fraghead[type]; | |
| 1122 prev->prev->next = prev; | |
| 1123 if (prev->next != NULL) | |
| 1124 prev->next->prev = prev; | |
| 1125 } | |
| 1126 break; | |
| 1127 } | |
| 1128 } | |
| 1129 | |
| 1130 /* Return memory to the heap. */ | |
| 26526 | 1131 |
| 26526 | 1132 FREE_RETURN_TYPE |
| 17130 | 1133 free (ptr) |
| 1134 __ptr_t ptr; | |
| 1135 { | |
| 1136 if (__free_hook != NULL) | |
| 1137 (*__free_hook) (ptr); | |
| 1138 else | |
| 1139 _free_internal (ptr); | |
| 1140 } | |
| 1141 | |
| 1142 /* Define the `cfree' alias for `free'. */ | |
| 1143 #ifdef weak_alias | |
| 1144 weak_alias (free, cfree) | |
| 1145 #else | |
| 1146 void | |
| 1147 cfree (ptr) | |
| 1148 __ptr_t ptr; | |
| 1149 { | |
| 1150 free (ptr); | |
| 1151 } | |
| 1152 #endif | |
| 1153 /* Change the size of a block allocated by `malloc'. | |
| 1154 Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc. | |
| 1155 Written May 1989 by Mike Haertel. | |
| 1156 | |
| 1157 This library is free software; you can redistribute it and/or | |
| 1158 modify it under the terms of the GNU Library General Public License as | |
| 1159 published by the Free Software Foundation; either version 2 of the | |
| 1160 License, or (at your option) any later version. | |
| 1161 | |
| 1162 This library is distributed in the hope that it will be useful, | |
| 1163 but WITHOUT ANY WARRANTY; without even the implied warranty of | |
| 1164 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
| 1165 Library General Public License for more details. | |
| 1166 | |
| 1167 You should have received a copy of the GNU Library General Public | |
| 1168 License along with this library; see the file COPYING.LIB. If | |
| 1169 not, write to the Free Software Foundation, Inc., 675 Mass Ave, | |
| 1170 Cambridge, MA 02139, USA. | |
| 1171 | |
| 1172 The author may be reached (Email) at the address mike@ai.mit.edu, | |
| 1173 or (US mail) as Mike Haertel c/o Free Software Foundation. */ | |
| 1174 | |
| 1175 #ifndef _MALLOC_INTERNAL | |
| 1176 #define _MALLOC_INTERNAL | |
| 1177 #include <malloc.h> | |
| 1178 #endif | |
| 1179 | |
| 1180 | |
| 1181 | |
| 1182 /* Cope with systems lacking `memmove'. */ | |
| 1183 #if (defined (MEMMOVE_MISSING) || \ | |
| 1184 !defined(_LIBC) && !defined(STDC_HEADERS) && !defined(USG)) | |
| 1185 | |
| 1186 #ifdef emacs | |
| 1187 #undef __malloc_safe_bcopy | |
| 1188 #define __malloc_safe_bcopy safe_bcopy | |
| 1189 #else | |
| 1190 | |
| 1191 /* Snarfed directly from Emacs src/dispnew.c: | |
| 1192 XXX Should use system bcopy if it handles overlap. */ | |
| 1193 | |
| 1194 /* Like bcopy except never gets confused by overlap. */ | |
| 1195 | |
| 1196 void | |
| 1197 __malloc_safe_bcopy (afrom, ato, size) | |
| 1198 __ptr_t afrom; | |
| 1199 __ptr_t ato; | |
| 1200 __malloc_size_t size; | |
| 1201 { | |
| 1202 char *from = afrom, *to = ato; | |
| 1203 | |
| 1204 if (size <= 0 || from == to) | |
| 1205 return; | |
| 1206 | |
| 1207 /* If the source and destination don't overlap, then bcopy can | |
| 1208 handle it. If they do overlap, but the destination is lower in | |
| 1209 memory than the source, we'll assume bcopy can handle that. */ | |
| 1210 if (to < from || from + size <= to) | |
| 1211 bcopy (from, to, size); | |
| 1212 | |
| 1213 /* Otherwise, we'll copy from the end. */ | |
| 1214 else | |
| 1215 { | |
| 1216 register char *endf = from + size; | |
| 1217 register char *endt = to + size; | |
| 1218 | |
| 1219 /* If TO - FROM is large, then we should break the copy into | |
| 1220 nonoverlapping chunks of TO - FROM bytes each. However, if | |
| 1221 TO - FROM is small, then the bcopy function call overhead | |
| 1222 makes this not worth it. The crossover point could be about | |
| 1223 anywhere. Since I don't think the obvious copy loop is too | |
| 1224 bad, I'm trying to err in its favor. */ | |
| 1225 if (to - from < 64) | |
| 1226 { | |
| 1227 do | |
| 1228 *--endt = *--endf; | |
| 1229 while (endf != from); | |
| 1230 } | |
| 1231 else | |
| 1232 { | |
| 1233 for (;;) | |
| 1234 { | |
| 1235 endt -= (to - from); | |
| 1236 endf -= (to - from); | |
| 1237 | |
| 1238 if (endt < to) | |
| 1239 break; | |
| 1240 | |
| 1241 bcopy (endf, endt, to - from); | |
| 1242 } | |
| 1243 | |
| 1244 /* If SIZE wasn't a multiple of TO - FROM, there will be a | |
| 1245 little left over. The amount left over is | |
| 1246 (endt + (to - from)) - to, which is endt - from. */ | |
| 1247 bcopy (from, to, endt - from); | |
| 1248 } | |
| 1249 } | |
| 1250 } | |
| 1251 #endif /* emacs */ | |
| 1252 | |
| 1253 #ifndef memmove | |
| 18667 | 1254 extern void __malloc_safe_bcopy PP ((__ptr_t, __ptr_t, __malloc_size_t)); |
| 17130 | 1255 #define memmove(to, from, size) __malloc_safe_bcopy ((from), (to), (size)) |
| 1256 #endif | |
| 1257 | |
| 1258 #endif | |
| 1259 | |
| 1260 | |
| 1261 #define min(A, B) ((A) < (B) ? (A) : (B)) | |
| 1262 | |
| 1263 /* Debugging hook for realloc. */ | |
| 18667 | 1264 __ptr_t (*__realloc_hook) PP ((__ptr_t __ptr, __malloc_size_t __size)); |
| 17130 | 1265 |
| 1266 /* Resize the given region to the new size, returning a pointer | |
| 1267 to the (possibly moved) region. This is optimized for speed; | |
| 1268 some benchmarks seem to indicate that greater compactness is | |
| 1269 achieved by unconditionally allocating and copying to a | |
| 1270 new region. This module has incestuous knowledge of the | |
| 1271 internals of both free and malloc. */ | |
| 1272 __ptr_t | |
| 1273 _realloc_internal (ptr, size) | |
| 1274 __ptr_t ptr; | |
| 1275 __malloc_size_t size; | |
| 1276 { | |
| 1277 __ptr_t result; | |
| 1278 int type; | |
| 1279 __malloc_size_t block, blocks, oldlimit; | |
| 1280 | |
| 1281 if (size == 0) | |
| 1282 { | |
| 1283 _free_internal (ptr); | |
| 1284 return _malloc_internal (0); | |
| 1285 } | |
| 1286 else if (ptr == NULL) | |
| 1287 return _malloc_internal (size); | |
| 1288 | |
| 1289 block = BLOCK (ptr); | |
| 1290 | |
| 1291 type = _heapinfo[block].busy.type; | |
| 1292 switch (type) | |
| 1293 { | |
| 1294 case 0: | |
| 1295 /* Maybe reallocate a large block to a small fragment. */ | |
| 1296 if (size <= BLOCKSIZE / 2) | |
| 1297 { | |
| 1298 result = _malloc_internal (size); | |
| 1299 if (result != NULL) | |
| 1300 { | |
| 1301 memcpy (result, ptr, size); | |
| 1302 _free_internal (ptr); | |
| 1303 return result; | |
| 1304 } | |
| 1305 } | |
| 1306 | |
| 1307 /* The new size is a large allocation as well; | |
| 1308 see if we can hold it in place. */ | |
| 1309 blocks = BLOCKIFY (size); | |
| 1310 if (blocks < _heapinfo[block].busy.info.size) | |
| 1311 { | |
| 1312 /* The new size is smaller; return | |
| 1313 excess memory to the free list. */ | |
| 1314 _heapinfo[block + blocks].busy.type = 0; | |
| 1315 _heapinfo[block + blocks].busy.info.size | |
| 1316 = _heapinfo[block].busy.info.size - blocks; | |
| 1317 _heapinfo[block].busy.info.size = blocks; | |
| 1318 /* We have just created a new chunk by splitting a chunk in two. | |
| 1319 Now we will free this chunk; increment the statistics counter | |
| 1320 so it doesn't become wrong when _free_internal decrements it. */ | |
| 1321 ++_chunks_used; | |
| 1322 _free_internal (ADDRESS (block + blocks)); | |
| 1323 result = ptr; | |
| 1324 } | |
| 1325 else if (blocks == _heapinfo[block].busy.info.size) | |
| 1326 /* No size change necessary. */ | |
| 1327 result = ptr; | |
| 1328 else | |
| 1329 { | |
| 1330 /* Won't fit, so allocate a new region that will. | |
| 1331 Free the old region first in case there is sufficient | |
| 1332 adjacent free space to grow without moving. */ | |
| 1333 blocks = _heapinfo[block].busy.info.size; | |
| 1334 /* Prevent free from actually returning memory to the system. */ | |
| 1335 oldlimit = _heaplimit; | |
| 1336 _heaplimit = 0; | |
| 1337 _free_internal (ptr); | |
| 1338 result = _malloc_internal (size); | |
| 1339 if (_heaplimit == 0) | |
| 1340 _heaplimit = oldlimit; | |
| 1341 if (result == NULL) | |
| 1342 { | |
| 1343 /* Now we're really in trouble. We have to unfree | |
| 1344 the thing we just freed. Unfortunately it might | |
| 1345 have been coalesced with its neighbors. */ | |
| 1346 if (_heapindex == block) | |
| 1347 (void) _malloc_internal (blocks * BLOCKSIZE); | |
| 1348 else | |
| 1349 { | |
| 1350 __ptr_t previous | |
| 1351 = _malloc_internal ((block - _heapindex) * BLOCKSIZE); | |
| 1352 (void) _malloc_internal (blocks * BLOCKSIZE); | |
| 1353 _free_internal (previous); | |
| 1354 } | |
| 1355 return NULL; | |
| 1356 } | |
| 1357 if (ptr != result) | |
| 1358 memmove (result, ptr, blocks * BLOCKSIZE); | |
| 1359 } | |
| 1360 break; | |
| 1361 | |
| 1362 default: | |
| 1363 /* Old size is a fragment; type is logarithm | |
| 1364 to base two of the fragment size. */ | |
| 1365 if (size > (__malloc_size_t) (1 << (type - 1)) && | |
| 1366 size <= (__malloc_size_t) (1 << type)) | |
| 1367 /* The new size is the same kind of fragment. */ | |
| 1368 result = ptr; | |
| 1369 else | |
| 1370 { | |
| 1371 /* The new size is different; allocate a new space, | |
| 1372 and copy the lesser of the new size and the old. */ | |
| 1373 result = _malloc_internal (size); | |
| 1374 if (result == NULL) | |
| 1375 return NULL; | |
| 1376 memcpy (result, ptr, min (size, (__malloc_size_t) 1 << type)); | |
| 1377 _free_internal (ptr); | |
| 1378 } | |
| 1379 break; | |
| 1380 } | |
| 1381 | |
| 1382 return result; | |
| 1383 } | |
| 1384 | |
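In the `default:` case of `_realloc_internal` above, `type` is the base-2 logarithm of the old fragment size, so the block can be reused in place exactly when 2^(type-1) < size <= 2^type. A small standalone illustration (hypothetical helper name):

```c
#include <stdio.h>
#include <stddef.h>

/* Return nonzero if a request of SIZE bytes still fits the same
   power-of-two fragment class, where TYPE is log2 of the fragment size.  */
static int
fits_same_fragment (size_t size, int type)
{
  return size > ((size_t) 1 << (type - 1))
         && size <= ((size_t) 1 << type);
}

int
main (void)
{
  /* A 64-byte fragment (type 6) holds any new size in 33..64 bytes.  */
  printf ("%d %d %d\n",
          fits_same_fragment (33, 6),   /* 1 */
          fits_same_fragment (64, 6),   /* 1 */
          fits_same_fragment (65, 6));  /* 0 */
  return 0;
}
```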
| 1385 __ptr_t | |
| 1386 realloc (ptr, size) | |
| 1387 __ptr_t ptr; | |
| 1388 __malloc_size_t size; | |
| 1389 { | |
| 1390 if (!__malloc_initialized && !__malloc_initialize ()) | |
| 1391 return NULL; | |
| 1392 | |
| 1393 return (__realloc_hook != NULL ? *__realloc_hook : _realloc_internal) | |
| 1394 (ptr, size); | |
| 1395 } | |
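`realloc` above dispatches through `__realloc_hook` when one is installed and falls back to `_realloc_internal` otherwise. The same pattern, sketched standalone with hypothetical names and the system `realloc` as the fallback:

```c
#include <stdio.h>
#include <stdlib.h>

typedef void *(*realloc_hook_t) (void *ptr, size_t size);

static realloc_hook_t my_realloc_hook;   /* hypothetical hook variable */

/* Hypothetical hook that traces the call before delegating.  */
static void *
tracing_realloc (void *ptr, size_t size)
{
  fprintf (stderr, "realloc (%p, %lu)\n", ptr, (unsigned long) size);
  return realloc (ptr, size);
}

/* Same shape as the dispatch above: prefer the hook when it is set.  */
static void *
my_realloc (void *ptr, size_t size)
{
  return (my_realloc_hook != NULL ? my_realloc_hook : realloc) (ptr, size);
}

int
main (void)
{
  void *p = my_realloc (NULL, 16);   /* no hook: plain realloc */
  my_realloc_hook = tracing_realloc;
  p = my_realloc (p, 32);            /* routed through the hook */
  free (p);
  return 0;
}
```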
| 1396 /* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc. | |
| 1397 | |
| 1398 This library is free software; you can redistribute it and/or | |
| 1399 modify it under the terms of the GNU Library General Public License as | |
| 1400 published by the Free Software Foundation; either version 2 of the | |
| 1401 License, or (at your option) any later version. | |
| 1402 | |
| 1403 This library is distributed in the hope that it will be useful, | |
| 1404 but WITHOUT ANY WARRANTY; without even the implied warranty of | |
| 1405 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
| 1406 Library General Public License for more details. | |
| 1407 | |
| 1408 You should have received a copy of the GNU Library General Public | |
| 1409 License along with this library; see the file COPYING.LIB. If | |
| 1410 not, write to the Free Software Foundation, Inc., 675 Mass Ave, | |
| 1411 Cambridge, MA 02139, USA. | |
| 1412 | |
| 1413 The author may be reached (Email) at the address mike@ai.mit.edu, | |
| 1414 or (US mail) as Mike Haertel c/o Free Software Foundation. */ | |
| 1415 | |
| 1416 #ifndef _MALLOC_INTERNAL | |
| 1417 #define _MALLOC_INTERNAL | |
| 1418 #include <malloc.h> | |
| 1419 #endif | |
| 1420 | |
| 1421 /* Allocate an array of NMEMB elements each SIZE bytes long. | |
| 1422 The entire array is initialized to zeros. */ | |
| 1423 __ptr_t | |
| 1424 calloc (nmemb, size) | |
| 1425 register __malloc_size_t nmemb; | |
| 1426 register __malloc_size_t size; | |
| 1427 { | |
| 1428 register __ptr_t result = malloc (nmemb * size); | |
| 1429 | |
| 1430 if (result != NULL) | |
| 1431 (void) memset (result, 0, nmemb * size); | |
| 1432 | |
| 1433 return result; | |
| 1434 } | |
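The `calloc` above multiplies `nmemb * size` without checking for overflow, which is typical of allocators of this vintage. A standalone sketch (hypothetical helper, not part of gmalloc.c) of how a caller might guard the product first:

```c
#include <stdlib.h>
#include <string.h>

/* Zero-filled allocation that refuses requests whose byte count would
   not fit in a size_t.  */
static void *
checked_calloc (size_t nmemb, size_t size)
{
  if (size != 0 && nmemb > (size_t) -1 / size)
    return NULL;                 /* nmemb * size would overflow */
  void *p = malloc (nmemb * size);
  if (p != NULL)
    memset (p, 0, nmemb * size);
  return p;
}

int
main (void)
{
  /* (size_t) -1 elements of 2 bytes each cannot be represented.  */
  return checked_calloc ((size_t) -1, 2) == NULL ? 0 : 1;
}
```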
| 1435 /* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc. | |
| 1436 This file is part of the GNU C Library. | |
| 1437 | |
| 1438 The GNU C Library is free software; you can redistribute it and/or modify | |
| 1439 it under the terms of the GNU General Public License as published by | |
| 1440 the Free Software Foundation; either version 2, or (at your option) | |
| 1441 any later version. | |
| 1442 | |
| 1443 The GNU C Library is distributed in the hope that it will be useful, | |
| 1444 but WITHOUT ANY WARRANTY; without even the implied warranty of | |
| 1445 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
| 1446 GNU General Public License for more details. | |
| 1447 | |
| 1448 You should have received a copy of the GNU General Public License | |
| 1449 along with the GNU C Library; see the file COPYING. If not, write to | |
| 1450 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */ | |
| 1451 | |
| 1452 #ifndef _MALLOC_INTERNAL | |
| 1453 #define _MALLOC_INTERNAL | |
| 1454 #include <malloc.h> | |
| 1455 #endif | |
| 1456 | |
| 1457 #ifndef __GNU_LIBRARY__ | |
| 1458 #define __sbrk sbrk | |
| 1459 #endif | |
| 1460 | |
| 1461 #ifdef __GNU_LIBRARY__ | |
| 1462 /* It is best not to declare this and cast its result on foreign operating | |
| 1463 systems with potentially hostile include files. */ | |
| 1464 | |
| 1465 #include <stddef.h> | |
| 18667 | 1466 extern __ptr_t __sbrk PP ((ptrdiff_t increment)); |
| 17130 | 1467 #endif |
| 1468 | |
| 1469 #ifndef NULL | |
| 1470 #define NULL 0 | |
| 1471 #endif | |
| 1472 | |
| 1473 /* Allocate INCREMENT more bytes of data space, | |
| 1474 and return the start of data space, or NULL on errors. | |
| 1475 If INCREMENT is negative, shrink data space. */ | |
| 1476 __ptr_t | |
| 1477 __default_morecore (increment) | |
| 1478 __malloc_ptrdiff_t increment; | |
| 1479 { | |
| 1480 __ptr_t result = (__ptr_t) __sbrk (increment); | |
| 1481 if (result == (__ptr_t) -1) | |
| 1482 return NULL; | |
| 1483 return result; | |
| 1484 } | |
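`__default_morecore` above simply forwards to `sbrk` and maps its -1 error return to NULL. A standalone sketch of the same wrapper (Unix `sbrk` assumed; on glibc its declaration may require `_DEFAULT_SOURCE`; the names here are hypothetical):

```c
#include <stdio.h>
#include <stddef.h>
#include <unistd.h>

/* Grow the data segment by INCREMENT bytes; return the old break,
   or NULL if sbrk fails.  */
static void *
demo_morecore (ptrdiff_t increment)
{
  void *result = sbrk (increment);
  return result == (void *) -1 ? NULL : result;
}

int
main (void)
{
  void *chunk = demo_morecore (4096);   /* request one 4 KB extension */
  if (chunk == NULL)
    perror ("sbrk");
  else
    printf ("data segment grew at %p\n", chunk);
  return 0;
}
```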
| 1485 /* Copyright (C) 1991, 92, 93, 94, 95, 96 Free Software Foundation, Inc. | |
| 1486 | |
| 1487 This library is free software; you can redistribute it and/or | |
| 1488 modify it under the terms of the GNU Library General Public License as | |
| 1489 published by the Free Software Foundation; either version 2 of the | |
| 1490 License, or (at your option) any later version. | |
| 1491 | |
| 1492 This library is distributed in the hope that it will be useful, | |
| 1493 but WITHOUT ANY WARRANTY; without even the implied warranty of | |
| 1494 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
| 1495 Library General Public License for more details. | |
| 1496 | |
| 1497 You should have received a copy of the GNU Library General Public | |
| 1498 License along with this library; see the file COPYING.LIB. If | |
| 1499 not, write to the Free Software Foundation, Inc., 675 Mass Ave, | |
| 1500 Cambridge, MA 02139, USA. */ | |
| 1501 | |
| 1502 #ifndef _MALLOC_INTERNAL | |
| 1503 #define _MALLOC_INTERNAL | |
| 1504 #include <malloc.h> | |
| 1505 #endif | |
| 1506 | |
| 1507 #if __DJGPP__ - 0 == 1 | |
| 1508 | |
| 1509 /* There is some problem with memalign in DJGPP v1 and we are supposed | |
| 1510 to omit it. No one told me why; they just told me to do it. */ | |
| 1511 | |
| 1512 #else | |
| 1513 | |
| 18667 | 1514 __ptr_t (*__memalign_hook) PP ((size_t __size, size_t __alignment)); |
| 17130 | 1515 |
| 1516 __ptr_t | |
| 1517 memalign (alignment, size) | |
| 1518 __malloc_size_t alignment; | |
| 1519 __malloc_size_t size; | |
| 1520 { | |
| 1521 __ptr_t result; | |
| 1522 unsigned long int adj, lastadj; | |
| 1523 | |
| 1524 if (__memalign_hook) | |
| 1525 return (*__memalign_hook) (alignment, size); | |
| 1526 | |
| 1527 /* Allocate a block with enough extra space to pad the block with up to | |
| 1528 (ALIGNMENT - 1) bytes if necessary. */ | |
| 1529 result = malloc (size + alignment - 1); | |
| 1530 if (result == NULL) | |
| 1531 return NULL; | |
| 1532 | |
| 1533 /* Figure out how much we will need to pad this particular block | |
| 1534 to achieve the required alignment. */ | |
| 1535 adj = (unsigned long int) ((char *) result - (char *) NULL) % alignment; | |
| 1536 | |
| 1537 do | |
| 1538 { | |
| 1539 /* Reallocate the block with only as much excess as it needs. */ | |
| 1540 free (result); | |
| 1541 result = malloc (adj + size); | |
| 1542 if (result == NULL) /* Impossible unless interrupted. */ | |
| 1543 return NULL; | |
| 1544 | |
| 1545 lastadj = adj; | |
| 1546 adj = (unsigned long int) ((char *) result - (char *) NULL) % alignment; | |
| 1547 /* It's conceivable we might have been so unlucky as to get a | |
| 1548 different block with weaker alignment. If so, this block is too | |
| 1549 short to contain SIZE after alignment correction. So we must | |
| 1550 try again and get another block, slightly larger. */ | |
| 1551 } while (adj > lastadj); | |
| 1552 | |
| 1553 if (adj != 0) | |
| 1554 { | |
| 1555 /* Record this block in the list of aligned blocks, so that `free' | |
| 1556 can identify the pointer it is passed, which will be in the middle | |
| 1557 of an allocated block. */ | |
| 1558 | |
| 1559 struct alignlist *l; | |
| 1560 for (l = _aligned_blocks; l != NULL; l = l->next) | |
| 1561 if (l->aligned == NULL) | |
| 1562 /* This slot is free. Use it. */ | |
| 1563 break; | |
| 1564 if (l == NULL) | |
| 1565 { | |
| 1566 l = (struct alignlist *) malloc (sizeof (struct alignlist)); | |
| 1567 if (l == NULL) | |
| 1568 { | |
| 1569 free (result); | |
| 1570 return NULL; | |
| 1571 } | |
| 1572 l->next = _aligned_blocks; | |
| 1573 _aligned_blocks = l; | |
| 1574 } | |
| 1575 l->exact = result; | |
| 1576 result = l->aligned = (char *) result + alignment - adj; | |
| 1577 } | |
| 1578 | |
| 1579 return result; | |
| 1580 } | |
| 1581 | |
| 1582 #endif /* Not DJGPP v1 */ | |
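`memalign` above over-allocates and then advances the pointer by `alignment - adj`, where `adj` is how far the raw block sits past an alignment boundary. The arithmetic in isolation (standalone sketch, hypothetical names):

```c
#include <stdio.h>
#include <stdint.h>

/* Round P up to the next multiple of ALIGNMENT; only the modulo
   arithmetic from memalign is reproduced here.  */
static char *
align_up (char *p, size_t alignment)
{
  size_t adj = (uintptr_t) p % alignment;
  return adj == 0 ? p : p + (alignment - adj);
}

int
main (void)
{
  char buf[64];
  char *a = align_up (buf + 3, 16);
  printf ("offset into buf: %d, misalignment: %d\n",
          (int) (a - buf), (int) ((uintptr_t) a % 16));
  return 0;
}
```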
| 1583 /* Allocate memory on a page boundary. | |
| 1584 Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc. | |
| 1585 | |
| 1586 This library is free software; you can redistribute it and/or | |
| 1587 modify it under the terms of the GNU Library General Public License as | |
| 1588 published by the Free Software Foundation; either version 2 of the | |
| 1589 License, or (at your option) any later version. | |
| 1590 | |
| 1591 This library is distributed in the hope that it will be useful, | |
| 1592 but WITHOUT ANY WARRANTY; without even the implied warranty of | |
| 1593 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
| 1594 Library General Public License for more details. | |
| 1595 | |
| 1596 You should have received a copy of the GNU Library General Public | |
| 1597 License along with this library; see the file COPYING.LIB. If | |
| 1598 not, write to the Free Software Foundation, Inc., 675 Mass Ave, | |
| 1599 Cambridge, MA 02139, USA. | |
| 1600 | |
| 1601 The author may be reached (Email) at the address mike@ai.mit.edu, | |
| 1602 or (US mail) as Mike Haertel c/o Free Software Foundation. */ | |
| 1603 | |
| 1604 #if defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC) | |
| 1605 | |
| 1606 /* Emacs defines GMALLOC_INHIBIT_VALLOC to avoid this definition | |
| 1607 on MSDOS, where it conflicts with a system header file. */ | |
| 1608 | |
| 1609 #define ELIDE_VALLOC | |
| 1610 | |
| 1611 #endif | |
| 1612 | |
| 1613 #ifndef ELIDE_VALLOC | |
| 1614 | |
| 1615 #if defined (__GNU_LIBRARY__) || defined (_LIBC) | |
| 1616 #include <stddef.h> | |
| 1617 #include <sys/cdefs.h> | |
| 17131 | 1618 #if defined (__GLIBC__) && __GLIBC__ >= 2 |
| 1619 /* __getpagesize is already declared in <unistd.h> with return type int */ | |
| 1620 #else | |
| 18667 | 1621 extern size_t __getpagesize PP ((void)); |
| 17131 | 1622 #endif |
| 17130 | 1623 #else |
| 1624 #include "getpagesize.h" | |
| 1625 #define __getpagesize() getpagesize() | |
| 1626 #endif | |
| 1627 | |
| 1628 #ifndef _MALLOC_INTERNAL | |
| 1629 #define _MALLOC_INTERNAL | |
| 1630 #include <malloc.h> | |
| 1631 #endif | |
| 1632 | |
| 1633 static __malloc_size_t pagesize; | |
| 1634 | |
| 1635 __ptr_t | |
| 1636 valloc (size) | |
| 1637 __malloc_size_t size; | |
| 1638 { | |
| 1639 if (pagesize == 0) | |
| 1640 pagesize = __getpagesize (); | |
| 1641 | |
| 1642 return memalign (pagesize, size); | |
| 1643 } | |
| 1644 | |
| 1645 #endif /* Not ELIDE_VALLOC. */ |
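`valloc` above is just `memalign` with the page size as the alignment. The modern POSIX spelling of the same request, as a standalone sketch (POSIX `sysconf` and `posix_memalign` assumed):

```c
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int
main (void)
{
  size_t pagesize = (size_t) sysconf (_SC_PAGESIZE);
  void *p = NULL;

  /* Ask for a page-aligned block, as valloc would.  */
  if (posix_memalign (&p, pagesize, 1000) == 0)
    {
      printf ("page-aligned block at %p (page size %zu)\n", p, pagesize);
      free (p);
    }
  return 0;
}
```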
