Comparison: src/ralloc.c @ 31414:76dcf201d009
(POINTER, SIZE) [emacs]: Define in terms of
POINTER_TYPE and size_t.
(struct mmap_region) [REL_ALLOC_MMAP]: New structure.
(mmap_regions, mmap_regions_1) [REL_ALLOC_MMAP]: New variables.
(ROUND, MMAP_REGION_STRUCT_SIZE, MMAP_REGION, MMAP_USER_AREA)
[REL_ALLOC_MMAP]: New macros.
(mmap_find, mmap_free, mmap_enlarge, mmap_set_vars)
(mmap_mapped_bytes, r_alloc, r_re_alloc, r_alloc_free)
[REL_ALLOC_MMAP]: New functions.
| author | Gerd Moellmann <gerd@gnu.org> |
|---|---|
| date | Wed, 06 Sep 2000 21:25:32 +0000 |
| parents | 3de459e6c652 |
| children | 78540f2db51b |
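
The log entry above describes the new mmap-based entry points (`r_alloc`, `r_re_alloc`, `r_alloc_free`), now declared in terms of `POINTER_TYPE *` and `size_t`. The minimal sketch below only illustrates how a caller uses that interface; it assumes `POINTER_TYPE` is `void *` and that the file is linked with ralloc.c built with `REL_ALLOC_MMAP` defined, and the variable name `text` is hypothetical, not from the changeset. Two further self-contained sketches, of the region layout arithmetic and of the underlying mmap allocate/free pattern, follow the comparison table below.

```c
#include <stddef.h>
#include <stdio.h>

/* Entry points as listed in the log entry; the prototypes assume
   POINTER_TYPE is void *.  */
extern void *r_alloc (void **var, size_t nbytes);
extern void *r_re_alloc (void **var, size_t nbytes);
extern void r_alloc_free (void **var);

int
main (void)
{
  void *text = NULL;   /* hypothetical relocatable buffer */

  /* Allocate a relocatable block; the user pointer is stored in *var
     and also returned.  */
  if (r_alloc (&text, 4096) == NULL)
    return 1;

  /* Resize it; on success *var is updated (possibly to a new address),
     on failure *var is left unchanged and null is returned.  */
  if (r_re_alloc (&text, 16384) == NULL)
    fprintf (stderr, "could not resize block\n");

  /* Free the block; *var is reset to null.  */
  r_alloc_free (&text);
  return 0;
}
```
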
| 31413:c3b1fc2b0d11 | 31414:76dcf201d009 |
|---|---|
| 1 /* Block-relocating memory allocator. | 1 /* Block-relocating memory allocator. |
| 2 Copyright (C) 1993, 1995 Free Software Foundation, Inc. | 2 Copyright (C) 1993, 1995, 2000 Free Software Foundation, Inc. |
| 3 | 3 |
| 4 This file is part of GNU Emacs. | 4 This file is part of GNU Emacs. |
| 5 | 5 |
| 6 GNU Emacs is free software; you can redistribute it and/or modify | 6 GNU Emacs is free software; you can redistribute it and/or modify |
| 7 it under the terms of the GNU General Public License as published by | 7 it under the terms of the GNU General Public License as published by |
| 26 | 26 |
| 27 #ifdef emacs | 27 #ifdef emacs |
| 28 | 28 |
| 29 #include <config.h> | 29 #include <config.h> |
| 30 #include "lisp.h" /* Needed for VALBITS. */ | 30 #include "lisp.h" /* Needed for VALBITS. */ |
| 31 | |
| 31 #ifdef HAVE_UNISTD_H | 32 #ifdef HAVE_UNISTD_H |
| 32 #include <unistd.h> | 33 #include <unistd.h> |
| 33 #endif | 34 #endif |
| 34 #undef NULL | 35 |
| 35 | 36 typedef POINTER_TYPE *POINTER; |
| 36 /* The important properties of this type are that 1) it's a pointer, and | 37 typedef size_t SIZE; |
| 37 2) arithmetic on it should work as if the size of the object pointed | |
| 38 to has a size of 1. */ | |
| 39 #if 0 /* Arithmetic on void* is a GCC extension. */ | |
| 40 #ifdef __STDC__ | |
| 41 typedef void *POINTER; | |
| 42 #else | |
| 43 | |
| 44 #ifdef HAVE_CONFIG_H | |
| 45 #include "config.h" | |
| 46 #endif | |
| 47 | |
| 48 typedef char *POINTER; | |
| 49 | |
| 50 #endif | |
| 51 #endif /* 0 */ | |
| 52 | |
| 53 /* Unconditionally use char * for this. */ | |
| 54 typedef char *POINTER; | |
| 55 | |
| 56 typedef unsigned long SIZE; | |
| 57 | 38 |
| 58 /* Declared in dispnew.c, this version doesn't screw up if regions | 39 /* Declared in dispnew.c, this version doesn't screw up if regions |
| 59 overlap. */ | 40 overlap. */ |
| 41 | |
| 60 extern void safe_bcopy (); | 42 extern void safe_bcopy (); |
| 61 | 43 |
| 62 #ifdef DOUG_LEA_MALLOC | 44 #ifdef DOUG_LEA_MALLOC |
| 63 #define M_TOP_PAD -2 | 45 #define M_TOP_PAD -2 |
| 64 extern int mallopt (); | 46 extern int mallopt (); |
| 65 #else | 47 #else /* not DOUG_LEA_MALLOC */ |
| 66 extern int __malloc_extra_blocks; | 48 extern int __malloc_extra_blocks; |
| 67 #endif | 49 #endif /* not DOUG_LEA_MALLOC */ |
| 68 | 50 |
| 69 #else /* not emacs */ | 51 #else /* not emacs */ |
| 70 | 52 |
| 71 #include <stddef.h> | 53 #include <stddef.h> |
| 72 | 54 |
| 79 | 61 |
| 80 #define safe_bcopy(x, y, z) memmove (y, x, z) | 62 #define safe_bcopy(x, y, z) memmove (y, x, z) |
| 81 #define bzero(x, len) memset (x, 0, len) | 63 #define bzero(x, len) memset (x, 0, len) |
| 82 | 64 |
| 83 #endif /* not emacs */ | 65 #endif /* not emacs */ |
| 66 | |
| 84 | 67 |
| 85 #include "getpagesize.h" | 68 #include "getpagesize.h" |
| 86 | 69 |
| 87 #define NIL ((POINTER) 0) | 70 #define NIL ((POINTER) 0) |
| 88 | 71 |
| 89 /* A flag to indicate whether we have initialized ralloc yet. For | 72 /* A flag to indicate whether we have initialized ralloc yet. For |
| 90 Emacs's sake, please do not make this local to malloc_init; on some | 73 Emacs's sake, please do not make this local to malloc_init; on some |
| 91 machines, the dumping procedure makes all static variables | 74 machines, the dumping procedure makes all static variables |
| 92 read-only. On these machines, the word static is #defined to be | 75 read-only. On these machines, the word static is #defined to be |
| 93 the empty string, meaning that r_alloc_initialized becomes an | 76 the empty string, meaning that r_alloc_initialized becomes an |
| 94 automatic variable, and loses its value each time Emacs is started up. */ | 77 automatic variable, and loses its value each time Emacs is started |
| 78 up. */ | |
| 79 | |
| 95 static int r_alloc_initialized = 0; | 80 static int r_alloc_initialized = 0; |
| 96 | 81 |
| 97 static void r_alloc_init (); | 82 static void r_alloc_init (); |
| 83 | |
| 98 | 84 |
| 99 /* Declarations for working with the malloc, ralloc, and system breaks. */ | 85 /* Declarations for working with the malloc, ralloc, and system breaks. */ |
| 100 | 86 |
| 101 /* Function to set the real break value. */ | 87 /* Function to set the real break value. */ |
| 102 POINTER (*real_morecore) (); | 88 POINTER (*real_morecore) (); |
| 124 #define ROUND_TO_PAGE(addr) (addr & (~(page_size - 1))) | 110 #define ROUND_TO_PAGE(addr) (addr & (~(page_size - 1))) |
| 125 | 111 |
| 126 #define MEM_ALIGN sizeof(double) | 112 #define MEM_ALIGN sizeof(double) |
| 127 #define MEM_ROUNDUP(addr) (((unsigned long int)(addr) + MEM_ALIGN - 1) \ | 113 #define MEM_ROUNDUP(addr) (((unsigned long int)(addr) + MEM_ALIGN - 1) \ |
| 128 & ~(MEM_ALIGN - 1)) | 114 & ~(MEM_ALIGN - 1)) |
| 115 | |
| 129 | 116 |
| 117 /*********************************************************************** | |
| 118 Implementation using sbrk | |
| 119 ***********************************************************************/ | |
| 120 | |
| 130 /* Data structures of heaps and blocs. */ | 121 /* Data structures of heaps and blocs. */ |
| 131 | 122 |
| 132 /* The relocatable objects, or blocs, and the malloc data | 123 /* The relocatable objects, or blocs, and the malloc data |
| 133 both reside within one or more heaps. | 124 both reside within one or more heaps. |
| 134 Each heap contains malloc data, running from `start' to `bloc_start', | 125 Each heap contains malloc data, running from `start' to `bloc_start', |
| 932 relinquish (); | 923 relinquish (); |
| 933 | 924 |
| 934 return address; | 925 return address; |
| 935 } | 926 } |
| 936 | 927 |
| 928 #ifndef REL_ALLOC_MMAP | |
| 929 | |
| 937 /* Allocate a relocatable bloc of storage of size SIZE. A pointer to | 930 /* Allocate a relocatable bloc of storage of size SIZE. A pointer to |
| 938 the data is returned in *PTR. PTR is thus the address of some variable | 931 the data is returned in *PTR. PTR is thus the address of some variable |
| 939 which will use the data area. | 932 which will use the data area. |
| 940 | 933 |
| 941 The allocation of 0 bytes is valid. | 934 The allocation of 0 bytes is valid. |
| 1108 else | 1101 else |
| 1109 b = &(*b)->next; | 1102 b = &(*b)->next; |
| 1110 } | 1103 } |
| 1111 } | 1104 } |
| 1112 | 1105 |
| 1113 | |
| 1114 /* The hook `malloc' uses for the function which gets more space | |
| 1115 from the system. */ | |
| 1116 extern POINTER (*__morecore) (); | |
| 1117 | |
| 1118 /* Initialize various things for memory allocation. */ | |
| 1119 | |
| 1120 static void | |
| 1121 r_alloc_init () | |
| 1122 { | |
| 1123 if (r_alloc_initialized) | |
| 1124 return; | |
| 1125 | |
| 1126 r_alloc_initialized = 1; | |
| 1127 real_morecore = __morecore; | |
| 1128 __morecore = r_alloc_sbrk; | |
| 1129 | |
| 1130 first_heap = last_heap = &heap_base; | |
| 1131 first_heap->next = first_heap->prev = NIL_HEAP; | |
| 1132 first_heap->start = first_heap->bloc_start | |
| 1133 = virtual_break_value = break_value = (*real_morecore) (0); | |
| 1134 if (break_value == NIL) | |
| 1135 abort (); | |
| 1136 | |
| 1137 page_size = PAGE; | |
| 1138 extra_bytes = ROUNDUP (50000); | |
| 1139 | |
| 1140 #ifdef DOUG_LEA_MALLOC | |
| 1141 mallopt (M_TOP_PAD, 64 * 4096); | |
| 1142 #else | |
| 1143 /* Give GNU malloc's morecore some hysteresis | |
| 1144 so that we move all the relocatable blocks much less often. */ | |
| 1145 __malloc_extra_blocks = 64; | |
| 1146 #endif | |
| 1147 | |
| 1148 first_heap->end = (POINTER) ROUNDUP (first_heap->start); | |
| 1149 | |
| 1150 /* The extra call to real_morecore guarantees that the end of the | |
| 1151 address space is a multiple of page_size, even if page_size is | |
| 1152 not really the page size of the system running the binary in | |
| 1153 which page_size is stored. This allows a binary to be built on a | |
| 1154 system with one page size and run on a system with a smaller page | |
| 1155 size. */ | |
| 1156 (*real_morecore) (first_heap->end - first_heap->start); | |
| 1157 | |
| 1158 /* Clear the rest of the last page; this memory is in our address space | |
| 1159 even though it is after the sbrk value. */ | |
| 1160 /* Doubly true, with the additional call that explicitly adds the | |
| 1161 rest of that page to the address space. */ | |
| 1162 bzero (first_heap->start, first_heap->end - first_heap->start); | |
| 1163 virtual_break_value = break_value = first_heap->bloc_start = first_heap->end; | |
| 1164 use_relocatable_buffers = 1; | |
| 1165 } | |
| 1166 | 1106 |
| 1167 #if defined (emacs) && defined (DOUG_LEA_MALLOC) | 1107 #if defined (emacs) && defined (DOUG_LEA_MALLOC) |
| 1168 | 1108 |
| 1169 /* Reinitialize the morecore hook variables after restarting a dumped | 1109 /* Reinitialize the morecore hook variables after restarting a dumped |
| 1170 Emacs. This is needed when using Doug Lea's malloc from GNU libc. */ | 1110 Emacs. This is needed when using Doug Lea's malloc from GNU libc. */ |
| 1177 { | 1117 { |
| 1178 real_morecore = __morecore; | 1118 real_morecore = __morecore; |
| 1179 __morecore = r_alloc_sbrk; | 1119 __morecore = r_alloc_sbrk; |
| 1180 } | 1120 } |
| 1181 } | 1121 } |
| 1182 #endif | 1122 |
| 1123 #endif /* emacs && DOUG_LEA_MALLOC */ | |
| 1183 | 1124 |
| 1184 #ifdef DEBUG | 1125 #ifdef DEBUG |
| 1126 | |
| 1185 #include <assert.h> | 1127 #include <assert.h> |
| 1186 | 1128 |
| 1187 void | 1129 void |
| 1188 r_alloc_check () | 1130 r_alloc_check () |
| 1189 { | 1131 { |
| 1269 if (last_bloc) | 1211 if (last_bloc) |
| 1270 assert (last_bloc->data + last_bloc->size == break_value); | 1212 assert (last_bloc->data + last_bloc->size == break_value); |
| 1271 else | 1213 else |
| 1272 assert (first_heap->bloc_start == break_value); | 1214 assert (first_heap->bloc_start == break_value); |
| 1273 } | 1215 } |
| 1216 | |
| 1274 #endif /* DEBUG */ | 1217 #endif /* DEBUG */ |
| 1218 | |
| 1219 #endif /* not REL_ALLOC_MMAP */ | |
| 1220 | |
| 1221 | |
| 1222 /*********************************************************************** | |
| 1223 Implementation based on mmap | |
| 1224 ***********************************************************************/ | |
| 1225 | |
| 1226 #ifdef REL_ALLOC_MMAP | |
| 1227 | |
| 1228 #include <sys/types.h> | |
| 1229 #include <sys/mman.h> | |
| 1230 #include <stdio.h> | |
| 1231 #include <errno.h> | |
| 1232 | |
| 1233 /* Memory is allocated in regions which are mapped using mmap(2). | |
| 1234 The current implementation lets the system select mapped | |
| 1235 addresses; we're not using MAP_FIXED in general, except when | |
| 1236 trying to enlarge regions. | |
| 1237 | |
| 1238 Each mapped region starts with a mmap_region structure, the user | |
| 1239 area starts after that structure, aligned to MEM_ALIGN. | |
| 1240 | |
| 1241 +-----------------------+ | |
| 1242 | struct mmap_info + | | |
| 1243 | padding | | |
| 1244 +-----------------------+ | |
| 1245 | user data | | |
| 1246 | | | |
| 1247 | | | |
| 1248 +-----------------------+ */ | |
| 1249 | |
| 1250 struct mmap_region | |
| 1251 { | |
| 1252 /* User-specified size. */ | |
| 1253 size_t nbytes_specified; | |
| 1254 | |
| 1255 /* Number of bytes mapped */ | |
| 1256 size_t nbytes_mapped; | |
| 1257 | |
| 1258 /* Pointer to the location holding the address of the memory | |
| 1259 allocated with the mmap'd block. The variable actually points | |
| 1260 after this structure. */ | |
| 1261 POINTER_TYPE **var; | |
| 1262 | |
| 1263 /* Next and previous in list of all mmap'd regions. */ | |
| 1264 struct mmap_region *next, *prev; | |
| 1265 }; | |
| 1266 | |
| 1267 /* Doubly-linked list of mmap'd regions. */ | |
| 1268 | |
| 1269 static struct mmap_region *mmap_regions; | |
| 1270 | |
| 1271 /* Temporary storage for mmap_set_vars, see there. */ | |
| 1272 | |
| 1273 static struct mmap_region *mmap_regions_1; | |
| 1274 | |
| 1275 /* Value is X rounded up to the next multiple of N. */ | |
| 1276 | |
| 1277 #define ROUND(X, N) (((X) + (N) - 1) / (N) * (N)) | |
| 1278 | |
| 1279 /* Size of mmap_region structure plus padding. */ | |
| 1280 | |
| 1281 #define MMAP_REGION_STRUCT_SIZE \ | |
| 1282 ROUND (sizeof (struct mmap_region), MEM_ALIGN) | |
| 1283 | |
| 1284 /* Given a pointer P to the start of the user-visible part of a mapped | |
| 1285 region, return a pointer to the start of the region. */ | |
| 1286 | |
| 1287 #define MMAP_REGION(P) \ | |
| 1288 ((struct mmap_region *) ((char *) (P) - MMAP_REGION_STRUCT_SIZE)) | |
| 1289 | |
| 1290 /* Given a pointer P to the start of a mapped region, return a pointer | |
| 1291 to the start of the user-visible part of the region. */ | |
| 1292 | |
| 1293 #define MMAP_USER_AREA(P) \ | |
| 1294 ((POINTER_TYPE *) ((char *) (P) + MMAP_REGION_STRUCT_SIZE)) | |
| 1295 | |
| 1296 /* Function prototypes. */ | |
| 1297 | |
| 1298 static int mmap_free P_ ((struct mmap_region *)); | |
| 1299 static int mmap_enlarge P_ ((struct mmap_region *, int)); | |
| 1300 static struct mmap_region *mmap_find P_ ((POINTER_TYPE *, POINTER_TYPE *)); | |
| 1301 POINTER_TYPE *r_alloc P_ ((POINTER_TYPE **, size_t)); | |
| 1302 POINTER_TYPE *r_re_alloc P_ ((POINTER_TYPE **, size_t)); | |
| 1303 void r_alloc_free P_ ((POINTER_TYPE **ptr)); | |
| 1304 | |
| 1305 | |
| 1306 /* Return a region overlapping with the address range START...END, or | |
| 1307 null if none. */ | |
| 1308 | |
| 1309 static struct mmap_region * | |
| 1310 mmap_find (start, end) | |
| 1311 POINTER_TYPE *start, *end; | |
| 1312 { | |
| 1313 struct mmap_region *r; | |
| 1314 char *s = (char *) start, *e = (char *) end; | |
| 1315 | |
| 1316 for (r = mmap_regions; r; r = r->next) | |
| 1317 { | |
| 1318 char *rstart = (char *) r; | |
| 1319 char *rend = rstart + r->nbytes_mapped; | |
| 1320 | |
| 1321 if ((s >= rstart && s < rend) | |
| 1322 || (e >= rstart && e < rend) | |
| 1323 || (rstart >= s && rstart < e) | |
| 1324 || (rend >= s && rend < e)) | |
| 1325 break; | |
| 1326 } | |
| 1327 | |
| 1328 return r; | |
| 1329 } | |
| 1330 | |
| 1331 | |
| 1332 /* Unmap region R, which points to the start of a mapped region | |
| 1333 (not to its user area). Value is non-zero if successful. */ | |
| 1334 | |
| 1335 static int | |
| 1336 mmap_free (r) | |
| 1337 struct mmap_region *r; | |
| 1338 { | |
| 1339 if (r->next) | |
| 1340 r->next->prev = r->prev; | |
| 1341 if (r->prev) | |
| 1342 r->prev->next = r->next; | |
| 1343 else | |
| 1344 mmap_regions = r->next; | |
| 1345 | |
| 1346 if (munmap (r, r->nbytes_mapped) == -1) | |
| 1347 { | |
| 1348 fprintf (stderr, "munmap: %s\n", emacs_strerror (errno)); | |
| 1349 return 0; | |
| 1350 } | |
| 1351 | |
| 1352 return 1; | |
| 1353 } | |
| 1354 | |
| 1355 | |
| 1356 /* Enlarge region R by NPAGES pages. NPAGES < 0 means shrink R. | |
| 1357 Value is non-zero if successful. */ | |
| 1358 | |
| 1359 static int | |
| 1360 mmap_enlarge (r, npages) | |
| 1361 struct mmap_region *r; | |
| 1362 int npages; | |
| 1363 { | |
| 1364 char *region_end = (char *) r + r->nbytes_mapped; | |
| 1365 size_t nbytes; | |
| 1366 int success = 1; | |
| 1367 | |
| 1368 if (npages < 0) | |
| 1369 { | |
| 1370 /* Unmap pages at the end of the region. */ | |
| 1371 nbytes = - npages * page_size; | |
| 1372 if (munmap (region_end - nbytes, nbytes) == -1) | |
| 1373 { | |
| 1374 fprintf (stderr, "munmap: %s\n", emacs_strerror (errno)); | |
| 1375 success = 0; | |
| 1376 } | |
| 1377 else | |
| 1378 r->nbytes_mapped -= nbytes; | |
| 1379 } | |
| 1380 else if (npages > 0) | |
| 1381 { | |
| 1382 /* Try to map additional pages at the end of the region. We | |
| 1383 cannot do this if the address range is already occupied by | |
| 1384 something else because mmap deletes any previous mapping. | |
| 1385 I'm not sure this is worth doing, let's see. */ | |
| 1386 nbytes = npages * page_size; | |
| 1387 if (mmap_find (region_end, region_end + nbytes)) | |
| 1388 success = 0; | |
| 1389 else | |
| 1390 { | |
| 1391 POINTER_TYPE *p; | |
| 1392 | |
| 1393 p = mmap (region_end, nbytes, PROT_READ | PROT_WRITE, | |
| 1394 MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0); | |
| 1395 if (p == MAP_FAILED) | |
| 1396 { | |
| 1397 fprintf (stderr, "mmap: %s\n", emacs_strerror (errno)); | |
| 1398 success = 0; | |
| 1399 } | |
| 1400 else if (p != (POINTER_TYPE *) region_end) | |
| 1401 { | |
| 1402 /* Kernels are free to choose a different address. In | |
| 1403 that case, unmap what we've mapped above; we have | |
| 1404 no use for it. */ | |
| 1405 if (munmap (p, nbytes) == -1) | |
| 1406 fprintf (stderr, "munmap: %s\n", emacs_strerror (errno)); | |
| 1407 success = 0; | |
| 1408 } | |
| 1409 else | |
| 1410 r->nbytes_mapped += nbytes; | |
| 1411 } | |
| 1412 | |
| 1413 success = 0; | |
| 1414 } | |
| 1415 | |
| 1416 return success; | |
| 1417 } | |
| 1418 | |
| 1419 | |
| 1420 /* Set or reset variables holding references to mapped regions. If | |
| 1421 RESTORE_P is zero, set all variables to null. If RESTORE_P is | |
| 1422 non-zero, set all variables to the start of the user-areas | |
| 1423 of mapped regions. | |
| 1424 | |
| 1425 This function is called from Fdump_emacs to ensure that the dumped | |
| 1426 Emacs doesn't contain references to memory that won't be mapped | |
| 1427 when Emacs starts. */ | |
| 1428 | |
| 1429 void | |
| 1430 mmap_set_vars (restore_p) | |
| 1431 int restore_p; | |
| 1432 { | |
| 1433 struct mmap_region *r; | |
| 1434 | |
| 1435 if (restore_p) | |
| 1436 { | |
| 1437 mmap_regions = mmap_regions_1; | |
| 1438 for (r = mmap_regions; r; r = r->next) | |
| 1439 *r->var = MMAP_USER_AREA (r); | |
| 1440 } | |
| 1441 else | |
| 1442 { | |
| 1443 for (r = mmap_regions; r; r = r->next) | |
| 1444 *r->var = NULL; | |
| 1445 mmap_regions_1 = mmap_regions; | |
| 1446 mmap_regions = NULL; | |
| 1447 } | |
| 1448 } | |
| 1449 | |
| 1450 | |
| 1451 /* Return total number of bytes mapped. */ | |
| 1452 | |
| 1453 size_t | |
| 1454 mmap_mapped_bytes () | |
| 1455 { | |
| 1456 struct mmap_region *r; | |
| 1457 size_t n = 0; | |
| 1458 | |
| 1459 for (r = mmap_regions; r; r = r->next) | |
| 1460 n += r->nbytes_mapped; | |
| 1461 | |
| 1462 return n; | |
| 1463 } | |
| 1464 | |
| 1465 | |
| 1466 /* Allocate a block of storage large enough to hold NBYTES bytes of | |
| 1467 data. A pointer to the data is returned in *VAR. VAR is thus the | |
| 1468 address of some variable which will use the data area. | |
| 1469 | |
| 1470 The allocation of 0 bytes is valid. | |
| 1471 | |
| 1472 If we can't allocate the necessary memory, set *VAR to null, and | |
| 1473 return null. */ | |
| 1474 | |
| 1475 POINTER_TYPE * | |
| 1476 r_alloc (var, nbytes) | |
| 1477 POINTER_TYPE **var; | |
| 1478 size_t nbytes; | |
| 1479 { | |
| 1480 void *p; | |
| 1481 size_t map; | |
| 1482 | |
| 1483 if (!r_alloc_initialized) | |
| 1484 r_alloc_init (); | |
| 1485 | |
| 1486 map = ROUND (nbytes + MMAP_REGION_STRUCT_SIZE, page_size); | |
| 1487 p = mmap (NULL, map, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0); | |
| 1488 | |
| 1489 if (p == MAP_FAILED) | |
| 1490 { | |
| 1491 if (errno != ENOMEM) | |
| 1492 fprintf (stderr, "mmap: %s\n", emacs_strerror (errno)); | |
| 1493 p = NULL; | |
| 1494 } | |
| 1495 else | |
| 1496 { | |
| 1497 struct mmap_region *r = (struct mmap_region *) p; | |
| 1498 | |
| 1499 r->nbytes_specified = nbytes; | |
| 1500 r->nbytes_mapped = map; | |
| 1501 r->var = var; | |
| 1502 r->prev = NULL; | |
| 1503 r->next = mmap_regions; | |
| 1504 if (r->next) | |
| 1505 r->next->prev = r; | |
| 1506 mmap_regions = r; | |
| 1507 | |
| 1508 p = MMAP_USER_AREA (p); | |
| 1509 } | |
| 1510 | |
| 1511 return *var = p; | |
| 1512 } | |
| 1513 | |
| 1514 | |
| 1515 /* Given a pointer at address VAR to data allocated with r_alloc, | |
| 1516 resize it to size NBYTES. Change *VAR to reflect the new block, | |
| 1517 and return this value. If more memory cannot be allocated, then | |
| 1518 leave *VAR unchanged, and return null. */ | |
| 1519 | |
| 1520 POINTER_TYPE * | |
| 1521 r_re_alloc (var, nbytes) | |
| 1522 POINTER_TYPE **var; | |
| 1523 size_t nbytes; | |
| 1524 { | |
| 1525 POINTER_TYPE *result; | |
| 1526 | |
| 1527 if (!r_alloc_initialized) | |
| 1528 r_alloc_init (); | |
| 1529 | |
| 1530 if (*var == NULL) | |
| 1531 result = r_alloc (var, nbytes); | |
| 1532 else if (nbytes == 0) | |
| 1533 { | |
| 1534 r_alloc_free (var); | |
| 1535 result = r_alloc (var, nbytes); | |
| 1536 } | |
| 1537 else | |
| 1538 { | |
| 1539 struct mmap_region *r = MMAP_REGION (*var); | |
| 1540 size_t room = r->nbytes_mapped - MMAP_REGION_STRUCT_SIZE; | |
| 1541 | |
| 1542 if (room < nbytes) | |
| 1543 { | |
| 1544 /* Must enlarge. */ | |
| 1545 POINTER_TYPE *old_ptr = *var; | |
| 1546 | |
| 1547 /* Try to map additional pages at the end of the region. | |
| 1548 If that fails, allocate a new region, copy data | |
| 1549 from the old region, then free it. */ | |
| 1550 if (mmap_enlarge (r, ROUND (nbytes - room, page_size))) | |
| 1551 { | |
| 1552 r->nbytes_specified = nbytes; | |
| 1553 *var = result = old_ptr; | |
| 1554 } | |
| 1555 else if (r_alloc (var, nbytes)) | |
| 1556 { | |
| 1557 bcopy (old_ptr, *var, r->nbytes_specified); | |
| 1558 mmap_free (MMAP_REGION (old_ptr)); | |
| 1559 result = *var; | |
| 1560 r = MMAP_REGION (result); | |
| 1561 r->nbytes_specified = nbytes; | |
| 1562 } | |
| 1563 else | |
| 1564 { | |
| 1565 *var = old_ptr; | |
| 1566 result = NULL; | |
| 1567 } | |
| 1568 } | |
| 1569 else if (room - nbytes >= page_size) | |
| 1570 { | |
| 1571 /* Shrinking by at least a page. Let's give some | |
| 1572 memory back to the system. */ | |
| 1573 mmap_enlarge (r, - (room - nbytes) / page_size); | |
| 1574 result = *var; | |
| 1575 r->nbytes_specified = nbytes; | |
| 1576 } | |
| 1577 else | |
| 1578 { | |
| 1579 /* Leave it alone. */ | |
| 1580 result = *var; | |
| 1581 r->nbytes_specified = nbytes; | |
| 1582 } | |
| 1583 } | |
| 1584 | |
| 1585 return result; | |
| 1586 } | |
| 1587 | |
| 1588 | |
| 1589 /* Free a block of relocatable storage whose data is pointed to by | |
| 1590 PTR. Store 0 in *PTR to show there's no block allocated. */ | |
| 1591 | |
| 1592 void | |
| 1593 r_alloc_free (var) | |
| 1594 POINTER_TYPE **var; | |
| 1595 { | |
| 1596 if (!r_alloc_initialized) | |
| 1597 r_alloc_init (); | |
| 1598 | |
| 1599 if (*var) | |
| 1600 { | |
| 1601 mmap_free (MMAP_REGION (*var)); | |
| 1602 *var = NULL; | |
| 1603 } | |
| 1604 } | |
| 1605 | |
| 1606 #endif /* REL_ALLOC_MMAP */ | |
| 1607 | |
| 1608 | |
| 1609 | |
| 1610 /*********************************************************************** | |
| 1611 Initialization | |
| 1612 ***********************************************************************/ | |
| 1613 | |
| 1614 /* The hook `malloc' uses for the function which gets more space | |
| 1615 from the system. */ | |
| 1616 | |
| 1617 extern POINTER (*__morecore) (); | |
| 1618 | |
| 1619 /* Initialize various things for memory allocation. */ | |
| 1620 | |
| 1621 static void | |
| 1622 r_alloc_init () | |
| 1623 { | |
| 1624 if (r_alloc_initialized) | |
| 1625 return; | |
| 1626 | |
| 1627 r_alloc_initialized = 1; | |
| 1628 real_morecore = __morecore; | |
| 1629 __morecore = r_alloc_sbrk; | |
| 1630 | |
| 1631 first_heap = last_heap = &heap_base; | |
| 1632 first_heap->next = first_heap->prev = NIL_HEAP; | |
| 1633 first_heap->start = first_heap->bloc_start | |
| 1634 = virtual_break_value = break_value = (*real_morecore) (0); | |
| 1635 if (break_value == NIL) | |
| 1636 abort (); | |
| 1637 | |
| 1638 page_size = PAGE; | |
| 1639 extra_bytes = ROUNDUP (50000); | |
| 1640 | |
| 1641 #ifdef DOUG_LEA_MALLOC | |
| 1642 mallopt (M_TOP_PAD, 64 * 4096); | |
| 1643 #else | |
| 1644 /* Give GNU malloc's morecore some hysteresis | |
| 1645 so that we move all the relocatable blocks much less often. */ | |
| 1646 __malloc_extra_blocks = 64; | |
| 1647 #endif | |
| 1648 | |
| 1649 first_heap->end = (POINTER) ROUNDUP (first_heap->start); | |
| 1650 | |
| 1651 /* The extra call to real_morecore guarantees that the end of the | |
| 1652 address space is a multiple of page_size, even if page_size is | |
| 1653 not really the page size of the system running the binary in | |
| 1654 which page_size is stored. This allows a binary to be built on a | |
| 1655 system with one page size and run on a system with a smaller page | |
| 1656 size. */ | |
| 1657 (*real_morecore) (first_heap->end - first_heap->start); | |
| 1658 | |
| 1659 /* Clear the rest of the last page; this memory is in our address space | |
| 1660 even though it is after the sbrk value. */ | |
| 1661 /* Doubly true, with the additional call that explicitly adds the | |
| 1662 rest of that page to the address space. */ | |
| 1663 bzero (first_heap->start, first_heap->end - first_heap->start); | |
| 1664 virtual_break_value = break_value = first_heap->bloc_start = first_heap->end; | |
| 1665 use_relocatable_buffers = 1; | |
| 1666 } |
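
The new code keeps a `struct mmap_region` header at the start of each mapping, hands the caller the area after it, and sizes the mapping with `ROUND` so it covers the header plus the request in whole pages. Below is a minimal, self-contained sketch of that layout arithmetic, with the macros and structure re-stated outside ralloc.c purely for illustration; the 4096-byte page size and the 50000-byte request are assumptions for the example, not values taken from the changeset.

```c
#include <stdio.h>
#include <stddef.h>

#define MEM_ALIGN sizeof (double)

/* X rounded up to the next multiple of N, as in the diff.  */
#define ROUND(X, N) (((X) + (N) - 1) / (N) * (N))

/* Simplified copy of the bookkeeping header kept at the start of
   every mapped region.  */
struct mmap_region
{
  size_t nbytes_specified;          /* size the caller asked for */
  size_t nbytes_mapped;             /* size actually mmap'd */
  void **var;                       /* variable holding the user pointer */
  struct mmap_region *next, *prev;  /* links in the list of regions */
};

/* Header size including alignment padding.  */
#define MMAP_REGION_STRUCT_SIZE \
  ROUND (sizeof (struct mmap_region), MEM_ALIGN)

/* Header <-> user-area conversions, as in the diff.  */
#define MMAP_REGION(p) \
  ((struct mmap_region *) ((char *) (p) - MMAP_REGION_STRUCT_SIZE))
#define MMAP_USER_AREA(p) \
  ((void *) ((char *) (p) + MMAP_REGION_STRUCT_SIZE))

int
main (void)
{
  size_t page_size = 4096;                    /* assumed for the example */
  size_t nbytes = 50000;                      /* assumed user request */
  char region[MMAP_REGION_STRUCT_SIZE + 64];  /* stand-in for a mapping */
  size_t map;
  void *user;

  /* r_alloc maps the header plus the user data, rounded to whole pages.  */
  map = ROUND (nbytes + MMAP_REGION_STRUCT_SIZE, page_size);
  printf ("header %lu bytes, request %lu -> map %lu bytes (%lu pages)\n",
          (unsigned long) MMAP_REGION_STRUCT_SIZE, (unsigned long) nbytes,
          (unsigned long) map, (unsigned long) (map / page_size));

  /* The user area starts MMAP_REGION_STRUCT_SIZE bytes into the mapping;
     MMAP_REGION recovers the header from the user pointer.  */
  user = MMAP_USER_AREA (region);
  printf ("round trip ok: %d\n",
          MMAP_REGION (user) == (struct mmap_region *) region);

  return 0;
}
```

The same computation produces the `map` argument that `r_alloc` passes to mmap, and `MMAP_USER_AREA (p)` is the pointer that ends up in `*var`; `MMAP_REGION` inverts it so `r_re_alloc` and `r_alloc_free` can get back to the bookkeeping header.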

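And here is a self-contained sketch (POSIX mmap only) of the allocate/free pattern the REL_ALLOC_MMAP functions follow: map an anonymous, private region, keep bookkeeping in a header at the start of the mapping, give the caller the area after the header, and step back from the user pointer in order to unmap the whole region, as `mmap_free` does. The simplified `header` structure, the `MAP_ANONYMOUS` fallback, and the use of `strerror` in place of Emacs's `emacs_strerror` are assumptions made to keep the example standalone; the `MEM_ALIGN` padding is omitted.

```c
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <stddef.h>
#include <unistd.h>
#include <sys/mman.h>

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

/* Simplified stand-in for struct mmap_region.  */
struct header
{
  size_t nbytes_specified;   /* what the caller asked for */
  size_t nbytes_mapped;      /* what was actually mapped */
};

/* Round X up to a multiple of N (same idea as ROUND in the diff).  */
static size_t
round_up (size_t x, size_t n)
{
  return (x + n - 1) / n * n;
}

int
main (void)
{
  size_t page_size, nbytes, map;
  void *p;
  struct header *h;
  char *user;

  page_size = (size_t) sysconf (_SC_PAGESIZE);
  nbytes = 12345;               /* assumed request size */
  map = round_up (nbytes + sizeof (struct header), page_size);

  /* Map an anonymous, private region, letting the system pick the
     address, just as r_alloc does without MAP_FIXED.  */
  p = mmap (NULL, map, PROT_READ | PROT_WRITE,
            MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  if (p == MAP_FAILED)
    {
      fprintf (stderr, "mmap: %s\n", strerror (errno));
      return 1;
    }

  /* Bookkeeping lives at the start of the mapping.  */
  h = (struct header *) p;
  h->nbytes_specified = nbytes;
  h->nbytes_mapped = map;

  /* The caller only ever sees the area after the header.  */
  user = (char *) p + sizeof (struct header);
  memset (user, 0, nbytes);
  printf ("mapped %lu bytes for a %lu-byte request\n",
          (unsigned long) h->nbytes_mapped,
          (unsigned long) h->nbytes_specified);

  /* To free, step back from the user area to the mapping start and
     unmap the whole region, as mmap_free does.  */
  h = (struct header *) (user - sizeof (struct header));
  if (munmap (h, h->nbytes_mapped) == -1)
    {
      fprintf (stderr, "munmap: %s\n", strerror (errno));
      return 1;
    }

  return 0;
}
```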