comparison src/gmalloc.c @ 32821:08e5ab6d998f

(state_protected_p, last_state_size, last_heapinfo) [GC_MALLOC_CHECK && GC_PROTECT_MALLOC_STATE]: New variables. (protect_malloc_state) [GC_MALLOC_CHECK && GC_PROTECT_MALLOC_STATE]: New function. (PROTECT_MALLOC_STATE): New macro. (__malloc_initialize, morecore, _malloc_internal) (_free_internal) (_realloc_internal): Use it to make _heapinfo read-only outside of gmalloc.
author Gerd Moellmann <gerd@gnu.org>
date Tue, 24 Oct 2000 12:41:02 +0000
parents 8223a86fa594
children c3f68b4d9e37
comparison
equal deleted inserted replaced
32820:d78254659f1d 32821:08e5ab6d998f
378 __malloc_size_t __malloc_extra_blocks; 378 __malloc_size_t __malloc_extra_blocks;
379 379
380 void (*__malloc_initialize_hook) PP ((void)); 380 void (*__malloc_initialize_hook) PP ((void));
381 void (*__after_morecore_hook) PP ((void)); 381 void (*__after_morecore_hook) PP ((void));
382 382
#if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE

/* Some code for hunting a bug writing into _heapinfo.

   Call this macro with argument PROT non-zero to protect internal
   malloc state against writing to it, call it with a zero argument to
   make it readable and writable.

   Note that this only works if BLOCKSIZE == page size, which is
   the case on the i386.  */

#include <sys/types.h>
#include <sys/mman.h>

/* Non-zero while the malloc state is currently mprotect'ed read-only.  */
static int state_protected_p;

/* Number of bytes covered by the most recent protection change.  */
static __malloc_size_t last_state_size;

/* Value of _heapinfo at the time of the most recent protection
   change, so the old table's pages can be unprotected after the
   table has been relocated.  */
static malloc_info *last_heapinfo;

void
protect_malloc_state (protect_p)
     int protect_p;
{
  /* A relocated _heapinfo leaves its old pages behind; make sure they
     are writable again before malloc reuses that memory.  */
  if (last_heapinfo
      && last_heapinfo != _heapinfo
      && state_protected_p)
    mprotect (last_heapinfo, last_state_size, PROT_READ | PROT_WRITE);

  last_heapinfo = _heapinfo;
  last_state_size = _heaplimit * sizeof *_heapinfo;

  if (state_protected_p != protect_p)
    {
      int prot = protect_p ? PROT_READ : PROT_READ | PROT_WRITE;

      state_protected_p = protect_p;
      if (mprotect (_heapinfo, last_state_size, prot) != 0)
	abort ();
    }
}

#define PROTECT_MALLOC_STATE(PROT) protect_malloc_state (PROT)

#else
#define PROTECT_MALLOC_STATE(PROT)	/* empty */
#endif
429
383 430
384 /* Aligned allocation. */ 431 /* Aligned allocation. */
385 static __ptr_t align PP ((__malloc_size_t)); 432 static __ptr_t align PP ((__malloc_size_t));
386 static __ptr_t 433 static __ptr_t
387 align (size) 434 align (size)
490 _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info)); 537 _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));
491 538
492 register_heapinfo (); 539 register_heapinfo ();
493 540
494 __malloc_initialized = 1; 541 __malloc_initialized = 1;
542 PROTECT_MALLOC_STATE (1);
495 return 1; 543 return 1;
496 } 544 }
497 545
498 static int morecore_recursing; 546 static int morecore_recursing;
499 547
513 return NULL; 561 return NULL;
514 562
515 result = align (size); 563 result = align (size);
516 if (result == NULL) 564 if (result == NULL)
517 return NULL; 565 return NULL;
566
567 PROTECT_MALLOC_STATE (0);
518 568
519 /* Check if we need to grow the info table. */ 569 /* Check if we need to grow the info table. */
520 if ((__malloc_size_t) BLOCK ((char *) result + size) > heapsize) 570 if ((__malloc_size_t) BLOCK ((char *) result + size) > heapsize)
521 { 571 {
522 /* Calculate the new _heapinfo table size. We do not account for the 572 /* Calculate the new _heapinfo table size. We do not account for the
597 647
598 /* Reset _heaplimit so _free_internal never decides 648 /* Reset _heaplimit so _free_internal never decides
599 it can relocate or resize the info table. */ 649 it can relocate or resize the info table. */
600 _heaplimit = 0; 650 _heaplimit = 0;
601 _free_internal (oldinfo); 651 _free_internal (oldinfo);
652 PROTECT_MALLOC_STATE (0);
602 653
603 /* The new heap limit includes the new table just allocated. */ 654 /* The new heap limit includes the new table just allocated. */
604 _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info)); 655 _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
605 return result; 656 return result;
606 } 657 }
629 680
630 #if 0 681 #if 0
631 if (size == 0) 682 if (size == 0)
632 return NULL; 683 return NULL;
633 #endif 684 #endif
685
686 PROTECT_MALLOC_STATE (0);
634 687
635 if (size < sizeof (struct list)) 688 if (size < sizeof (struct list))
636 size = sizeof (struct list); 689 size = sizeof (struct list);
637 690
638 #ifdef SUNOS_LOCALTIME_BUG 691 #ifdef SUNOS_LOCALTIME_BUG
678 { 731 {
679 /* No free fragments of the desired size, so get a new block 732 /* No free fragments of the desired size, so get a new block
680 and break it into fragments, returning the first. */ 733 and break it into fragments, returning the first. */
681 #ifdef GC_MALLOC_CHECK 734 #ifdef GC_MALLOC_CHECK
682 result = _malloc_internal (BLOCKSIZE); 735 result = _malloc_internal (BLOCKSIZE);
736 PROTECT_MALLOC_STATE (0);
683 #else 737 #else
684 result = malloc (BLOCKSIZE); 738 result = malloc (BLOCKSIZE);
685 #endif 739 #endif
686 if (result == NULL) 740 if (result == NULL)
687 return NULL; 741 {
742 PROTECT_MALLOC_STATE (1);
743 return NULL;
744 }
688 745
689 /* Link all fragments but the first into the free list. */ 746 /* Link all fragments but the first into the free list. */
690 next = (struct list *) ((char *) result + (1 << log)); 747 next = (struct list *) ((char *) result + (1 << log));
691 next->next = NULL; 748 next->next = NULL;
692 next->prev = &_fraghead[log]; 749 next->prev = &_fraghead[log];
801 adding that adjustment. */ 858 adding that adjustment. */
802 while (--blocks > 0) 859 while (--blocks > 0)
803 _heapinfo[block + blocks].busy.info.size = -blocks; 860 _heapinfo[block + blocks].busy.info.size = -blocks;
804 } 861 }
805 862
863 PROTECT_MALLOC_STATE (1);
806 return result; 864 return result;
807 } 865 }
808 866
809 __ptr_t 867 __ptr_t
810 malloc (size) 868 malloc (size)
911 register struct alignlist *l; 969 register struct alignlist *l;
912 970
913 if (ptr == NULL) 971 if (ptr == NULL)
914 return; 972 return;
915 973
974 PROTECT_MALLOC_STATE (0);
975
916 for (l = _aligned_blocks; l != NULL; l = l->next) 976 for (l = _aligned_blocks; l != NULL; l = l->next)
917 if (l->aligned == ptr) 977 if (l->aligned == ptr)
918 { 978 {
919 l->aligned = NULL; /* Mark the slot in the list as free. */ 979 l->aligned = NULL; /* Mark the slot in the list as free. */
920 ptr = l->exact; 980 ptr = l->exact;
1033 _heapindex = 0; 1093 _heapindex = 0;
1034 1094
1035 /* Allocate new space for the info table and move its data. */ 1095 /* Allocate new space for the info table and move its data. */
1036 newinfo = (malloc_info *) _malloc_internal (info_blocks 1096 newinfo = (malloc_info *) _malloc_internal (info_blocks
1037 * BLOCKSIZE); 1097 * BLOCKSIZE);
1098 PROTECT_MALLOC_STATE (0);
1038 memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE); 1099 memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
1039 _heapinfo = newinfo; 1100 _heapinfo = newinfo;
1040 1101
1041 /* We should now have coalesced the free block with the 1102 /* We should now have coalesced the free block with the
1042 blocks freed from the old info table. Examine the entire 1103 blocks freed from the old info table. Examine the entire
1131 if (prev->next != NULL) 1192 if (prev->next != NULL)
1132 prev->next->prev = prev; 1193 prev->next->prev = prev;
1133 } 1194 }
1134 break; 1195 break;
1135 } 1196 }
1197
1198 PROTECT_MALLOC_STATE (1);
1136 } 1199 }
1137 1200
1138 /* Return memory to the heap. */ 1201 /* Return memory to the heap. */
1139 1202
1140 FREE_RETURN_TYPE 1203 FREE_RETURN_TYPE
1294 else if (ptr == NULL) 1357 else if (ptr == NULL)
1295 return _malloc_internal (size); 1358 return _malloc_internal (size);
1296 1359
1297 block = BLOCK (ptr); 1360 block = BLOCK (ptr);
1298 1361
1362 PROTECT_MALLOC_STATE (0);
1363
1299 type = _heapinfo[block].busy.type; 1364 type = _heapinfo[block].busy.type;
1300 switch (type) 1365 switch (type)
1301 { 1366 {
1302 case 0: 1367 case 0:
1303 /* Maybe reallocate a large block to a small fragment. */ 1368 /* Maybe reallocate a large block to a small fragment. */
1342 /* Prevent free from actually returning memory to the system. */ 1407 /* Prevent free from actually returning memory to the system. */
1343 oldlimit = _heaplimit; 1408 oldlimit = _heaplimit;
1344 _heaplimit = 0; 1409 _heaplimit = 0;
1345 _free_internal (ptr); 1410 _free_internal (ptr);
1346 result = _malloc_internal (size); 1411 result = _malloc_internal (size);
1412 PROTECT_MALLOC_STATE (0);
1347 if (_heaplimit == 0) 1413 if (_heaplimit == 0)
1348 _heaplimit = oldlimit; 1414 _heaplimit = oldlimit;
1349 if (result == NULL) 1415 if (result == NULL)
1350 { 1416 {
1351 /* Now we're really in trouble. We have to unfree 1417 /* Now we're really in trouble. We have to unfree
1385 _free_internal (ptr); 1451 _free_internal (ptr);
1386 } 1452 }
1387 break; 1453 break;
1388 } 1454 }
1389 1455
1456 PROTECT_MALLOC_STATE (1);
1390 return result; 1457 return result;
1391 } 1458 }
1392 1459
1393 __ptr_t 1460 __ptr_t
1394 realloc (ptr, size) 1461 realloc (ptr, size)