comparison src/ralloc.c @ 49600:23a1cea22d13
Trailing whitespace deleted.
| field | value |
|---|---|
| author | Juanma Barranquero <lekktu@gmail.com> |
| date | Tue, 04 Feb 2003 14:56:31 +0000 |
| parents | 72f30168f26c |
| children | 695cf19ef79e d7ddb3e565de |
This comparison is between the parent revision 49599:5ade352e8d1c and 49600:23a1cea22d13. Since the change only deletes trailing whitespace, the compared hunks read identically in both revisions; each hunk is therefore shown once below, prefixed with its line numbers in the file.
```c
   1  /* Block-relocating memory allocator.
   2     Copyright (C) 1993, 1995, 2000 Free Software Foundation, Inc.
   3
   4  This file is part of GNU Emacs.
   5
   6  GNU Emacs is free software; you can redistribute it and/or modify
```
```c
  40     overlap. */
  41
  42  extern void safe_bcopy ();
  43
  44  #ifdef DOUG_LEA_MALLOC
  45  #define M_TOP_PAD -2
  46  extern int mallopt ();
  47  #else /* not DOUG_LEA_MALLOC */
  48  #ifndef SYSTEM_MALLOC
  49  extern size_t __malloc_extra_blocks;
  50  #endif /* SYSTEM_MALLOC */
```
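The `safe_bcopy` declaration above matters because relocation moves blocs within the same address range, so source and destination regions can overlap (see the comment fragment ending at line 40). The function itself is defined elsewhere in Emacs; the snippet below is only an illustration of the overlap-safe contract it has to honour (modern code would simply call `memmove`), not its actual implementation.

```c
/* Illustration only: an overlap-safe copy with bcopy-style argument order
   (from, to, size), i.e. the contract safe_bcopy must satisfy.  This is a
   hypothetical sketch, not the real Emacs implementation.  */
static void
copy_possibly_overlapping (char *from, char *to, unsigned long size)
{
  if (to < from)
    while (size-- > 0)
      *to++ = *from++;          /* destination below source: copy forward */
  else if (to > from)
    {
      from += size;
      to += size;
      while (size-- > 0)
        *--to = *--from;        /* destination above source: copy backward */
    }
}
```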
```c
  96  static POINTER break_value;
  97
  98  /* This is the size of a page. We round memory requests to this boundary. */
  99  static int page_size;
 100
 101  /* Whenever we get memory from the system, get this many extra bytes. This
 102     must be a multiple of page_size. */
 103  static int extra_bytes;
 104
 105  /* Macros for rounding. Note that rounding to any value is possible
 106     by changing the definition of PAGE. */
```
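The rounding macros the comment at lines 105-106 refers to fall outside the compared hunks. The snippet below is only the usual power-of-two rounding idiom such macros tend to use, written against the `page_size` variable declared above; it is not guaranteed to match the real ralloc.c definitions.

```c
/* Hypothetical sketch, assuming page_size is a power of two; the actual
   PAGE/ROUNDUP/MEM_ROUNDUP macros in ralloc.c may differ in detail.  */
#define EXAMPLE_ROUNDUP(size) \
  (((unsigned long) (size) + page_size - 1) & ~((unsigned long) page_size - 1))
#define EXAMPLE_ALIGNED(addr) \
  (((unsigned long) (addr) & (page_size - 1)) == 0)
```

With `page_size` equal to 4096, `EXAMPLE_ROUNDUP (1)` and `EXAMPLE_ROUNDUP (4096)` both evaluate to 4096, which is the property that lets `extra_bytes` stay a multiple of `page_size`.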
```c
 139     but they never move.
 140
 141     We try to make just one heap and make it larger as necessary.
 142     But sometimes we can't do that, because we can't get contiguous
 143     space to add onto the heap. When that happens, we start a new heap. */
 144
 145  typedef struct heap
 146  {
 147    struct heap *next;
 148    struct heap *prev;
 149    /* Start of memory range of this heap. */
```
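Given the heap list the comment describes (one heap when possible, a new heap whenever contiguous growth fails, chained from `first_heap` to `last_heap`), a hypothetical helper like the following could total the address space the heaps currently cover. It is a sketch for illustration only and uses just the `next`, `start` and `end` members that appear in the hunks of this comparison.

```c
/* Hypothetical helper, not part of ralloc.c: walk the heap list and add up
   how much address space the heaps span.  */
static unsigned long
total_heap_space (void)
{
  heap_ptr h;
  unsigned long total = 0;

  for (h = first_heap; h; h = h->next)
    total += (char *) h->end - (char *) h->start;

  return total;
}
```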
```c
 172  static heap_ptr first_heap, last_heap;
 173
 174  /* These structures are allocated in the malloc arena.
 175     The linked list is kept in order of increasing '.data' members.
 176     The data blocks abut each other; if b->next is non-nil, then
 177     b->data + b->size == b->next->data.
 178
 179     An element with variable==NIL denotes a freed block, which has not yet
 180     been collected. They may only appear while r_alloc_freeze > 0, and will be
 181     freed when the arena is thawed. Currently, these blocs are not reusable,
 182     while the arena is frozen. Very inefficient. */
```
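The comment above states the central invariant of the bloc list: it is sorted by `.data` and consecutive blocs abut exactly. A debugging check of that invariant is easy to phrase; the helper below is hypothetical and not part of ralloc.c, using only the `next`, `data` and `size` members and the `first_bloc`/`NIL_BLOC` names that appear elsewhere in this comparison.

```c
/* Hypothetical consistency check (not in ralloc.c): verify that the bloc
   list is kept in increasing order of .data and that consecutive blocs
   abut, i.e. b->data + b->size == b->next->data.  */
static void
check_bloc_list (void)
{
  register bloc_ptr b;

  for (b = first_bloc; b != NIL_BLOC; b = b->next)
    if (b->next != NIL_BLOC
        && (char *) b->data + b->size != (char *) b->next->data)
      abort ();
}
```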
```c
 465  }
 466
 467  /* Calculate new locations of blocs in the list beginning with BLOC,
 468     relocating it to start at ADDRESS, in heap HEAP. If enough space is
 469     not presently available in our reserve, call obtain for
 470     more space.
 471
 472     Store the new location of each bloc in its new_data field.
 473     Do not touch the contents of blocs or break_value. */
 474
 475  static int
 476  relocate_blocs (bloc, heap, address)
 479       POINTER address;
 480  {
 481    register bloc_ptr b = bloc;
 482
 483    /* No need to ever call this if arena is frozen, bug somewhere! */
 484    if (r_alloc_freeze_level)
 485      abort();
 486
 487    while (b)
 488      {
 489        /* If bloc B won't fit within HEAP,
```
```c
 504            register SIZE s = 0;
 505
 506            /* Add up the size of all the following blocs. */
 507            while (tb != NIL_BLOC)
 508              {
 509                if (tb->variable)
 510                  s += tb->size;
 511
 512                tb = tb->next;
 513              }
 514
```
```c
 521          }
 522
 523        /* Record the new address of this bloc
 524           and update where the next bloc can start. */
 525        b->new_data = address;
 526        if (b->variable)
 527          address = (char *) address + b->size;
 528        b = b->next;
 529      }
 530
 531    return 1;
```
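relocate_blocs is only the planning half of a relocation: it fills in each bloc's `new_data` and, as its header comment says, never touches bloc contents or `break_value`. The commit half appears in the loops at lines 679-690 and 706-717 below, which copy the bytes and publish the new address through `*b->variable`. The helper below is a hypothetical condensation of that plan-then-commit pattern, not code from the file; it omits the NIL-variable case that the real loops handle.

```c
/* Hypothetical condensation of the plan-then-commit pattern used by the
   real callers of relocate_blocs (resize_bloc and r_alloc_sbrk).  */
static int
plan_then_commit (bloc_ptr blocs, heap_ptr heap, POINTER target)
{
  register bloc_ptr b;

  if (! relocate_blocs (blocs, heap, target))      /* plan: fill in new_data */
    return 0;

  for (b = blocs; b != NIL_BLOC; b = b->next)
    {
      safe_bcopy (b->data, b->new_data, b->size);  /* commit: move the bytes */
      *b->variable = b->data = b->new_data;        /* publish the new address */
    }
  return 1;
}
```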
```c
 637    heap_ptr heap;
 638    POINTER address;
 639    SIZE old_size;
 640
 641    /* No need to ever call this if arena is frozen, bug somewhere! */
 642    if (r_alloc_freeze_level)
 643      abort();
 644
 645    if (bloc == NIL_BLOC || size == bloc->size)
 646      return 1;
 647
```
```c
 679        {
 680          if (!b->variable)
 681            {
 682              b->size = 0;
 683              b->data = b->new_data;
 684            }
 685          else
 686            {
 687              safe_bcopy (b->data, b->new_data, b->size);
 688              *b->variable = b->data = b->new_data;
 689            }
 690        }
```
```c
 706        {
 707          if (!b->variable)
 708            {
 709              b->size = 0;
 710              b->data = b->new_data;
 711            }
 712          else
 713            {
 714              safe_bcopy (b->data, b->new_data, b->size);
 715              *b->variable = b->data = b->new_data;
 716            }
 717        }
```
```c
 736    if (r_alloc_freeze_level)
 737      {
 738        bloc->variable = (POINTER *) NIL;
 739        return;
 740      }
 741
 742    resize_bloc (bloc, 0);
 743
 744    if (bloc == first_bloc && bloc == last_bloc)
 745      {
 746        first_bloc = last_bloc = NIL_BLOC;
```
```c
 792
 793     If we're out of memory, we should return zero, to imitate the other
 794     __morecore hook values - in particular, __default_morecore in the
 795     GNU malloc package. */
 796
 797  POINTER
 798  r_alloc_sbrk (size)
 799       long size;
 800  {
 801    register bloc_ptr b;
 802    POINTER address;
```
```c
 848      new_bloc_start = (POINTER) MEM_ROUNDUP ((char *)address + get);
 849
 850      if (first_heap->bloc_start < new_bloc_start)
 851        {
 852          /* This is no clean solution - no idea how to do it better. */
 853          if (r_alloc_freeze_level)
 854            return NIL;
 855
 856          /* There is a bug here: if the above obtain call succeeded, but the
 857             relocate_blocs call below does not succeed, we need to free
 858             the memory that we got with obtain. */
```
```c
1016    if (! r_alloc_initialized)
1017      r_alloc_init ();
1018
1019    if (!*ptr)
1020      return r_alloc (ptr, size);
1021    if (!size)
1022      {
1023        r_alloc_free (ptr);
1024        return r_alloc (ptr, 0);
1025      }
1026
1027    bloc = find_bloc (ptr);
1028    if (bloc == NIL_BLOC)
1029      abort ();
1030
1031    if (size < bloc->size)
1032      {
1033        /* Wouldn't it be useful to actually resize the bloc here? */
1034        /* I think so too, but not if it's too expensive... */
1035        if ((bloc->size - MEM_ROUNDUP (size) >= page_size)
1036            && r_alloc_freeze_level == 0)
1037          {
1038            resize_bloc (bloc, MEM_ROUNDUP (size));
1039            /* Never mind if this fails, just do nothing... */
1040            /* It *should* be infallible! */
1041          }
```
```c
1053              bloc->variable = (POINTER *) NIL;
1054            }
1055          else
1056            return NIL;
1057        }
1058      else
1059        {
1060          if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
1061            return NIL;
1062        }
1063    }
```
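The hunk above comes from the reallocation entry point (r_re_alloc in the full file) and shows the calling convention of the whole module: the caller passes the address of its own pointer variable, the allocator records that address in the bloc's `variable` field, and it rewrites the caller's pointer whenever the bloc is relocated (`*b->variable = b->data = b->new_data` above). The sketch below is a hypothetical caller-side illustration, not code from Emacs; `text` and `example_relocatable_usage` are made-up names.

```c
/* Hypothetical usage sketch.  The allocator keeps the address of `text'
   and updates it whenever the underlying bloc moves, so the caller never
   dereferences a stale pointer.  */
extern POINTER r_alloc (), r_re_alloc ();
extern void r_alloc_free ();

static POINTER text;

static void
example_relocatable_usage ()
{
  r_alloc (&text, 8000);        /* allocate a relocatable block */
  r_re_alloc (&text, 16000);    /* grow it; `text' may be rewritten */
  r_alloc_free (&text);         /* release it */
}
```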
```c
1089
1090  void
1091  r_alloc_thaw ()
1092  {
1093
1094    if (! r_alloc_initialized)
1095      r_alloc_init ();
1096
1097    if (--r_alloc_freeze_level < 0)
1098      abort ();
1099
1100    /* This frees all unused blocs. It is not too inefficient, as the resize
1101       and bcopy is done only once. Afterwards, all unreferenced blocs are
1102       already shrunk to zero size. */
1103    if (!r_alloc_freeze_level)
1104      {
1105        bloc_ptr *b = &first_bloc;
1106        while (*b)
1107          if (!(*b)->variable)
1108            free_bloc (*b);
1109          else
1110            b = &(*b)->next;
1111      }
1112  }
1113
1114
```
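r_alloc_thaw above is the closing half of a freeze/thaw pair: while `r_alloc_freeze_level` is positive, blocs are never moved and freeing only marks a bloc as unreferenced (`variable` set to NIL, as in the hunk at lines 736-740), and the sweep at lines 1103-1111 reclaims those blocs once the level drops back to zero. The sketch below is hypothetical; it assumes the counterpart r_alloc_freeze entry point from the rest of ralloc.c, which does not appear in the compared hunks.

```c
/* Hypothetical usage sketch; r_alloc_freeze is assumed from the rest of
   ralloc.c and is not shown in this comparison.  While frozen, relocatable
   data keeps its address, so raw pointers into it stay valid.  */
extern void r_alloc_freeze ();
extern void r_alloc_thaw ();

static void
with_stable_addresses (void (*work) ())
{
  r_alloc_freeze (0L);  /* raise r_alloc_freeze_level: no relocation now */
  (*work) ();           /* safe to hold raw pointers into blocs here */
  r_alloc_thaw ();      /* drop the level; deferred frees happen at zero */
}
```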
```c
1236  r_alloc_init ()
1237  {
1238    if (r_alloc_initialized)
1239      return;
1240    r_alloc_initialized = 1;
1241
1242    page_size = PAGE;
1243  #ifndef SYSTEM_MALLOC
1244    real_morecore = __morecore;
1245    __morecore = r_alloc_sbrk;
1246
```
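The two assignments at lines 1244-1245 are the heart of the scheme when Emacs is built with GNU malloc rather than SYSTEM_MALLOC: malloc obtains core by calling through the `__morecore` hook, so after r_alloc_init every such request reaches r_alloc_sbrk, which can relocate blocs out of the way and uses the saved `real_morecore` to reach the real sbrk (returning zero on failure, as the comment at lines 793-795 describes). The snippet below is a minimal sketch of that hook swap; the declarations are illustrative and may not match the historical GNU malloc headers exactly.

```c
/* Hypothetical sketch of the hook swap performed in r_alloc_init;
   declarations are assumptions, not copied from the real headers.  */
extern POINTER (*__morecore) ();        /* GNU malloc's "get more core" hook */
static POINTER (*real_morecore) ();     /* saved original hook */

static void
install_relocating_morecore (void)
{
  real_morecore = __morecore;   /* remember how to get real memory */
  __morecore = r_alloc_sbrk;    /* route malloc's requests through ralloc */
}
```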
```c
1281       rest of that page to the address space. */
1282    bzero (first_heap->start,
1283           (char *) first_heap->end - (char *) first_heap->start);
1284    virtual_break_value = break_value = first_heap->bloc_start = first_heap->end;
1285  #endif
1286
1287    use_relocatable_buffers = 1;
1288  }
```
