comparison jemalloc.h @ 0:9a44d900ee55
initial import
| author | Yoshiki Yazawa <yaz@honeyplanet.jp> |
|---|---|
| date | Mon, 05 Oct 2009 16:06:43 +0900 |
| parents | |
| children | |
| -1:000000000000 | 0:9a44d900ee55 |
|---|---|
```c
#ifndef MOZ_MEMORY_WINDOWS
#  include <stdbool.h>
#else
#  include <windows.h>
#  ifndef bool
#    define bool BOOL
#  endif
#endif

extern const char *_malloc_options;

/*
 * jemalloc_stats() is not a stable interface.  When using jemalloc_stats_t, be
 * sure that the compiled results of jemalloc.c are in sync with this header
 * file.
 */
typedef struct {
	/*
	 * Run-time configuration settings.
	 */
	bool   opt_abort;         /* abort(3) on error? */
	bool   opt_junk;          /* Fill allocated/free memory with 0xa5/0x5a? */
	bool   opt_utrace;        /* Trace all allocation events? */
	bool   opt_sysv;          /* SysV semantics? */
	bool   opt_xmalloc;       /* abort(3) on OOM? */
	bool   opt_zero;          /* Fill allocated memory with 0x0? */
	size_t narenas;           /* Number of arenas. */
	size_t balance_threshold; /* Arena contention rebalance threshold. */
	size_t quantum;           /* Allocation quantum. */
	size_t small_max;         /* Max quantum-spaced allocation size. */
	size_t large_max;         /* Max sub-chunksize allocation size. */
	size_t chunksize;         /* Size of each virtual memory mapping. */
	size_t dirty_max;         /* Max dirty pages per arena. */
	size_t reserve_min;       /* reserve_low callback threshold. */
	size_t reserve_max;       /* Maximum reserve size before unmapping. */

	/*
	 * Current memory usage statistics.
	 */
	size_t mapped;            /* Bytes mapped (not necessarily committed). */
	size_t committed;         /* Bytes committed (readable/writable). */
	size_t allocated;         /* Bytes allocated (in use by application). */
	size_t dirty;             /* Bytes dirty (committed unused pages). */
	size_t reserve_cur;       /* Current memory reserve. */
} jemalloc_stats_t;
```
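The run-time option fields above (opt_abort, opt_junk, and so on) correspond to the allocator options that phkmalloc-derived allocators conventionally read from the `_malloc_options` string declared earlier. A minimal configuration sketch follows; the header does not document the option letters, so treat the ones used here as assumptions and verify them against the option parsing in jemalloc.c:

```c
/* Hypothetical configuration: define the _malloc_options string declared
 * above before the first allocation.  The letters used here follow the
 * phkmalloc convention ("A" = abort on error, "J" = junk fill) and are an
 * assumption; check jemalloc.c for the letters this build actually parses. */
const char *_malloc_options = "AJ";
```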
```c
#ifndef MOZ_MEMORY_DARWIN
void *malloc(size_t size);
void *valloc(size_t size);
void *calloc(size_t num, size_t size);
void *realloc(void *ptr, size_t size);
void free(void *ptr);
#endif

int posix_memalign(void **memptr, size_t alignment, size_t size);
void *memalign(size_t alignment, size_t size);
size_t malloc_usable_size(const void *ptr);
void jemalloc_stats(jemalloc_stats_t *stats);
```
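Since jemalloc_stats() is declared unstable, a consumer must be built against the matching jemalloc.c. A minimal sketch of querying the allocator, assuming the header is installed as "jemalloc.h":

```c
#include <stdio.h>
#include <stdlib.h>
#include "jemalloc.h"  /* assumed include name for this header */

int main(void) {
	void *p = malloc(100);
	if (p == NULL)
		return 1;

	/* The allocator rounds requests up to a size class, so the usable
	 * size may exceed the 100 bytes requested. */
	printf("usable size: %zu\n", malloc_usable_size(p));

	jemalloc_stats_t stats;
	jemalloc_stats(&stats);
	printf("mapped:    %zu\n", stats.mapped);
	printf("committed: %zu\n", stats.committed);
	printf("allocated: %zu\n", stats.allocated);
	printf("dirty:     %zu\n", stats.dirty);

	free(p);
	return 0;
}
```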
```c
/* The x*() functions never return NULL. */
void *xmalloc(size_t size);
void *xcalloc(size_t num, size_t size);
void *xrealloc(void *ptr, size_t size);
void *xmemalign(size_t alignment, size_t size);
```
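The x*() variants let call sites drop their out-of-memory handling: rather than returning NULL on failure, the allocator terminates the application (compare opt_xmalloc above). A short sketch:

```c
#include "jemalloc.h"  /* assumed include name for this header */

void fill_buffer(void) {
	/* No NULL checks: per the comment above, the x*() functions never
	 * return NULL; on OOM the allocator aborts instead. */
	char *buf = xmalloc(4096);
	buf = xrealloc(buf, 8192);
	/* Presumably released with the ordinary free(), since the header
	 * declares no matching xfree(). */
	free(buf);
}
```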
```c
/*
 * The allocator maintains a memory reserve that is used to satisfy allocation
 * requests when no additional memory can be acquired from the operating
 * system.  Under normal operating conditions, the reserve size is at least
 * reserve_min bytes.  If the reserve is depleted or insufficient to satisfy an
 * allocation request, then condition notifications are sent to one or more of
 * the registered callback functions:
 *
 *   RESERVE_CND_LOW: The reserve had to be used to satisfy an allocation
 *                    request, which dropped the reserve size below the
 *                    minimum.  The callee should try to free memory in order
 *                    to restore the reserve.
 *
 *   RESERVE_CND_CRIT: The reserve was not large enough to satisfy a pending
 *                     allocation request.  Some callee must free adequate
 *                     memory in order to prevent application failure (unless
 *                     the condition spontaneously desists due to concurrent
 *                     deallocation).
 *
 *   RESERVE_CND_FAIL: An allocation request could not be satisfied, despite
 *                     all attempts.  The allocator is about to terminate the
 *                     application.
 *
 * The order in which the callback functions are called is only loosely
 * specified: in the absence of interposing callback
 * registrations/unregistrations, enabled callbacks will be called in an
 * arbitrary round-robin order.
 *
 * Condition notifications are sent to callbacks only while conditions exist.
 * For example, just before the allocator sends a RESERVE_CND_LOW condition
 * notification to a callback, the reserve is in fact depleted.  However, due
 * to allocator concurrency, the reserve may have been restored by the time
 * the callback function executes.  Furthermore, if the reserve is restored at
 * some point during the delivery of condition notifications to callbacks, no
 * further deliveries will occur, since the condition no longer exists.
 *
 * Callback functions can freely call back into the allocator (i.e. the
 * allocator releases all internal resources before calling each callback
 * function), though allocation is discouraged, since recursive callbacks are
 * likely to result, which places extra burden on the application to avoid
 * deadlock.
 *
 * Callback functions must be thread-safe, since it is possible that multiple
 * threads will call into the same callback function concurrently.
 */

/* Memory reserve condition types. */
typedef enum {
	RESERVE_CND_LOW,
	RESERVE_CND_CRIT,
	RESERVE_CND_FAIL
} reserve_cnd_t;

/*
 * Reserve condition notification callback function type definition.
 *
 * Inputs:
 *   ctx: Opaque application data, as passed to reserve_cb_register().
 *   cnd: Condition type being delivered.
 *   size: Allocation request size for the allocation that caused the
 *         condition.
 */
typedef void reserve_cb_t(void *ctx, reserve_cnd_t cnd, size_t size);

/*
 * Register a callback function.
 *
 * Inputs:
 *   cb: Callback function pointer.
 *   ctx: Opaque application data, passed to cb().
 *
 * Output:
 *   ret: True if registration failed due to OOM; false on success.
 */
bool reserve_cb_register(reserve_cb_t *cb, void *ctx);

/*
 * Unregister a callback function.
 *
 * Inputs:
 *   cb: Callback function pointer.
 *   ctx: Opaque application data, same as that passed to reserve_cb_register().
 *
 * Output:
 *   ret: False upon success, true if the {cb,ctx} registration could not be
 *        found.
 */
bool reserve_cb_unregister(reserve_cb_t *cb, void *ctx);

/*
 * Get the current reserve size.
 *
 * ret: Current reserve size.
 */
size_t reserve_cur_get(void);

/*
 * Get the minimum acceptable reserve size.  If the reserve drops below this
 * value, the RESERVE_CND_LOW condition notification is sent to the callbacks.
 *
 * ret: Minimum acceptable reserve size.
 */
size_t reserve_min_get(void);

/*
 * Set the minimum acceptable reserve size.
 *
 * min: Reserve threshold.  This value may be internally rounded up.
 * ret: False if the reserve was successfully resized; true otherwise.  Note
 *      that failure to resize the reserve also results in a RESERVE_CND_LOW
 *      condition.
 */
bool reserve_min_set(size_t min);
```
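Putting the reserve interface together: a minimal sketch that registers a callback, raises the minimum reserve, and reads it back. The include name, the callback body, and the 4 MiB threshold are illustrative assumptions; the return-value conventions follow the comments above.

```c
#include <stdio.h>
#include "jemalloc.h"  /* assumed include name for this header */

/* A minimal reserve_cb_t implementation.  Per the comments above, it must
 * be thread-safe, and it should avoid allocating, since that can trigger
 * recursive notifications. */
static void
my_reserve_cb(void *ctx, reserve_cnd_t cnd, size_t size)
{
	const char *tag = ctx;  /* opaque data passed at registration */

	switch (cnd) {
	case RESERVE_CND_LOW:
		/* Reserve dropped below reserve_min; free memory to restore it. */
		fprintf(stderr, "[%s] reserve low (request: %zu)\n", tag, size);
		break;
	case RESERVE_CND_CRIT:
		/* Reserve cannot satisfy a pending request; freeing memory is
		 * the only way to prevent application failure. */
		fprintf(stderr, "[%s] reserve critical (request: %zu)\n", tag, size);
		break;
	case RESERVE_CND_FAIL:
		/* All attempts failed; the allocator is about to terminate
		 * the application. */
		fprintf(stderr, "[%s] reserve exhausted (request: %zu)\n", tag, size);
		break;
	}
}

int main(void) {
	/* reserve_cb_register() returns true on OOM. */
	if (reserve_cb_register(my_reserve_cb, "demo"))
		return 1;

	/* Raise the low-water mark; the value may be rounded up internally,
	 * and a failed resize also raises a RESERVE_CND_LOW condition. */
	if (reserve_min_set(4 * 1024 * 1024))
		fprintf(stderr, "could not grow reserve\n");

	printf("reserve: %zu bytes (min %zu)\n",
	    reserve_cur_get(), reserve_min_get());

	reserve_cb_unregister(my_reserve_cb, "demo");
	return 0;
}
```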
