Diffstat (limited to 'deps/jemalloc/src')
-rw-r--r--  deps/jemalloc/src/arena.c      | 3863
-rw-r--r--  deps/jemalloc/src/atomic.c     |    2
-rw-r--r--  deps/jemalloc/src/base.c       |  187
-rw-r--r--  deps/jemalloc/src/bitmap.c     |  111
-rw-r--r--  deps/jemalloc/src/chunk.c      |  795
-rw-r--r--  deps/jemalloc/src/chunk_dss.c  |  238
-rw-r--r--  deps/jemalloc/src/chunk_mmap.c |   78
-rw-r--r--  deps/jemalloc/src/ckh.c        |  569
-rw-r--r--  deps/jemalloc/src/ctl.c        | 2254
-rw-r--r--  deps/jemalloc/src/extent.c     |   77
-rw-r--r--  deps/jemalloc/src/hash.c       |    2
-rw-r--r--  deps/jemalloc/src/huge.c       |  477
-rw-r--r--  deps/jemalloc/src/jemalloc.c   | 2949
-rw-r--r--  deps/jemalloc/src/mb.c         |    2
-rw-r--r--  deps/jemalloc/src/mutex.c      |  158
-rw-r--r--  deps/jemalloc/src/nstime.c     |  194
-rw-r--r--  deps/jemalloc/src/pages.c      |  302
-rw-r--r--  deps/jemalloc/src/prng.c       |    2
-rw-r--r--  deps/jemalloc/src/prof.c       | 2355
-rw-r--r--  deps/jemalloc/src/quarantine.c |  183
-rw-r--r--  deps/jemalloc/src/rtree.c      |  132
-rw-r--r--  deps/jemalloc/src/spin.c       |    2
-rw-r--r--  deps/jemalloc/src/stats.c      | 1154
-rw-r--r--  deps/jemalloc/src/tcache.c     |  555
-rw-r--r--  deps/jemalloc/src/ticker.c     |    2
-rw-r--r--  deps/jemalloc/src/tsd.c        |  197
-rw-r--r--  deps/jemalloc/src/util.c       |  666
-rw-r--r--  deps/jemalloc/src/valgrind.c   |   34
-rw-r--r--  deps/jemalloc/src/witness.c    |  136
-rw-r--r--  deps/jemalloc/src/zone.c       |  330
30 files changed, 18006 insertions(+), 0 deletions(-)
diff --git a/deps/jemalloc/src/arena.c b/deps/jemalloc/src/arena.c
new file mode 100644
index 0000000..648a8da
--- /dev/null
+++ b/deps/jemalloc/src/arena.c
@@ -0,0 +1,3863 @@
+#define JEMALLOC_ARENA_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+/* Data. */
+
+purge_mode_t opt_purge = PURGE_DEFAULT;
+const char *purge_mode_names[] = {
+ "ratio",
+ "decay",
+ "N/A"
+};
+ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
+static ssize_t lg_dirty_mult_default;
+ssize_t opt_decay_time = DECAY_TIME_DEFAULT;
+static ssize_t decay_time_default;
+
+arena_bin_info_t arena_bin_info[NBINS];
+
+size_t map_bias;
+size_t map_misc_offset;
+size_t arena_maxrun; /* Max run size for arenas. */
+size_t large_maxclass; /* Max large size class. */
+unsigned nlclasses; /* Number of large size classes. */
+unsigned nhclasses; /* Number of huge size classes. */
+
+/******************************************************************************/
+/*
+ * Function prototypes for static functions that are referenced prior to
+ * definition.
+ */
+
+static void arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena,
+ arena_chunk_t *chunk);
+static void arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena,
+ size_t ndirty_limit);
+static void arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run,
+ bool dirty, bool cleaned, bool decommitted);
+static void arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena,
+ arena_chunk_t *chunk, arena_run_t *run, arena_bin_t *bin);
+static void arena_bin_lower_run(arena_t *arena, arena_run_t *run,
+ arena_bin_t *bin);
+
+/******************************************************************************/
+
+JEMALLOC_INLINE_C size_t
+arena_miscelm_size_get(const arena_chunk_map_misc_t *miscelm)
+{
+ arena_chunk_t *chunk;
+ size_t pageind, mapbits;
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
+ pageind = arena_miscelm_to_pageind(miscelm);
+ mapbits = arena_mapbits_get(chunk, pageind);
+ return (arena_mapbits_size_decode(mapbits));
+}
+
+JEMALLOC_INLINE_C const extent_node_t *
+arena_miscelm_extent_get(const arena_chunk_map_misc_t *miscelm)
+{
+ arena_chunk_t *chunk;
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
+ return (&chunk->node);
+}
+
+JEMALLOC_INLINE_C int
+arena_sn_comp(const arena_chunk_map_misc_t *a, const arena_chunk_map_misc_t *b)
+{
+ size_t a_sn, b_sn;
+
+ assert(a != NULL);
+ assert(b != NULL);
+
+ a_sn = extent_node_sn_get(arena_miscelm_extent_get(a));
+ b_sn = extent_node_sn_get(arena_miscelm_extent_get(b));
+
+ return ((a_sn > b_sn) - (a_sn < b_sn));
+}
+
+JEMALLOC_INLINE_C int
+arena_ad_comp(const arena_chunk_map_misc_t *a,
+ const arena_chunk_map_misc_t *b)
+{
+ uintptr_t a_miscelm = (uintptr_t)a;
+ uintptr_t b_miscelm = (uintptr_t)b;
+
+ assert(a != NULL);
+ assert(b != NULL);
+
+ return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm));
+}
+
+JEMALLOC_INLINE_C int
+arena_snad_comp(const arena_chunk_map_misc_t *a,
+ const arena_chunk_map_misc_t *b)
+{
+ int ret;
+
+ assert(a != NULL);
+ assert(b != NULL);
+
+ ret = arena_sn_comp(a, b);
+ if (ret != 0)
+ return (ret);
+
+ ret = arena_ad_comp(a, b);
+ return (ret);
+}
+
+/* Generate pairing heap functions. */
+ph_gen(static UNUSED, arena_run_heap_, arena_run_heap_t, arena_chunk_map_misc_t,
+ ph_link, arena_snad_comp)
+
+#ifdef JEMALLOC_JET
+#undef run_quantize_floor
+#define run_quantize_floor JEMALLOC_N(n_run_quantize_floor)
+#endif
+static size_t
+run_quantize_floor(size_t size)
+{
+ size_t ret;
+ pszind_t pind;
+
+ assert(size > 0);
+ assert(size <= HUGE_MAXCLASS);
+ assert((size & PAGE_MASK) == 0);
+
+ assert(size != 0);
+ assert(size == PAGE_CEILING(size));
+
+ pind = psz2ind(size - large_pad + 1);
+ if (pind == 0) {
+ /*
+ * Avoid underflow. This short-circuit would also do the right
+ * thing for all sizes in the range for which there are
+ * PAGE-spaced size classes, but it's simplest to just handle
+ * the one case that would cause erroneous results.
+ */
+ return (size);
+ }
+ ret = pind2sz(pind - 1) + large_pad;
+ assert(ret <= size);
+ return (ret);
+}
+#ifdef JEMALLOC_JET
+#undef run_quantize_floor
+#define run_quantize_floor JEMALLOC_N(run_quantize_floor)
+run_quantize_t *run_quantize_floor = JEMALLOC_N(n_run_quantize_floor);
+#endif
+
+#ifdef JEMALLOC_JET
+#undef run_quantize_ceil
+#define run_quantize_ceil JEMALLOC_N(n_run_quantize_ceil)
+#endif
+static size_t
+run_quantize_ceil(size_t size)
+{
+ size_t ret;
+
+ assert(size > 0);
+ assert(size <= HUGE_MAXCLASS);
+ assert((size & PAGE_MASK) == 0);
+
+ ret = run_quantize_floor(size);
+ if (ret < size) {
+ /*
+ * Skip a quantization that may have an adequately large run,
+ * because under-sized runs may be mixed in. This only happens
+ * when an unusual size is requested, i.e. for aligned
+ * allocation, and is just one of several places where linear
+ * search would potentially find sufficiently aligned available
+ * memory somewhere lower.
+ */
+ ret = pind2sz(psz2ind(ret - large_pad + 1)) + large_pad;
+ }
+ return (ret);
+}
+#ifdef JEMALLOC_JET
+#undef run_quantize_ceil
+#define run_quantize_ceil JEMALLOC_N(run_quantize_ceil)
+run_quantize_t *run_quantize_ceil = JEMALLOC_N(n_run_quantize_ceil);
+#endif
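+
+/*
+ * Worked example for the two quantization helpers above (sizes illustrative,
+ * assuming 4 KiB pages, large_pad == 0, and the usual four size classes per
+ * size doubling): the page-multiple classes around 32 KiB run ..., 28K, 32K,
+ * 40K, 48K, ..., so run_quantize_floor(36K) yields 32K (the largest class not
+ * exceeding the size) and run_quantize_ceil(36K) yields 40K (the smallest
+ * class that can hold it). A size that already matches a class is returned
+ * unchanged by both.
+ */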
+
+static void
+arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
+ size_t npages)
+{
+ pszind_t pind = psz2ind(run_quantize_floor(arena_miscelm_size_get(
+ arena_miscelm_get_const(chunk, pageind))));
+ assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
+ LG_PAGE));
+ assert((npages << LG_PAGE) < chunksize);
+ assert(pind2sz(pind) <= chunksize);
+ arena_run_heap_insert(&arena->runs_avail[pind],
+ arena_miscelm_get_mutable(chunk, pageind));
+}
+
+static void
+arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
+ size_t npages)
+{
+ pszind_t pind = psz2ind(run_quantize_floor(arena_miscelm_size_get(
+ arena_miscelm_get_const(chunk, pageind))));
+ assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
+ LG_PAGE));
+ assert((npages << LG_PAGE) < chunksize);
+ assert(pind2sz(pind) <= chunksize);
+ arena_run_heap_remove(&arena->runs_avail[pind],
+ arena_miscelm_get_mutable(chunk, pageind));
+}
+
+static void
+arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
+ size_t npages)
+{
+ arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
+ pageind);
+
+ assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
+ LG_PAGE));
+ assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
+ assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
+ CHUNK_MAP_DIRTY);
+
+ qr_new(&miscelm->rd, rd_link);
+ qr_meld(&arena->runs_dirty, &miscelm->rd, rd_link);
+ arena->ndirty += npages;
+}
+
+static void
+arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
+ size_t npages)
+{
+ arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
+ pageind);
+
+ assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
+ LG_PAGE));
+ assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
+ assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
+ CHUNK_MAP_DIRTY);
+
+ qr_remove(&miscelm->rd, rd_link);
+ assert(arena->ndirty >= npages);
+ arena->ndirty -= npages;
+}
+
+static size_t
+arena_chunk_dirty_npages(const extent_node_t *node)
+{
+
+ return (extent_node_size_get(node) >> LG_PAGE);
+}
+
+void
+arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node, bool cache)
+{
+
+ if (cache) {
+ extent_node_dirty_linkage_init(node);
+ extent_node_dirty_insert(node, &arena->runs_dirty,
+ &arena->chunks_cache);
+ arena->ndirty += arena_chunk_dirty_npages(node);
+ }
+}
+
+void
+arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node, bool dirty)
+{
+
+ if (dirty) {
+ extent_node_dirty_remove(node);
+ assert(arena->ndirty >= arena_chunk_dirty_npages(node));
+ arena->ndirty -= arena_chunk_dirty_npages(node);
+ }
+}
+
+JEMALLOC_INLINE_C void *
+arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
+{
+ void *ret;
+ size_t regind;
+ arena_chunk_map_misc_t *miscelm;
+ void *rpages;
+
+ assert(run->nfree > 0);
+ assert(!bitmap_full(run->bitmap, &bin_info->bitmap_info));
+
+ regind = (unsigned)bitmap_sfu(run->bitmap, &bin_info->bitmap_info);
+ miscelm = arena_run_to_miscelm(run);
+ rpages = arena_miscelm_to_rpages(miscelm);
+ ret = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset +
+ (uintptr_t)(bin_info->reg_interval * regind));
+ run->nfree--;
+ return (ret);
+}
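+
+/*
+ * Example of the region addressing above, with purely hypothetical bin
+ * parameters: if reg0_offset == 0 and reg_interval == 192 (the spacing
+ * between consecutive regions), the region selected by regind == 3 starts at
+ * rpages + 0 + 192*3 = rpages + 576. bitmap_sfu() sets and returns the index
+ * of the lowest clear bit, so regions are handed out lowest-address first.
+ */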
+
+JEMALLOC_INLINE_C void
+arena_run_reg_dalloc(arena_run_t *run, void *ptr)
+{
+ arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
+ size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+ size_t mapbits = arena_mapbits_get(chunk, pageind);
+ szind_t binind = arena_ptr_small_binind_get(ptr, mapbits);
+ arena_bin_info_t *bin_info = &arena_bin_info[binind];
+ size_t regind = arena_run_regind(run, bin_info, ptr);
+
+ assert(run->nfree < bin_info->nregs);
+ /* Freeing an interior pointer can cause assertion failure. */
+ assert(((uintptr_t)ptr -
+ ((uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
+ (uintptr_t)bin_info->reg0_offset)) %
+ (uintptr_t)bin_info->reg_interval == 0);
+ assert((uintptr_t)ptr >=
+ (uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
+ (uintptr_t)bin_info->reg0_offset);
+ /* Freeing an unallocated pointer can cause assertion failure. */
+ assert(bitmap_get(run->bitmap, &bin_info->bitmap_info, regind));
+
+ bitmap_unset(run->bitmap, &bin_info->bitmap_info, regind);
+ run->nfree++;
+}
+
+JEMALLOC_INLINE_C void
+arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
+{
+
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
+ (run_ind << LG_PAGE)), (npages << LG_PAGE));
+ memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
+ (npages << LG_PAGE));
+}
+
+JEMALLOC_INLINE_C void
+arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind)
+{
+
+ JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind
+ << LG_PAGE)), PAGE);
+}
+
+JEMALLOC_INLINE_C void
+arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
+{
+ size_t i;
+ UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));
+
+ arena_run_page_mark_zeroed(chunk, run_ind);
+ for (i = 0; i < PAGE / sizeof(size_t); i++)
+ assert(p[i] == 0);
+}
+
+static void
+arena_nactive_add(arena_t *arena, size_t add_pages)
+{
+
+ if (config_stats) {
+ size_t cactive_add = CHUNK_CEILING((arena->nactive +
+ add_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
+ LG_PAGE);
+ if (cactive_add != 0)
+ stats_cactive_add(cactive_add);
+ }
+ arena->nactive += add_pages;
+}
+
+static void
+arena_nactive_sub(arena_t *arena, size_t sub_pages)
+{
+
+ if (config_stats) {
+ size_t cactive_sub = CHUNK_CEILING(arena->nactive << LG_PAGE) -
+ CHUNK_CEILING((arena->nactive - sub_pages) << LG_PAGE);
+ if (cactive_sub != 0)
+ stats_cactive_sub(cactive_sub);
+ }
+ arena->nactive -= sub_pages;
+}
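+
+/*
+ * The two helpers above count nactive in pages but report stats_cactive in
+ * chunk-rounded bytes. With 4 KiB pages and 2 MiB chunks, for instance
+ * (values illustrative), a chunk spans 512 pages, so growing nactive from 510
+ * to 515 pages moves CHUNK_CEILING(nactive << LG_PAGE) from 2 MiB to 4 MiB
+ * and adds 2 MiB to cactive, whereas growth that stays within the same
+ * 512-page multiple leaves cactive unchanged.
+ */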
+
+static void
+arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
+ size_t flag_dirty, size_t flag_decommitted, size_t need_pages)
+{
+ size_t total_pages, rem_pages;
+
+ assert(flag_dirty == 0 || flag_decommitted == 0);
+
+ total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
+ LG_PAGE;
+ assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
+ flag_dirty);
+ assert(need_pages <= total_pages);
+ rem_pages = total_pages - need_pages;
+
+ arena_avail_remove(arena, chunk, run_ind, total_pages);
+ if (flag_dirty != 0)
+ arena_run_dirty_remove(arena, chunk, run_ind, total_pages);
+ arena_nactive_add(arena, need_pages);
+
+ /* Keep track of trailing unused pages for later use. */
+ if (rem_pages > 0) {
+ size_t flags = flag_dirty | flag_decommitted;
+ size_t flag_unzeroed_mask = (flags == 0) ? CHUNK_MAP_UNZEROED :
+ 0;
+
+ arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
+ (rem_pages << LG_PAGE), flags |
+ (arena_mapbits_unzeroed_get(chunk, run_ind+need_pages) &
+ flag_unzeroed_mask));
+ arena_mapbits_unallocated_set(chunk, run_ind+total_pages-1,
+ (rem_pages << LG_PAGE), flags |
+ (arena_mapbits_unzeroed_get(chunk, run_ind+total_pages-1) &
+ flag_unzeroed_mask));
+ if (flag_dirty != 0) {
+ arena_run_dirty_insert(arena, chunk, run_ind+need_pages,
+ rem_pages);
+ }
+ arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages);
+ }
+}
+
+static bool
+arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
+ bool remove, bool zero)
+{
+ arena_chunk_t *chunk;
+ arena_chunk_map_misc_t *miscelm;
+ size_t flag_dirty, flag_decommitted, run_ind, need_pages;
+ size_t flag_unzeroed_mask;
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
+ miscelm = arena_run_to_miscelm(run);
+ run_ind = arena_miscelm_to_pageind(miscelm);
+ flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
+ flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
+ need_pages = (size >> LG_PAGE);
+ assert(need_pages > 0);
+
+ if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
+ run_ind << LG_PAGE, size, arena->ind))
+ return (true);
+
+ if (remove) {
+ arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
+ flag_decommitted, need_pages);
+ }
+
+ if (zero) {
+ if (flag_decommitted != 0) {
+ /* The run is untouched, and therefore zeroed. */
+ JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
+ *)((uintptr_t)chunk + (run_ind << LG_PAGE)),
+ (need_pages << LG_PAGE));
+ } else if (flag_dirty != 0) {
+ /* The run is dirty, so all pages must be zeroed. */
+ arena_run_zero(chunk, run_ind, need_pages);
+ } else {
+ /*
+ * The run is clean, so some pages may be zeroed (i.e.
+ * never before touched).
+ */
+ size_t i;
+ for (i = 0; i < need_pages; i++) {
+ if (arena_mapbits_unzeroed_get(chunk, run_ind+i)
+ != 0)
+ arena_run_zero(chunk, run_ind+i, 1);
+ else if (config_debug) {
+ arena_run_page_validate_zeroed(chunk,
+ run_ind+i);
+ } else {
+ arena_run_page_mark_zeroed(chunk,
+ run_ind+i);
+ }
+ }
+ }
+ } else {
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
+ (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
+ }
+
+ /*
+ * Set the last element first, in case the run only contains one page
+ * (i.e. both statements set the same element).
+ */
+ flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
+ CHUNK_MAP_UNZEROED : 0;
+ arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty |
+ (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
+ run_ind+need_pages-1)));
+ arena_mapbits_large_set(chunk, run_ind, size, flag_dirty |
+ (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, run_ind)));
+ return (false);
+}
+
+static bool
+arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
+{
+
+ return (arena_run_split_large_helper(arena, run, size, true, zero));
+}
+
+static bool
+arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
+{
+
+ return (arena_run_split_large_helper(arena, run, size, false, zero));
+}
+
+static bool
+arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size,
+ szind_t binind)
+{
+ arena_chunk_t *chunk;
+ arena_chunk_map_misc_t *miscelm;
+ size_t flag_dirty, flag_decommitted, run_ind, need_pages, i;
+
+ assert(binind != BININD_INVALID);
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
+ miscelm = arena_run_to_miscelm(run);
+ run_ind = arena_miscelm_to_pageind(miscelm);
+ flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
+ flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
+ need_pages = (size >> LG_PAGE);
+ assert(need_pages > 0);
+
+ if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
+ run_ind << LG_PAGE, size, arena->ind))
+ return (true);
+
+ arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
+ flag_decommitted, need_pages);
+
+ for (i = 0; i < need_pages; i++) {
+ size_t flag_unzeroed = arena_mapbits_unzeroed_get(chunk,
+ run_ind+i);
+ arena_mapbits_small_set(chunk, run_ind+i, i, binind,
+ flag_unzeroed);
+ if (config_debug && flag_dirty == 0 && flag_unzeroed == 0)
+ arena_run_page_validate_zeroed(chunk, run_ind+i);
+ }
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
+ (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
+ return (false);
+}
+
+static arena_chunk_t *
+arena_chunk_init_spare(arena_t *arena)
+{
+ arena_chunk_t *chunk;
+
+ assert(arena->spare != NULL);
+
+ chunk = arena->spare;
+ arena->spare = NULL;
+
+ assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
+ assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
+ assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
+ arena_maxrun);
+ assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
+ arena_maxrun);
+ assert(arena_mapbits_dirty_get(chunk, map_bias) ==
+ arena_mapbits_dirty_get(chunk, chunk_npages-1));
+
+ return (chunk);
+}
+
+static bool
+arena_chunk_register(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
+ size_t sn, bool zero)
+{
+
+ /*
+ * The extent node notion of "committed" doesn't directly apply to
+ * arena chunks. Arbitrarily mark them as committed. The commit state
+ * of runs is tracked individually, and upon chunk deallocation the
+ * entire chunk is in a consistent commit state.
+ */
+ extent_node_init(&chunk->node, arena, chunk, chunksize, sn, zero, true);
+ extent_node_achunk_set(&chunk->node, true);
+ return (chunk_register(tsdn, chunk, &chunk->node));
+}
+
+static arena_chunk_t *
+arena_chunk_alloc_internal_hard(tsdn_t *tsdn, arena_t *arena,
+ chunk_hooks_t *chunk_hooks, bool *zero, bool *commit)
+{
+ arena_chunk_t *chunk;
+ size_t sn;
+
+ malloc_mutex_unlock(tsdn, &arena->lock);
+
+ chunk = (arena_chunk_t *)chunk_alloc_wrapper(tsdn, arena, chunk_hooks,
+ NULL, chunksize, chunksize, &sn, zero, commit);
+ if (chunk != NULL && !*commit) {
+ /* Commit header. */
+ if (chunk_hooks->commit(chunk, chunksize, 0, map_bias <<
+ LG_PAGE, arena->ind)) {
+ chunk_dalloc_wrapper(tsdn, arena, chunk_hooks,
+ (void *)chunk, chunksize, sn, *zero, *commit);
+ chunk = NULL;
+ }
+ }
+ if (chunk != NULL && arena_chunk_register(tsdn, arena, chunk, sn,
+ *zero)) {
+ if (!*commit) {
+ /* Undo commit of header. */
+ chunk_hooks->decommit(chunk, chunksize, 0, map_bias <<
+ LG_PAGE, arena->ind);
+ }
+ chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, (void *)chunk,
+ chunksize, sn, *zero, *commit);
+ chunk = NULL;
+ }
+
+ malloc_mutex_lock(tsdn, &arena->lock);
+ return (chunk);
+}
+
+static arena_chunk_t *
+arena_chunk_alloc_internal(tsdn_t *tsdn, arena_t *arena, bool *zero,
+ bool *commit)
+{
+ arena_chunk_t *chunk;
+ chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
+ size_t sn;
+
+ chunk = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, chunksize,
+ chunksize, &sn, zero, commit, true);
+ if (chunk != NULL) {
+ if (arena_chunk_register(tsdn, arena, chunk, sn, *zero)) {
+ chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk,
+ chunksize, sn, true);
+ return (NULL);
+ }
+ }
+ if (chunk == NULL) {
+ chunk = arena_chunk_alloc_internal_hard(tsdn, arena,
+ &chunk_hooks, zero, commit);
+ }
+
+ if (config_stats && chunk != NULL) {
+ arena->stats.mapped += chunksize;
+ arena->stats.metadata_mapped += (map_bias << LG_PAGE);
+ }
+
+ return (chunk);
+}
+
+static arena_chunk_t *
+arena_chunk_init_hard(tsdn_t *tsdn, arena_t *arena)
+{
+ arena_chunk_t *chunk;
+ bool zero, commit;
+ size_t flag_unzeroed, flag_decommitted, i;
+
+ assert(arena->spare == NULL);
+
+ zero = false;
+ commit = false;
+ chunk = arena_chunk_alloc_internal(tsdn, arena, &zero, &commit);
+ if (chunk == NULL)
+ return (NULL);
+
+ chunk->hugepage = true;
+
+ /*
+ * Initialize the map to contain one maximal free untouched run. Mark
+ * the pages as zeroed if arena_chunk_alloc_internal() returned a zeroed
+ * or decommitted chunk.
+ */
+ flag_unzeroed = (zero || !commit) ? 0 : CHUNK_MAP_UNZEROED;
+ flag_decommitted = commit ? 0 : CHUNK_MAP_DECOMMITTED;
+ arena_mapbits_unallocated_set(chunk, map_bias, arena_maxrun,
+ flag_unzeroed | flag_decommitted);
+ /*
+ * There is no need to initialize the internal page map entries unless
+ * the chunk is not zeroed.
+ */
+ if (!zero) {
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
+ (void *)arena_bitselm_get_const(chunk, map_bias+1),
+ (size_t)((uintptr_t)arena_bitselm_get_const(chunk,
+ chunk_npages-1) -
+ (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
+ for (i = map_bias+1; i < chunk_npages-1; i++)
+ arena_mapbits_internal_set(chunk, i, flag_unzeroed);
+ } else {
+ JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
+ *)arena_bitselm_get_const(chunk, map_bias+1),
+ (size_t)((uintptr_t)arena_bitselm_get_const(chunk,
+ chunk_npages-1) -
+ (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
+ if (config_debug) {
+ for (i = map_bias+1; i < chunk_npages-1; i++) {
+ assert(arena_mapbits_unzeroed_get(chunk, i) ==
+ flag_unzeroed);
+ }
+ }
+ }
+ arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxrun,
+ flag_unzeroed);
+
+ return (chunk);
+}
+
+static arena_chunk_t *
+arena_chunk_alloc(tsdn_t *tsdn, arena_t *arena)
+{
+ arena_chunk_t *chunk;
+
+ if (arena->spare != NULL)
+ chunk = arena_chunk_init_spare(arena);
+ else {
+ chunk = arena_chunk_init_hard(tsdn, arena);
+ if (chunk == NULL)
+ return (NULL);
+ }
+
+ ql_elm_new(&chunk->node, ql_link);
+ ql_tail_insert(&arena->achunks, &chunk->node, ql_link);
+ arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias);
+
+ return (chunk);
+}
+
+static void
+arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
+{
+ size_t sn, hugepage;
+ bool committed;
+ chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
+
+ chunk_deregister(chunk, &chunk->node);
+
+ sn = extent_node_sn_get(&chunk->node);
+ hugepage = chunk->hugepage;
+ committed = (arena_mapbits_decommitted_get(chunk, map_bias) == 0);
+ if (!committed) {
+ /*
+ * Decommit the header. Mark the chunk as decommitted even if
+ * header decommit fails, since treating a partially committed
+ * chunk as committed has a high potential for causing later
+ * access of decommitted memory.
+ */
+ chunk_hooks = chunk_hooks_get(tsdn, arena);
+ chunk_hooks.decommit(chunk, chunksize, 0, map_bias << LG_PAGE,
+ arena->ind);
+ }
+ if (!hugepage) {
+ /*
+ * Convert chunk back to the default state, so that all
+ * subsequent chunk allocations start out with chunks that can
+ * be backed by transparent huge pages.
+ */
+ pages_huge(chunk, chunksize);
+ }
+
+ chunk_dalloc_cache(tsdn, arena, &chunk_hooks, (void *)chunk, chunksize,
+ sn, committed);
+
+ if (config_stats) {
+ arena->stats.mapped -= chunksize;
+ arena->stats.metadata_mapped -= (map_bias << LG_PAGE);
+ }
+}
+
+static void
+arena_spare_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *spare)
+{
+
+ assert(arena->spare != spare);
+
+ if (arena_mapbits_dirty_get(spare, map_bias) != 0) {
+ arena_run_dirty_remove(arena, spare, map_bias,
+ chunk_npages-map_bias);
+ }
+
+ arena_chunk_discard(tsdn, arena, spare);
+}
+
+static void
+arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
+{
+ arena_chunk_t *spare;
+
+ assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
+ assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
+ assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
+ arena_maxrun);
+ assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
+ arena_maxrun);
+ assert(arena_mapbits_dirty_get(chunk, map_bias) ==
+ arena_mapbits_dirty_get(chunk, chunk_npages-1));
+ assert(arena_mapbits_decommitted_get(chunk, map_bias) ==
+ arena_mapbits_decommitted_get(chunk, chunk_npages-1));
+
+ /* Remove run from runs_avail, so that the arena does not use it. */
+ arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias);
+
+ ql_remove(&arena->achunks, &chunk->node, ql_link);
+ spare = arena->spare;
+ arena->spare = chunk;
+ if (spare != NULL)
+ arena_spare_discard(tsdn, arena, spare);
+}
+
+static void
+arena_huge_malloc_stats_update(arena_t *arena, size_t usize)
+{
+ szind_t index = size2index(usize) - nlclasses - NBINS;
+
+ cassert(config_stats);
+
+ arena->stats.nmalloc_huge++;
+ arena->stats.allocated_huge += usize;
+ arena->stats.hstats[index].nmalloc++;
+ arena->stats.hstats[index].curhchunks++;
+}
+
+static void
+arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize)
+{
+ szind_t index = size2index(usize) - nlclasses - NBINS;
+
+ cassert(config_stats);
+
+ arena->stats.nmalloc_huge--;
+ arena->stats.allocated_huge -= usize;
+ arena->stats.hstats[index].nmalloc--;
+ arena->stats.hstats[index].curhchunks--;
+}
+
+static void
+arena_huge_dalloc_stats_update(arena_t *arena, size_t usize)
+{
+ szind_t index = size2index(usize) - nlclasses - NBINS;
+
+ cassert(config_stats);
+
+ arena->stats.ndalloc_huge++;
+ arena->stats.allocated_huge -= usize;
+ arena->stats.hstats[index].ndalloc++;
+ arena->stats.hstats[index].curhchunks--;
+}
+
+static void
+arena_huge_reset_stats_cancel(arena_t *arena, size_t usize)
+{
+ szind_t index = size2index(usize) - nlclasses - NBINS;
+
+ cassert(config_stats);
+
+ arena->stats.ndalloc_huge++;
+ arena->stats.hstats[index].ndalloc--;
+}
+
+static void
+arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize)
+{
+ szind_t index = size2index(usize) - nlclasses - NBINS;
+
+ cassert(config_stats);
+
+ arena->stats.ndalloc_huge--;
+ arena->stats.allocated_huge += usize;
+ arena->stats.hstats[index].ndalloc--;
+ arena->stats.hstats[index].curhchunks++;
+}
+
+static void
+arena_huge_ralloc_stats_update(arena_t *arena, size_t oldsize, size_t usize)
+{
+
+ arena_huge_dalloc_stats_update(arena, oldsize);
+ arena_huge_malloc_stats_update(arena, usize);
+}
+
+static void
+arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize,
+ size_t usize)
+{
+
+ arena_huge_dalloc_stats_update_undo(arena, oldsize);
+ arena_huge_malloc_stats_update_undo(arena, usize);
+}
+
+extent_node_t *
+arena_node_alloc(tsdn_t *tsdn, arena_t *arena)
+{
+ extent_node_t *node;
+
+ malloc_mutex_lock(tsdn, &arena->node_cache_mtx);
+ node = ql_last(&arena->node_cache, ql_link);
+ if (node == NULL) {
+ malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
+ return (base_alloc(tsdn, sizeof(extent_node_t)));
+ }
+ ql_tail_remove(&arena->node_cache, extent_node_t, ql_link);
+ malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
+ return (node);
+}
+
+void
+arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node)
+{
+
+ malloc_mutex_lock(tsdn, &arena->node_cache_mtx);
+ ql_elm_new(node, ql_link);
+ ql_tail_insert(&arena->node_cache, node, ql_link);
+ malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
+}
+
+static void *
+arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena,
+ chunk_hooks_t *chunk_hooks, size_t usize, size_t alignment, size_t *sn,
+ bool *zero, size_t csize)
+{
+ void *ret;
+ bool commit = true;
+
+ ret = chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL, csize,
+ alignment, sn, zero, &commit);
+ if (ret == NULL) {
+ /* Revert optimistic stats updates. */
+ malloc_mutex_lock(tsdn, &arena->lock);
+ if (config_stats) {
+ arena_huge_malloc_stats_update_undo(arena, usize);
+ arena->stats.mapped -= usize;
+ }
+ arena_nactive_sub(arena, usize >> LG_PAGE);
+ malloc_mutex_unlock(tsdn, &arena->lock);
+ }
+
+ return (ret);
+}
+
+void *
+arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
+ size_t alignment, size_t *sn, bool *zero)
+{
+ void *ret;
+ chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
+ size_t csize = CHUNK_CEILING(usize);
+ bool commit = true;
+
+ malloc_mutex_lock(tsdn, &arena->lock);
+
+ /* Optimistically update stats. */
+ if (config_stats) {
+ arena_huge_malloc_stats_update(arena, usize);
+ arena->stats.mapped += usize;
+ }
+ arena_nactive_add(arena, usize >> LG_PAGE);
+
+ ret = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, csize,
+ alignment, sn, zero, &commit, true);
+ malloc_mutex_unlock(tsdn, &arena->lock);
+ if (ret == NULL) {
+ ret = arena_chunk_alloc_huge_hard(tsdn, arena, &chunk_hooks,
+ usize, alignment, sn, zero, csize);
+ }
+
+ return (ret);
+}
+
+void
+arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t usize,
+ size_t sn)
+{
+ chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
+ size_t csize;
+
+ csize = CHUNK_CEILING(usize);
+ malloc_mutex_lock(tsdn, &arena->lock);
+ if (config_stats) {
+ arena_huge_dalloc_stats_update(arena, usize);
+ arena->stats.mapped -= usize;
+ }
+ arena_nactive_sub(arena, usize >> LG_PAGE);
+
+ chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk, csize, sn, true);
+ malloc_mutex_unlock(tsdn, &arena->lock);
+}
+
+void
+arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena, void *chunk,
+ size_t oldsize, size_t usize)
+{
+
+ assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize));
+ assert(oldsize != usize);
+
+ malloc_mutex_lock(tsdn, &arena->lock);
+ if (config_stats)
+ arena_huge_ralloc_stats_update(arena, oldsize, usize);
+ if (oldsize < usize)
+ arena_nactive_add(arena, (usize - oldsize) >> LG_PAGE);
+ else
+ arena_nactive_sub(arena, (oldsize - usize) >> LG_PAGE);
+ malloc_mutex_unlock(tsdn, &arena->lock);
+}
+
+void
+arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, void *chunk,
+ size_t oldsize, size_t usize, size_t sn)
+{
+ size_t udiff = oldsize - usize;
+ size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
+
+ malloc_mutex_lock(tsdn, &arena->lock);
+ if (config_stats) {
+ arena_huge_ralloc_stats_update(arena, oldsize, usize);
+ if (cdiff != 0)
+ arena->stats.mapped -= cdiff;
+ }
+ arena_nactive_sub(arena, udiff >> LG_PAGE);
+
+ if (cdiff != 0) {
+ chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
+ void *nchunk = (void *)((uintptr_t)chunk +
+ CHUNK_CEILING(usize));
+
+ chunk_dalloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
+ sn, true);
+ }
+ malloc_mutex_unlock(tsdn, &arena->lock);
+}
+
+static bool
+arena_chunk_ralloc_huge_expand_hard(tsdn_t *tsdn, arena_t *arena,
+ chunk_hooks_t *chunk_hooks, void *chunk, size_t oldsize, size_t usize,
+ size_t *sn, bool *zero, void *nchunk, size_t udiff, size_t cdiff)
+{
+ bool err;
+ bool commit = true;
+
+ err = (chunk_alloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
+ chunksize, sn, zero, &commit) == NULL);
+ if (err) {
+ /* Revert optimistic stats updates. */
+ malloc_mutex_lock(tsdn, &arena->lock);
+ if (config_stats) {
+ arena_huge_ralloc_stats_update_undo(arena, oldsize,
+ usize);
+ arena->stats.mapped -= cdiff;
+ }
+ arena_nactive_sub(arena, udiff >> LG_PAGE);
+ malloc_mutex_unlock(tsdn, &arena->lock);
+ } else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk,
+ cdiff, true, arena->ind)) {
+ chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
+ *sn, *zero, true);
+ err = true;
+ }
+ return (err);
+}
+
+bool
+arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, void *chunk,
+ size_t oldsize, size_t usize, bool *zero)
+{
+ bool err;
+ chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
+ void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize));
+ size_t udiff = usize - oldsize;
+ size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);
+ size_t sn;
+ bool commit = true;
+
+ malloc_mutex_lock(tsdn, &arena->lock);
+
+ /* Optimistically update stats. */
+ if (config_stats) {
+ arena_huge_ralloc_stats_update(arena, oldsize, usize);
+ arena->stats.mapped += cdiff;
+ }
+ arena_nactive_add(arena, udiff >> LG_PAGE);
+
+ err = (chunk_alloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
+ chunksize, &sn, zero, &commit, true) == NULL);
+ malloc_mutex_unlock(tsdn, &arena->lock);
+ if (err) {
+ err = arena_chunk_ralloc_huge_expand_hard(tsdn, arena,
+ &chunk_hooks, chunk, oldsize, usize, &sn, zero, nchunk,
+ udiff, cdiff);
+ } else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk,
+ cdiff, true, arena->ind)) {
+ chunk_dalloc_wrapper(tsdn, arena, &chunk_hooks, nchunk, cdiff,
+ sn, *zero, true);
+ err = true;
+ }
+
+ return (err);
+}
+
+/*
+ * Do first-best-fit run selection, i.e. select the lowest run that best fits.
+ * Run sizes are indexed, so not all candidate runs are necessarily exactly the
+ * same size.
+ */
+static arena_run_t *
+arena_run_first_best_fit(arena_t *arena, size_t size)
+{
+ pszind_t pind, i;
+
+ pind = psz2ind(run_quantize_ceil(size));
+
+ for (i = pind; pind2sz(i) <= chunksize; i++) {
+ arena_chunk_map_misc_t *miscelm = arena_run_heap_first(
+ &arena->runs_avail[i]);
+ if (miscelm != NULL)
+ return (&miscelm->run);
+ }
+
+ return (NULL);
+}
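+
+/*
+ * For example (sizes illustrative), a request that run_quantize_ceil() maps
+ * to 40 KiB is served from runs_avail[psz2ind(40K)] if that heap is
+ * non-empty; otherwise the 48 KiB heap is tried, and so on up to chunksize.
+ * Within a heap, arena_snad_comp() orders runs by chunk serial number and
+ * then by address, so the oldest, lowest-addressed adequate run wins. Any
+ * excess pages are trimmed off by the subsequent arena_run_split_*() call.
+ */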
+
+static arena_run_t *
+arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
+{
+ arena_run_t *run = arena_run_first_best_fit(arena, size);
+ if (run != NULL) {
+ if (arena_run_split_large(arena, run, size, zero))
+ run = NULL;
+ }
+ return (run);
+}
+
+static arena_run_t *
+arena_run_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t size, bool zero)
+{
+ arena_chunk_t *chunk;
+ arena_run_t *run;
+
+ assert(size <= arena_maxrun);
+ assert(size == PAGE_CEILING(size));
+
+ /* Search the arena's chunks for the lowest best fit. */
+ run = arena_run_alloc_large_helper(arena, size, zero);
+ if (run != NULL)
+ return (run);
+
+ /*
+ * No usable runs. Create a new chunk from which to allocate the run.
+ */
+ chunk = arena_chunk_alloc(tsdn, arena);
+ if (chunk != NULL) {
+ run = &arena_miscelm_get_mutable(chunk, map_bias)->run;
+ if (arena_run_split_large(arena, run, size, zero))
+ run = NULL;
+ return (run);
+ }
+
+ /*
+ * arena_chunk_alloc() failed, but another thread may have made
+ * sufficient memory available while this one dropped arena->lock in
+ * arena_chunk_alloc(), so search one more time.
+ */
+ return (arena_run_alloc_large_helper(arena, size, zero));
+}
+
+static arena_run_t *
+arena_run_alloc_small_helper(arena_t *arena, size_t size, szind_t binind)
+{
+ arena_run_t *run = arena_run_first_best_fit(arena, size);
+ if (run != NULL) {
+ if (arena_run_split_small(arena, run, size, binind))
+ run = NULL;
+ }
+ return (run);
+}
+
+static arena_run_t *
+arena_run_alloc_small(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t binind)
+{
+ arena_chunk_t *chunk;
+ arena_run_t *run;
+
+ assert(size <= arena_maxrun);
+ assert(size == PAGE_CEILING(size));
+ assert(binind != BININD_INVALID);
+
+ /* Search the arena's chunks for the lowest best fit. */
+ run = arena_run_alloc_small_helper(arena, size, binind);
+ if (run != NULL)
+ return (run);
+
+ /*
+ * No usable runs. Create a new chunk from which to allocate the run.
+ */
+ chunk = arena_chunk_alloc(tsdn, arena);
+ if (chunk != NULL) {
+ run = &arena_miscelm_get_mutable(chunk, map_bias)->run;
+ if (arena_run_split_small(arena, run, size, binind))
+ run = NULL;
+ return (run);
+ }
+
+ /*
+ * arena_chunk_alloc() failed, but another thread may have made
+ * sufficient memory available while this one dropped arena->lock in
+ * arena_chunk_alloc(), so search one more time.
+ */
+ return (arena_run_alloc_small_helper(arena, size, binind));
+}
+
+static bool
+arena_lg_dirty_mult_valid(ssize_t lg_dirty_mult)
+{
+
+ return (lg_dirty_mult >= -1 && lg_dirty_mult < (ssize_t)(sizeof(size_t)
+ << 3));
+}
+
+ssize_t
+arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena)
+{
+ ssize_t lg_dirty_mult;
+
+ malloc_mutex_lock(tsdn, &arena->lock);
+ lg_dirty_mult = arena->lg_dirty_mult;
+ malloc_mutex_unlock(tsdn, &arena->lock);
+
+ return (lg_dirty_mult);
+}
+
+bool
+arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena, ssize_t lg_dirty_mult)
+{
+
+ if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
+ return (true);
+
+ malloc_mutex_lock(tsdn, &arena->lock);
+ arena->lg_dirty_mult = lg_dirty_mult;
+ arena_maybe_purge(tsdn, arena);
+ malloc_mutex_unlock(tsdn, &arena->lock);
+
+ return (false);
+}
+
+static void
+arena_decay_deadline_init(arena_t *arena)
+{
+
+ assert(opt_purge == purge_mode_decay);
+
+ /*
+ * Generate a new deadline that is uniformly random within the next
+ * epoch after the current one.
+ */
+ nstime_copy(&arena->decay.deadline, &arena->decay.epoch);
+ nstime_add(&arena->decay.deadline, &arena->decay.interval);
+ if (arena->decay.time > 0) {
+ nstime_t jitter;
+
+ nstime_init(&jitter, prng_range_u64(&arena->decay.jitter_state,
+ nstime_ns(&arena->decay.interval)));
+ nstime_add(&arena->decay.deadline, &jitter);
+ }
+}
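+
+/*
+ * Illustrative numbers for the deadline computed above: a 10 s decay time
+ * divided into SMOOTHSTEP_NSTEPS epochs of, say, 50 ms each gives
+ * deadline = epoch + 50 ms + jitter, with jitter drawn uniformly from
+ * [0, 50 ms). The jitter keeps multiple arenas from reaching their purge
+ * deadlines in lockstep.
+ */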
+
+static bool
+arena_decay_deadline_reached(const arena_t *arena, const nstime_t *time)
+{
+
+ assert(opt_purge == purge_mode_decay);
+
+ return (nstime_compare(&arena->decay.deadline, time) <= 0);
+}
+
+static size_t
+arena_decay_backlog_npages_limit(const arena_t *arena)
+{
+ static const uint64_t h_steps[] = {
+#define STEP(step, h, x, y) \
+ h,
+ SMOOTHSTEP
+#undef STEP
+ };
+ uint64_t sum;
+ size_t npages_limit_backlog;
+ unsigned i;
+
+ assert(opt_purge == purge_mode_decay);
+
+ /*
+ * For each element of decay_backlog, multiply by the corresponding
+ * fixed-point smoothstep decay factor. Sum the products, then divide
+ * to round down to the nearest whole number of pages.
+ */
+ sum = 0;
+ for (i = 0; i < SMOOTHSTEP_NSTEPS; i++)
+ sum += arena->decay.backlog[i] * h_steps[i];
+ npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);
+
+ return (npages_limit_backlog);
+}
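+
+/*
+ * Worked example for the sum above: each h_steps[] entry is a fixed-point
+ * fraction in [0, 1] scaled by 2^SMOOTHSTEP_BFP, and it shrinks toward zero
+ * as its backlog slot ages. If 1000 pages were dirtied during an epoch whose
+ * current factor is 0.75, that slot contributes
+ * (1000 * (0.75 * 2^SMOOTHSTEP_BFP)) >> SMOOTHSTEP_BFP = 750 pages to the
+ * limit; slots old enough to have decayed to zero contribute nothing, so
+ * their pages become eligible for purging.
+ */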
+
+static void
+arena_decay_backlog_update_last(arena_t *arena)
+{
+ size_t ndirty_delta = (arena->ndirty > arena->decay.ndirty) ?
+ arena->ndirty - arena->decay.ndirty : 0;
+ arena->decay.backlog[SMOOTHSTEP_NSTEPS-1] = ndirty_delta;
+}
+
+static void
+arena_decay_backlog_update(arena_t *arena, uint64_t nadvance_u64)
+{
+
+ if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
+ memset(arena->decay.backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
+ sizeof(size_t));
+ } else {
+ size_t nadvance_z = (size_t)nadvance_u64;
+
+ assert((uint64_t)nadvance_z == nadvance_u64);
+
+ memmove(arena->decay.backlog, &arena->decay.backlog[nadvance_z],
+ (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
+ if (nadvance_z > 1) {
+ memset(&arena->decay.backlog[SMOOTHSTEP_NSTEPS -
+ nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
+ }
+ }
+
+ arena_decay_backlog_update_last(arena);
+}
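+
+/*
+ * Example of the shift above: advancing by nadvance_u64 == 2 drops the two
+ * oldest backlog slots, slides the remaining SMOOTHSTEP_NSTEPS-2 slots toward
+ * index 0, zeroes the newly exposed slot just before the last one, and then
+ * lets arena_decay_backlog_update_last() record the newest epoch's
+ * dirty-page delta in backlog[SMOOTHSTEP_NSTEPS-1].
+ */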
+
+static void
+arena_decay_epoch_advance_helper(arena_t *arena, const nstime_t *time)
+{
+ uint64_t nadvance_u64;
+ nstime_t delta;
+
+ assert(opt_purge == purge_mode_decay);
+ assert(arena_decay_deadline_reached(arena, time));
+
+ nstime_copy(&delta, time);
+ nstime_subtract(&delta, &arena->decay.epoch);
+ nadvance_u64 = nstime_divide(&delta, &arena->decay.interval);
+ assert(nadvance_u64 > 0);
+
+ /* Add nadvance_u64 decay intervals to epoch. */
+ nstime_copy(&delta, &arena->decay.interval);
+ nstime_imultiply(&delta, nadvance_u64);
+ nstime_add(&arena->decay.epoch, &delta);
+
+ /* Set a new deadline. */
+ arena_decay_deadline_init(arena);
+
+ /* Update the backlog. */
+ arena_decay_backlog_update(arena, nadvance_u64);
+}
+
+static void
+arena_decay_epoch_advance_purge(tsdn_t *tsdn, arena_t *arena)
+{
+ size_t ndirty_limit = arena_decay_backlog_npages_limit(arena);
+
+ if (arena->ndirty > ndirty_limit)
+ arena_purge_to_limit(tsdn, arena, ndirty_limit);
+ arena->decay.ndirty = arena->ndirty;
+}
+
+static void
+arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, const nstime_t *time)
+{
+
+ arena_decay_epoch_advance_helper(arena, time);
+ arena_decay_epoch_advance_purge(tsdn, arena);
+}
+
+static void
+arena_decay_init(arena_t *arena, ssize_t decay_time)
+{
+
+ arena->decay.time = decay_time;
+ if (decay_time > 0) {
+ nstime_init2(&arena->decay.interval, decay_time, 0);
+ nstime_idivide(&arena->decay.interval, SMOOTHSTEP_NSTEPS);
+ }
+
+ nstime_init(&arena->decay.epoch, 0);
+ nstime_update(&arena->decay.epoch);
+ arena->decay.jitter_state = (uint64_t)(uintptr_t)arena;
+ arena_decay_deadline_init(arena);
+ arena->decay.ndirty = arena->ndirty;
+ memset(arena->decay.backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
+}
+
+static bool
+arena_decay_time_valid(ssize_t decay_time)
+{
+
+ if (decay_time < -1)
+ return (false);
+ if (decay_time == -1 || (uint64_t)decay_time <= NSTIME_SEC_MAX)
+ return (true);
+ return (false);
+}
+
+ssize_t
+arena_decay_time_get(tsdn_t *tsdn, arena_t *arena)
+{
+ ssize_t decay_time;
+
+ malloc_mutex_lock(tsdn, &arena->lock);
+ decay_time = arena->decay.time;
+ malloc_mutex_unlock(tsdn, &arena->lock);
+
+ return (decay_time);
+}
+
+bool
+arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time)
+{
+
+ if (!arena_decay_time_valid(decay_time))
+ return (true);
+
+ malloc_mutex_lock(tsdn, &arena->lock);
+ /*
+ * Restart decay backlog from scratch, which may cause many dirty pages
+ * to be immediately purged. It would conceptually be possible to map
+ * the old backlog onto the new backlog, but there is no justification
+ * for such complexity since decay_time changes are intended to be
+ * infrequent, either between the {-1, 0, >0} states, or a one-time
+ * arbitrary change during initial arena configuration.
+ */
+ arena_decay_init(arena, decay_time);
+ arena_maybe_purge(tsdn, arena);
+ malloc_mutex_unlock(tsdn, &arena->lock);
+
+ return (false);
+}
+
+static void
+arena_maybe_purge_ratio(tsdn_t *tsdn, arena_t *arena)
+{
+
+ assert(opt_purge == purge_mode_ratio);
+
+ /* Don't purge if the option is disabled. */
+ if (arena->lg_dirty_mult < 0)
+ return;
+
+ /*
+ * Iterate, since preventing recursive purging could otherwise leave too
+ * many dirty pages.
+ */
+ while (true) {
+ size_t threshold = (arena->nactive >> arena->lg_dirty_mult);
+ if (threshold < chunk_npages)
+ threshold = chunk_npages;
+ /*
+ * Don't purge unless the number of purgeable pages exceeds the
+ * threshold.
+ */
+ if (arena->ndirty <= threshold)
+ return;
+ arena_purge_to_limit(tsdn, arena, threshold);
+ }
+}
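+
+/*
+ * Concrete reading of the threshold above: with lg_dirty_mult == 3, for
+ * instance, purging starts once there is more than one dirty page per eight
+ * active pages (ndirty > nactive/8), and arena_purge_to_limit() brings ndirty
+ * back down to that threshold. The chunk_npages floor keeps arenas with very
+ * few active pages from purging continually.
+ */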
+
+static void
+arena_maybe_purge_decay(tsdn_t *tsdn, arena_t *arena)
+{
+ nstime_t time;
+
+ assert(opt_purge == purge_mode_decay);
+
+ /* Purge all or nothing if the option is disabled. */
+ if (arena->decay.time <= 0) {
+ if (arena->decay.time == 0)
+ arena_purge_to_limit(tsdn, arena, 0);
+ return;
+ }
+
+ nstime_init(&time, 0);
+ nstime_update(&time);
+ if (unlikely(!nstime_monotonic() && nstime_compare(&arena->decay.epoch,
+ &time) > 0)) {
+ /*
+ * Time went backwards. Move the epoch back in time and
+ * generate a new deadline, with the expectation that time
+ * typically flows forward for long enough periods of time that
+ * epochs complete. Unfortunately, this strategy is susceptible
+ * to clock jitter triggering premature epoch advances, but
+ * clock jitter estimation and compensation isn't feasible here
+ * because calls into this code are event-driven.
+ */
+ nstime_copy(&arena->decay.epoch, &time);
+ arena_decay_deadline_init(arena);
+ } else {
+ /* Verify that time does not go backwards. */
+ assert(nstime_compare(&arena->decay.epoch, &time) <= 0);
+ }
+
+ /*
+ * If the deadline has been reached, advance to the current epoch and
+ * purge to the new limit if necessary. Note that dirty pages created
+ * during the current epoch are not subject to purge until a future
+ * epoch, so as a result purging only happens during epoch advances.
+ */
+ if (arena_decay_deadline_reached(arena, &time))
+ arena_decay_epoch_advance(tsdn, arena, &time);
+}
+
+void
+arena_maybe_purge(tsdn_t *tsdn, arena_t *arena)
+{
+
+ /* Don't recursively purge. */
+ if (arena->purging)
+ return;
+
+ if (opt_purge == purge_mode_ratio)
+ arena_maybe_purge_ratio(tsdn, arena);
+ else
+ arena_maybe_purge_decay(tsdn, arena);
+}
+
+static size_t
+arena_dirty_count(arena_t *arena)
+{
+ size_t ndirty = 0;
+ arena_runs_dirty_link_t *rdelm;
+ extent_node_t *chunkselm;
+
+ for (rdelm = qr_next(&arena->runs_dirty, rd_link),
+ chunkselm = qr_next(&arena->chunks_cache, cc_link);
+ rdelm != &arena->runs_dirty; rdelm = qr_next(rdelm, rd_link)) {
+ size_t npages;
+
+ if (rdelm == &chunkselm->rd) {
+ npages = extent_node_size_get(chunkselm) >> LG_PAGE;
+ chunkselm = qr_next(chunkselm, cc_link);
+ } else {
+ arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
+ rdelm);
+ arena_chunk_map_misc_t *miscelm =
+ arena_rd_to_miscelm(rdelm);
+ size_t pageind = arena_miscelm_to_pageind(miscelm);
+ assert(arena_mapbits_allocated_get(chunk, pageind) ==
+ 0);
+ assert(arena_mapbits_large_get(chunk, pageind) == 0);
+ assert(arena_mapbits_dirty_get(chunk, pageind) != 0);
+ npages = arena_mapbits_unallocated_size_get(chunk,
+ pageind) >> LG_PAGE;
+ }
+ ndirty += npages;
+ }
+
+ return (ndirty);
+}
+
+static size_t
+arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
+ size_t ndirty_limit, arena_runs_dirty_link_t *purge_runs_sentinel,
+ extent_node_t *purge_chunks_sentinel)
+{
+ arena_runs_dirty_link_t *rdelm, *rdelm_next;
+ extent_node_t *chunkselm;
+ size_t nstashed = 0;
+
+ /* Stash runs/chunks according to ndirty_limit. */
+ for (rdelm = qr_next(&arena->runs_dirty, rd_link),
+ chunkselm = qr_next(&arena->chunks_cache, cc_link);
+ rdelm != &arena->runs_dirty; rdelm = rdelm_next) {
+ size_t npages;
+ rdelm_next = qr_next(rdelm, rd_link);
+
+ if (rdelm == &chunkselm->rd) {
+ extent_node_t *chunkselm_next;
+ size_t sn;
+ bool zero, commit;
+ UNUSED void *chunk;
+
+ npages = extent_node_size_get(chunkselm) >> LG_PAGE;
+ if (opt_purge == purge_mode_decay && arena->ndirty -
+ (nstashed + npages) < ndirty_limit)
+ break;
+
+ chunkselm_next = qr_next(chunkselm, cc_link);
+ /*
+ * Allocate. chunkselm remains valid due to the
+ * dalloc_node=false argument to chunk_alloc_cache().
+ */
+ zero = false;
+ commit = false;
+ chunk = chunk_alloc_cache(tsdn, arena, chunk_hooks,
+ extent_node_addr_get(chunkselm),
+ extent_node_size_get(chunkselm), chunksize, &sn,
+ &zero, &commit, false);
+ assert(chunk == extent_node_addr_get(chunkselm));
+ assert(zero == extent_node_zeroed_get(chunkselm));
+ extent_node_dirty_insert(chunkselm, purge_runs_sentinel,
+ purge_chunks_sentinel);
+ assert(npages == (extent_node_size_get(chunkselm) >>
+ LG_PAGE));
+ chunkselm = chunkselm_next;
+ } else {
+ arena_chunk_t *chunk =
+ (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
+ arena_chunk_map_misc_t *miscelm =
+ arena_rd_to_miscelm(rdelm);
+ size_t pageind = arena_miscelm_to_pageind(miscelm);
+ arena_run_t *run = &miscelm->run;
+ size_t run_size =
+ arena_mapbits_unallocated_size_get(chunk, pageind);
+
+ npages = run_size >> LG_PAGE;
+ if (opt_purge == purge_mode_decay && arena->ndirty -
+ (nstashed + npages) < ndirty_limit)
+ break;
+
+ assert(pageind + npages <= chunk_npages);
+ assert(arena_mapbits_dirty_get(chunk, pageind) ==
+ arena_mapbits_dirty_get(chunk, pageind+npages-1));
+
+ /*
+ * If purging the spare chunk's run, make it available
+ * prior to allocation.
+ */
+ if (chunk == arena->spare)
+ arena_chunk_alloc(tsdn, arena);
+
+ /* Temporarily allocate the free dirty run. */
+ arena_run_split_large(arena, run, run_size, false);
+ /* Stash. */
+ if (false)
+ qr_new(rdelm, rd_link); /* Redundant. */
+ else {
+ assert(qr_next(rdelm, rd_link) == rdelm);
+ assert(qr_prev(rdelm, rd_link) == rdelm);
+ }
+ qr_meld(purge_runs_sentinel, rdelm, rd_link);
+ }
+
+ nstashed += npages;
+ if (opt_purge == purge_mode_ratio && arena->ndirty - nstashed <=
+ ndirty_limit)
+ break;
+ }
+
+ return (nstashed);
+}
+
+static size_t
+arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
+ arena_runs_dirty_link_t *purge_runs_sentinel,
+ extent_node_t *purge_chunks_sentinel)
+{
+ size_t npurged, nmadvise;
+ arena_runs_dirty_link_t *rdelm;
+ extent_node_t *chunkselm;
+
+ if (config_stats)
+ nmadvise = 0;
+ npurged = 0;
+
+ malloc_mutex_unlock(tsdn, &arena->lock);
+ for (rdelm = qr_next(purge_runs_sentinel, rd_link),
+ chunkselm = qr_next(purge_chunks_sentinel, cc_link);
+ rdelm != purge_runs_sentinel; rdelm = qr_next(rdelm, rd_link)) {
+ size_t npages;
+
+ if (rdelm == &chunkselm->rd) {
+ /*
+ * Don't actually purge the chunk here because 1)
+ * chunkselm is embedded in the chunk and must remain
+ * valid, and 2) we deallocate the chunk in
+ * arena_unstash_purged(), where it is destroyed,
+ * decommitted, or purged, depending on chunk
+ * deallocation policy.
+ */
+ size_t size = extent_node_size_get(chunkselm);
+ npages = size >> LG_PAGE;
+ chunkselm = qr_next(chunkselm, cc_link);
+ } else {
+ size_t pageind, run_size, flag_unzeroed, flags, i;
+ bool decommitted;
+ arena_chunk_t *chunk =
+ (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
+ arena_chunk_map_misc_t *miscelm =
+ arena_rd_to_miscelm(rdelm);
+ pageind = arena_miscelm_to_pageind(miscelm);
+ run_size = arena_mapbits_large_size_get(chunk, pageind);
+ npages = run_size >> LG_PAGE;
+
+ /*
+ * If this is the first run purged within chunk, mark
+ * the chunk as non-huge. This will prevent all use of
+ * transparent huge pages for this chunk until the chunk
+ * as a whole is deallocated.
+ */
+ if (chunk->hugepage) {
+ pages_nohuge(chunk, chunksize);
+ chunk->hugepage = false;
+ }
+
+ assert(pageind + npages <= chunk_npages);
+ assert(!arena_mapbits_decommitted_get(chunk, pageind));
+ assert(!arena_mapbits_decommitted_get(chunk,
+ pageind+npages-1));
+ decommitted = !chunk_hooks->decommit(chunk, chunksize,
+ pageind << LG_PAGE, npages << LG_PAGE, arena->ind);
+ if (decommitted) {
+ flag_unzeroed = 0;
+ flags = CHUNK_MAP_DECOMMITTED;
+ } else {
+ flag_unzeroed = chunk_purge_wrapper(tsdn, arena,
+ chunk_hooks, chunk, chunksize, pageind <<
+ LG_PAGE, run_size) ? CHUNK_MAP_UNZEROED : 0;
+ flags = flag_unzeroed;
+ }
+ arena_mapbits_large_set(chunk, pageind+npages-1, 0,
+ flags);
+ arena_mapbits_large_set(chunk, pageind, run_size,
+ flags);
+
+ /*
+ * Set the unzeroed flag for internal pages, now that
+ * chunk_purge_wrapper() has returned whether the pages
+ * were zeroed as a side effect of purging. This chunk
+ * map modification is safe even though the arena mutex
+ * isn't currently owned by this thread, because the run
+ * is marked as allocated, thus protecting it from being
+ * modified by any other thread. As long as these
+ * writes don't perturb the first and last elements'
+ * CHUNK_MAP_ALLOCATED bits, behavior is well defined.
+ */
+ for (i = 1; i < npages-1; i++) {
+ arena_mapbits_internal_set(chunk, pageind+i,
+ flag_unzeroed);
+ }
+ }
+
+ npurged += npages;
+ if (config_stats)
+ nmadvise++;
+ }
+ malloc_mutex_lock(tsdn, &arena->lock);
+
+ if (config_stats) {
+ arena->stats.nmadvise += nmadvise;
+ arena->stats.purged += npurged;
+ }
+
+ return (npurged);
+}
+
+static void
+arena_unstash_purged(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
+ arena_runs_dirty_link_t *purge_runs_sentinel,
+ extent_node_t *purge_chunks_sentinel)
+{
+ arena_runs_dirty_link_t *rdelm, *rdelm_next;
+ extent_node_t *chunkselm;
+
+ /* Deallocate chunks/runs. */
+ for (rdelm = qr_next(purge_runs_sentinel, rd_link),
+ chunkselm = qr_next(purge_chunks_sentinel, cc_link);
+ rdelm != purge_runs_sentinel; rdelm = rdelm_next) {
+ rdelm_next = qr_next(rdelm, rd_link);
+ if (rdelm == &chunkselm->rd) {
+ extent_node_t *chunkselm_next = qr_next(chunkselm,
+ cc_link);
+ void *addr = extent_node_addr_get(chunkselm);
+ size_t size = extent_node_size_get(chunkselm);
+ size_t sn = extent_node_sn_get(chunkselm);
+ bool zeroed = extent_node_zeroed_get(chunkselm);
+ bool committed = extent_node_committed_get(chunkselm);
+ extent_node_dirty_remove(chunkselm);
+ arena_node_dalloc(tsdn, arena, chunkselm);
+ chunkselm = chunkselm_next;
+ chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, addr,
+ size, sn, zeroed, committed);
+ } else {
+ arena_chunk_t *chunk =
+ (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
+ arena_chunk_map_misc_t *miscelm =
+ arena_rd_to_miscelm(rdelm);
+ size_t pageind = arena_miscelm_to_pageind(miscelm);
+ bool decommitted = (arena_mapbits_decommitted_get(chunk,
+ pageind) != 0);
+ arena_run_t *run = &miscelm->run;
+ qr_remove(rdelm, rd_link);
+ arena_run_dalloc(tsdn, arena, run, false, true,
+ decommitted);
+ }
+ }
+}
+
+/*
+ * NB: ndirty_limit is interpreted differently depending on opt_purge:
+ * - purge_mode_ratio: Purge as few dirty run/chunks as possible to reach the
+ * desired state:
+ * (arena->ndirty <= ndirty_limit)
+ * - purge_mode_decay: Purge as many dirty runs/chunks as possible without
+ * violating the invariant:
+ * (arena->ndirty >= ndirty_limit)
+ */
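+/*
+ * For example, with ndirty_limit == 1000: in ratio mode the purge stops as
+ * soon as arena->ndirty has been brought down to 1000 or fewer, whereas in
+ * decay mode every dirty run/chunk that can be purged without dropping
+ * arena->ndirty below 1000 is purged.
+ */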
+static void
+arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, size_t ndirty_limit)
+{
+ chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
+ size_t npurge, npurged;
+ arena_runs_dirty_link_t purge_runs_sentinel;
+ extent_node_t purge_chunks_sentinel;
+
+ arena->purging = true;
+
+ /*
+ * Calls to arena_dirty_count() are disabled even for debug builds
+ * because overhead grows nonlinearly as memory usage increases.
+ */
+ if (false && config_debug) {
+ size_t ndirty = arena_dirty_count(arena);
+ assert(ndirty == arena->ndirty);
+ }
+ assert(opt_purge != purge_mode_ratio || (arena->nactive >>
+ arena->lg_dirty_mult) < arena->ndirty || ndirty_limit == 0);
+
+ qr_new(&purge_runs_sentinel, rd_link);
+ extent_node_dirty_linkage_init(&purge_chunks_sentinel);
+
+ npurge = arena_stash_dirty(tsdn, arena, &chunk_hooks, ndirty_limit,
+ &purge_runs_sentinel, &purge_chunks_sentinel);
+ if (npurge == 0)
+ goto label_return;
+ npurged = arena_purge_stashed(tsdn, arena, &chunk_hooks,
+ &purge_runs_sentinel, &purge_chunks_sentinel);
+ assert(npurged == npurge);
+ arena_unstash_purged(tsdn, arena, &chunk_hooks, &purge_runs_sentinel,
+ &purge_chunks_sentinel);
+
+ if (config_stats)
+ arena->stats.npurge++;
+
+label_return:
+ arena->purging = false;
+}
+
+void
+arena_purge(tsdn_t *tsdn, arena_t *arena, bool all)
+{
+
+ malloc_mutex_lock(tsdn, &arena->lock);
+ if (all)
+ arena_purge_to_limit(tsdn, arena, 0);
+ else
+ arena_maybe_purge(tsdn, arena);
+ malloc_mutex_unlock(tsdn, &arena->lock);
+}
+
+static void
+arena_achunk_prof_reset(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk)
+{
+ size_t pageind, npages;
+
+ cassert(config_prof);
+ assert(opt_prof);
+
+ /*
+ * Iterate over the allocated runs and remove profiled allocations from
+ * the sample set.
+ */
+ for (pageind = map_bias; pageind < chunk_npages; pageind += npages) {
+ if (arena_mapbits_allocated_get(chunk, pageind) != 0) {
+ if (arena_mapbits_large_get(chunk, pageind) != 0) {
+ void *ptr = (void *)((uintptr_t)chunk + (pageind
+ << LG_PAGE));
+ size_t usize = isalloc(tsd_tsdn(tsd), ptr,
+ config_prof);
+
+ prof_free(tsd, ptr, usize);
+ npages = arena_mapbits_large_size_get(chunk,
+ pageind) >> LG_PAGE;
+ } else {
+ /* Skip small run. */
+ size_t binind = arena_mapbits_binind_get(chunk,
+ pageind);
+ arena_bin_info_t *bin_info =
+ &arena_bin_info[binind];
+ npages = bin_info->run_size >> LG_PAGE;
+ }
+ } else {
+ /* Skip unallocated run. */
+ npages = arena_mapbits_unallocated_size_get(chunk,
+ pageind) >> LG_PAGE;
+ }
+ assert(pageind + npages <= chunk_npages);
+ }
+}
+
+void
+arena_reset(tsd_t *tsd, arena_t *arena)
+{
+ unsigned i;
+ extent_node_t *node;
+
+ /*
+ * Locking in this function is unintuitive. The caller guarantees that
+ * no concurrent operations are happening in this arena, but there are
+ * still reasons that some locking is necessary:
+ *
+ * - Some of the functions in the transitive closure of calls assume
+ * appropriate locks are held, and in some cases these locks are
+ * temporarily dropped to avoid lock order reversal or deadlock due to
+ * reentry.
+ * - mallctl("epoch", ...) may concurrently refresh stats. While
+ * strictly speaking this is a "concurrent operation", disallowing
+ * stats refreshes would impose an inconvenient burden.
+ */
+
+ /* Remove large allocations from prof sample set. */
+ if (config_prof && opt_prof) {
+ ql_foreach(node, &arena->achunks, ql_link) {
+ arena_achunk_prof_reset(tsd, arena,
+ extent_node_addr_get(node));
+ }
+ }
+
+ /* Reset curruns for large size classes. */
+ if (config_stats) {
+ for (i = 0; i < nlclasses; i++)
+ arena->stats.lstats[i].curruns = 0;
+ }
+
+ /* Huge allocations. */
+ malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx);
+ for (node = ql_last(&arena->huge, ql_link); node != NULL; node =
+ ql_last(&arena->huge, ql_link)) {
+ void *ptr = extent_node_addr_get(node);
+ size_t usize;
+
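+		/*
+		 * huge_dalloc() acquires huge_mtx itself, so the lock must be
+		 * dropped across the call and re-acquired afterwards.
+		 */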
+ malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx);
+ if (config_stats || (config_prof && opt_prof))
+ usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
+ /* Remove huge allocation from prof sample set. */
+ if (config_prof && opt_prof)
+ prof_free(tsd, ptr, usize);
+ huge_dalloc(tsd_tsdn(tsd), ptr);
+ malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx);
+ /* Cancel out unwanted effects on stats. */
+ if (config_stats)
+ arena_huge_reset_stats_cancel(arena, usize);
+ }
+ malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx);
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
+
+ /* Bins. */
+ for (i = 0; i < NBINS; i++) {
+ arena_bin_t *bin = &arena->bins[i];
+ malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
+ bin->runcur = NULL;
+ arena_run_heap_new(&bin->runs);
+ if (config_stats) {
+ bin->stats.curregs = 0;
+ bin->stats.curruns = 0;
+ }
+ malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
+ }
+
+ /*
+ * Re-initialize runs_dirty such that the chunks_cache and runs_dirty
+ * chains directly correspond.
+ */
+ qr_new(&arena->runs_dirty, rd_link);
+ for (node = qr_next(&arena->chunks_cache, cc_link);
+ node != &arena->chunks_cache; node = qr_next(node, cc_link)) {
+ qr_new(&node->rd, rd_link);
+ qr_meld(&arena->runs_dirty, &node->rd, rd_link);
+ }
+
+ /* Arena chunks. */
+ for (node = ql_last(&arena->achunks, ql_link); node != NULL; node =
+ ql_last(&arena->achunks, ql_link)) {
+ ql_remove(&arena->achunks, node, ql_link);
+ arena_chunk_discard(tsd_tsdn(tsd), arena,
+ extent_node_addr_get(node));
+ }
+
+ /* Spare. */
+ if (arena->spare != NULL) {
+ arena_chunk_discard(tsd_tsdn(tsd), arena, arena->spare);
+ arena->spare = NULL;
+ }
+
+ assert(!arena->purging);
+ arena->nactive = 0;
+
+ for (i = 0; i < NPSIZES; i++)
+ arena_run_heap_new(&arena->runs_avail[i]);
+
+ malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
+}
+
+static void
+arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
+ size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty,
+ size_t flag_decommitted)
+{
+ size_t size = *p_size;
+ size_t run_ind = *p_run_ind;
+ size_t run_pages = *p_run_pages;
+
+ /* Try to coalesce forward. */
+ if (run_ind + run_pages < chunk_npages &&
+ arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 &&
+ arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty &&
+ arena_mapbits_decommitted_get(chunk, run_ind+run_pages) ==
+ flag_decommitted) {
+ size_t nrun_size = arena_mapbits_unallocated_size_get(chunk,
+ run_ind+run_pages);
+ size_t nrun_pages = nrun_size >> LG_PAGE;
+
+ /*
+ * Remove successor from runs_avail; the coalesced run is
+ * inserted later.
+ */
+ assert(arena_mapbits_unallocated_size_get(chunk,
+ run_ind+run_pages+nrun_pages-1) == nrun_size);
+ assert(arena_mapbits_dirty_get(chunk,
+ run_ind+run_pages+nrun_pages-1) == flag_dirty);
+ assert(arena_mapbits_decommitted_get(chunk,
+ run_ind+run_pages+nrun_pages-1) == flag_decommitted);
+ arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages);
+
+ /*
+ * If the successor is dirty, remove it from the set of dirty
+ * pages.
+ */
+ if (flag_dirty != 0) {
+ arena_run_dirty_remove(arena, chunk, run_ind+run_pages,
+ nrun_pages);
+ }
+
+ size += nrun_size;
+ run_pages += nrun_pages;
+
+ arena_mapbits_unallocated_size_set(chunk, run_ind, size);
+ arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
+ size);
+ }
+
+ /* Try to coalesce backward. */
+ if (run_ind > map_bias && arena_mapbits_allocated_get(chunk,
+ run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) ==
+ flag_dirty && arena_mapbits_decommitted_get(chunk, run_ind-1) ==
+ flag_decommitted) {
+ size_t prun_size = arena_mapbits_unallocated_size_get(chunk,
+ run_ind-1);
+ size_t prun_pages = prun_size >> LG_PAGE;
+
+ run_ind -= prun_pages;
+
+ /*
+ * Remove predecessor from runs_avail; the coalesced run is
+ * inserted later.
+ */
+ assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
+ prun_size);
+ assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty);
+ assert(arena_mapbits_decommitted_get(chunk, run_ind) ==
+ flag_decommitted);
+ arena_avail_remove(arena, chunk, run_ind, prun_pages);
+
+ /*
+ * If the predecessor is dirty, remove it from the set of dirty
+ * pages.
+ */
+ if (flag_dirty != 0) {
+ arena_run_dirty_remove(arena, chunk, run_ind,
+ prun_pages);
+ }
+
+ size += prun_size;
+ run_pages += prun_pages;
+
+ arena_mapbits_unallocated_size_set(chunk, run_ind, size);
+ arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
+ size);
+ }
+
+ *p_size = size;
+ *p_run_ind = run_ind;
+ *p_run_pages = run_pages;
+}
+
+static size_t
+arena_run_size_get(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
+ size_t run_ind)
+{
+ size_t size;
+
+ assert(run_ind >= map_bias);
+ assert(run_ind < chunk_npages);
+
+ if (arena_mapbits_large_get(chunk, run_ind) != 0) {
+ size = arena_mapbits_large_size_get(chunk, run_ind);
+ assert(size == PAGE || arena_mapbits_large_size_get(chunk,
+ run_ind+(size>>LG_PAGE)-1) == 0);
+ } else {
+ arena_bin_info_t *bin_info = &arena_bin_info[run->binind];
+ size = bin_info->run_size;
+ }
+
+ return (size);
+}
+
+static void
+arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run, bool dirty,
+ bool cleaned, bool decommitted)
+{
+ arena_chunk_t *chunk;
+ arena_chunk_map_misc_t *miscelm;
+ size_t size, run_ind, run_pages, flag_dirty, flag_decommitted;
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
+ miscelm = arena_run_to_miscelm(run);
+ run_ind = arena_miscelm_to_pageind(miscelm);
+ assert(run_ind >= map_bias);
+ assert(run_ind < chunk_npages);
+ size = arena_run_size_get(arena, chunk, run, run_ind);
+ run_pages = (size >> LG_PAGE);
+ arena_nactive_sub(arena, run_pages);
+
+ /*
+ * The run is dirty if the caller claims to have dirtied it, as well as
+ * if it was already dirty before being allocated and the caller
+ * doesn't claim to have cleaned it.
+ */
+ assert(arena_mapbits_dirty_get(chunk, run_ind) ==
+ arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
+ if (!cleaned && !decommitted && arena_mapbits_dirty_get(chunk, run_ind)
+ != 0)
+ dirty = true;
+ flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
+ flag_decommitted = decommitted ? CHUNK_MAP_DECOMMITTED : 0;
+
+ /* Mark pages as unallocated in the chunk map. */
+ if (dirty || decommitted) {
+ size_t flags = flag_dirty | flag_decommitted;
+ arena_mapbits_unallocated_set(chunk, run_ind, size, flags);
+ arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
+ flags);
+ } else {
+ arena_mapbits_unallocated_set(chunk, run_ind, size,
+ arena_mapbits_unzeroed_get(chunk, run_ind));
+ arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
+ arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1));
+ }
+
+ arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages,
+ flag_dirty, flag_decommitted);
+
+ /* Insert into runs_avail, now that coalescing is complete. */
+ assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
+ arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1));
+ assert(arena_mapbits_dirty_get(chunk, run_ind) ==
+ arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
+ assert(arena_mapbits_decommitted_get(chunk, run_ind) ==
+ arena_mapbits_decommitted_get(chunk, run_ind+run_pages-1));
+ arena_avail_insert(arena, chunk, run_ind, run_pages);
+
+ if (dirty)
+ arena_run_dirty_insert(arena, chunk, run_ind, run_pages);
+
+ /* Deallocate chunk if it is now completely unused. */
+ if (size == arena_maxrun) {
+ assert(run_ind == map_bias);
+ assert(run_pages == (arena_maxrun >> LG_PAGE));
+ arena_chunk_dalloc(tsdn, arena, chunk);
+ }
+
+ /*
+ * It is okay to do dirty page processing here even if the chunk was
+ * deallocated above, since in that case it is the spare. Waiting
+ * until after possible chunk deallocation to do dirty processing
+ * allows for an old spare to be fully deallocated, thus decreasing the
+ * chances of spuriously crossing the dirty page purging threshold.
+ */
+ if (dirty)
+ arena_maybe_purge(tsdn, arena);
+}
+
+static void
+arena_run_trim_head(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
+ arena_run_t *run, size_t oldsize, size_t newsize)
+{
+ arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
+ size_t pageind = arena_miscelm_to_pageind(miscelm);
+ size_t head_npages = (oldsize - newsize) >> LG_PAGE;
+ size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
+ size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind);
+ size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
+ CHUNK_MAP_UNZEROED : 0;
+
+ assert(oldsize > newsize);
+
+ /*
+ * Update the chunk map so that arena_run_dalloc() can treat the
+ * leading run as separately allocated. Set the last element of each
+ * run first, in case of single-page runs.
+ */
+ assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
+ arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
+ (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
+ pageind+head_npages-1)));
+ arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty |
+ (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind)));
+
+ if (config_debug) {
+ UNUSED size_t tail_npages = newsize >> LG_PAGE;
+ assert(arena_mapbits_large_size_get(chunk,
+ pageind+head_npages+tail_npages-1) == 0);
+ assert(arena_mapbits_dirty_get(chunk,
+ pageind+head_npages+tail_npages-1) == flag_dirty);
+ }
+ arena_mapbits_large_set(chunk, pageind+head_npages, newsize,
+ flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
+ pageind+head_npages)));
+
+ arena_run_dalloc(tsdn, arena, run, false, false, (flag_decommitted !=
+ 0));
+}
+
+static void
+arena_run_trim_tail(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
+ arena_run_t *run, size_t oldsize, size_t newsize, bool dirty)
+{
+ arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
+ size_t pageind = arena_miscelm_to_pageind(miscelm);
+ size_t head_npages = newsize >> LG_PAGE;
+ size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
+ size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind);
+ size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
+ CHUNK_MAP_UNZEROED : 0;
+ arena_chunk_map_misc_t *tail_miscelm;
+ arena_run_t *tail_run;
+
+ assert(oldsize > newsize);
+
+ /*
+ * Update the chunk map so that arena_run_dalloc() can treat the
+ * trailing run as separately allocated. Set the last element of each
+ * run first, in case of single-page runs.
+ */
+ assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
+ arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
+ (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
+ pageind+head_npages-1)));
+ arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty |
+ (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind)));
+
+ if (config_debug) {
+ UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
+ assert(arena_mapbits_large_size_get(chunk,
+ pageind+head_npages+tail_npages-1) == 0);
+ assert(arena_mapbits_dirty_get(chunk,
+ pageind+head_npages+tail_npages-1) == flag_dirty);
+ }
+ arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize,
+ flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
+ pageind+head_npages)));
+
+ tail_miscelm = arena_miscelm_get_mutable(chunk, pageind + head_npages);
+ tail_run = &tail_miscelm->run;
+ arena_run_dalloc(tsdn, arena, tail_run, dirty, false, (flag_decommitted
+ != 0));
+}
+
+static void
+arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
+{
+ arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
+
+ arena_run_heap_insert(&bin->runs, miscelm);
+}
+
+static arena_run_t *
+arena_bin_nonfull_run_tryget(arena_bin_t *bin)
+{
+ arena_chunk_map_misc_t *miscelm;
+
+ miscelm = arena_run_heap_remove_first(&bin->runs);
+ if (miscelm == NULL)
+ return (NULL);
+ if (config_stats)
+ bin->stats.reruns++;
+
+ return (&miscelm->run);
+}
+
+static arena_run_t *
+arena_bin_nonfull_run_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
+{
+ arena_run_t *run;
+ szind_t binind;
+ arena_bin_info_t *bin_info;
+
+ /* Look for a usable run. */
+ run = arena_bin_nonfull_run_tryget(bin);
+ if (run != NULL)
+ return (run);
+ /* No existing runs have any space available. */
+
+ binind = arena_bin_index(arena, bin);
+ bin_info = &arena_bin_info[binind];
+
+ /* Allocate a new run. */
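+	/*
+	 * Drop the bin lock before taking the arena lock; the two locks are
+	 * never held simultaneously on this path.
+	 */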
+ malloc_mutex_unlock(tsdn, &bin->lock);
+ /******************************/
+ malloc_mutex_lock(tsdn, &arena->lock);
+ run = arena_run_alloc_small(tsdn, arena, bin_info->run_size, binind);
+ if (run != NULL) {
+ /* Initialize run internals. */
+ run->binind = binind;
+ run->nfree = bin_info->nregs;
+ bitmap_init(run->bitmap, &bin_info->bitmap_info);
+ }
+ malloc_mutex_unlock(tsdn, &arena->lock);
+ /********************************/
+ malloc_mutex_lock(tsdn, &bin->lock);
+ if (run != NULL) {
+ if (config_stats) {
+ bin->stats.nruns++;
+ bin->stats.curruns++;
+ }
+ return (run);
+ }
+
+ /*
+ * arena_run_alloc_small() failed, but another thread may have made
+ * sufficient memory available while this one dropped bin->lock above,
+ * so search one more time.
+ */
+ run = arena_bin_nonfull_run_tryget(bin);
+ if (run != NULL)
+ return (run);
+
+ return (NULL);
+}
+
+/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
+static void *
+arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
+{
+ szind_t binind;
+ arena_bin_info_t *bin_info;
+ arena_run_t *run;
+
+ binind = arena_bin_index(arena, bin);
+ bin_info = &arena_bin_info[binind];
+ bin->runcur = NULL;
+ run = arena_bin_nonfull_run_get(tsdn, arena, bin);
+ if (bin->runcur != NULL && bin->runcur->nfree > 0) {
+ /*
+ * Another thread updated runcur while this one ran without the
+ * bin lock in arena_bin_nonfull_run_get().
+ */
+ void *ret;
+ assert(bin->runcur->nfree > 0);
+ ret = arena_run_reg_alloc(bin->runcur, bin_info);
+ if (run != NULL) {
+ arena_chunk_t *chunk;
+
+ /*
+ * arena_run_alloc_small() may have allocated run, or
+			 * it may have pulled run from the bin's run heap.
+ * Therefore it is unsafe to make any assumptions about
+ * how run has previously been used, and
+ * arena_bin_lower_run() must be called, as if a region
+ * were just deallocated from the run.
+ */
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
+ if (run->nfree == bin_info->nregs) {
+ arena_dalloc_bin_run(tsdn, arena, chunk, run,
+ bin);
+ } else
+ arena_bin_lower_run(arena, run, bin);
+ }
+ return (ret);
+ }
+
+ if (run == NULL)
+ return (NULL);
+
+ bin->runcur = run;
+
+ assert(bin->runcur->nfree > 0);
+
+ return (arena_run_reg_alloc(bin->runcur, bin_info));
+}
+
+void
+arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_bin_t *tbin,
+ szind_t binind, uint64_t prof_accumbytes)
+{
+ unsigned i, nfill;
+ arena_bin_t *bin;
+
+ assert(tbin->ncached == 0);
+
+ if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes))
+ prof_idump(tsdn);
+ bin = &arena->bins[binind];
+ malloc_mutex_lock(tsdn, &bin->lock);
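+	/*
+	 * Fill at most ncached_max >> lg_fill_div regions; tcache GC adjusts
+	 * lg_fill_div over time based on how heavily each bin is used.
+	 */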
+ for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
+ tbin->lg_fill_div); i < nfill; i++) {
+ arena_run_t *run;
+ void *ptr;
+ if ((run = bin->runcur) != NULL && run->nfree > 0)
+ ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
+ else
+ ptr = arena_bin_malloc_hard(tsdn, arena, bin);
+ if (ptr == NULL) {
+ /*
+ * OOM. tbin->avail isn't yet filled down to its first
+ * element, so the successful allocations (if any) must
+ * be moved just before tbin->avail before bailing out.
+ */
+ if (i > 0) {
+ memmove(tbin->avail - i, tbin->avail - nfill,
+ i * sizeof(void *));
+ }
+ break;
+ }
+ if (config_fill && unlikely(opt_junk_alloc)) {
+ arena_alloc_junk_small(ptr, &arena_bin_info[binind],
+ true);
+ }
+ /* Insert such that low regions get used first. */
+ *(tbin->avail - nfill + i) = ptr;
+ }
+ if (config_stats) {
+ bin->stats.nmalloc += i;
+ bin->stats.nrequests += tbin->tstats.nrequests;
+ bin->stats.curregs += i;
+ bin->stats.nfills++;
+ tbin->tstats.nrequests = 0;
+ }
+ malloc_mutex_unlock(tsdn, &bin->lock);
+ tbin->ncached = i;
+ arena_decay_tick(tsdn, arena);
+}
+
+void
+arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
+{
+
+ size_t redzone_size = bin_info->redzone_size;
+
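+	/*
+	 * If the region will be zeroed, only the redzones need junk filling;
+	 * otherwise junk the entire reg_interval (leading redzone, region, and
+	 * trailing redzone).
+	 */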
+ if (zero) {
+ memset((void *)((uintptr_t)ptr - redzone_size),
+ JEMALLOC_ALLOC_JUNK, redzone_size);
+ memset((void *)((uintptr_t)ptr + bin_info->reg_size),
+ JEMALLOC_ALLOC_JUNK, redzone_size);
+ } else {
+ memset((void *)((uintptr_t)ptr - redzone_size),
+ JEMALLOC_ALLOC_JUNK, bin_info->reg_interval);
+ }
+}
+
+#ifdef JEMALLOC_JET
+#undef arena_redzone_corruption
+#define arena_redzone_corruption JEMALLOC_N(n_arena_redzone_corruption)
+#endif
+static void
+arena_redzone_corruption(void *ptr, size_t usize, bool after,
+ size_t offset, uint8_t byte)
+{
+
+ malloc_printf("<jemalloc>: Corrupt redzone %zu byte%s %s %p "
+ "(size %zu), byte=%#x\n", offset, (offset == 1) ? "" : "s",
+ after ? "after" : "before", ptr, usize, byte);
+}
+#ifdef JEMALLOC_JET
+#undef arena_redzone_corruption
+#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
+arena_redzone_corruption_t *arena_redzone_corruption =
+ JEMALLOC_N(n_arena_redzone_corruption);
+#endif
+
+static void
+arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
+{
+ bool error = false;
+
+ if (opt_junk_alloc) {
+ size_t size = bin_info->reg_size;
+ size_t redzone_size = bin_info->redzone_size;
+ size_t i;
+
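+		/*
+		 * Check the leading redzone (the bytes immediately preceding
+		 * the region), then the trailing redzone that follows the
+		 * region's reg_size bytes.
+		 */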
+ for (i = 1; i <= redzone_size; i++) {
+ uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i);
+ if (*byte != JEMALLOC_ALLOC_JUNK) {
+ error = true;
+ arena_redzone_corruption(ptr, size, false, i,
+ *byte);
+ if (reset)
+ *byte = JEMALLOC_ALLOC_JUNK;
+ }
+ }
+ for (i = 0; i < redzone_size; i++) {
+ uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i);
+ if (*byte != JEMALLOC_ALLOC_JUNK) {
+ error = true;
+ arena_redzone_corruption(ptr, size, true, i,
+ *byte);
+ if (reset)
+ *byte = JEMALLOC_ALLOC_JUNK;
+ }
+ }
+ }
+
+ if (opt_abort && error)
+ abort();
+}
+
+#ifdef JEMALLOC_JET
+#undef arena_dalloc_junk_small
+#define arena_dalloc_junk_small JEMALLOC_N(n_arena_dalloc_junk_small)
+#endif
+void
+arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
+{
+ size_t redzone_size = bin_info->redzone_size;
+
+ arena_redzones_validate(ptr, bin_info, false);
+ memset((void *)((uintptr_t)ptr - redzone_size), JEMALLOC_FREE_JUNK,
+ bin_info->reg_interval);
+}
+#ifdef JEMALLOC_JET
+#undef arena_dalloc_junk_small
+#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
+arena_dalloc_junk_small_t *arena_dalloc_junk_small =
+ JEMALLOC_N(n_arena_dalloc_junk_small);
+#endif
+
+void
+arena_quarantine_junk_small(void *ptr, size_t usize)
+{
+ szind_t binind;
+ arena_bin_info_t *bin_info;
+ cassert(config_fill);
+ assert(opt_junk_free);
+ assert(opt_quarantine);
+ assert(usize <= SMALL_MAXCLASS);
+
+ binind = size2index(usize);
+ bin_info = &arena_bin_info[binind];
+ arena_redzones_validate(ptr, bin_info, true);
+}
+
+static void *
+arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
+{
+ void *ret;
+ arena_bin_t *bin;
+ size_t usize;
+ arena_run_t *run;
+
+ assert(binind < NBINS);
+ bin = &arena->bins[binind];
+ usize = index2size(binind);
+
+ malloc_mutex_lock(tsdn, &bin->lock);
+ if ((run = bin->runcur) != NULL && run->nfree > 0)
+ ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
+ else
+ ret = arena_bin_malloc_hard(tsdn, arena, bin);
+
+ if (ret == NULL) {
+ malloc_mutex_unlock(tsdn, &bin->lock);
+ return (NULL);
+ }
+
+ if (config_stats) {
+ bin->stats.nmalloc++;
+ bin->stats.nrequests++;
+ bin->stats.curregs++;
+ }
+ malloc_mutex_unlock(tsdn, &bin->lock);
+ if (config_prof && !isthreaded && arena_prof_accum(tsdn, arena, usize))
+ prof_idump(tsdn);
+
+ if (!zero) {
+ if (config_fill) {
+ if (unlikely(opt_junk_alloc)) {
+ arena_alloc_junk_small(ret,
+ &arena_bin_info[binind], false);
+ } else if (unlikely(opt_zero))
+ memset(ret, 0, usize);
+ }
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
+ } else {
+ if (config_fill && unlikely(opt_junk_alloc)) {
+ arena_alloc_junk_small(ret, &arena_bin_info[binind],
+ true);
+ }
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
+ memset(ret, 0, usize);
+ }
+
+ arena_decay_tick(tsdn, arena);
+ return (ret);
+}
+
+void *
+arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
+{
+ void *ret;
+ size_t usize;
+ uintptr_t random_offset;
+ arena_run_t *run;
+ arena_chunk_map_misc_t *miscelm;
+ UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);
+
+ /* Large allocation. */
+ usize = index2size(binind);
+ malloc_mutex_lock(tsdn, &arena->lock);
+ if (config_cache_oblivious) {
+ uint64_t r;
+
+ /*
+ * Compute a uniformly distributed offset within the first page
+ * that is a multiple of the cacheline size, e.g. [0 .. 63) * 64
+ * for 4 KiB pages and 64-byte cachelines.
+ */
+ r = prng_lg_range_zu(&arena->offset_state, LG_PAGE -
+ LG_CACHELINE, false);
+ random_offset = ((uintptr_t)r) << LG_CACHELINE;
+ } else
+ random_offset = 0;
+ run = arena_run_alloc_large(tsdn, arena, usize + large_pad, zero);
+ if (run == NULL) {
+ malloc_mutex_unlock(tsdn, &arena->lock);
+ return (NULL);
+ }
+ miscelm = arena_run_to_miscelm(run);
+ ret = (void *)((uintptr_t)arena_miscelm_to_rpages(miscelm) +
+ random_offset);
+ if (config_stats) {
+ szind_t index = binind - NBINS;
+
+ arena->stats.nmalloc_large++;
+ arena->stats.nrequests_large++;
+ arena->stats.allocated_large += usize;
+ arena->stats.lstats[index].nmalloc++;
+ arena->stats.lstats[index].nrequests++;
+ arena->stats.lstats[index].curruns++;
+ }
+ if (config_prof)
+ idump = arena_prof_accum_locked(arena, usize);
+ malloc_mutex_unlock(tsdn, &arena->lock);
+ if (config_prof && idump)
+ prof_idump(tsdn);
+
+ if (!zero) {
+ if (config_fill) {
+ if (unlikely(opt_junk_alloc))
+ memset(ret, JEMALLOC_ALLOC_JUNK, usize);
+ else if (unlikely(opt_zero))
+ memset(ret, 0, usize);
+ }
+ }
+
+ arena_decay_tick(tsdn, arena);
+ return (ret);
+}
+
+void *
+arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
+ bool zero)
+{
+
+ assert(!tsdn_null(tsdn) || arena != NULL);
+
+ if (likely(!tsdn_null(tsdn)))
+ arena = arena_choose(tsdn_tsd(tsdn), arena);
+ if (unlikely(arena == NULL))
+ return (NULL);
+
+ if (likely(size <= SMALL_MAXCLASS))
+ return (arena_malloc_small(tsdn, arena, ind, zero));
+ if (likely(size <= large_maxclass))
+ return (arena_malloc_large(tsdn, arena, ind, zero));
+ return (huge_malloc(tsdn, arena, index2size(ind), zero));
+}
+
+/* Only handles large allocations that require more than page alignment. */
+static void *
+arena_palloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
+ bool zero)
+{
+ void *ret;
+ size_t alloc_size, leadsize, trailsize;
+ arena_run_t *run;
+ arena_chunk_t *chunk;
+ arena_chunk_map_misc_t *miscelm;
+ void *rpages;
+
+ assert(!tsdn_null(tsdn) || arena != NULL);
+ assert(usize == PAGE_CEILING(usize));
+
+ if (likely(!tsdn_null(tsdn)))
+ arena = arena_choose(tsdn_tsd(tsdn), arena);
+ if (unlikely(arena == NULL))
+ return (NULL);
+
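+	/*
+	 * Over-allocate by (alignment - PAGE) so that an aligned region of
+	 * usize + large_pad bytes is guaranteed to exist within the run; the
+	 * leading and trailing excess is trimmed off below.
+	 */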
+ alignment = PAGE_CEILING(alignment);
+ alloc_size = usize + large_pad + alignment - PAGE;
+
+ malloc_mutex_lock(tsdn, &arena->lock);
+ run = arena_run_alloc_large(tsdn, arena, alloc_size, false);
+ if (run == NULL) {
+ malloc_mutex_unlock(tsdn, &arena->lock);
+ return (NULL);
+ }
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
+ miscelm = arena_run_to_miscelm(run);
+ rpages = arena_miscelm_to_rpages(miscelm);
+
+ leadsize = ALIGNMENT_CEILING((uintptr_t)rpages, alignment) -
+ (uintptr_t)rpages;
+ assert(alloc_size >= leadsize + usize);
+ trailsize = alloc_size - leadsize - usize - large_pad;
+ if (leadsize != 0) {
+ arena_chunk_map_misc_t *head_miscelm = miscelm;
+ arena_run_t *head_run = run;
+
+ miscelm = arena_miscelm_get_mutable(chunk,
+ arena_miscelm_to_pageind(head_miscelm) + (leadsize >>
+ LG_PAGE));
+ run = &miscelm->run;
+
+ arena_run_trim_head(tsdn, arena, chunk, head_run, alloc_size,
+ alloc_size - leadsize);
+ }
+ if (trailsize != 0) {
+ arena_run_trim_tail(tsdn, arena, chunk, run, usize + large_pad +
+ trailsize, usize + large_pad, false);
+ }
+ if (arena_run_init_large(arena, run, usize + large_pad, zero)) {
+ size_t run_ind =
+ arena_miscelm_to_pageind(arena_run_to_miscelm(run));
+ bool dirty = (arena_mapbits_dirty_get(chunk, run_ind) != 0);
+ bool decommitted = (arena_mapbits_decommitted_get(chunk,
+ run_ind) != 0);
+
+ assert(decommitted); /* Cause of OOM. */
+ arena_run_dalloc(tsdn, arena, run, dirty, false, decommitted);
+ malloc_mutex_unlock(tsdn, &arena->lock);
+ return (NULL);
+ }
+ ret = arena_miscelm_to_rpages(miscelm);
+
+ if (config_stats) {
+ szind_t index = size2index(usize) - NBINS;
+
+ arena->stats.nmalloc_large++;
+ arena->stats.nrequests_large++;
+ arena->stats.allocated_large += usize;
+ arena->stats.lstats[index].nmalloc++;
+ arena->stats.lstats[index].nrequests++;
+ arena->stats.lstats[index].curruns++;
+ }
+ malloc_mutex_unlock(tsdn, &arena->lock);
+
+ if (config_fill && !zero) {
+ if (unlikely(opt_junk_alloc))
+ memset(ret, JEMALLOC_ALLOC_JUNK, usize);
+ else if (unlikely(opt_zero))
+ memset(ret, 0, usize);
+ }
+ arena_decay_tick(tsdn, arena);
+ return (ret);
+}
+
+void *
+arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
+ bool zero, tcache_t *tcache)
+{
+ void *ret;
+
+ if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE
+ && (usize & PAGE_MASK) == 0))) {
+ /* Small; alignment doesn't require special run placement. */
+ ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero,
+ tcache, true);
+ } else if (usize <= large_maxclass && alignment <= PAGE) {
+ /*
+ * Large; alignment doesn't require special run placement.
+ * However, the cached pointer may be at a random offset from
+ * the base of the run, so do some bit manipulation to retrieve
+ * the base.
+ */
+ ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero,
+ tcache, true);
+ if (config_cache_oblivious)
+ ret = (void *)((uintptr_t)ret & ~PAGE_MASK);
+ } else {
+ if (likely(usize <= large_maxclass)) {
+ ret = arena_palloc_large(tsdn, arena, usize, alignment,
+ zero);
+ } else if (likely(alignment <= chunksize))
+ ret = huge_malloc(tsdn, arena, usize, zero);
+ else {
+ ret = huge_palloc(tsdn, arena, usize, alignment, zero);
+ }
+ }
+ return (ret);
+}
+
+void
+arena_prof_promoted(tsdn_t *tsdn, const void *ptr, size_t size)
+{
+ arena_chunk_t *chunk;
+ size_t pageind;
+ szind_t binind;
+
+ cassert(config_prof);
+ assert(ptr != NULL);
+ assert(CHUNK_ADDR2BASE(ptr) != ptr);
+ assert(isalloc(tsdn, ptr, false) == LARGE_MINCLASS);
+ assert(isalloc(tsdn, ptr, true) == LARGE_MINCLASS);
+ assert(size <= SMALL_MAXCLASS);
+
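+	/*
+	 * The allocation was promoted to LARGE_MINCLASS for sampling; record
+	 * the small size class in the chunk map so that isalloc() with
+	 * demotion reports the originally requested size class.
+	 */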
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+ binind = size2index(size);
+ assert(binind < NBINS);
+ arena_mapbits_large_binind_set(chunk, pageind, binind);
+
+ assert(isalloc(tsdn, ptr, false) == LARGE_MINCLASS);
+ assert(isalloc(tsdn, ptr, true) == size);
+}
+
+static void
+arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
+ arena_bin_t *bin)
+{
+
+ /* Dissociate run from bin. */
+ if (run == bin->runcur)
+ bin->runcur = NULL;
+ else {
+ szind_t binind = arena_bin_index(extent_node_arena_get(
+ &chunk->node), bin);
+ arena_bin_info_t *bin_info = &arena_bin_info[binind];
+
+ /*
+ * The following block's conditional is necessary because if the
+ * run only contains one region, then it never gets inserted
+		 * into the non-full runs heap.
+ */
+ if (bin_info->nregs != 1) {
+ arena_chunk_map_misc_t *miscelm =
+ arena_run_to_miscelm(run);
+
+ arena_run_heap_remove(&bin->runs, miscelm);
+ }
+ }
+}
+
+static void
+arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
+ arena_run_t *run, arena_bin_t *bin)
+{
+
+ assert(run != bin->runcur);
+
+ malloc_mutex_unlock(tsdn, &bin->lock);
+ /******************************/
+ malloc_mutex_lock(tsdn, &arena->lock);
+ arena_run_dalloc(tsdn, arena, run, true, false, false);
+ malloc_mutex_unlock(tsdn, &arena->lock);
+ /****************************/
+ malloc_mutex_lock(tsdn, &bin->lock);
+ if (config_stats)
+ bin->stats.curruns--;
+}
+
+static void
+arena_bin_lower_run(arena_t *arena, arena_run_t *run, arena_bin_t *bin)
+{
+
+ /*
+ * Make sure that if bin->runcur is non-NULL, it refers to the
+ * oldest/lowest non-full run. It is okay to NULL runcur out rather
+ * than proactively keeping it pointing at the oldest/lowest non-full
+ * run.
+ */
+ if (bin->runcur != NULL &&
+ arena_snad_comp(arena_run_to_miscelm(bin->runcur),
+ arena_run_to_miscelm(run)) > 0) {
+ /* Switch runcur. */
+ if (bin->runcur->nfree > 0)
+ arena_bin_runs_insert(bin, bin->runcur);
+ bin->runcur = run;
+ if (config_stats)
+ bin->stats.reruns++;
+ } else
+ arena_bin_runs_insert(bin, run);
+}
+
+static void
+arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
+ void *ptr, arena_chunk_map_bits_t *bitselm, bool junked)
+{
+ size_t pageind, rpages_ind;
+ arena_run_t *run;
+ arena_bin_t *bin;
+ arena_bin_info_t *bin_info;
+ szind_t binind;
+
+ pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+ rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
+ run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run;
+ binind = run->binind;
+ bin = &arena->bins[binind];
+ bin_info = &arena_bin_info[binind];
+
+ if (!junked && config_fill && unlikely(opt_junk_free))
+ arena_dalloc_junk_small(ptr, bin_info);
+
+ arena_run_reg_dalloc(run, ptr);
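+	/*
+	 * If the run is now completely empty, release it; if it just went from
+	 * full to non-full and is not runcur, make it available to future
+	 * allocations via the bin's run heap.
+	 */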
+ if (run->nfree == bin_info->nregs) {
+ arena_dissociate_bin_run(chunk, run, bin);
+ arena_dalloc_bin_run(tsdn, arena, chunk, run, bin);
+ } else if (run->nfree == 1 && run != bin->runcur)
+ arena_bin_lower_run(arena, run, bin);
+
+ if (config_stats) {
+ bin->stats.ndalloc++;
+ bin->stats.curregs--;
+ }
+}
+
+void
+arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena,
+ arena_chunk_t *chunk, void *ptr, arena_chunk_map_bits_t *bitselm)
+{
+
+ arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, true);
+}
+
+void
+arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, void *ptr,
+ size_t pageind, arena_chunk_map_bits_t *bitselm)
+{
+ arena_run_t *run;
+ arena_bin_t *bin;
+ size_t rpages_ind;
+
+ rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
+ run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run;
+ bin = &arena->bins[run->binind];
+ malloc_mutex_lock(tsdn, &bin->lock);
+ arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, false);
+ malloc_mutex_unlock(tsdn, &bin->lock);
+}
+
+void
+arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
+ void *ptr, size_t pageind)
+{
+ arena_chunk_map_bits_t *bitselm;
+
+ if (config_debug) {
+ /* arena_ptr_small_binind_get() does extra sanity checking. */
+ assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
+ pageind)) != BININD_INVALID);
+ }
+ bitselm = arena_bitselm_get_mutable(chunk, pageind);
+ arena_dalloc_bin(tsdn, arena, chunk, ptr, pageind, bitselm);
+ arena_decay_tick(tsdn, arena);
+}
+
+#ifdef JEMALLOC_JET
+#undef arena_dalloc_junk_large
+#define arena_dalloc_junk_large JEMALLOC_N(n_arena_dalloc_junk_large)
+#endif
+void
+arena_dalloc_junk_large(void *ptr, size_t usize)
+{
+
+ if (config_fill && unlikely(opt_junk_free))
+ memset(ptr, JEMALLOC_FREE_JUNK, usize);
+}
+#ifdef JEMALLOC_JET
+#undef arena_dalloc_junk_large
+#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
+arena_dalloc_junk_large_t *arena_dalloc_junk_large =
+ JEMALLOC_N(n_arena_dalloc_junk_large);
+#endif
+
+static void
+arena_dalloc_large_locked_impl(tsdn_t *tsdn, arena_t *arena,
+ arena_chunk_t *chunk, void *ptr, bool junked)
+{
+ size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+ arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
+ pageind);
+ arena_run_t *run = &miscelm->run;
+
+ if (config_fill || config_stats) {
+ size_t usize = arena_mapbits_large_size_get(chunk, pageind) -
+ large_pad;
+
+ if (!junked)
+ arena_dalloc_junk_large(ptr, usize);
+ if (config_stats) {
+ szind_t index = size2index(usize) - NBINS;
+
+ arena->stats.ndalloc_large++;
+ arena->stats.allocated_large -= usize;
+ arena->stats.lstats[index].ndalloc++;
+ arena->stats.lstats[index].curruns--;
+ }
+ }
+
+ arena_run_dalloc(tsdn, arena, run, true, false, false);
+}
+
+void
+arena_dalloc_large_junked_locked(tsdn_t *tsdn, arena_t *arena,
+ arena_chunk_t *chunk, void *ptr)
+{
+
+ arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, true);
+}
+
+void
+arena_dalloc_large(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
+ void *ptr)
+{
+
+ malloc_mutex_lock(tsdn, &arena->lock);
+ arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, false);
+ malloc_mutex_unlock(tsdn, &arena->lock);
+ arena_decay_tick(tsdn, arena);
+}
+
+static void
+arena_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
+ void *ptr, size_t oldsize, size_t size)
+{
+ size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+ arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
+ pageind);
+ arena_run_t *run = &miscelm->run;
+
+ assert(size < oldsize);
+
+ /*
+ * Shrink the run, and make trailing pages available for other
+ * allocations.
+ */
+ malloc_mutex_lock(tsdn, &arena->lock);
+ arena_run_trim_tail(tsdn, arena, chunk, run, oldsize + large_pad, size +
+ large_pad, true);
+ if (config_stats) {
+ szind_t oldindex = size2index(oldsize) - NBINS;
+ szind_t index = size2index(size) - NBINS;
+
+ arena->stats.ndalloc_large++;
+ arena->stats.allocated_large -= oldsize;
+ arena->stats.lstats[oldindex].ndalloc++;
+ arena->stats.lstats[oldindex].curruns--;
+
+ arena->stats.nmalloc_large++;
+ arena->stats.nrequests_large++;
+ arena->stats.allocated_large += size;
+ arena->stats.lstats[index].nmalloc++;
+ arena->stats.lstats[index].nrequests++;
+ arena->stats.lstats[index].curruns++;
+ }
+ malloc_mutex_unlock(tsdn, &arena->lock);
+}
+
+static bool
+arena_ralloc_large_grow(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
+ void *ptr, size_t oldsize, size_t usize_min, size_t usize_max, bool zero)
+{
+ size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+ size_t npages = (oldsize + large_pad) >> LG_PAGE;
+ size_t followsize;
+
+ assert(oldsize == arena_mapbits_large_size_get(chunk, pageind) -
+ large_pad);
+
+ /* Try to extend the run. */
+ malloc_mutex_lock(tsdn, &arena->lock);
+ if (pageind+npages >= chunk_npages || arena_mapbits_allocated_get(chunk,
+ pageind+npages) != 0)
+ goto label_fail;
+ followsize = arena_mapbits_unallocated_size_get(chunk, pageind+npages);
+ if (oldsize + followsize >= usize_min) {
+ /*
+ * The next run is available and sufficiently large. Split the
+ * following run, then merge the first part with the existing
+ * allocation.
+ */
+ arena_run_t *run;
+ size_t usize, splitsize, size, flag_dirty, flag_unzeroed_mask;
+
+ usize = usize_max;
+ while (oldsize + followsize < usize)
+ usize = index2size(size2index(usize)-1);
+ assert(usize >= usize_min);
+ assert(usize >= oldsize);
+ splitsize = usize - oldsize;
+ if (splitsize == 0)
+ goto label_fail;
+
+ run = &arena_miscelm_get_mutable(chunk, pageind+npages)->run;
+ if (arena_run_split_large(arena, run, splitsize, zero))
+ goto label_fail;
+
+ if (config_cache_oblivious && zero) {
+ /*
+ * Zero the trailing bytes of the original allocation's
+ * last page, since they are in an indeterminate state.
+ * There will always be trailing bytes, because ptr's
+ * offset from the beginning of the run is a multiple of
+ * CACHELINE in [0 .. PAGE).
+ */
+ void *zbase = (void *)((uintptr_t)ptr + oldsize);
+ void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase +
+ PAGE));
+ size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase;
+ assert(nzero > 0);
+ memset(zbase, 0, nzero);
+ }
+
+ size = oldsize + splitsize;
+ npages = (size + large_pad) >> LG_PAGE;
+
+ /*
+ * Mark the extended run as dirty if either portion of the run
+ * was dirty before allocation. This is rather pedantic,
+ * because there's not actually any sequence of events that
+ * could cause the resulting run to be passed to
+ * arena_run_dalloc() with the dirty argument set to false
+ * (which is when dirty flag consistency would really matter).
+ */
+ flag_dirty = arena_mapbits_dirty_get(chunk, pageind) |
+ arena_mapbits_dirty_get(chunk, pageind+npages-1);
+ flag_unzeroed_mask = flag_dirty == 0 ? CHUNK_MAP_UNZEROED : 0;
+ arena_mapbits_large_set(chunk, pageind, size + large_pad,
+ flag_dirty | (flag_unzeroed_mask &
+ arena_mapbits_unzeroed_get(chunk, pageind)));
+ arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty |
+ (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
+ pageind+npages-1)));
+
+ if (config_stats) {
+ szind_t oldindex = size2index(oldsize) - NBINS;
+ szind_t index = size2index(size) - NBINS;
+
+ arena->stats.ndalloc_large++;
+ arena->stats.allocated_large -= oldsize;
+ arena->stats.lstats[oldindex].ndalloc++;
+ arena->stats.lstats[oldindex].curruns--;
+
+ arena->stats.nmalloc_large++;
+ arena->stats.nrequests_large++;
+ arena->stats.allocated_large += size;
+ arena->stats.lstats[index].nmalloc++;
+ arena->stats.lstats[index].nrequests++;
+ arena->stats.lstats[index].curruns++;
+ }
+ malloc_mutex_unlock(tsdn, &arena->lock);
+ return (false);
+ }
+label_fail:
+ malloc_mutex_unlock(tsdn, &arena->lock);
+ return (true);
+}
+
+#ifdef JEMALLOC_JET
+#undef arena_ralloc_junk_large
+#define arena_ralloc_junk_large JEMALLOC_N(n_arena_ralloc_junk_large)
+#endif
+static void
+arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize)
+{
+
+ if (config_fill && unlikely(opt_junk_free)) {
+ memset((void *)((uintptr_t)ptr + usize), JEMALLOC_FREE_JUNK,
+ old_usize - usize);
+ }
+}
+#ifdef JEMALLOC_JET
+#undef arena_ralloc_junk_large
+#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
+arena_ralloc_junk_large_t *arena_ralloc_junk_large =
+ JEMALLOC_N(n_arena_ralloc_junk_large);
+#endif
+
+/*
+ * Try to resize a large allocation, in order to avoid copying.  Growing will
+ * always fail if the run that follows the existing allocation is already in
+ * use.
+ */
+static bool
+arena_ralloc_large(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min,
+ size_t usize_max, bool zero)
+{
+ arena_chunk_t *chunk;
+ arena_t *arena;
+
+ if (oldsize == usize_max) {
+ /* Current size class is compatible and maximal. */
+ return (false);
+ }
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ arena = extent_node_arena_get(&chunk->node);
+
+ if (oldsize < usize_max) {
+ bool ret = arena_ralloc_large_grow(tsdn, arena, chunk, ptr,
+ oldsize, usize_min, usize_max, zero);
+ if (config_fill && !ret && !zero) {
+ if (unlikely(opt_junk_alloc)) {
+ memset((void *)((uintptr_t)ptr + oldsize),
+ JEMALLOC_ALLOC_JUNK,
+ isalloc(tsdn, ptr, config_prof) - oldsize);
+ } else if (unlikely(opt_zero)) {
+ memset((void *)((uintptr_t)ptr + oldsize), 0,
+ isalloc(tsdn, ptr, config_prof) - oldsize);
+ }
+ }
+ return (ret);
+ }
+
+ assert(oldsize > usize_max);
+	/* Fill before shrinking in order to avoid a race. */
+ arena_ralloc_junk_large(ptr, oldsize, usize_max);
+ arena_ralloc_large_shrink(tsdn, arena, chunk, ptr, oldsize, usize_max);
+ return (false);
+}
+
+bool
+arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
+ size_t extra, bool zero)
+{
+ size_t usize_min, usize_max;
+
+	/*
+	 * Callers that pass non-zero extra must have already clamped it so
+	 * that size + extra cannot exceed HUGE_MAXCLASS.
+	 */
+ assert(extra == 0 || size + extra <= HUGE_MAXCLASS);
+
+ if (unlikely(size > HUGE_MAXCLASS))
+ return (true);
+
+ usize_min = s2u(size);
+ usize_max = s2u(size + extra);
+ if (likely(oldsize <= large_maxclass && usize_min <= large_maxclass)) {
+ arena_chunk_t *chunk;
+
+ /*
+ * Avoid moving the allocation if the size class can be left the
+ * same.
+ */
+ if (oldsize <= SMALL_MAXCLASS) {
+ assert(arena_bin_info[size2index(oldsize)].reg_size ==
+ oldsize);
+ if ((usize_max > SMALL_MAXCLASS ||
+ size2index(usize_max) != size2index(oldsize)) &&
+ (size > oldsize || usize_max < oldsize))
+ return (true);
+ } else {
+ if (usize_max <= SMALL_MAXCLASS)
+ return (true);
+ if (arena_ralloc_large(tsdn, ptr, oldsize, usize_min,
+ usize_max, zero))
+ return (true);
+ }
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ arena_decay_tick(tsdn, extent_node_arena_get(&chunk->node));
+ return (false);
+ } else {
+ return (huge_ralloc_no_move(tsdn, ptr, oldsize, usize_min,
+ usize_max, zero));
+ }
+}
+
+static void *
+arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
+ size_t alignment, bool zero, tcache_t *tcache)
+{
+
+ if (alignment == 0)
+ return (arena_malloc(tsdn, arena, usize, size2index(usize),
+ zero, tcache, true));
+ usize = sa2u(usize, alignment);
+ if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
+ return (NULL);
+ return (ipalloct(tsdn, usize, alignment, zero, tcache, arena));
+}
+
+void *
+arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
+ size_t alignment, bool zero, tcache_t *tcache)
+{
+ void *ret;
+ size_t usize;
+
+ usize = s2u(size);
+ if (unlikely(usize == 0 || size > HUGE_MAXCLASS))
+ return (NULL);
+
+ if (likely(usize <= large_maxclass)) {
+ size_t copysize;
+
+ /* Try to avoid moving the allocation. */
+ if (!arena_ralloc_no_move(tsd_tsdn(tsd), ptr, oldsize, usize, 0,
+ zero))
+ return (ptr);
+
+ /*
+ * size and oldsize are different enough that we need to move
+ * the object. In that case, fall back to allocating new space
+ * and copying.
+ */
+ ret = arena_ralloc_move_helper(tsd_tsdn(tsd), arena, usize,
+ alignment, zero, tcache);
+ if (ret == NULL)
+ return (NULL);
+
+ /*
+ * Junk/zero-filling were already done by
+ * ipalloc()/arena_malloc().
+ */
+
+ copysize = (usize < oldsize) ? usize : oldsize;
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
+ memcpy(ret, ptr, copysize);
+ isqalloc(tsd, ptr, oldsize, tcache, true);
+ } else {
+ ret = huge_ralloc(tsd, arena, ptr, oldsize, usize, alignment,
+ zero, tcache);
+ }
+ return (ret);
+}
+
+dss_prec_t
+arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena)
+{
+ dss_prec_t ret;
+
+ malloc_mutex_lock(tsdn, &arena->lock);
+ ret = arena->dss_prec;
+ malloc_mutex_unlock(tsdn, &arena->lock);
+ return (ret);
+}
+
+bool
+arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec)
+{
+
+ if (!have_dss)
+ return (dss_prec != dss_prec_disabled);
+ malloc_mutex_lock(tsdn, &arena->lock);
+ arena->dss_prec = dss_prec;
+ malloc_mutex_unlock(tsdn, &arena->lock);
+ return (false);
+}
+
+ssize_t
+arena_lg_dirty_mult_default_get(void)
+{
+
+ return ((ssize_t)atomic_read_z((size_t *)&lg_dirty_mult_default));
+}
+
+bool
+arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult)
+{
+
+ if (opt_purge != purge_mode_ratio)
+ return (true);
+ if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
+ return (true);
+ atomic_write_z((size_t *)&lg_dirty_mult_default, (size_t)lg_dirty_mult);
+ return (false);
+}
+
+ssize_t
+arena_decay_time_default_get(void)
+{
+
+ return ((ssize_t)atomic_read_z((size_t *)&decay_time_default));
+}
+
+bool
+arena_decay_time_default_set(ssize_t decay_time)
+{
+
+ if (opt_purge != purge_mode_decay)
+ return (true);
+ if (!arena_decay_time_valid(decay_time))
+ return (true);
+ atomic_write_z((size_t *)&decay_time_default, (size_t)decay_time);
+ return (false);
+}
+
+static void
+arena_basic_stats_merge_locked(arena_t *arena, unsigned *nthreads,
+ const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
+ size_t *nactive, size_t *ndirty)
+{
+
+ *nthreads += arena_nthreads_get(arena, false);
+ *dss = dss_prec_names[arena->dss_prec];
+ *lg_dirty_mult = arena->lg_dirty_mult;
+ *decay_time = arena->decay.time;
+ *nactive += arena->nactive;
+ *ndirty += arena->ndirty;
+}
+
+void
+arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
+ const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
+ size_t *nactive, size_t *ndirty)
+{
+
+ malloc_mutex_lock(tsdn, &arena->lock);
+ arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult,
+ decay_time, nactive, ndirty);
+ malloc_mutex_unlock(tsdn, &arena->lock);
+}
+
+void
+arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
+ const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
+ size_t *nactive, size_t *ndirty, arena_stats_t *astats,
+ malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats,
+ malloc_huge_stats_t *hstats)
+{
+ unsigned i;
+
+ cassert(config_stats);
+
+ malloc_mutex_lock(tsdn, &arena->lock);
+ arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult,
+ decay_time, nactive, ndirty);
+
+ astats->mapped += arena->stats.mapped;
+ astats->retained += arena->stats.retained;
+ astats->npurge += arena->stats.npurge;
+ astats->nmadvise += arena->stats.nmadvise;
+ astats->purged += arena->stats.purged;
+ astats->metadata_mapped += arena->stats.metadata_mapped;
+ astats->metadata_allocated += arena_metadata_allocated_get(arena);
+ astats->allocated_large += arena->stats.allocated_large;
+ astats->nmalloc_large += arena->stats.nmalloc_large;
+ astats->ndalloc_large += arena->stats.ndalloc_large;
+ astats->nrequests_large += arena->stats.nrequests_large;
+ astats->allocated_huge += arena->stats.allocated_huge;
+ astats->nmalloc_huge += arena->stats.nmalloc_huge;
+ astats->ndalloc_huge += arena->stats.ndalloc_huge;
+
+ for (i = 0; i < nlclasses; i++) {
+ lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
+ lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
+ lstats[i].nrequests += arena->stats.lstats[i].nrequests;
+ lstats[i].curruns += arena->stats.lstats[i].curruns;
+ }
+
+ for (i = 0; i < nhclasses; i++) {
+ hstats[i].nmalloc += arena->stats.hstats[i].nmalloc;
+ hstats[i].ndalloc += arena->stats.hstats[i].ndalloc;
+ hstats[i].curhchunks += arena->stats.hstats[i].curhchunks;
+ }
+ malloc_mutex_unlock(tsdn, &arena->lock);
+
+ for (i = 0; i < NBINS; i++) {
+ arena_bin_t *bin = &arena->bins[i];
+
+ malloc_mutex_lock(tsdn, &bin->lock);
+ bstats[i].nmalloc += bin->stats.nmalloc;
+ bstats[i].ndalloc += bin->stats.ndalloc;
+ bstats[i].nrequests += bin->stats.nrequests;
+ bstats[i].curregs += bin->stats.curregs;
+ if (config_tcache) {
+ bstats[i].nfills += bin->stats.nfills;
+ bstats[i].nflushes += bin->stats.nflushes;
+ }
+ bstats[i].nruns += bin->stats.nruns;
+ bstats[i].reruns += bin->stats.reruns;
+ bstats[i].curruns += bin->stats.curruns;
+ malloc_mutex_unlock(tsdn, &bin->lock);
+ }
+}
+
+unsigned
+arena_nthreads_get(arena_t *arena, bool internal)
+{
+
+ return (atomic_read_u(&arena->nthreads[internal]));
+}
+
+void
+arena_nthreads_inc(arena_t *arena, bool internal)
+{
+
+ atomic_add_u(&arena->nthreads[internal], 1);
+}
+
+void
+arena_nthreads_dec(arena_t *arena, bool internal)
+{
+
+ atomic_sub_u(&arena->nthreads[internal], 1);
+}
+
+size_t
+arena_extent_sn_next(arena_t *arena)
+{
+
+ return (atomic_add_z(&arena->extent_sn_next, 1) - 1);
+}
+
+arena_t *
+arena_new(tsdn_t *tsdn, unsigned ind)
+{
+ arena_t *arena;
+ unsigned i;
+
+ /*
+ * Allocate arena, arena->lstats, and arena->hstats contiguously, mainly
+ * because there is no way to clean up if base_alloc() OOMs.
+ */
+ if (config_stats) {
+ arena = (arena_t *)base_alloc(tsdn,
+ CACHELINE_CEILING(sizeof(arena_t)) +
+ QUANTUM_CEILING((nlclasses * sizeof(malloc_large_stats_t)))
+ + (nhclasses * sizeof(malloc_huge_stats_t)));
+ } else
+ arena = (arena_t *)base_alloc(tsdn, sizeof(arena_t));
+ if (arena == NULL)
+ return (NULL);
+
+ arena->ind = ind;
+ arena->nthreads[0] = arena->nthreads[1] = 0;
+ if (malloc_mutex_init(&arena->lock, "arena", WITNESS_RANK_ARENA))
+ return (NULL);
+
+ if (config_stats) {
+ memset(&arena->stats, 0, sizeof(arena_stats_t));
+ arena->stats.lstats = (malloc_large_stats_t *)((uintptr_t)arena
+ + CACHELINE_CEILING(sizeof(arena_t)));
+ memset(arena->stats.lstats, 0, nlclasses *
+ sizeof(malloc_large_stats_t));
+ arena->stats.hstats = (malloc_huge_stats_t *)((uintptr_t)arena
+ + CACHELINE_CEILING(sizeof(arena_t)) +
+ QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t)));
+ memset(arena->stats.hstats, 0, nhclasses *
+ sizeof(malloc_huge_stats_t));
+ if (config_tcache)
+ ql_new(&arena->tcache_ql);
+ }
+
+ if (config_prof)
+ arena->prof_accumbytes = 0;
+
+ if (config_cache_oblivious) {
+ /*
+ * A nondeterministic seed based on the address of arena reduces
+ * the likelihood of lockstep non-uniform cache index
+ * utilization among identical concurrent processes, but at the
+ * cost of test repeatability. For debug builds, instead use a
+ * deterministic seed.
+ */
+ arena->offset_state = config_debug ? ind :
+ (size_t)(uintptr_t)arena;
+ }
+
+ arena->dss_prec = chunk_dss_prec_get();
+
+ ql_new(&arena->achunks);
+
+ arena->extent_sn_next = 0;
+
+ arena->spare = NULL;
+
+ arena->lg_dirty_mult = arena_lg_dirty_mult_default_get();
+ arena->purging = false;
+ arena->nactive = 0;
+ arena->ndirty = 0;
+
+ for (i = 0; i < NPSIZES; i++)
+ arena_run_heap_new(&arena->runs_avail[i]);
+
+ qr_new(&arena->runs_dirty, rd_link);
+ qr_new(&arena->chunks_cache, cc_link);
+
+ if (opt_purge == purge_mode_decay)
+ arena_decay_init(arena, arena_decay_time_default_get());
+
+ ql_new(&arena->huge);
+ if (malloc_mutex_init(&arena->huge_mtx, "arena_huge",
+ WITNESS_RANK_ARENA_HUGE))
+ return (NULL);
+
+ extent_tree_szsnad_new(&arena->chunks_szsnad_cached);
+ extent_tree_ad_new(&arena->chunks_ad_cached);
+ extent_tree_szsnad_new(&arena->chunks_szsnad_retained);
+ extent_tree_ad_new(&arena->chunks_ad_retained);
+ if (malloc_mutex_init(&arena->chunks_mtx, "arena_chunks",
+ WITNESS_RANK_ARENA_CHUNKS))
+ return (NULL);
+ ql_new(&arena->node_cache);
+ if (malloc_mutex_init(&arena->node_cache_mtx, "arena_node_cache",
+ WITNESS_RANK_ARENA_NODE_CACHE))
+ return (NULL);
+
+ arena->chunk_hooks = chunk_hooks_default;
+
+ /* Initialize bins. */
+ for (i = 0; i < NBINS; i++) {
+ arena_bin_t *bin = &arena->bins[i];
+ if (malloc_mutex_init(&bin->lock, "arena_bin",
+ WITNESS_RANK_ARENA_BIN))
+ return (NULL);
+ bin->runcur = NULL;
+ arena_run_heap_new(&bin->runs);
+ if (config_stats)
+ memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
+ }
+
+ return (arena);
+}
+
+/*
+ * Calculate bin_info->run_size such that it meets the following constraints:
+ *
+ * *) bin_info->run_size <= arena_maxrun
+ * *) bin_info->nregs <= RUN_MAXREGS
+ *
+ * bin_info->nregs and bin_info->reg0_offset are also calculated here, since
+ * these settings are all interdependent.
+ */
+static void
+bin_info_run_size_calc(arena_bin_info_t *bin_info)
+{
+ size_t pad_size;
+ size_t try_run_size, perfect_run_size, actual_run_size;
+ uint32_t try_nregs, perfect_nregs, actual_nregs;
+
+ /*
+ * Determine redzone size based on minimum alignment and minimum
+ * redzone size. Add padding to the end of the run if it is needed to
+ * align the regions. The padding allows each redzone to be half the
+ * minimum alignment; without the padding, each redzone would have to
+ * be twice as large in order to maintain alignment.
+ */
+ if (config_fill && unlikely(opt_redzone)) {
+ size_t align_min = ZU(1) << (ffs_zu(bin_info->reg_size) - 1);
+ if (align_min <= REDZONE_MINSIZE) {
+ bin_info->redzone_size = REDZONE_MINSIZE;
+ pad_size = 0;
+ } else {
+ bin_info->redzone_size = align_min >> 1;
+ pad_size = bin_info->redzone_size;
+ }
+ } else {
+ bin_info->redzone_size = 0;
+ pad_size = 0;
+ }
+ bin_info->reg_interval = bin_info->reg_size +
+ (bin_info->redzone_size << 1);
+
+ /*
+ * Compute run size under ideal conditions (no redzones, no limit on run
+ * size).
+ */
+ try_run_size = PAGE;
+ try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
+ do {
+ perfect_run_size = try_run_size;
+ perfect_nregs = try_nregs;
+
+ try_run_size += PAGE;
+ try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
+ } while (perfect_run_size != perfect_nregs * bin_info->reg_size);
+ assert(perfect_nregs <= RUN_MAXREGS);
+
+ actual_run_size = perfect_run_size;
+ actual_nregs = (uint32_t)((actual_run_size - pad_size) /
+ bin_info->reg_interval);
+
+ /*
+ * Redzones can require enough padding that not even a single region can
+ * fit within the number of pages that would normally be dedicated to a
+ * run for this size class. Increase the run size until at least one
+ * region fits.
+ */
+ while (actual_nregs == 0) {
+ assert(config_fill && unlikely(opt_redzone));
+
+ actual_run_size += PAGE;
+ actual_nregs = (uint32_t)((actual_run_size - pad_size) /
+ bin_info->reg_interval);
+ }
+
+ /*
+ * Make sure that the run will fit within an arena chunk.
+ */
+ while (actual_run_size > arena_maxrun) {
+ actual_run_size -= PAGE;
+ actual_nregs = (uint32_t)((actual_run_size - pad_size) /
+ bin_info->reg_interval);
+ }
+ assert(actual_nregs > 0);
+ assert(actual_run_size == s2u(actual_run_size));
+
+ /* Copy final settings. */
+ bin_info->run_size = actual_run_size;
+ bin_info->nregs = actual_nregs;
+ bin_info->reg0_offset = (uint32_t)(actual_run_size - (actual_nregs *
+ bin_info->reg_interval) - pad_size + bin_info->redzone_size);
+
+ assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
+ * bin_info->reg_interval) + pad_size == bin_info->run_size);
+}
+
+static void
+bin_info_init(void)
+{
+ arena_bin_info_t *bin_info;
+
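+	/*
+	 * Expand SIZE_CLASSES once, initializing reg_size, run geometry, and
+	 * the region bitmap for each small (bin == yes) size class.
+	 */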
+#define BIN_INFO_INIT_bin_yes(index, size) \
+ bin_info = &arena_bin_info[index]; \
+ bin_info->reg_size = size; \
+ bin_info_run_size_calc(bin_info); \
+ bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
+#define BIN_INFO_INIT_bin_no(index, size)
+#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \
+ BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
+ SIZE_CLASSES
+#undef BIN_INFO_INIT_bin_yes
+#undef BIN_INFO_INIT_bin_no
+#undef SC
+}
+
+void
+arena_boot(void)
+{
+ unsigned i;
+
+ arena_lg_dirty_mult_default_set(opt_lg_dirty_mult);
+ arena_decay_time_default_set(opt_decay_time);
+
+ /*
+ * Compute the header size such that it is large enough to contain the
+ * page map. The page map is biased to omit entries for the header
+ * itself, so some iteration is necessary to compute the map bias.
+ *
+ * 1) Compute safe header_size and map_bias values that include enough
+ * space for an unbiased page map.
+ * 2) Refine map_bias based on (1) to omit the header pages in the page
+ * map. The resulting map_bias may be one too small.
+ * 3) Refine map_bias based on (2). The result will be >= the result
+ * from (2), and will always be correct.
+ */
+ map_bias = 0;
+ for (i = 0; i < 3; i++) {
+ size_t header_size = offsetof(arena_chunk_t, map_bits) +
+ ((sizeof(arena_chunk_map_bits_t) +
+ sizeof(arena_chunk_map_misc_t)) * (chunk_npages-map_bias));
+ map_bias = (header_size + PAGE_MASK) >> LG_PAGE;
+ }
+ assert(map_bias > 0);
+
+ map_misc_offset = offsetof(arena_chunk_t, map_bits) +
+ sizeof(arena_chunk_map_bits_t) * (chunk_npages-map_bias);
+
+ arena_maxrun = chunksize - (map_bias << LG_PAGE);
+ assert(arena_maxrun > 0);
+ large_maxclass = index2size(size2index(chunksize)-1);
+ if (large_maxclass > arena_maxrun) {
+ /*
+ * For small chunk sizes it's possible for there to be fewer
+ * non-header pages available than are necessary to serve the
+ * size classes just below chunksize.
+ */
+ large_maxclass = arena_maxrun;
+ }
+ assert(large_maxclass > 0);
+ nlclasses = size2index(large_maxclass) - size2index(SMALL_MAXCLASS);
+ nhclasses = NSIZES - nlclasses - NBINS;
+
+ bin_info_init();
+}
+
+void
+arena_prefork0(tsdn_t *tsdn, arena_t *arena)
+{
+
+ malloc_mutex_prefork(tsdn, &arena->lock);
+}
+
+void
+arena_prefork1(tsdn_t *tsdn, arena_t *arena)
+{
+
+ malloc_mutex_prefork(tsdn, &arena->chunks_mtx);
+}
+
+void
+arena_prefork2(tsdn_t *tsdn, arena_t *arena)
+{
+
+ malloc_mutex_prefork(tsdn, &arena->node_cache_mtx);
+}
+
+void
+arena_prefork3(tsdn_t *tsdn, arena_t *arena)
+{
+ unsigned i;
+
+ for (i = 0; i < NBINS; i++)
+ malloc_mutex_prefork(tsdn, &arena->bins[i].lock);
+ malloc_mutex_prefork(tsdn, &arena->huge_mtx);
+}
+
+void
+arena_postfork_parent(tsdn_t *tsdn, arena_t *arena)
+{
+ unsigned i;
+
+ malloc_mutex_postfork_parent(tsdn, &arena->huge_mtx);
+ for (i = 0; i < NBINS; i++)
+ malloc_mutex_postfork_parent(tsdn, &arena->bins[i].lock);
+ malloc_mutex_postfork_parent(tsdn, &arena->node_cache_mtx);
+ malloc_mutex_postfork_parent(tsdn, &arena->chunks_mtx);
+ malloc_mutex_postfork_parent(tsdn, &arena->lock);
+}
+
+void
+arena_postfork_child(tsdn_t *tsdn, arena_t *arena)
+{
+ unsigned i;
+
+ malloc_mutex_postfork_child(tsdn, &arena->huge_mtx);
+ for (i = 0; i < NBINS; i++)
+ malloc_mutex_postfork_child(tsdn, &arena->bins[i].lock);
+ malloc_mutex_postfork_child(tsdn, &arena->node_cache_mtx);
+ malloc_mutex_postfork_child(tsdn, &arena->chunks_mtx);
+ malloc_mutex_postfork_child(tsdn, &arena->lock);
+}
diff --git a/deps/jemalloc/src/atomic.c b/deps/jemalloc/src/atomic.c
new file mode 100644
index 0000000..77ee313
--- /dev/null
+++ b/deps/jemalloc/src/atomic.c
@@ -0,0 +1,2 @@
+#define JEMALLOC_ATOMIC_C_
+#include "jemalloc/internal/jemalloc_internal.h"
diff --git a/deps/jemalloc/src/base.c b/deps/jemalloc/src/base.c
new file mode 100644
index 0000000..5681a3f
--- /dev/null
+++ b/deps/jemalloc/src/base.c
@@ -0,0 +1,187 @@
+#define JEMALLOC_BASE_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+/* Data. */
+
+static malloc_mutex_t base_mtx;
+static size_t base_extent_sn_next;
+static extent_tree_t base_avail_szsnad;
+static extent_node_t *base_nodes;
+static size_t base_allocated;
+static size_t base_resident;
+static size_t base_mapped;
+
+/******************************************************************************/
+
+static extent_node_t *
+base_node_try_alloc(tsdn_t *tsdn)
+{
+ extent_node_t *node;
+
+ malloc_mutex_assert_owner(tsdn, &base_mtx);
+
+ if (base_nodes == NULL)
+ return (NULL);
+ node = base_nodes;
+ base_nodes = *(extent_node_t **)node;
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
+ return (node);
+}
+
+static void
+base_node_dalloc(tsdn_t *tsdn, extent_node_t *node)
+{
+
+ malloc_mutex_assert_owner(tsdn, &base_mtx);
+
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
+ *(extent_node_t **)node = base_nodes;
+ base_nodes = node;
+}
+
+static void
+base_extent_node_init(extent_node_t *node, void *addr, size_t size)
+{
+ size_t sn = atomic_add_z(&base_extent_sn_next, 1) - 1;
+
+ extent_node_init(node, NULL, addr, size, sn, true, true);
+}
+
+static extent_node_t *
+base_chunk_alloc(tsdn_t *tsdn, size_t minsize)
+{
+ extent_node_t *node;
+ size_t csize, nsize;
+ void *addr;
+
+ malloc_mutex_assert_owner(tsdn, &base_mtx);
+ assert(minsize != 0);
+ node = base_node_try_alloc(tsdn);
+ /* Allocate enough space to also carve a node out if necessary. */
+ nsize = (node == NULL) ? CACHELINE_CEILING(sizeof(extent_node_t)) : 0;
+ csize = CHUNK_CEILING(minsize + nsize);
+ addr = chunk_alloc_base(csize);
+ if (addr == NULL) {
+ if (node != NULL)
+ base_node_dalloc(tsdn, node);
+ return (NULL);
+ }
+ base_mapped += csize;
+ if (node == NULL) {
+ node = (extent_node_t *)addr;
+ addr = (void *)((uintptr_t)addr + nsize);
+ csize -= nsize;
+ if (config_stats) {
+ base_allocated += nsize;
+ base_resident += PAGE_CEILING(nsize);
+ }
+ }
+ base_extent_node_init(node, addr, csize);
+ return (node);
+}
+
+/*
+ * base_alloc() guarantees demand-zeroed memory, in order to make multi-page
+ * sparse data structures such as radix tree nodes efficient with respect to
+ * physical memory usage.
+ */
+void *
+base_alloc(tsdn_t *tsdn, size_t size)
+{
+ void *ret;
+ size_t csize, usize;
+ extent_node_t *node;
+ extent_node_t key;
+
+ /*
+ * Round size up to nearest multiple of the cacheline size, so that
+ * there is no chance of false cache line sharing.
+ */
+ csize = CACHELINE_CEILING(size);
+
+ usize = s2u(csize);
+ extent_node_init(&key, NULL, NULL, usize, 0, false, false);
+ malloc_mutex_lock(tsdn, &base_mtx);
+ node = extent_tree_szsnad_nsearch(&base_avail_szsnad, &key);
+ if (node != NULL) {
+ /* Use existing space. */
+ extent_tree_szsnad_remove(&base_avail_szsnad, node);
+ } else {
+ /* Try to allocate more space. */
+ node = base_chunk_alloc(tsdn, csize);
+ }
+ if (node == NULL) {
+ ret = NULL;
+ goto label_return;
+ }
+
+ ret = extent_node_addr_get(node);
+ if (extent_node_size_get(node) > csize) {
+ extent_node_addr_set(node, (void *)((uintptr_t)ret + csize));
+ extent_node_size_set(node, extent_node_size_get(node) - csize);
+ extent_tree_szsnad_insert(&base_avail_szsnad, node);
+ } else
+ base_node_dalloc(tsdn, node);
+ if (config_stats) {
+ base_allocated += csize;
+ /*
+ * Add one PAGE to base_resident for every page boundary that is
+ * crossed by the new allocation.
+ */
+ base_resident += PAGE_CEILING((uintptr_t)ret + csize) -
+ PAGE_CEILING((uintptr_t)ret);
+ }
+ JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, csize);
+label_return:
+ malloc_mutex_unlock(tsdn, &base_mtx);
+ return (ret);
+}
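
To illustrate the accounting in base_alloc() above: csize is the request rounded up to a cache line, and base_resident grows by one PAGE for every page boundary the new allocation crosses. A small standalone sketch with made-up constants (the CACHELINE and PAGE values and the example address are assumptions, not the build's actual configuration):

#include <stdio.h>
#include <stdint.h>

#define CACHELINE	64
#define PAGE		4096
#define CEIL(x, a)	(((uintptr_t)(x) + ((a) - 1)) & ~((uintptr_t)(a) - 1))

int
main(void)
{
	uintptr_t ret = 0x1fc0;			/* hypothetical allocation base */
	size_t csize = CEIL(100, CACHELINE);	/* 100 -> 128 */
	/* One PAGE of resident memory per page boundary crossed. */
	size_t resident = CEIL(ret + csize, PAGE) - CEIL(ret, PAGE);

	printf("csize=%zu resident=%zu\n", csize, resident);
	return (0);
}

Here the 128-byte allocation straddles the 8192-byte page boundary, so it is charged 4096 bytes of resident memory even though only a fraction of that is touched.
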
+
+void
+base_stats_get(tsdn_t *tsdn, size_t *allocated, size_t *resident,
+ size_t *mapped)
+{
+
+ malloc_mutex_lock(tsdn, &base_mtx);
+ assert(base_allocated <= base_resident);
+ assert(base_resident <= base_mapped);
+ *allocated = base_allocated;
+ *resident = base_resident;
+ *mapped = base_mapped;
+ malloc_mutex_unlock(tsdn, &base_mtx);
+}
+
+bool
+base_boot(void)
+{
+
+ if (malloc_mutex_init(&base_mtx, "base", WITNESS_RANK_BASE))
+ return (true);
+ base_extent_sn_next = 0;
+ extent_tree_szsnad_new(&base_avail_szsnad);
+ base_nodes = NULL;
+
+ return (false);
+}
+
+void
+base_prefork(tsdn_t *tsdn)
+{
+
+ malloc_mutex_prefork(tsdn, &base_mtx);
+}
+
+void
+base_postfork_parent(tsdn_t *tsdn)
+{
+
+ malloc_mutex_postfork_parent(tsdn, &base_mtx);
+}
+
+void
+base_postfork_child(tsdn_t *tsdn)
+{
+
+ malloc_mutex_postfork_child(tsdn, &base_mtx);
+}
diff --git a/deps/jemalloc/src/bitmap.c b/deps/jemalloc/src/bitmap.c
new file mode 100644
index 0000000..ac0f3b3
--- /dev/null
+++ b/deps/jemalloc/src/bitmap.c
@@ -0,0 +1,111 @@
+#define JEMALLOC_BITMAP_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+
+#ifdef USE_TREE
+
+void
+bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
+{
+ unsigned i;
+ size_t group_count;
+
+ assert(nbits > 0);
+ assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS));
+
+ /*
+ * Compute the number of groups necessary to store nbits bits, and
+ * progressively work upward through the levels until reaching a level
+ * that requires only one group.
+ */
+ binfo->levels[0].group_offset = 0;
+ group_count = BITMAP_BITS2GROUPS(nbits);
+ for (i = 1; group_count > 1; i++) {
+ assert(i < BITMAP_MAX_LEVELS);
+ binfo->levels[i].group_offset = binfo->levels[i-1].group_offset
+ + group_count;
+ group_count = BITMAP_BITS2GROUPS(group_count);
+ }
+ binfo->levels[i].group_offset = binfo->levels[i-1].group_offset
+ + group_count;
+ assert(binfo->levels[i].group_offset <= BITMAP_GROUPS_MAX);
+ binfo->nlevels = i;
+ binfo->nbits = nbits;
+}
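
The loop in bitmap_info_init() sizes one level per summary layer: level 0 needs one group per group-width of bits, and each higher level needs one bit per group below it, until a single group remains. A standalone sketch of that sizing, assuming a 64-bit group width (GROUP_NBITS here is an assumption, not necessarily BITMAP_GROUP_NBITS on a given build):

#include <stdio.h>
#include <stddef.h>

#define GROUP_NBITS	64
#define BITS2GROUPS(nb)	(((nb) + GROUP_NBITS - 1) / GROUP_NBITS)

int
main(void)
{
	size_t nbits = 10000;
	size_t group_count = BITS2GROUPS(nbits);
	unsigned level = 0;

	printf("level %u: %zu groups\n", level, group_count);
	while (group_count > 1) {
		group_count = BITS2GROUPS(group_count);
		level++;
		printf("level %u: %zu groups\n", level, group_count);
	}
	return (0);
}

For 10000 bits this prints 157 groups at level 0, 3 at level 1, and 1 at level 2, matching the "work upward until one group remains" description.
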
+
+static size_t
+bitmap_info_ngroups(const bitmap_info_t *binfo)
+{
+
+ return (binfo->levels[binfo->nlevels].group_offset);
+}
+
+void
+bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo)
+{
+ size_t extra;
+ unsigned i;
+
+ /*
+ * Bits are actually inverted with regard to the external bitmap
+ * interface, so the bitmap starts out with all 1 bits, except for
+ * trailing unused bits (if any). Note that each group uses bit 0 to
+ * correspond to the first logical bit in the group, so extra bits
+ * are the most significant bits of the last group.
+ */
+ memset(bitmap, 0xffU, bitmap_size(binfo));
+ extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
+ & BITMAP_GROUP_NBITS_MASK;
+ if (extra != 0)
+ bitmap[binfo->levels[1].group_offset - 1] >>= extra;
+ for (i = 1; i < binfo->nlevels; i++) {
+ size_t group_count = binfo->levels[i].group_offset -
+ binfo->levels[i-1].group_offset;
+ extra = (BITMAP_GROUP_NBITS - (group_count &
+ BITMAP_GROUP_NBITS_MASK)) & BITMAP_GROUP_NBITS_MASK;
+ if (extra != 0)
+ bitmap[binfo->levels[i+1].group_offset - 1] >>= extra;
+ }
+}
+
+#else /* USE_TREE */
+
+void
+bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
+{
+
+ assert(nbits > 0);
+ assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS));
+
+ binfo->ngroups = BITMAP_BITS2GROUPS(nbits);
+ binfo->nbits = nbits;
+}
+
+static size_t
+bitmap_info_ngroups(const bitmap_info_t *binfo)
+{
+
+ return (binfo->ngroups);
+}
+
+void
+bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo)
+{
+ size_t extra;
+
+ memset(bitmap, 0xffU, bitmap_size(binfo));
+ extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
+ & BITMAP_GROUP_NBITS_MASK;
+ if (extra != 0)
+ bitmap[binfo->ngroups - 1] >>= extra;
+}
+
+#endif /* USE_TREE */
+
+size_t
+bitmap_size(const bitmap_info_t *binfo)
+{
+
+ return (bitmap_info_ngroups(binfo) << LG_SIZEOF_BITMAP);
+}
diff --git a/deps/jemalloc/src/chunk.c b/deps/jemalloc/src/chunk.c
new file mode 100644
index 0000000..c1c514a
--- /dev/null
+++ b/deps/jemalloc/src/chunk.c
@@ -0,0 +1,795 @@
+#define JEMALLOC_CHUNK_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+/* Data. */
+
+const char *opt_dss = DSS_DEFAULT;
+size_t opt_lg_chunk = 0;
+
+/* Used exclusively for gdump triggering. */
+static size_t curchunks;
+static size_t highchunks;
+
+rtree_t chunks_rtree;
+
+/* Various chunk-related settings. */
+size_t chunksize;
+size_t chunksize_mask; /* (chunksize - 1). */
+size_t chunk_npages;
+
+static void *chunk_alloc_default(void *new_addr, size_t size,
+ size_t alignment, bool *zero, bool *commit, unsigned arena_ind);
+static bool chunk_dalloc_default(void *chunk, size_t size, bool committed,
+ unsigned arena_ind);
+static bool chunk_commit_default(void *chunk, size_t size, size_t offset,
+ size_t length, unsigned arena_ind);
+static bool chunk_decommit_default(void *chunk, size_t size, size_t offset,
+ size_t length, unsigned arena_ind);
+static bool chunk_purge_default(void *chunk, size_t size, size_t offset,
+ size_t length, unsigned arena_ind);
+static bool chunk_split_default(void *chunk, size_t size, size_t size_a,
+ size_t size_b, bool committed, unsigned arena_ind);
+static bool chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b,
+ size_t size_b, bool committed, unsigned arena_ind);
+
+const chunk_hooks_t chunk_hooks_default = {
+ chunk_alloc_default,
+ chunk_dalloc_default,
+ chunk_commit_default,
+ chunk_decommit_default,
+ chunk_purge_default,
+ chunk_split_default,
+ chunk_merge_default
+};
+
+/******************************************************************************/
+/*
+ * Function prototypes for static functions that are referenced prior to
+ * definition.
+ */
+
+static void chunk_record(tsdn_t *tsdn, arena_t *arena,
+ chunk_hooks_t *chunk_hooks, extent_tree_t *chunks_szsnad,
+ extent_tree_t *chunks_ad, bool cache, void *chunk, size_t size, size_t sn,
+ bool zeroed, bool committed);
+
+/******************************************************************************/
+
+static chunk_hooks_t
+chunk_hooks_get_locked(arena_t *arena)
+{
+
+ return (arena->chunk_hooks);
+}
+
+chunk_hooks_t
+chunk_hooks_get(tsdn_t *tsdn, arena_t *arena)
+{
+ chunk_hooks_t chunk_hooks;
+
+ malloc_mutex_lock(tsdn, &arena->chunks_mtx);
+ chunk_hooks = chunk_hooks_get_locked(arena);
+ malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
+
+ return (chunk_hooks);
+}
+
+chunk_hooks_t
+chunk_hooks_set(tsdn_t *tsdn, arena_t *arena, const chunk_hooks_t *chunk_hooks)
+{
+ chunk_hooks_t old_chunk_hooks;
+
+ malloc_mutex_lock(tsdn, &arena->chunks_mtx);
+ old_chunk_hooks = arena->chunk_hooks;
+ /*
+ * Copy each field atomically so that it is impossible for readers to
+ * see partially updated pointers. There are places where readers only
+ * need one hook function pointer (therefore no need to copy the
+ * entirety of arena->chunk_hooks), and stale reads do not affect
+ * correctness, so they perform unlocked reads.
+ */
+#define ATOMIC_COPY_HOOK(n) do { \
+ union { \
+ chunk_##n##_t **n; \
+ void **v; \
+ } u; \
+ u.n = &arena->chunk_hooks.n; \
+ atomic_write_p(u.v, chunk_hooks->n); \
+} while (0)
+ ATOMIC_COPY_HOOK(alloc);
+ ATOMIC_COPY_HOOK(dalloc);
+ ATOMIC_COPY_HOOK(commit);
+ ATOMIC_COPY_HOOK(decommit);
+ ATOMIC_COPY_HOOK(purge);
+ ATOMIC_COPY_HOOK(split);
+ ATOMIC_COPY_HOOK(merge);
+#undef ATOMIC_COPY_HOOK
+ malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
+
+ return (old_chunk_hooks);
+}
+
+static void
+chunk_hooks_assure_initialized_impl(tsdn_t *tsdn, arena_t *arena,
+ chunk_hooks_t *chunk_hooks, bool locked)
+{
+ static const chunk_hooks_t uninitialized_hooks =
+ CHUNK_HOOKS_INITIALIZER;
+
+ if (memcmp(chunk_hooks, &uninitialized_hooks, sizeof(chunk_hooks_t)) ==
+ 0) {
+ *chunk_hooks = locked ? chunk_hooks_get_locked(arena) :
+ chunk_hooks_get(tsdn, arena);
+ }
+}
+
+static void
+chunk_hooks_assure_initialized_locked(tsdn_t *tsdn, arena_t *arena,
+ chunk_hooks_t *chunk_hooks)
+{
+
+ chunk_hooks_assure_initialized_impl(tsdn, arena, chunk_hooks, true);
+}
+
+static void
+chunk_hooks_assure_initialized(tsdn_t *tsdn, arena_t *arena,
+ chunk_hooks_t *chunk_hooks)
+{
+
+ chunk_hooks_assure_initialized_impl(tsdn, arena, chunk_hooks, false);
+}
+
+bool
+chunk_register(tsdn_t *tsdn, const void *chunk, const extent_node_t *node)
+{
+
+ assert(extent_node_addr_get(node) == chunk);
+
+ if (rtree_set(&chunks_rtree, (uintptr_t)chunk, node))
+ return (true);
+ if (config_prof && opt_prof) {
+ size_t size = extent_node_size_get(node);
+ size_t nadd = (size == 0) ? 1 : size / chunksize;
+ size_t cur = atomic_add_z(&curchunks, nadd);
+ size_t high = atomic_read_z(&highchunks);
+ while (cur > high && atomic_cas_z(&highchunks, high, cur)) {
+ /*
+ * Don't refresh cur, because it may have decreased
+ * since this thread lost the highchunks update race.
+ */
+ high = atomic_read_z(&highchunks);
+ }
+ if (cur > high && prof_gdump_get_unlocked())
+ prof_gdump(tsdn);
+ }
+
+ return (false);
+}
+
+void
+chunk_deregister(const void *chunk, const extent_node_t *node)
+{
+ bool err;
+
+ err = rtree_set(&chunks_rtree, (uintptr_t)chunk, NULL);
+ assert(!err);
+ if (config_prof && opt_prof) {
+ size_t size = extent_node_size_get(node);
+ size_t nsub = (size == 0) ? 1 : size / chunksize;
+ assert(atomic_read_z(&curchunks) >= nsub);
+ atomic_sub_z(&curchunks, nsub);
+ }
+}
+
+/*
+ * Do first-best-fit chunk selection, i.e. select the oldest/lowest chunk that
+ * best fits.
+ */
+static extent_node_t *
+chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szsnad, size_t size)
+{
+ extent_node_t key;
+
+ assert(size == CHUNK_CEILING(size));
+
+ extent_node_init(&key, arena, NULL, size, 0, false, false);
+ return (extent_tree_szsnad_nsearch(chunks_szsnad, &key));
+}
+
+static void *
+chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
+ extent_tree_t *chunks_szsnad, extent_tree_t *chunks_ad, bool cache,
+ void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
+ bool *commit, bool dalloc_node)
+{
+ void *ret;
+ extent_node_t *node;
+ size_t alloc_size, leadsize, trailsize;
+ bool zeroed, committed;
+
+ assert(CHUNK_CEILING(size) == size);
+ assert(alignment > 0);
+ assert(new_addr == NULL || alignment == chunksize);
+ assert(CHUNK_ADDR2BASE(new_addr) == new_addr);
+ /*
+ * Cached chunks use the node linkage embedded in their headers, in
+ * which case dalloc_node is true, and new_addr is non-NULL because
+ * we're operating on a specific chunk.
+ */
+ assert(dalloc_node || new_addr != NULL);
+
+ alloc_size = size + CHUNK_CEILING(alignment) - chunksize;
+ /* Beware size_t wrap-around. */
+ if (alloc_size < size)
+ return (NULL);
+ malloc_mutex_lock(tsdn, &arena->chunks_mtx);
+ chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
+ if (new_addr != NULL) {
+ extent_node_t key;
+ extent_node_init(&key, arena, new_addr, alloc_size, 0, false,
+ false);
+ node = extent_tree_ad_search(chunks_ad, &key);
+ } else {
+ node = chunk_first_best_fit(arena, chunks_szsnad, alloc_size);
+ }
+ if (node == NULL || (new_addr != NULL && extent_node_size_get(node) <
+ size)) {
+ malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
+ return (NULL);
+ }
+ leadsize = ALIGNMENT_CEILING((uintptr_t)extent_node_addr_get(node),
+ alignment) - (uintptr_t)extent_node_addr_get(node);
+ assert(new_addr == NULL || leadsize == 0);
+ assert(extent_node_size_get(node) >= leadsize + size);
+ trailsize = extent_node_size_get(node) - leadsize - size;
+ ret = (void *)((uintptr_t)extent_node_addr_get(node) + leadsize);
+ *sn = extent_node_sn_get(node);
+ zeroed = extent_node_zeroed_get(node);
+ if (zeroed)
+ *zero = true;
+ committed = extent_node_committed_get(node);
+ if (committed)
+ *commit = true;
+ /* Split the lead. */
+ if (leadsize != 0 &&
+ chunk_hooks->split(extent_node_addr_get(node),
+ extent_node_size_get(node), leadsize, size, false, arena->ind)) {
+ malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
+ return (NULL);
+ }
+ /* Remove node from the tree. */
+ extent_tree_szsnad_remove(chunks_szsnad, node);
+ extent_tree_ad_remove(chunks_ad, node);
+ arena_chunk_cache_maybe_remove(arena, node, cache);
+ if (leadsize != 0) {
+ /* Insert the leading space as a smaller chunk. */
+ extent_node_size_set(node, leadsize);
+ extent_tree_szsnad_insert(chunks_szsnad, node);
+ extent_tree_ad_insert(chunks_ad, node);
+ arena_chunk_cache_maybe_insert(arena, node, cache);
+ node = NULL;
+ }
+ if (trailsize != 0) {
+ /* Split the trail. */
+ if (chunk_hooks->split(ret, size + trailsize, size,
+ trailsize, false, arena->ind)) {
+ if (dalloc_node && node != NULL)
+ arena_node_dalloc(tsdn, arena, node);
+ malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
+ chunk_record(tsdn, arena, chunk_hooks, chunks_szsnad,
+ chunks_ad, cache, ret, size + trailsize, *sn,
+ zeroed, committed);
+ return (NULL);
+ }
+ /* Insert the trailing space as a smaller chunk. */
+ if (node == NULL) {
+ node = arena_node_alloc(tsdn, arena);
+ if (node == NULL) {
+ malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
+ chunk_record(tsdn, arena, chunk_hooks,
+ chunks_szsnad, chunks_ad, cache, ret, size
+ + trailsize, *sn, zeroed, committed);
+ return (NULL);
+ }
+ }
+ extent_node_init(node, arena, (void *)((uintptr_t)(ret) + size),
+ trailsize, *sn, zeroed, committed);
+ extent_tree_szsnad_insert(chunks_szsnad, node);
+ extent_tree_ad_insert(chunks_ad, node);
+ arena_chunk_cache_maybe_insert(arena, node, cache);
+ node = NULL;
+ }
+ if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) {
+ malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
+ chunk_record(tsdn, arena, chunk_hooks, chunks_szsnad, chunks_ad,
+ cache, ret, size, *sn, zeroed, committed);
+ return (NULL);
+ }
+ malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
+
+ assert(dalloc_node || node != NULL);
+ if (dalloc_node && node != NULL)
+ arena_node_dalloc(tsdn, arena, node);
+ if (*zero) {
+ if (!zeroed)
+ memset(ret, 0, size);
+ else if (config_debug) {
+ size_t i;
+ size_t *p = (size_t *)(uintptr_t)ret;
+
+ for (i = 0; i < size / sizeof(size_t); i++)
+ assert(p[i] == 0);
+ }
+ if (config_valgrind)
+ JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, size);
+ }
+ return (ret);
+}
+
+/*
+ * If the caller specifies (!*zero), it is still possible to receive zeroed
+ * memory, in which case *zero is toggled to true. arena_chunk_alloc() takes
+ * advantage of this to avoid demanding zeroed chunks, but taking advantage of
+ * them if they are returned.
+ */
+static void *
+chunk_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
+ size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec)
+{
+ void *ret;
+
+ assert(size != 0);
+ assert((size & chunksize_mask) == 0);
+ assert(alignment != 0);
+ assert((alignment & chunksize_mask) == 0);
+
+ /* "primary" dss. */
+ if (have_dss && dss_prec == dss_prec_primary && (ret =
+ chunk_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
+ commit)) != NULL)
+ return (ret);
+ /* mmap. */
+ if ((ret = chunk_alloc_mmap(new_addr, size, alignment, zero, commit)) !=
+ NULL)
+ return (ret);
+ /* "secondary" dss. */
+ if (have_dss && dss_prec == dss_prec_secondary && (ret =
+ chunk_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
+ commit)) != NULL)
+ return (ret);
+
+ /* All strategies for allocation failed. */
+ return (NULL);
+}
+
+void *
+chunk_alloc_base(size_t size)
+{
+ void *ret;
+ bool zero, commit;
+
+ /*
+ * Directly call chunk_alloc_mmap() rather than chunk_alloc_core()
+ * because it's critical that chunk_alloc_base() return untouched
+ * demand-zeroed virtual memory.
+ */
+ zero = true;
+ commit = true;
+ ret = chunk_alloc_mmap(NULL, size, chunksize, &zero, &commit);
+ if (ret == NULL)
+ return (NULL);
+ if (config_valgrind)
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
+
+ return (ret);
+}
+
+void *
+chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
+ void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
+ bool *commit, bool dalloc_node)
+{
+ void *ret;
+
+ assert(size != 0);
+ assert((size & chunksize_mask) == 0);
+ assert(alignment != 0);
+ assert((alignment & chunksize_mask) == 0);
+
+ ret = chunk_recycle(tsdn, arena, chunk_hooks,
+ &arena->chunks_szsnad_cached, &arena->chunks_ad_cached, true,
+ new_addr, size, alignment, sn, zero, commit, dalloc_node);
+ if (ret == NULL)
+ return (NULL);
+ if (config_valgrind)
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
+ return (ret);
+}
+
+static arena_t *
+chunk_arena_get(tsdn_t *tsdn, unsigned arena_ind)
+{
+ arena_t *arena;
+
+ arena = arena_get(tsdn, arena_ind, false);
+ /*
+ * The arena we're allocating on behalf of must have been initialized
+ * already.
+ */
+ assert(arena != NULL);
+ return (arena);
+}
+
+static void *
+chunk_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
+ size_t size, size_t alignment, bool *zero, bool *commit)
+{
+ void *ret;
+
+ ret = chunk_alloc_core(tsdn, arena, new_addr, size, alignment, zero,
+ commit, arena->dss_prec);
+ if (ret == NULL)
+ return (NULL);
+ if (config_valgrind)
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
+
+ return (ret);
+}
+
+static void *
+chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
+ bool *commit, unsigned arena_ind)
+{
+ tsdn_t *tsdn;
+ arena_t *arena;
+
+ tsdn = tsdn_fetch();
+ arena = chunk_arena_get(tsdn, arena_ind);
+
+ return (chunk_alloc_default_impl(tsdn, arena, new_addr, size, alignment,
+ zero, commit));
+}
+
+static void *
+chunk_alloc_retained(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
+ void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
+ bool *commit)
+{
+ void *ret;
+
+ assert(size != 0);
+ assert((size & chunksize_mask) == 0);
+ assert(alignment != 0);
+ assert((alignment & chunksize_mask) == 0);
+
+ ret = chunk_recycle(tsdn, arena, chunk_hooks,
+ &arena->chunks_szsnad_retained, &arena->chunks_ad_retained, false,
+ new_addr, size, alignment, sn, zero, commit, true);
+
+ if (config_stats && ret != NULL)
+ arena->stats.retained -= size;
+
+ return (ret);
+}
+
+void *
+chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
+ void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
+ bool *commit)
+{
+ void *ret;
+
+ chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
+
+ ret = chunk_alloc_retained(tsdn, arena, chunk_hooks, new_addr, size,
+ alignment, sn, zero, commit);
+ if (ret == NULL) {
+ if (chunk_hooks->alloc == chunk_alloc_default) {
+ /* Call directly to propagate tsdn. */
+ ret = chunk_alloc_default_impl(tsdn, arena, new_addr,
+ size, alignment, zero, commit);
+ } else {
+ ret = chunk_hooks->alloc(new_addr, size, alignment,
+ zero, commit, arena->ind);
+ }
+
+ if (ret == NULL)
+ return (NULL);
+
+ *sn = arena_extent_sn_next(arena);
+
+ if (config_valgrind && chunk_hooks->alloc !=
+ chunk_alloc_default)
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize);
+ }
+
+ return (ret);
+}
+
+static void
+chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
+ extent_tree_t *chunks_szsnad, extent_tree_t *chunks_ad, bool cache,
+ void *chunk, size_t size, size_t sn, bool zeroed, bool committed)
+{
+ bool unzeroed;
+ extent_node_t *node, *prev;
+ extent_node_t key;
+
+ assert(!cache || !zeroed);
+ unzeroed = cache || !zeroed;
+ JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
+
+ malloc_mutex_lock(tsdn, &arena->chunks_mtx);
+ chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
+ extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0, 0,
+ false, false);
+ node = extent_tree_ad_nsearch(chunks_ad, &key);
+ /* Try to coalesce forward. */
+ if (node != NULL && extent_node_addr_get(node) ==
+ extent_node_addr_get(&key) && extent_node_committed_get(node) ==
+ committed && !chunk_hooks->merge(chunk, size,
+ extent_node_addr_get(node), extent_node_size_get(node), false,
+ arena->ind)) {
+ /*
+ * Coalesce chunk with the following address range. This does
+ * not change the position within chunks_ad, so only
+ * remove/insert from/into chunks_szsnad.
+ */
+ extent_tree_szsnad_remove(chunks_szsnad, node);
+ arena_chunk_cache_maybe_remove(arena, node, cache);
+ extent_node_addr_set(node, chunk);
+ extent_node_size_set(node, size + extent_node_size_get(node));
+ if (sn < extent_node_sn_get(node))
+ extent_node_sn_set(node, sn);
+ extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
+ !unzeroed);
+ extent_tree_szsnad_insert(chunks_szsnad, node);
+ arena_chunk_cache_maybe_insert(arena, node, cache);
+ } else {
+ /* Coalescing forward failed, so insert a new node. */
+ node = arena_node_alloc(tsdn, arena);
+ if (node == NULL) {
+ /*
+ * Node allocation failed, which is an exceedingly
+ * unlikely failure. Leak chunk after making sure its
+ * pages have already been purged, so that this is only
+ * a virtual memory leak.
+ */
+ if (cache) {
+ chunk_purge_wrapper(tsdn, arena, chunk_hooks,
+ chunk, size, 0, size);
+ }
+ goto label_return;
+ }
+ extent_node_init(node, arena, chunk, size, sn, !unzeroed,
+ committed);
+ extent_tree_ad_insert(chunks_ad, node);
+ extent_tree_szsnad_insert(chunks_szsnad, node);
+ arena_chunk_cache_maybe_insert(arena, node, cache);
+ }
+
+ /* Try to coalesce backward. */
+ prev = extent_tree_ad_prev(chunks_ad, node);
+ if (prev != NULL && (void *)((uintptr_t)extent_node_addr_get(prev) +
+ extent_node_size_get(prev)) == chunk &&
+ extent_node_committed_get(prev) == committed &&
+ !chunk_hooks->merge(extent_node_addr_get(prev),
+ extent_node_size_get(prev), chunk, size, false, arena->ind)) {
+ /*
+ * Coalesce chunk with the previous address range. This does
+ * not change the position within chunks_ad, so only
+ * remove/insert node from/into chunks_szsnad.
+ */
+ extent_tree_szsnad_remove(chunks_szsnad, prev);
+ extent_tree_ad_remove(chunks_ad, prev);
+ arena_chunk_cache_maybe_remove(arena, prev, cache);
+ extent_tree_szsnad_remove(chunks_szsnad, node);
+ arena_chunk_cache_maybe_remove(arena, node, cache);
+ extent_node_addr_set(node, extent_node_addr_get(prev));
+ extent_node_size_set(node, extent_node_size_get(prev) +
+ extent_node_size_get(node));
+ if (extent_node_sn_get(prev) < extent_node_sn_get(node))
+ extent_node_sn_set(node, extent_node_sn_get(prev));
+ extent_node_zeroed_set(node, extent_node_zeroed_get(prev) &&
+ extent_node_zeroed_get(node));
+ extent_tree_szsnad_insert(chunks_szsnad, node);
+ arena_chunk_cache_maybe_insert(arena, node, cache);
+
+ arena_node_dalloc(tsdn, arena, prev);
+ }
+
+label_return:
+ malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
+}
+
+void
+chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
+ void *chunk, size_t size, size_t sn, bool committed)
+{
+
+ assert(chunk != NULL);
+ assert(CHUNK_ADDR2BASE(chunk) == chunk);
+ assert(size != 0);
+ assert((size & chunksize_mask) == 0);
+
+ chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szsnad_cached,
+ &arena->chunks_ad_cached, true, chunk, size, sn, false,
+ committed);
+ arena_maybe_purge(tsdn, arena);
+}
+
+static bool
+chunk_dalloc_default_impl(void *chunk, size_t size)
+{
+
+ if (!have_dss || !chunk_in_dss(chunk))
+ return (chunk_dalloc_mmap(chunk, size));
+ return (true);
+}
+
+static bool
+chunk_dalloc_default(void *chunk, size_t size, bool committed,
+ unsigned arena_ind)
+{
+
+ return (chunk_dalloc_default_impl(chunk, size));
+}
+
+void
+chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
+ void *chunk, size_t size, size_t sn, bool zeroed, bool committed)
+{
+ bool err;
+
+ assert(chunk != NULL);
+ assert(CHUNK_ADDR2BASE(chunk) == chunk);
+ assert(size != 0);
+ assert((size & chunksize_mask) == 0);
+
+ chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
+ /* Try to deallocate. */
+ if (chunk_hooks->dalloc == chunk_dalloc_default) {
+ /* Call directly to propagate tsdn. */
+ err = chunk_dalloc_default_impl(chunk, size);
+ } else
+ err = chunk_hooks->dalloc(chunk, size, committed, arena->ind);
+
+ if (!err)
+ return;
+ /* Try to decommit; purge if that fails. */
+ if (committed) {
+ committed = chunk_hooks->decommit(chunk, size, 0, size,
+ arena->ind);
+ }
+ zeroed = !committed || !chunk_hooks->purge(chunk, size, 0, size,
+ arena->ind);
+ chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szsnad_retained,
+ &arena->chunks_ad_retained, false, chunk, size, sn, zeroed,
+ committed);
+
+ if (config_stats)
+ arena->stats.retained += size;
+}
+
+static bool
+chunk_commit_default(void *chunk, size_t size, size_t offset, size_t length,
+ unsigned arena_ind)
+{
+
+ return (pages_commit((void *)((uintptr_t)chunk + (uintptr_t)offset),
+ length));
+}
+
+static bool
+chunk_decommit_default(void *chunk, size_t size, size_t offset, size_t length,
+ unsigned arena_ind)
+{
+
+ return (pages_decommit((void *)((uintptr_t)chunk + (uintptr_t)offset),
+ length));
+}
+
+static bool
+chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length,
+ unsigned arena_ind)
+{
+
+ assert(chunk != NULL);
+ assert(CHUNK_ADDR2BASE(chunk) == chunk);
+ assert((offset & PAGE_MASK) == 0);
+ assert(length != 0);
+ assert((length & PAGE_MASK) == 0);
+
+ return (pages_purge((void *)((uintptr_t)chunk + (uintptr_t)offset),
+ length));
+}
+
+bool
+chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
+ void *chunk, size_t size, size_t offset, size_t length)
+{
+
+ chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
+ return (chunk_hooks->purge(chunk, size, offset, length, arena->ind));
+}
+
+static bool
+chunk_split_default(void *chunk, size_t size, size_t size_a, size_t size_b,
+ bool committed, unsigned arena_ind)
+{
+
+ if (!maps_coalesce)
+ return (true);
+ return (false);
+}
+
+static bool
+chunk_merge_default_impl(void *chunk_a, void *chunk_b)
+{
+
+ if (!maps_coalesce)
+ return (true);
+ if (have_dss && !chunk_dss_mergeable(chunk_a, chunk_b))
+ return (true);
+
+ return (false);
+}
+
+static bool
+chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
+ bool committed, unsigned arena_ind)
+{
+
+ return (chunk_merge_default_impl(chunk_a, chunk_b));
+}
+
+static rtree_node_elm_t *
+chunks_rtree_node_alloc(size_t nelms)
+{
+
+ return ((rtree_node_elm_t *)base_alloc(TSDN_NULL, nelms *
+ sizeof(rtree_node_elm_t)));
+}
+
+bool
+chunk_boot(void)
+{
+#ifdef _WIN32
+ SYSTEM_INFO info;
+ GetSystemInfo(&info);
+
+ /*
+ * Verify actual page size is equal to or an integral multiple of
+ * configured page size.
+ */
+ if (info.dwPageSize & ((1U << LG_PAGE) - 1))
+ return (true);
+
+ /*
+ * Configure chunksize (if not set) to match granularity (usually 64K),
+ * so pages_map will always take fast path.
+ */
+ if (!opt_lg_chunk) {
+ opt_lg_chunk = ffs_u((unsigned)info.dwAllocationGranularity)
+ - 1;
+ }
+#else
+ if (!opt_lg_chunk)
+ opt_lg_chunk = LG_CHUNK_DEFAULT;
+#endif
+
+ /* Set variables according to the value of opt_lg_chunk. */
+ chunksize = (ZU(1) << opt_lg_chunk);
+ assert(chunksize >= PAGE);
+ chunksize_mask = chunksize - 1;
+ chunk_npages = (chunksize >> LG_PAGE);
+
+ if (have_dss)
+ chunk_dss_boot();
+ if (rtree_new(&chunks_rtree, (unsigned)((ZU(1) << (LG_SIZEOF_PTR+3)) -
+ opt_lg_chunk), chunks_rtree_node_alloc, NULL))
+ return (true);
+
+ return (false);
+}
diff --git a/deps/jemalloc/src/chunk_dss.c b/deps/jemalloc/src/chunk_dss.c
new file mode 100644
index 0000000..ee3f838
--- /dev/null
+++ b/deps/jemalloc/src/chunk_dss.c
@@ -0,0 +1,238 @@
+#define JEMALLOC_CHUNK_DSS_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+/******************************************************************************/
+/* Data. */
+
+const char *dss_prec_names[] = {
+ "disabled",
+ "primary",
+ "secondary",
+ "N/A"
+};
+
+/*
+ * Current dss precedence default, used when creating new arenas. NB: This is
+ * stored as unsigned rather than dss_prec_t because in principle there's no
+ * guarantee that sizeof(dss_prec_t) is the same as sizeof(unsigned), and we use
+ * atomic operations to synchronize the setting.
+ */
+static unsigned dss_prec_default = (unsigned)DSS_PREC_DEFAULT;
+
+/* Base address of the DSS. */
+static void *dss_base;
+/* Atomic boolean indicating whether the DSS is exhausted. */
+static unsigned dss_exhausted;
+/* Atomic current upper limit on DSS addresses. */
+static void *dss_max;
+
+/******************************************************************************/
+
+static void *
+chunk_dss_sbrk(intptr_t increment)
+{
+
+#ifdef JEMALLOC_DSS
+ return (sbrk(increment));
+#else
+ not_implemented();
+ return (NULL);
+#endif
+}
+
+dss_prec_t
+chunk_dss_prec_get(void)
+{
+ dss_prec_t ret;
+
+ if (!have_dss)
+ return (dss_prec_disabled);
+ ret = (dss_prec_t)atomic_read_u(&dss_prec_default);
+ return (ret);
+}
+
+bool
+chunk_dss_prec_set(dss_prec_t dss_prec)
+{
+
+ if (!have_dss)
+ return (dss_prec != dss_prec_disabled);
+ atomic_write_u(&dss_prec_default, (unsigned)dss_prec);
+ return (false);
+}
+
+static void *
+chunk_dss_max_update(void *new_addr)
+{
+ void *max_cur;
+ spin_t spinner;
+
+ /*
+ * Get the current end of the DSS as max_cur and assure that dss_max is
+ * up to date.
+ */
+ spin_init(&spinner);
+ while (true) {
+ void *max_prev = atomic_read_p(&dss_max);
+
+ max_cur = chunk_dss_sbrk(0);
+ if ((uintptr_t)max_prev > (uintptr_t)max_cur) {
+ /*
+ * Another thread optimistically updated dss_max. Wait
+ * for it to finish.
+ */
+ spin_adaptive(&spinner);
+ continue;
+ }
+ if (!atomic_cas_p(&dss_max, max_prev, max_cur))
+ break;
+ }
+ /* Fixed new_addr can only be supported if it is at the edge of DSS. */
+ if (new_addr != NULL && max_cur != new_addr)
+ return (NULL);
+
+ return (max_cur);
+}
+
+void *
+chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
+ size_t alignment, bool *zero, bool *commit)
+{
+ cassert(have_dss);
+ assert(size > 0 && (size & chunksize_mask) == 0);
+ assert(alignment > 0 && (alignment & chunksize_mask) == 0);
+
+ /*
+ * sbrk() uses a signed increment argument, so take care not to
+ * interpret a huge allocation request as a negative increment.
+ */
+ if ((intptr_t)size < 0)
+ return (NULL);
+
+ if (!atomic_read_u(&dss_exhausted)) {
+ /*
+ * The loop is necessary to recover from races with other
+ * threads that are using the DSS for something other than
+ * malloc.
+ */
+ while (true) {
+ void *ret, *cpad, *max_cur, *dss_next, *dss_prev;
+ size_t gap_size, cpad_size;
+ intptr_t incr;
+
+ max_cur = chunk_dss_max_update(new_addr);
+ if (max_cur == NULL)
+ goto label_oom;
+
+ /*
+ * Calculate how much padding is necessary to
+ * chunk-align the end of the DSS.
+ */
+ gap_size = (chunksize - CHUNK_ADDR2OFFSET(dss_max)) &
+ chunksize_mask;
+ /*
+ * Compute how much chunk-aligned pad space (if any) is
+ * necessary to satisfy alignment. This space can be
+ * recycled for later use.
+ */
+ cpad = (void *)((uintptr_t)dss_max + gap_size);
+ ret = (void *)ALIGNMENT_CEILING((uintptr_t)dss_max,
+ alignment);
+ cpad_size = (uintptr_t)ret - (uintptr_t)cpad;
+ dss_next = (void *)((uintptr_t)ret + size);
+ if ((uintptr_t)ret < (uintptr_t)dss_max ||
+ (uintptr_t)dss_next < (uintptr_t)dss_max)
+ goto label_oom; /* Wrap-around. */
+ incr = gap_size + cpad_size + size;
+
+ /*
+ * Optimistically update dss_max, and roll back below if
+ * sbrk() fails. No other thread will try to extend the
+ * DSS while dss_max is greater than the current DSS
+ * max reported by sbrk(0).
+ */
+ if (atomic_cas_p(&dss_max, max_cur, dss_next))
+ continue;
+
+ /* Try to allocate. */
+ dss_prev = chunk_dss_sbrk(incr);
+ if (dss_prev == max_cur) {
+ /* Success. */
+ if (cpad_size != 0) {
+ chunk_hooks_t chunk_hooks =
+ CHUNK_HOOKS_INITIALIZER;
+ chunk_dalloc_wrapper(tsdn, arena,
+ &chunk_hooks, cpad, cpad_size,
+ arena_extent_sn_next(arena), false,
+ true);
+ }
+ if (*zero) {
+ JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
+ ret, size);
+ memset(ret, 0, size);
+ }
+ if (!*commit)
+ *commit = pages_decommit(ret, size);
+ return (ret);
+ }
+
+ /*
+ * Failure, whether due to OOM or a race with a raw
+ * sbrk() call from outside the allocator. Try to roll
+ * back optimistic dss_max update; if rollback fails,
+ * it's due to another caller of this function having
+ * succeeded since this invocation started, in which
+ * case rollback is not necessary.
+ */
+ atomic_cas_p(&dss_max, dss_next, max_cur);
+ if (dss_prev == (void *)-1) {
+ /* OOM. */
+ atomic_write_u(&dss_exhausted, (unsigned)true);
+ goto label_oom;
+ }
+ }
+ }
+label_oom:
+ return (NULL);
+}
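
A worked example of the gap/cpad arithmetic in chunk_alloc_dss() above: gap_size chunk-aligns the current break, and cpad is chunk-aligned padding (recycled later) that is only nonzero when the requested alignment is stricter than the chunk size. The break address, chunk size, and alignment below are made-up values for illustration:

#include <stdio.h>
#include <stdint.h>

#define CHUNKSIZE	((uintptr_t)1 << 22)		/* assume 4 MiB chunks */
#define CHUNK_MASK	(CHUNKSIZE - 1)
#define ALIGN_CEIL(a, align)	(((a) + ((align) - 1)) & ~((align) - 1))

int
main(void)
{
	uintptr_t dss_max = 0x00503000;		/* hypothetical current break */
	uintptr_t size = CHUNKSIZE;
	uintptr_t alignment = 4 * CHUNKSIZE;	/* stricter than chunksize */

	/* Padding needed to chunk-align the end of the DSS. */
	uintptr_t gap_size = (CHUNKSIZE - (dss_max & CHUNK_MASK)) & CHUNK_MASK;
	uintptr_t cpad = dss_max + gap_size;
	/* Aligned return address, and the recyclable pad before it. */
	uintptr_t ret = ALIGN_CEIL(dss_max, alignment);
	uintptr_t cpad_size = ret - cpad;
	uintptr_t incr = gap_size + cpad_size + size;

	printf("gap_size=%#lx cpad=%#lx ret=%#lx cpad_size=%#lx incr=%#lx\n",
	    (unsigned long)gap_size, (unsigned long)cpad, (unsigned long)ret,
	    (unsigned long)cpad_size, (unsigned long)incr);
	return (0);
}

With alignment equal to chunksize, ret coincides with cpad and cpad_size is zero; the pad only appears, and gets handed back via chunk_dalloc_wrapper(), for stricter alignments like the one above.
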
+
+static bool
+chunk_in_dss_helper(void *chunk, void *max)
+{
+
+ return ((uintptr_t)chunk >= (uintptr_t)dss_base && (uintptr_t)chunk <
+ (uintptr_t)max);
+}
+
+bool
+chunk_in_dss(void *chunk)
+{
+
+ cassert(have_dss);
+
+ return (chunk_in_dss_helper(chunk, atomic_read_p(&dss_max)));
+}
+
+bool
+chunk_dss_mergeable(void *chunk_a, void *chunk_b)
+{
+ void *max;
+
+ cassert(have_dss);
+
+ max = atomic_read_p(&dss_max);
+ return (chunk_in_dss_helper(chunk_a, max) ==
+ chunk_in_dss_helper(chunk_b, max));
+}
+
+void
+chunk_dss_boot(void)
+{
+
+ cassert(have_dss);
+
+ dss_base = chunk_dss_sbrk(0);
+ dss_exhausted = (unsigned)(dss_base == (void *)-1);
+ dss_max = dss_base;
+}
+
+/******************************************************************************/
diff --git a/deps/jemalloc/src/chunk_mmap.c b/deps/jemalloc/src/chunk_mmap.c
new file mode 100644
index 0000000..73fc497
--- /dev/null
+++ b/deps/jemalloc/src/chunk_mmap.c
@@ -0,0 +1,78 @@
+#define JEMALLOC_CHUNK_MMAP_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+
+static void *
+chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, bool *commit)
+{
+ void *ret;
+ size_t alloc_size;
+
+ alloc_size = size + alignment - PAGE;
+ /* Beware size_t wrap-around. */
+ if (alloc_size < size)
+ return (NULL);
+ do {
+ void *pages;
+ size_t leadsize;
+ pages = pages_map(NULL, alloc_size, commit);
+ if (pages == NULL)
+ return (NULL);
+ leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
+ (uintptr_t)pages;
+ ret = pages_trim(pages, alloc_size, leadsize, size, commit);
+ } while (ret == NULL);
+
+ assert(ret != NULL);
+ *zero = true;
+ return (ret);
+}
+
+void *
+chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
+ bool *commit)
+{
+ void *ret;
+ size_t offset;
+
+ /*
+ * Ideally, there would be a way to specify alignment to mmap() (like
+ * NetBSD has), but in the absence of such a feature, we have to work
+ * hard to efficiently create aligned mappings. The reliable, but
+ * slow method is to create a mapping that is over-sized, then trim the
+ * excess. However, that always results in one or two calls to
+ * pages_unmap().
+ *
+ * Optimistically try mapping precisely the right amount before falling
+ * back to the slow method, with the expectation that the optimistic
+ * approach works most of the time.
+ */
+
+ assert(alignment != 0);
+ assert((alignment & chunksize_mask) == 0);
+
+ ret = pages_map(new_addr, size, commit);
+ if (ret == NULL || ret == new_addr)
+ return (ret);
+ assert(new_addr == NULL);
+ offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
+ if (offset != 0) {
+ pages_unmap(ret, size);
+ return (chunk_alloc_mmap_slow(size, alignment, zero, commit));
+ }
+
+ assert(ret != NULL);
+ *zero = true;
+ return (ret);
+}
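
To make the optimistic-versus-slow trade-off above concrete, the sketch below computes the quantities both paths look at: the alignment offset that decides whether the exact-size mapping can be kept, and the over-allocation plus lead trim used by chunk_alloc_mmap_slow(). All addresses and sizes are made up for illustration:

#include <stdio.h>
#include <stdint.h>

#define PAGE		4096
#define ALIGN_CEIL(a, align)	(((a) + ((align) - 1)) & ~((align) - 1))

int
main(void)
{
	/* Hypothetical 2 MiB-aligned request and an unaligned address
	 * returned by the optimistic (exact-size) mapping. */
	uintptr_t alignment = (uintptr_t)1 << 21;
	size_t size = (size_t)1 << 21;
	uintptr_t pages = 0x7f3a12345000;	/* pretend mmap() result */

	/* Fast path keeps the mapping only if it is already aligned... */
	uintptr_t offset = pages & (alignment - 1);
	/* ...otherwise the slow path over-allocates and trims the lead. */
	size_t alloc_size = size + alignment - PAGE;
	uintptr_t leadsize = ALIGN_CEIL(pages, alignment) - pages;

	printf("offset=%#lx alloc_size=%#zx leadsize=%#lx\n",
	    (unsigned long)offset, alloc_size, (unsigned long)leadsize);
	return (0);
}
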
+
+bool
+chunk_dalloc_mmap(void *chunk, size_t size)
+{
+
+ if (config_munmap)
+ pages_unmap(chunk, size);
+
+ return (!config_munmap);
+}
diff --git a/deps/jemalloc/src/ckh.c b/deps/jemalloc/src/ckh.c
new file mode 100644
index 0000000..159bd8a
--- /dev/null
+++ b/deps/jemalloc/src/ckh.c
@@ -0,0 +1,569 @@
+/*
+ *******************************************************************************
+ * Implementation of (2^1+,2) cuckoo hashing, where 2^1+ indicates that each
+ * hash bucket contains 2^n cells, for n >= 1, and 2 indicates that two hash
+ * functions are employed. The original cuckoo hashing algorithm was described
+ * in:
+ *
+ * Pagh, R., F.F. Rodler (2004) Cuckoo Hashing. Journal of Algorithms
+ * 51(2):122-144.
+ *
+ * Generalization of cuckoo hashing was discussed in:
+ *
+ * Erlingsson, U., M. Manasse, F. McSherry (2006) A cool and practical
+ * alternative to traditional hash tables. In Proceedings of the 7th
+ * Workshop on Distributed Data and Structures (WDAS'06), Santa Clara, CA,
+ * January 2006.
+ *
+ * This implementation uses precisely two hash functions because that is the
+ * fewest that can work, and supporting multiple hashes is an implementation
+ * burden. Here is a reproduction of Figure 1 from Erlingsson et al. (2006)
+ * that shows approximate expected maximum load factors for various
+ * configurations:
+ *
+ * | #cells/bucket |
+ * #hashes | 1 | 2 | 4 | 8 |
+ * --------+-------+-------+-------+-------+
+ * 1 | 0.006 | 0.006 | 0.03 | 0.12 |
+ * 2 | 0.49 | 0.86 |>0.93< |>0.96< |
+ * 3 | 0.91 | 0.97 | 0.98 | 0.999 |
+ * 4 | 0.97 | 0.99 | 0.999 | |
+ *
+ * The number of cells per bucket is chosen such that a bucket fits in one cache
+ * line. So, on 32- and 64-bit systems, we use (8,2) and (4,2) cuckoo hashing,
+ * respectively.
+ *
+ ******************************************************************************/
+#define JEMALLOC_CKH_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+/* Function prototypes for non-inline static functions. */
+
+static bool ckh_grow(tsd_t *tsd, ckh_t *ckh);
+static void ckh_shrink(tsd_t *tsd, ckh_t *ckh);
+
+/******************************************************************************/
+
+/*
+ * Search bucket for key and return the cell number if found; SIZE_T_MAX
+ * otherwise.
+ */
+JEMALLOC_INLINE_C size_t
+ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key)
+{
+ ckhc_t *cell;
+ unsigned i;
+
+ for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
+ cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
+ if (cell->key != NULL && ckh->keycomp(key, cell->key))
+ return ((bucket << LG_CKH_BUCKET_CELLS) + i);
+ }
+
+ return (SIZE_T_MAX);
+}
+
+/*
+ * Search table for key and return cell number if found; SIZE_T_MAX otherwise.
+ */
+JEMALLOC_INLINE_C size_t
+ckh_isearch(ckh_t *ckh, const void *key)
+{
+ size_t hashes[2], bucket, cell;
+
+ assert(ckh != NULL);
+
+ ckh->hash(key, hashes);
+
+ /* Search primary bucket. */
+ bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
+ cell = ckh_bucket_search(ckh, bucket, key);
+ if (cell != SIZE_T_MAX)
+ return (cell);
+
+ /* Search secondary bucket. */
+ bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
+ cell = ckh_bucket_search(ckh, bucket, key);
+ return (cell);
+}
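
Per the header comment and ckh_isearch() above, every key maps to exactly two candidate buckets, one per hash, and both are probed on lookup. A trivial standalone sketch of that bucket selection (the hash values and table size are made up):

#include <stdio.h>
#include <stddef.h>

int
main(void)
{
	size_t hashes[2] = {0x7f4a7c15, 0x27d4eb4f};	/* pretend hash output */
	unsigned lg_curbuckets = 6;			/* 64 buckets */
	size_t mask = ((size_t)1 << lg_curbuckets) - 1;

	printf("primary bucket:   %zu\n", hashes[0] & mask);
	printf("secondary bucket: %zu\n", hashes[1] & mask);
	return (0);
}
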
+
+JEMALLOC_INLINE_C bool
+ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
+ const void *data)
+{
+ ckhc_t *cell;
+ unsigned offset, i;
+
+ /*
+ * Cycle through the cells in the bucket, starting at a random position.
+ * The randomness avoids worst-case search overhead as buckets fill up.
+ */
+ offset = (unsigned)prng_lg_range_u64(&ckh->prng_state,
+ LG_CKH_BUCKET_CELLS);
+ for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
+ cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) +
+ ((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))];
+ if (cell->key == NULL) {
+ cell->key = key;
+ cell->data = data;
+ ckh->count++;
+ return (false);
+ }
+ }
+
+ return (true);
+}
+
+/*
+ * No space is available in bucket. Randomly evict an item, then try to find an
+ * alternate location for that item. Iteratively repeat this
+ * eviction/relocation procedure until either success or detection of an
+ * eviction/relocation bucket cycle.
+ */
+JEMALLOC_INLINE_C bool
+ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
+ void const **argdata)
+{
+ const void *key, *data, *tkey, *tdata;
+ ckhc_t *cell;
+ size_t hashes[2], bucket, tbucket;
+ unsigned i;
+
+ bucket = argbucket;
+ key = *argkey;
+ data = *argdata;
+ while (true) {
+ /*
+ * Choose a random item within the bucket to evict. This is
+ * critical to correct function, because without (eventually)
+ * evicting all items within a bucket during iteration, it
+ * would be possible to get stuck in an infinite loop if there
+ * were an item for which both hashes indicated the same
+ * bucket.
+ */
+ i = (unsigned)prng_lg_range_u64(&ckh->prng_state,
+ LG_CKH_BUCKET_CELLS);
+ cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
+ assert(cell->key != NULL);
+
+ /* Swap cell->{key,data} and {key,data} (evict). */
+ tkey = cell->key; tdata = cell->data;
+ cell->key = key; cell->data = data;
+ key = tkey; data = tdata;
+
+#ifdef CKH_COUNT
+ ckh->nrelocs++;
+#endif
+
+ /* Find the alternate bucket for the evicted item. */
+ ckh->hash(key, hashes);
+ tbucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
+ if (tbucket == bucket) {
+ tbucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets)
+ - 1);
+ /*
+ * It may be that (tbucket == bucket) still, if the
+ * item's hashes both indicate this bucket. However,
+ * we are guaranteed to eventually escape this bucket
+ * during iteration, assuming pseudo-random item
+ * selection (true randomness would make infinite
+ * looping a remote possibility). The reason we can
+ * never get trapped forever is that there are two
+ * cases:
+ *
+ * 1) This bucket == argbucket, so we will quickly
+ * detect an eviction cycle and terminate.
+ * 2) An item was evicted to this bucket from another,
+ * which means that at least one item in this bucket
+ * has hashes that indicate distinct buckets.
+ */
+ }
+ /* Check for a cycle. */
+ if (tbucket == argbucket) {
+ *argkey = key;
+ *argdata = data;
+ return (true);
+ }
+
+ bucket = tbucket;
+ if (!ckh_try_bucket_insert(ckh, bucket, key, data))
+ return (false);
+ }
+}
+
+JEMALLOC_INLINE_C bool
+ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata)
+{
+ size_t hashes[2], bucket;
+ const void *key = *argkey;
+ const void *data = *argdata;
+
+ ckh->hash(key, hashes);
+
+ /* Try to insert in primary bucket. */
+ bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
+ if (!ckh_try_bucket_insert(ckh, bucket, key, data))
+ return (false);
+
+ /* Try to insert in secondary bucket. */
+ bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
+ if (!ckh_try_bucket_insert(ckh, bucket, key, data))
+ return (false);
+
+ /*
+ * Try to find a place for this item via iterative eviction/relocation.
+ */
+ return (ckh_evict_reloc_insert(ckh, bucket, argkey, argdata));
+}
+
+/*
+ * Try to rebuild the hash table from scratch by inserting all items from the
+ * old table into the new.
+ */
+JEMALLOC_INLINE_C bool
+ckh_rebuild(ckh_t *ckh, ckhc_t *aTab)
+{
+ size_t count, i, nins;
+ const void *key, *data;
+
+ count = ckh->count;
+ ckh->count = 0;
+ for (i = nins = 0; nins < count; i++) {
+ if (aTab[i].key != NULL) {
+ key = aTab[i].key;
+ data = aTab[i].data;
+ if (ckh_try_insert(ckh, &key, &data)) {
+ ckh->count = count;
+ return (true);
+ }
+ nins++;
+ }
+ }
+
+ return (false);
+}
+
+static bool
+ckh_grow(tsd_t *tsd, ckh_t *ckh)
+{
+ bool ret;
+ ckhc_t *tab, *ttab;
+ unsigned lg_prevbuckets, lg_curcells;
+
+#ifdef CKH_COUNT
+ ckh->ngrows++;
+#endif
+
+ /*
+ * It is possible (though unlikely, given well behaved hashes) that the
+ * table will have to be doubled more than once in order to create a
+ * usable table.
+ */
+ lg_prevbuckets = ckh->lg_curbuckets;
+ lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS;
+ while (true) {
+ size_t usize;
+
+ lg_curcells++;
+ usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
+ if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) {
+ ret = true;
+ goto label_return;
+ }
+ tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE,
+ true, NULL, true, arena_ichoose(tsd, NULL));
+ if (tab == NULL) {
+ ret = true;
+ goto label_return;
+ }
+ /* Swap in new table. */
+ ttab = ckh->tab;
+ ckh->tab = tab;
+ tab = ttab;
+ ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
+
+ if (!ckh_rebuild(ckh, tab)) {
+ idalloctm(tsd_tsdn(tsd), tab, NULL, true, true);
+ break;
+ }
+
+ /* Rebuilding failed, so back out partially rebuilt table. */
+ idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, true, true);
+ ckh->tab = tab;
+ ckh->lg_curbuckets = lg_prevbuckets;
+ }
+
+ ret = false;
+label_return:
+ return (ret);
+}
+
+static void
+ckh_shrink(tsd_t *tsd, ckh_t *ckh)
+{
+ ckhc_t *tab, *ttab;
+ size_t usize;
+ unsigned lg_prevbuckets, lg_curcells;
+
+ /*
+ * It is possible (though unlikely, given well behaved hashes) that the
+ * table rebuild will fail.
+ */
+ lg_prevbuckets = ckh->lg_curbuckets;
+ lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1;
+ usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
+ if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
+ return;
+ tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, NULL,
+ true, arena_ichoose(tsd, NULL));
+ if (tab == NULL) {
+ /*
+ * An OOM error isn't worth propagating, since it doesn't
+ * prevent this or future operations from proceeding.
+ */
+ return;
+ }
+ /* Swap in new table. */
+ ttab = ckh->tab;
+ ckh->tab = tab;
+ tab = ttab;
+ ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
+
+ if (!ckh_rebuild(ckh, tab)) {
+ idalloctm(tsd_tsdn(tsd), tab, NULL, true, true);
+#ifdef CKH_COUNT
+ ckh->nshrinks++;
+#endif
+ return;
+ }
+
+ /* Rebuilding failed, so back out partially rebuilt table. */
+ idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, true, true);
+ ckh->tab = tab;
+ ckh->lg_curbuckets = lg_prevbuckets;
+#ifdef CKH_COUNT
+ ckh->nshrinkfails++;
+#endif
+}
+
+bool
+ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
+ ckh_keycomp_t *keycomp)
+{
+ bool ret;
+ size_t mincells, usize;
+ unsigned lg_mincells;
+
+ assert(minitems > 0);
+ assert(hash != NULL);
+ assert(keycomp != NULL);
+
+#ifdef CKH_COUNT
+ ckh->ngrows = 0;
+ ckh->nshrinks = 0;
+ ckh->nshrinkfails = 0;
+ ckh->ninserts = 0;
+ ckh->nrelocs = 0;
+#endif
+ ckh->prng_state = 42; /* Value doesn't really matter. */
+ ckh->count = 0;
+
+ /*
+ * Find the minimum power of 2 that is large enough to fit minitems
+ * entries. We are using (2+,2) cuckoo hashing, which has an expected
+ * maximum load factor of at least ~0.86, so 0.75 is a conservative load
+ * factor that will typically allow mincells items to fit without ever
+ * growing the table.
+ */
+ assert(LG_CKH_BUCKET_CELLS > 0);
+ mincells = ((minitems + (3 - (minitems % 3))) / 3) << 2;
+ for (lg_mincells = LG_CKH_BUCKET_CELLS;
+ (ZU(1) << lg_mincells) < mincells;
+ lg_mincells++)
+ ; /* Do nothing. */
+ ckh->lg_minbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
+ ckh->lg_curbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
+ ckh->hash = hash;
+ ckh->keycomp = keycomp;
+
+ usize = sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
+ if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) {
+ ret = true;
+ goto label_return;
+ }
+ ckh->tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true,
+ NULL, true, arena_ichoose(tsd, NULL));
+ if (ckh->tab == NULL) {
+ ret = true;
+ goto label_return;
+ }
+
+ ret = false;
+label_return:
+ return (ret);
+}
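
The mincells expression in ckh_new() rounds minitems up to a multiple of 3 and then scales by 4/3, so the initial load factor stays below the 0.75 target described in the comment (the subsequent power-of-two rounding of lg_mincells can only lower it further). A small standalone table of that calculation for a few inputs:

#include <stdio.h>
#include <stddef.h>

int
main(void)
{
	size_t minitems;

	for (minitems = 1; minitems <= 12; minitems++) {
		/* Same expression as ckh_new(): ceil to a multiple of 3,
		 * divide by 3, multiply by 4. */
		size_t mincells = ((minitems + (3 - (minitems % 3))) / 3) << 2;
		printf("minitems=%2zu mincells=%2zu load=%.2f\n",
		    minitems, mincells, (double)minitems / mincells);
	}
	return (0);
}
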
+
+void
+ckh_delete(tsd_t *tsd, ckh_t *ckh)
+{
+
+ assert(ckh != NULL);
+
+#ifdef CKH_VERBOSE
+ malloc_printf(
+ "%s(%p): ngrows: %"FMTu64", nshrinks: %"FMTu64","
+ " nshrinkfails: %"FMTu64", ninserts: %"FMTu64","
+ " nrelocs: %"FMTu64"\n", __func__, ckh,
+ (unsigned long long)ckh->ngrows,
+ (unsigned long long)ckh->nshrinks,
+ (unsigned long long)ckh->nshrinkfails,
+ (unsigned long long)ckh->ninserts,
+ (unsigned long long)ckh->nrelocs);
+#endif
+
+ idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, true, true);
+ if (config_debug)
+ memset(ckh, JEMALLOC_FREE_JUNK, sizeof(ckh_t));
+}
+
+size_t
+ckh_count(ckh_t *ckh)
+{
+
+ assert(ckh != NULL);
+
+ return (ckh->count);
+}
+
+bool
+ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data)
+{
+ size_t i, ncells;
+
+ for (i = *tabind, ncells = (ZU(1) << (ckh->lg_curbuckets +
+ LG_CKH_BUCKET_CELLS)); i < ncells; i++) {
+ if (ckh->tab[i].key != NULL) {
+ if (key != NULL)
+ *key = (void *)ckh->tab[i].key;
+ if (data != NULL)
+ *data = (void *)ckh->tab[i].data;
+ *tabind = i + 1;
+ return (false);
+ }
+ }
+
+ return (true);
+}
+
+bool
+ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data)
+{
+ bool ret;
+
+ assert(ckh != NULL);
+ assert(ckh_search(ckh, key, NULL, NULL));
+
+#ifdef CKH_COUNT
+ ckh->ninserts++;
+#endif
+
+ while (ckh_try_insert(ckh, &key, &data)) {
+ if (ckh_grow(tsd, ckh)) {
+ ret = true;
+ goto label_return;
+ }
+ }
+
+ ret = false;
+label_return:
+ return (ret);
+}
+
+bool
+ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
+ void **data)
+{
+ size_t cell;
+
+ assert(ckh != NULL);
+
+ cell = ckh_isearch(ckh, searchkey);
+ if (cell != SIZE_T_MAX) {
+ if (key != NULL)
+ *key = (void *)ckh->tab[cell].key;
+ if (data != NULL)
+ *data = (void *)ckh->tab[cell].data;
+ ckh->tab[cell].key = NULL;
+ ckh->tab[cell].data = NULL; /* Not necessary. */
+
+ ckh->count--;
+ /* Try to halve the table if it is less than 1/4 full. */
+ if (ckh->count < (ZU(1) << (ckh->lg_curbuckets
+ + LG_CKH_BUCKET_CELLS - 2)) && ckh->lg_curbuckets
+ > ckh->lg_minbuckets) {
+ /* Ignore error due to OOM. */
+ ckh_shrink(tsd, ckh);
+ }
+
+ return (false);
+ }
+
+ return (true);
+}
+
+bool
+ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data)
+{
+ size_t cell;
+
+ assert(ckh != NULL);
+
+ cell = ckh_isearch(ckh, searchkey);
+ if (cell != SIZE_T_MAX) {
+ if (key != NULL)
+ *key = (void *)ckh->tab[cell].key;
+ if (data != NULL)
+ *data = (void *)ckh->tab[cell].data;
+ return (false);
+ }
+
+ return (true);
+}
+
+void
+ckh_string_hash(const void *key, size_t r_hash[2])
+{
+
+ hash(key, strlen((const char *)key), 0x94122f33U, r_hash);
+}
+
+bool
+ckh_string_keycomp(const void *k1, const void *k2)
+{
+
+ assert(k1 != NULL);
+ assert(k2 != NULL);
+
+ return (strcmp((char *)k1, (char *)k2) ? false : true);
+}
+
+void
+ckh_pointer_hash(const void *key, size_t r_hash[2])
+{
+ union {
+ const void *v;
+ size_t i;
+ } u;
+
+ assert(sizeof(u.v) == sizeof(u.i));
+ u.v = key;
+ hash(&u.i, sizeof(u.i), 0xd983396eU, r_hash);
+}
+
+bool
+ckh_pointer_keycomp(const void *k1, const void *k2)
+{
+
+ return ((k1 == k2) ? true : false);
+}
diff --git a/deps/jemalloc/src/ctl.c b/deps/jemalloc/src/ctl.c
new file mode 100644
index 0000000..bc78b20
--- /dev/null
+++ b/deps/jemalloc/src/ctl.c
@@ -0,0 +1,2254 @@
+#define JEMALLOC_CTL_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+/* Data. */
+
+/*
+ * ctl_mtx protects the following:
+ * - ctl_stats.*
+ */
+static malloc_mutex_t ctl_mtx;
+static bool ctl_initialized;
+static uint64_t ctl_epoch;
+static ctl_stats_t ctl_stats;
+
+/******************************************************************************/
+/* Helpers for named and indexed nodes. */
+
+JEMALLOC_INLINE_C const ctl_named_node_t *
+ctl_named_node(const ctl_node_t *node)
+{
+
+ return ((node->named) ? (const ctl_named_node_t *)node : NULL);
+}
+
+JEMALLOC_INLINE_C const ctl_named_node_t *
+ctl_named_children(const ctl_named_node_t *node, size_t index)
+{
+ const ctl_named_node_t *children = ctl_named_node(node->children);
+
+ return (children ? &children[index] : NULL);
+}
+
+JEMALLOC_INLINE_C const ctl_indexed_node_t *
+ctl_indexed_node(const ctl_node_t *node)
+{
+
+ return (!node->named ? (const ctl_indexed_node_t *)node : NULL);
+}
+
+/******************************************************************************/
+/* Function prototypes for non-inline static functions. */
+
+#define CTL_PROTO(n) \
+static int n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen);
+
+#define INDEX_PROTO(n) \
+static const ctl_named_node_t *n##_index(tsdn_t *tsdn, \
+ const size_t *mib, size_t miblen, size_t i);
+
+static bool ctl_arena_init(ctl_arena_stats_t *astats);
+static void ctl_arena_clear(ctl_arena_stats_t *astats);
+static void ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_stats_t *cstats,
+ arena_t *arena);
+static void ctl_arena_stats_smerge(ctl_arena_stats_t *sstats,
+ ctl_arena_stats_t *astats);
+static void ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, unsigned i);
+static bool ctl_grow(tsdn_t *tsdn);
+static void ctl_refresh(tsdn_t *tsdn);
+static bool ctl_init(tsdn_t *tsdn);
+static int ctl_lookup(tsdn_t *tsdn, const char *name,
+ ctl_node_t const **nodesp, size_t *mibp, size_t *depthp);
+
+CTL_PROTO(version)
+CTL_PROTO(epoch)
+CTL_PROTO(thread_tcache_enabled)
+CTL_PROTO(thread_tcache_flush)
+CTL_PROTO(thread_prof_name)
+CTL_PROTO(thread_prof_active)
+CTL_PROTO(thread_arena)
+CTL_PROTO(thread_allocated)
+CTL_PROTO(thread_allocatedp)
+CTL_PROTO(thread_deallocated)
+CTL_PROTO(thread_deallocatedp)
+CTL_PROTO(config_cache_oblivious)
+CTL_PROTO(config_debug)
+CTL_PROTO(config_fill)
+CTL_PROTO(config_lazy_lock)
+CTL_PROTO(config_malloc_conf)
+CTL_PROTO(config_munmap)
+CTL_PROTO(config_prof)
+CTL_PROTO(config_prof_libgcc)
+CTL_PROTO(config_prof_libunwind)
+CTL_PROTO(config_stats)
+CTL_PROTO(config_tcache)
+CTL_PROTO(config_tls)
+CTL_PROTO(config_utrace)
+CTL_PROTO(config_valgrind)
+CTL_PROTO(config_xmalloc)
+CTL_PROTO(opt_abort)
+CTL_PROTO(opt_dss)
+CTL_PROTO(opt_lg_chunk)
+CTL_PROTO(opt_narenas)
+CTL_PROTO(opt_purge)
+CTL_PROTO(opt_lg_dirty_mult)
+CTL_PROTO(opt_decay_time)
+CTL_PROTO(opt_stats_print)
+CTL_PROTO(opt_junk)
+CTL_PROTO(opt_zero)
+CTL_PROTO(opt_quarantine)
+CTL_PROTO(opt_redzone)
+CTL_PROTO(opt_utrace)
+CTL_PROTO(opt_xmalloc)
+CTL_PROTO(opt_tcache)
+CTL_PROTO(opt_lg_tcache_max)
+CTL_PROTO(opt_prof)
+CTL_PROTO(opt_prof_prefix)
+CTL_PROTO(opt_prof_active)
+CTL_PROTO(opt_prof_thread_active_init)
+CTL_PROTO(opt_lg_prof_sample)
+CTL_PROTO(opt_lg_prof_interval)
+CTL_PROTO(opt_prof_gdump)
+CTL_PROTO(opt_prof_final)
+CTL_PROTO(opt_prof_leak)
+CTL_PROTO(opt_prof_accum)
+CTL_PROTO(tcache_create)
+CTL_PROTO(tcache_flush)
+CTL_PROTO(tcache_destroy)
+static void arena_i_purge(tsdn_t *tsdn, unsigned arena_ind, bool all);
+CTL_PROTO(arena_i_purge)
+CTL_PROTO(arena_i_decay)
+CTL_PROTO(arena_i_reset)
+CTL_PROTO(arena_i_dss)
+CTL_PROTO(arena_i_lg_dirty_mult)
+CTL_PROTO(arena_i_decay_time)
+CTL_PROTO(arena_i_chunk_hooks)
+INDEX_PROTO(arena_i)
+CTL_PROTO(arenas_bin_i_size)
+CTL_PROTO(arenas_bin_i_nregs)
+CTL_PROTO(arenas_bin_i_run_size)
+INDEX_PROTO(arenas_bin_i)
+CTL_PROTO(arenas_lrun_i_size)
+INDEX_PROTO(arenas_lrun_i)
+CTL_PROTO(arenas_hchunk_i_size)
+INDEX_PROTO(arenas_hchunk_i)
+CTL_PROTO(arenas_narenas)
+CTL_PROTO(arenas_initialized)
+CTL_PROTO(arenas_lg_dirty_mult)
+CTL_PROTO(arenas_decay_time)
+CTL_PROTO(arenas_quantum)
+CTL_PROTO(arenas_page)
+CTL_PROTO(arenas_tcache_max)
+CTL_PROTO(arenas_nbins)
+CTL_PROTO(arenas_nhbins)
+CTL_PROTO(arenas_nlruns)
+CTL_PROTO(arenas_nhchunks)
+CTL_PROTO(arenas_extend)
+CTL_PROTO(prof_thread_active_init)
+CTL_PROTO(prof_active)
+CTL_PROTO(prof_dump)
+CTL_PROTO(prof_gdump)
+CTL_PROTO(prof_reset)
+CTL_PROTO(prof_interval)
+CTL_PROTO(lg_prof_sample)
+CTL_PROTO(stats_arenas_i_small_allocated)
+CTL_PROTO(stats_arenas_i_small_nmalloc)
+CTL_PROTO(stats_arenas_i_small_ndalloc)
+CTL_PROTO(stats_arenas_i_small_nrequests)
+CTL_PROTO(stats_arenas_i_large_allocated)
+CTL_PROTO(stats_arenas_i_large_nmalloc)
+CTL_PROTO(stats_arenas_i_large_ndalloc)
+CTL_PROTO(stats_arenas_i_large_nrequests)
+CTL_PROTO(stats_arenas_i_huge_allocated)
+CTL_PROTO(stats_arenas_i_huge_nmalloc)
+CTL_PROTO(stats_arenas_i_huge_ndalloc)
+CTL_PROTO(stats_arenas_i_huge_nrequests)
+CTL_PROTO(stats_arenas_i_bins_j_nmalloc)
+CTL_PROTO(stats_arenas_i_bins_j_ndalloc)
+CTL_PROTO(stats_arenas_i_bins_j_nrequests)
+CTL_PROTO(stats_arenas_i_bins_j_curregs)
+CTL_PROTO(stats_arenas_i_bins_j_nfills)
+CTL_PROTO(stats_arenas_i_bins_j_nflushes)
+CTL_PROTO(stats_arenas_i_bins_j_nruns)
+CTL_PROTO(stats_arenas_i_bins_j_nreruns)
+CTL_PROTO(stats_arenas_i_bins_j_curruns)
+INDEX_PROTO(stats_arenas_i_bins_j)
+CTL_PROTO(stats_arenas_i_lruns_j_nmalloc)
+CTL_PROTO(stats_arenas_i_lruns_j_ndalloc)
+CTL_PROTO(stats_arenas_i_lruns_j_nrequests)
+CTL_PROTO(stats_arenas_i_lruns_j_curruns)
+INDEX_PROTO(stats_arenas_i_lruns_j)
+CTL_PROTO(stats_arenas_i_hchunks_j_nmalloc)
+CTL_PROTO(stats_arenas_i_hchunks_j_ndalloc)
+CTL_PROTO(stats_arenas_i_hchunks_j_nrequests)
+CTL_PROTO(stats_arenas_i_hchunks_j_curhchunks)
+INDEX_PROTO(stats_arenas_i_hchunks_j)
+CTL_PROTO(stats_arenas_i_nthreads)
+CTL_PROTO(stats_arenas_i_dss)
+CTL_PROTO(stats_arenas_i_lg_dirty_mult)
+CTL_PROTO(stats_arenas_i_decay_time)
+CTL_PROTO(stats_arenas_i_pactive)
+CTL_PROTO(stats_arenas_i_pdirty)
+CTL_PROTO(stats_arenas_i_mapped)
+CTL_PROTO(stats_arenas_i_retained)
+CTL_PROTO(stats_arenas_i_npurge)
+CTL_PROTO(stats_arenas_i_nmadvise)
+CTL_PROTO(stats_arenas_i_purged)
+CTL_PROTO(stats_arenas_i_metadata_mapped)
+CTL_PROTO(stats_arenas_i_metadata_allocated)
+INDEX_PROTO(stats_arenas_i)
+CTL_PROTO(stats_cactive)
+CTL_PROTO(stats_allocated)
+CTL_PROTO(stats_active)
+CTL_PROTO(stats_metadata)
+CTL_PROTO(stats_resident)
+CTL_PROTO(stats_mapped)
+CTL_PROTO(stats_retained)
+
+/******************************************************************************/
+/* mallctl tree. */
+
+/* Maximum tree depth. */
+#define CTL_MAX_DEPTH 6
+
+#define NAME(n) {true}, n
+#define CHILD(t, c) \
+ sizeof(c##_node) / sizeof(ctl_##t##_node_t), \
+ (ctl_node_t *)c##_node, \
+ NULL
+#define CTL(c) 0, NULL, c##_ctl
+
+/*
+ * Only handles internal indexed nodes, since there are currently no external
+ * ones.
+ */
+#define INDEX(i) {false}, i##_index
+
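+/*
+ * Expansion sketch (for illustration): with the macros above, a leaf entry
+ * such as {NAME("epoch"), CTL(epoch)} expands to
+ *
+ *	{{true}, "epoch", 0, NULL, epoch_ctl}
+ *
+ * while an interior entry such as {NAME("thread"), CHILD(named, thread)}
+ * expands to
+ *
+ *	{{true}, "thread",
+ *	    sizeof(thread_node) / sizeof(ctl_named_node_t),
+ *	    (ctl_node_t *)thread_node, NULL}
+ *
+ * i.e. leaves carry a ctl function and no children, and interior nodes carry
+ * a child array and no ctl function.
+ */
+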
+static const ctl_named_node_t thread_tcache_node[] = {
+ {NAME("enabled"), CTL(thread_tcache_enabled)},
+ {NAME("flush"), CTL(thread_tcache_flush)}
+};
+
+static const ctl_named_node_t thread_prof_node[] = {
+ {NAME("name"), CTL(thread_prof_name)},
+ {NAME("active"), CTL(thread_prof_active)}
+};
+
+static const ctl_named_node_t thread_node[] = {
+ {NAME("arena"), CTL(thread_arena)},
+ {NAME("allocated"), CTL(thread_allocated)},
+ {NAME("allocatedp"), CTL(thread_allocatedp)},
+ {NAME("deallocated"), CTL(thread_deallocated)},
+ {NAME("deallocatedp"), CTL(thread_deallocatedp)},
+ {NAME("tcache"), CHILD(named, thread_tcache)},
+ {NAME("prof"), CHILD(named, thread_prof)}
+};
+
+static const ctl_named_node_t config_node[] = {
+ {NAME("cache_oblivious"), CTL(config_cache_oblivious)},
+ {NAME("debug"), CTL(config_debug)},
+ {NAME("fill"), CTL(config_fill)},
+ {NAME("lazy_lock"), CTL(config_lazy_lock)},
+ {NAME("malloc_conf"), CTL(config_malloc_conf)},
+ {NAME("munmap"), CTL(config_munmap)},
+ {NAME("prof"), CTL(config_prof)},
+ {NAME("prof_libgcc"), CTL(config_prof_libgcc)},
+ {NAME("prof_libunwind"), CTL(config_prof_libunwind)},
+ {NAME("stats"), CTL(config_stats)},
+ {NAME("tcache"), CTL(config_tcache)},
+ {NAME("tls"), CTL(config_tls)},
+ {NAME("utrace"), CTL(config_utrace)},
+ {NAME("valgrind"), CTL(config_valgrind)},
+ {NAME("xmalloc"), CTL(config_xmalloc)}
+};
+
+static const ctl_named_node_t opt_node[] = {
+ {NAME("abort"), CTL(opt_abort)},
+ {NAME("dss"), CTL(opt_dss)},
+ {NAME("lg_chunk"), CTL(opt_lg_chunk)},
+ {NAME("narenas"), CTL(opt_narenas)},
+ {NAME("purge"), CTL(opt_purge)},
+ {NAME("lg_dirty_mult"), CTL(opt_lg_dirty_mult)},
+ {NAME("decay_time"), CTL(opt_decay_time)},
+ {NAME("stats_print"), CTL(opt_stats_print)},
+ {NAME("junk"), CTL(opt_junk)},
+ {NAME("zero"), CTL(opt_zero)},
+ {NAME("quarantine"), CTL(opt_quarantine)},
+ {NAME("redzone"), CTL(opt_redzone)},
+ {NAME("utrace"), CTL(opt_utrace)},
+ {NAME("xmalloc"), CTL(opt_xmalloc)},
+ {NAME("tcache"), CTL(opt_tcache)},
+ {NAME("lg_tcache_max"), CTL(opt_lg_tcache_max)},
+ {NAME("prof"), CTL(opt_prof)},
+ {NAME("prof_prefix"), CTL(opt_prof_prefix)},
+ {NAME("prof_active"), CTL(opt_prof_active)},
+ {NAME("prof_thread_active_init"), CTL(opt_prof_thread_active_init)},
+ {NAME("lg_prof_sample"), CTL(opt_lg_prof_sample)},
+ {NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)},
+ {NAME("prof_gdump"), CTL(opt_prof_gdump)},
+ {NAME("prof_final"), CTL(opt_prof_final)},
+ {NAME("prof_leak"), CTL(opt_prof_leak)},
+ {NAME("prof_accum"), CTL(opt_prof_accum)}
+};
+
+static const ctl_named_node_t tcache_node[] = {
+ {NAME("create"), CTL(tcache_create)},
+ {NAME("flush"), CTL(tcache_flush)},
+ {NAME("destroy"), CTL(tcache_destroy)}
+};
+
+static const ctl_named_node_t arena_i_node[] = {
+ {NAME("purge"), CTL(arena_i_purge)},
+ {NAME("decay"), CTL(arena_i_decay)},
+ {NAME("reset"), CTL(arena_i_reset)},
+ {NAME("dss"), CTL(arena_i_dss)},
+ {NAME("lg_dirty_mult"), CTL(arena_i_lg_dirty_mult)},
+ {NAME("decay_time"), CTL(arena_i_decay_time)},
+ {NAME("chunk_hooks"), CTL(arena_i_chunk_hooks)}
+};
+static const ctl_named_node_t super_arena_i_node[] = {
+ {NAME(""), CHILD(named, arena_i)}
+};
+
+static const ctl_indexed_node_t arena_node[] = {
+ {INDEX(arena_i)}
+};
+
+static const ctl_named_node_t arenas_bin_i_node[] = {
+ {NAME("size"), CTL(arenas_bin_i_size)},
+ {NAME("nregs"), CTL(arenas_bin_i_nregs)},
+ {NAME("run_size"), CTL(arenas_bin_i_run_size)}
+};
+static const ctl_named_node_t super_arenas_bin_i_node[] = {
+ {NAME(""), CHILD(named, arenas_bin_i)}
+};
+
+static const ctl_indexed_node_t arenas_bin_node[] = {
+ {INDEX(arenas_bin_i)}
+};
+
+static const ctl_named_node_t arenas_lrun_i_node[] = {
+ {NAME("size"), CTL(arenas_lrun_i_size)}
+};
+static const ctl_named_node_t super_arenas_lrun_i_node[] = {
+ {NAME(""), CHILD(named, arenas_lrun_i)}
+};
+
+static const ctl_indexed_node_t arenas_lrun_node[] = {
+ {INDEX(arenas_lrun_i)}
+};
+
+static const ctl_named_node_t arenas_hchunk_i_node[] = {
+ {NAME("size"), CTL(arenas_hchunk_i_size)}
+};
+static const ctl_named_node_t super_arenas_hchunk_i_node[] = {
+ {NAME(""), CHILD(named, arenas_hchunk_i)}
+};
+
+static const ctl_indexed_node_t arenas_hchunk_node[] = {
+ {INDEX(arenas_hchunk_i)}
+};
+
+static const ctl_named_node_t arenas_node[] = {
+ {NAME("narenas"), CTL(arenas_narenas)},
+ {NAME("initialized"), CTL(arenas_initialized)},
+ {NAME("lg_dirty_mult"), CTL(arenas_lg_dirty_mult)},
+ {NAME("decay_time"), CTL(arenas_decay_time)},
+ {NAME("quantum"), CTL(arenas_quantum)},
+ {NAME("page"), CTL(arenas_page)},
+ {NAME("tcache_max"), CTL(arenas_tcache_max)},
+ {NAME("nbins"), CTL(arenas_nbins)},
+ {NAME("nhbins"), CTL(arenas_nhbins)},
+ {NAME("bin"), CHILD(indexed, arenas_bin)},
+ {NAME("nlruns"), CTL(arenas_nlruns)},
+ {NAME("lrun"), CHILD(indexed, arenas_lrun)},
+ {NAME("nhchunks"), CTL(arenas_nhchunks)},
+ {NAME("hchunk"), CHILD(indexed, arenas_hchunk)},
+ {NAME("extend"), CTL(arenas_extend)}
+};
+
+static const ctl_named_node_t prof_node[] = {
+ {NAME("thread_active_init"), CTL(prof_thread_active_init)},
+ {NAME("active"), CTL(prof_active)},
+ {NAME("dump"), CTL(prof_dump)},
+ {NAME("gdump"), CTL(prof_gdump)},
+ {NAME("reset"), CTL(prof_reset)},
+ {NAME("interval"), CTL(prof_interval)},
+ {NAME("lg_sample"), CTL(lg_prof_sample)}
+};
+
+static const ctl_named_node_t stats_arenas_i_metadata_node[] = {
+ {NAME("mapped"), CTL(stats_arenas_i_metadata_mapped)},
+ {NAME("allocated"), CTL(stats_arenas_i_metadata_allocated)}
+};
+
+static const ctl_named_node_t stats_arenas_i_small_node[] = {
+ {NAME("allocated"), CTL(stats_arenas_i_small_allocated)},
+ {NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)},
+ {NAME("ndalloc"), CTL(stats_arenas_i_small_ndalloc)},
+ {NAME("nrequests"), CTL(stats_arenas_i_small_nrequests)}
+};
+
+static const ctl_named_node_t stats_arenas_i_large_node[] = {
+ {NAME("allocated"), CTL(stats_arenas_i_large_allocated)},
+ {NAME("nmalloc"), CTL(stats_arenas_i_large_nmalloc)},
+ {NAME("ndalloc"), CTL(stats_arenas_i_large_ndalloc)},
+ {NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)}
+};
+
+static const ctl_named_node_t stats_arenas_i_huge_node[] = {
+ {NAME("allocated"), CTL(stats_arenas_i_huge_allocated)},
+ {NAME("nmalloc"), CTL(stats_arenas_i_huge_nmalloc)},
+ {NAME("ndalloc"), CTL(stats_arenas_i_huge_ndalloc)},
+ {NAME("nrequests"), CTL(stats_arenas_i_huge_nrequests)}
+};
+
+static const ctl_named_node_t stats_arenas_i_bins_j_node[] = {
+ {NAME("nmalloc"), CTL(stats_arenas_i_bins_j_nmalloc)},
+ {NAME("ndalloc"), CTL(stats_arenas_i_bins_j_ndalloc)},
+ {NAME("nrequests"), CTL(stats_arenas_i_bins_j_nrequests)},
+ {NAME("curregs"), CTL(stats_arenas_i_bins_j_curregs)},
+ {NAME("nfills"), CTL(stats_arenas_i_bins_j_nfills)},
+ {NAME("nflushes"), CTL(stats_arenas_i_bins_j_nflushes)},
+ {NAME("nruns"), CTL(stats_arenas_i_bins_j_nruns)},
+ {NAME("nreruns"), CTL(stats_arenas_i_bins_j_nreruns)},
+ {NAME("curruns"), CTL(stats_arenas_i_bins_j_curruns)}
+};
+static const ctl_named_node_t super_stats_arenas_i_bins_j_node[] = {
+ {NAME(""), CHILD(named, stats_arenas_i_bins_j)}
+};
+
+static const ctl_indexed_node_t stats_arenas_i_bins_node[] = {
+ {INDEX(stats_arenas_i_bins_j)}
+};
+
+static const ctl_named_node_t stats_arenas_i_lruns_j_node[] = {
+ {NAME("nmalloc"), CTL(stats_arenas_i_lruns_j_nmalloc)},
+ {NAME("ndalloc"), CTL(stats_arenas_i_lruns_j_ndalloc)},
+ {NAME("nrequests"), CTL(stats_arenas_i_lruns_j_nrequests)},
+ {NAME("curruns"), CTL(stats_arenas_i_lruns_j_curruns)}
+};
+static const ctl_named_node_t super_stats_arenas_i_lruns_j_node[] = {
+ {NAME(""), CHILD(named, stats_arenas_i_lruns_j)}
+};
+
+static const ctl_indexed_node_t stats_arenas_i_lruns_node[] = {
+ {INDEX(stats_arenas_i_lruns_j)}
+};
+
+static const ctl_named_node_t stats_arenas_i_hchunks_j_node[] = {
+ {NAME("nmalloc"), CTL(stats_arenas_i_hchunks_j_nmalloc)},
+ {NAME("ndalloc"), CTL(stats_arenas_i_hchunks_j_ndalloc)},
+ {NAME("nrequests"), CTL(stats_arenas_i_hchunks_j_nrequests)},
+ {NAME("curhchunks"), CTL(stats_arenas_i_hchunks_j_curhchunks)}
+};
+static const ctl_named_node_t super_stats_arenas_i_hchunks_j_node[] = {
+ {NAME(""), CHILD(named, stats_arenas_i_hchunks_j)}
+};
+
+static const ctl_indexed_node_t stats_arenas_i_hchunks_node[] = {
+ {INDEX(stats_arenas_i_hchunks_j)}
+};
+
+static const ctl_named_node_t stats_arenas_i_node[] = {
+ {NAME("nthreads"), CTL(stats_arenas_i_nthreads)},
+ {NAME("dss"), CTL(stats_arenas_i_dss)},
+ {NAME("lg_dirty_mult"), CTL(stats_arenas_i_lg_dirty_mult)},
+ {NAME("decay_time"), CTL(stats_arenas_i_decay_time)},
+ {NAME("pactive"), CTL(stats_arenas_i_pactive)},
+ {NAME("pdirty"), CTL(stats_arenas_i_pdirty)},
+ {NAME("mapped"), CTL(stats_arenas_i_mapped)},
+ {NAME("retained"), CTL(stats_arenas_i_retained)},
+ {NAME("npurge"), CTL(stats_arenas_i_npurge)},
+ {NAME("nmadvise"), CTL(stats_arenas_i_nmadvise)},
+ {NAME("purged"), CTL(stats_arenas_i_purged)},
+ {NAME("metadata"), CHILD(named, stats_arenas_i_metadata)},
+ {NAME("small"), CHILD(named, stats_arenas_i_small)},
+ {NAME("large"), CHILD(named, stats_arenas_i_large)},
+ {NAME("huge"), CHILD(named, stats_arenas_i_huge)},
+ {NAME("bins"), CHILD(indexed, stats_arenas_i_bins)},
+ {NAME("lruns"), CHILD(indexed, stats_arenas_i_lruns)},
+ {NAME("hchunks"), CHILD(indexed, stats_arenas_i_hchunks)}
+};
+static const ctl_named_node_t super_stats_arenas_i_node[] = {
+ {NAME(""), CHILD(named, stats_arenas_i)}
+};
+
+static const ctl_indexed_node_t stats_arenas_node[] = {
+ {INDEX(stats_arenas_i)}
+};
+
+static const ctl_named_node_t stats_node[] = {
+ {NAME("cactive"), CTL(stats_cactive)},
+ {NAME("allocated"), CTL(stats_allocated)},
+ {NAME("active"), CTL(stats_active)},
+ {NAME("metadata"), CTL(stats_metadata)},
+ {NAME("resident"), CTL(stats_resident)},
+ {NAME("mapped"), CTL(stats_mapped)},
+ {NAME("retained"), CTL(stats_retained)},
+ {NAME("arenas"), CHILD(indexed, stats_arenas)}
+};
+
+static const ctl_named_node_t root_node[] = {
+ {NAME("version"), CTL(version)},
+ {NAME("epoch"), CTL(epoch)},
+ {NAME("thread"), CHILD(named, thread)},
+ {NAME("config"), CHILD(named, config)},
+ {NAME("opt"), CHILD(named, opt)},
+ {NAME("tcache"), CHILD(named, tcache)},
+ {NAME("arena"), CHILD(indexed, arena)},
+ {NAME("arenas"), CHILD(named, arenas)},
+ {NAME("prof"), CHILD(named, prof)},
+ {NAME("stats"), CHILD(named, stats)}
+};
+static const ctl_named_node_t super_root_node[] = {
+ {NAME(""), CHILD(named, root)}
+};
+
+#undef NAME
+#undef CHILD
+#undef CTL
+#undef INDEX
+
+/******************************************************************************/
+
+static bool
+ctl_arena_init(ctl_arena_stats_t *astats)
+{
+
+ if (astats->lstats == NULL) {
+ astats->lstats = (malloc_large_stats_t *)a0malloc(nlclasses *
+ sizeof(malloc_large_stats_t));
+ if (astats->lstats == NULL)
+ return (true);
+ }
+
+ if (astats->hstats == NULL) {
+ astats->hstats = (malloc_huge_stats_t *)a0malloc(nhclasses *
+ sizeof(malloc_huge_stats_t));
+ if (astats->hstats == NULL)
+ return (true);
+ }
+
+ return (false);
+}
+
+static void
+ctl_arena_clear(ctl_arena_stats_t *astats)
+{
+
+ astats->nthreads = 0;
+ astats->dss = dss_prec_names[dss_prec_limit];
+ astats->lg_dirty_mult = -1;
+ astats->decay_time = -1;
+ astats->pactive = 0;
+ astats->pdirty = 0;
+ if (config_stats) {
+ memset(&astats->astats, 0, sizeof(arena_stats_t));
+ astats->allocated_small = 0;
+ astats->nmalloc_small = 0;
+ astats->ndalloc_small = 0;
+ astats->nrequests_small = 0;
+ memset(astats->bstats, 0, NBINS * sizeof(malloc_bin_stats_t));
+ memset(astats->lstats, 0, nlclasses *
+ sizeof(malloc_large_stats_t));
+ memset(astats->hstats, 0, nhclasses *
+ sizeof(malloc_huge_stats_t));
+ }
+}
+
+static void
+ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_stats_t *cstats, arena_t *arena)
+{
+ unsigned i;
+
+ if (config_stats) {
+ arena_stats_merge(tsdn, arena, &cstats->nthreads, &cstats->dss,
+ &cstats->lg_dirty_mult, &cstats->decay_time,
+ &cstats->pactive, &cstats->pdirty, &cstats->astats,
+ cstats->bstats, cstats->lstats, cstats->hstats);
+
+ for (i = 0; i < NBINS; i++) {
+ cstats->allocated_small += cstats->bstats[i].curregs *
+ index2size(i);
+ cstats->nmalloc_small += cstats->bstats[i].nmalloc;
+ cstats->ndalloc_small += cstats->bstats[i].ndalloc;
+ cstats->nrequests_small += cstats->bstats[i].nrequests;
+ }
+ } else {
+ arena_basic_stats_merge(tsdn, arena, &cstats->nthreads,
+ &cstats->dss, &cstats->lg_dirty_mult, &cstats->decay_time,
+ &cstats->pactive, &cstats->pdirty);
+ }
+}
+
+static void
+ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats)
+{
+ unsigned i;
+
+ sstats->nthreads += astats->nthreads;
+ sstats->pactive += astats->pactive;
+ sstats->pdirty += astats->pdirty;
+
+ if (config_stats) {
+ sstats->astats.mapped += astats->astats.mapped;
+ sstats->astats.retained += astats->astats.retained;
+ sstats->astats.npurge += astats->astats.npurge;
+ sstats->astats.nmadvise += astats->astats.nmadvise;
+ sstats->astats.purged += astats->astats.purged;
+
+ sstats->astats.metadata_mapped +=
+ astats->astats.metadata_mapped;
+ sstats->astats.metadata_allocated +=
+ astats->astats.metadata_allocated;
+
+ sstats->allocated_small += astats->allocated_small;
+ sstats->nmalloc_small += astats->nmalloc_small;
+ sstats->ndalloc_small += astats->ndalloc_small;
+ sstats->nrequests_small += astats->nrequests_small;
+
+ sstats->astats.allocated_large +=
+ astats->astats.allocated_large;
+ sstats->astats.nmalloc_large += astats->astats.nmalloc_large;
+ sstats->astats.ndalloc_large += astats->astats.ndalloc_large;
+ sstats->astats.nrequests_large +=
+ astats->astats.nrequests_large;
+
+ sstats->astats.allocated_huge += astats->astats.allocated_huge;
+ sstats->astats.nmalloc_huge += astats->astats.nmalloc_huge;
+ sstats->astats.ndalloc_huge += astats->astats.ndalloc_huge;
+
+ for (i = 0; i < NBINS; i++) {
+ sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
+ sstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
+ sstats->bstats[i].nrequests +=
+ astats->bstats[i].nrequests;
+ sstats->bstats[i].curregs += astats->bstats[i].curregs;
+ if (config_tcache) {
+ sstats->bstats[i].nfills +=
+ astats->bstats[i].nfills;
+ sstats->bstats[i].nflushes +=
+ astats->bstats[i].nflushes;
+ }
+ sstats->bstats[i].nruns += astats->bstats[i].nruns;
+ sstats->bstats[i].reruns += astats->bstats[i].reruns;
+ sstats->bstats[i].curruns += astats->bstats[i].curruns;
+ }
+
+ for (i = 0; i < nlclasses; i++) {
+ sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc;
+ sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc;
+ sstats->lstats[i].nrequests +=
+ astats->lstats[i].nrequests;
+ sstats->lstats[i].curruns += astats->lstats[i].curruns;
+ }
+
+ for (i = 0; i < nhclasses; i++) {
+ sstats->hstats[i].nmalloc += astats->hstats[i].nmalloc;
+ sstats->hstats[i].ndalloc += astats->hstats[i].ndalloc;
+ sstats->hstats[i].curhchunks +=
+ astats->hstats[i].curhchunks;
+ }
+ }
+}
+
+static void
+ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, unsigned i)
+{
+ ctl_arena_stats_t *astats = &ctl_stats.arenas[i];
+ ctl_arena_stats_t *sstats = &ctl_stats.arenas[ctl_stats.narenas];
+
+ ctl_arena_clear(astats);
+ ctl_arena_stats_amerge(tsdn, astats, arena);
+ /* Merge into sum stats as well. */
+ ctl_arena_stats_smerge(sstats, astats);
+}
+
+static bool
+ctl_grow(tsdn_t *tsdn)
+{
+ ctl_arena_stats_t *astats;
+
+ /* Initialize new arena. */
+ if (arena_init(tsdn, ctl_stats.narenas) == NULL)
+ return (true);
+
+ /* Allocate extended arena stats. */
+ astats = (ctl_arena_stats_t *)a0malloc((ctl_stats.narenas + 2) *
+ sizeof(ctl_arena_stats_t));
+ if (astats == NULL)
+ return (true);
+
+ /* Initialize the new astats element. */
+ memcpy(astats, ctl_stats.arenas, (ctl_stats.narenas + 1) *
+ sizeof(ctl_arena_stats_t));
+ memset(&astats[ctl_stats.narenas + 1], 0, sizeof(ctl_arena_stats_t));
+ if (ctl_arena_init(&astats[ctl_stats.narenas + 1])) {
+ a0dalloc(astats);
+ return (true);
+ }
+ /* Swap merged stats to their new location. */
+ {
+ ctl_arena_stats_t tstats;
+ memcpy(&tstats, &astats[ctl_stats.narenas],
+ sizeof(ctl_arena_stats_t));
+ memcpy(&astats[ctl_stats.narenas],
+ &astats[ctl_stats.narenas + 1], sizeof(ctl_arena_stats_t));
+ memcpy(&astats[ctl_stats.narenas + 1], &tstats,
+ sizeof(ctl_arena_stats_t));
+ }
+ a0dalloc(ctl_stats.arenas);
+ ctl_stats.arenas = astats;
+ ctl_stats.narenas++;
+
+ return (false);
+}
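+
+/*
+ * Layout sketch for ctl_grow() (illustration, assuming narenas == 2 on
+ * entry): the summed-stats element always lives at index narenas, so growing
+ * the array means shifting it past the newly initialized slot:
+ *
+ *	before:	[arena0, arena1, summary]
+ *	after:	[arena0, arena1, arena2 (new, zeroed), summary]
+ */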
+
+static void
+ctl_refresh(tsdn_t *tsdn)
+{
+ unsigned i;
+ VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);
+
+ /*
+ * Clear sum stats, since they will be merged into by
+ * ctl_arena_refresh().
+ */
+ ctl_arena_clear(&ctl_stats.arenas[ctl_stats.narenas]);
+
+ for (i = 0; i < ctl_stats.narenas; i++)
+ tarenas[i] = arena_get(tsdn, i, false);
+
+ for (i = 0; i < ctl_stats.narenas; i++) {
+ bool initialized = (tarenas[i] != NULL);
+
+ ctl_stats.arenas[i].initialized = initialized;
+ if (initialized)
+ ctl_arena_refresh(tsdn, tarenas[i], i);
+ }
+
+ if (config_stats) {
+ size_t base_allocated, base_resident, base_mapped;
+ base_stats_get(tsdn, &base_allocated, &base_resident,
+ &base_mapped);
+ ctl_stats.allocated =
+ ctl_stats.arenas[ctl_stats.narenas].allocated_small +
+ ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large +
+ ctl_stats.arenas[ctl_stats.narenas].astats.allocated_huge;
+ ctl_stats.active =
+ (ctl_stats.arenas[ctl_stats.narenas].pactive << LG_PAGE);
+ ctl_stats.metadata = base_allocated +
+ ctl_stats.arenas[ctl_stats.narenas].astats.metadata_mapped +
+ ctl_stats.arenas[ctl_stats.narenas].astats
+ .metadata_allocated;
+ ctl_stats.resident = base_resident +
+ ctl_stats.arenas[ctl_stats.narenas].astats.metadata_mapped +
+ ((ctl_stats.arenas[ctl_stats.narenas].pactive +
+ ctl_stats.arenas[ctl_stats.narenas].pdirty) << LG_PAGE);
+ ctl_stats.mapped = base_mapped +
+ ctl_stats.arenas[ctl_stats.narenas].astats.mapped;
+ ctl_stats.retained =
+ ctl_stats.arenas[ctl_stats.narenas].astats.retained;
+ }
+
+ ctl_epoch++;
+}
+
+static bool
+ctl_init(tsdn_t *tsdn)
+{
+ bool ret;
+
+ malloc_mutex_lock(tsdn, &ctl_mtx);
+ if (!ctl_initialized) {
+ /*
+ * Allocate space for one extra arena stats element, which
+ * contains summed stats across all arenas.
+ */
+ ctl_stats.narenas = narenas_total_get();
+ ctl_stats.arenas = (ctl_arena_stats_t *)a0malloc(
+ (ctl_stats.narenas + 1) * sizeof(ctl_arena_stats_t));
+ if (ctl_stats.arenas == NULL) {
+ ret = true;
+ goto label_return;
+ }
+ memset(ctl_stats.arenas, 0, (ctl_stats.narenas + 1) *
+ sizeof(ctl_arena_stats_t));
+
+ /*
+ * Initialize all stats structures, regardless of whether they
+ * ever get used. Lazy initialization would allow errors to
+ * cause inconsistent state to be viewable by the application.
+ */
+ if (config_stats) {
+ unsigned i;
+ for (i = 0; i <= ctl_stats.narenas; i++) {
+ if (ctl_arena_init(&ctl_stats.arenas[i])) {
+ unsigned j;
+ for (j = 0; j < i; j++) {
+ a0dalloc(
+ ctl_stats.arenas[j].lstats);
+ a0dalloc(
+ ctl_stats.arenas[j].hstats);
+ }
+ a0dalloc(ctl_stats.arenas);
+ ctl_stats.arenas = NULL;
+ ret = true;
+ goto label_return;
+ }
+ }
+ }
+ ctl_stats.arenas[ctl_stats.narenas].initialized = true;
+
+ ctl_epoch = 0;
+ ctl_refresh(tsdn);
+ ctl_initialized = true;
+ }
+
+ ret = false;
+label_return:
+ malloc_mutex_unlock(tsdn, &ctl_mtx);
+ return (ret);
+}
+
+static int
+ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
+ size_t *mibp, size_t *depthp)
+{
+ int ret;
+ const char *elm, *tdot, *dot;
+ size_t elen, i, j;
+ const ctl_named_node_t *node;
+
+ elm = name;
+ /* Equivalent to strchrnul(). */
+ dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : strchr(elm, '\0');
+ elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
+ if (elen == 0) {
+ ret = ENOENT;
+ goto label_return;
+ }
+ node = super_root_node;
+ for (i = 0; i < *depthp; i++) {
+ assert(node);
+ assert(node->nchildren > 0);
+ if (ctl_named_node(node->children) != NULL) {
+ const ctl_named_node_t *pnode = node;
+
+ /* Children are named. */
+ for (j = 0; j < node->nchildren; j++) {
+ const ctl_named_node_t *child =
+ ctl_named_children(node, j);
+ if (strlen(child->name) == elen &&
+ strncmp(elm, child->name, elen) == 0) {
+ node = child;
+ if (nodesp != NULL)
+ nodesp[i] =
+ (const ctl_node_t *)node;
+ mibp[i] = j;
+ break;
+ }
+ }
+ if (node == pnode) {
+ ret = ENOENT;
+ goto label_return;
+ }
+ } else {
+ uintmax_t index;
+ const ctl_indexed_node_t *inode;
+
+ /* Children are indexed. */
+ index = malloc_strtoumax(elm, NULL, 10);
+ if (index == UINTMAX_MAX || index > SIZE_T_MAX) {
+ ret = ENOENT;
+ goto label_return;
+ }
+
+ inode = ctl_indexed_node(node->children);
+ node = inode->index(tsdn, mibp, *depthp, (size_t)index);
+ if (node == NULL) {
+ ret = ENOENT;
+ goto label_return;
+ }
+
+ if (nodesp != NULL)
+ nodesp[i] = (const ctl_node_t *)node;
+ mibp[i] = (size_t)index;
+ }
+
+ if (node->ctl != NULL) {
+ /* Terminal node. */
+ if (*dot != '\0') {
+ /*
+ * The name contains more elements than are
+ * in this path through the tree.
+ */
+ ret = ENOENT;
+ goto label_return;
+ }
+ /* Complete lookup successful. */
+ *depthp = i + 1;
+ break;
+ }
+
+ /* Update elm. */
+ if (*dot == '\0') {
+ /* No more elements. */
+ ret = ENOENT;
+ goto label_return;
+ }
+ elm = &dot[1];
+ dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot :
+ strchr(elm, '\0');
+ elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
+ }
+
+ ret = 0;
+label_return:
+ return (ret);
+}
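+
+/*
+ * Worked example (illustration): with the tree as laid out above, looking up
+ * "arenas.bin.0.size" walks root -> arenas (mib[0] == 7) -> bin (mib[1] == 9,
+ * an indexed node) -> 0 (mib[2] == 0) -> size (mib[3] == 0), leaving
+ * *depthp == 4 and mib == {7, 9, 0, 0}.
+ */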
+
+int
+ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
+{
+ int ret;
+ size_t depth;
+ ctl_node_t const *nodes[CTL_MAX_DEPTH];
+ size_t mib[CTL_MAX_DEPTH];
+ const ctl_named_node_t *node;
+
+ if (!ctl_initialized && ctl_init(tsd_tsdn(tsd))) {
+ ret = EAGAIN;
+ goto label_return;
+ }
+
+ depth = CTL_MAX_DEPTH;
+ ret = ctl_lookup(tsd_tsdn(tsd), name, nodes, mib, &depth);
+ if (ret != 0)
+ goto label_return;
+
+ node = ctl_named_node(nodes[depth-1]);
+ if (node != NULL && node->ctl)
+ ret = node->ctl(tsd, mib, depth, oldp, oldlenp, newp, newlen);
+ else {
+ /* The name refers to a partial path through the ctl tree. */
+ ret = ENOENT;
+ }
+
+label_return:
+ return (ret);
+}
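+
+/*
+ * Caller-side sketch (illustration): the public mallctl() entry point
+ * ultimately forwards to ctl_byname(), so reading a statistic looks like
+ *
+ *	size_t allocated, sz = sizeof(size_t);
+ *	ctl_byname(tsd, "stats.allocated", &allocated, &sz, NULL, 0);
+ *
+ * A zero return means both the lookup and the terminal ctl function
+ * succeeded.
+ */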
+
+int
+ctl_nametomib(tsdn_t *tsdn, const char *name, size_t *mibp, size_t *miblenp)
+{
+ int ret;
+
+ if (!ctl_initialized && ctl_init(tsdn)) {
+ ret = EAGAIN;
+ goto label_return;
+ }
+
+ ret = ctl_lookup(tsdn, name, NULL, mibp, miblenp);
+label_return:
+ return (ret);
+}
+
+int
+ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ const ctl_named_node_t *node;
+ size_t i;
+
+ if (!ctl_initialized && ctl_init(tsd_tsdn(tsd))) {
+ ret = EAGAIN;
+ goto label_return;
+ }
+
+ /* Iterate down the tree. */
+ node = super_root_node;
+ for (i = 0; i < miblen; i++) {
+ assert(node);
+ assert(node->nchildren > 0);
+ if (ctl_named_node(node->children) != NULL) {
+ /* Children are named. */
+ if (node->nchildren <= (unsigned)mib[i]) {
+ ret = ENOENT;
+ goto label_return;
+ }
+ node = ctl_named_children(node, mib[i]);
+ } else {
+ const ctl_indexed_node_t *inode;
+
+ /* Indexed element. */
+ inode = ctl_indexed_node(node->children);
+ node = inode->index(tsd_tsdn(tsd), mib, miblen, mib[i]);
+ if (node == NULL) {
+ ret = ENOENT;
+ goto label_return;
+ }
+ }
+ }
+
+ /* Call the ctl function. */
+ if (node && node->ctl)
+ ret = node->ctl(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
+ else {
+ /* Partial MIB. */
+ ret = ENOENT;
+ }
+
+label_return:
+ return (ret);
+}
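+
+/*
+ * Usage sketch (illustration): ctl_nametomib() and ctl_bymib() support the
+ * translate-once, query-many pattern for hot paths:
+ *
+ *	size_t mib[CTL_MAX_DEPTH];
+ *	size_t miblen = CTL_MAX_DEPTH;
+ *	ctl_nametomib(tsdn, "stats.arenas.0.pactive", mib, &miblen);
+ *	...
+ *	size_t pactive, sz = sizeof(size_t);
+ *	ctl_bymib(tsd, mib, miblen, &pactive, &sz, NULL, 0);
+ */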
+
+bool
+ctl_boot(void)
+{
+
+ if (malloc_mutex_init(&ctl_mtx, "ctl", WITNESS_RANK_CTL))
+ return (true);
+
+ ctl_initialized = false;
+
+ return (false);
+}
+
+void
+ctl_prefork(tsdn_t *tsdn)
+{
+
+ malloc_mutex_prefork(tsdn, &ctl_mtx);
+}
+
+void
+ctl_postfork_parent(tsdn_t *tsdn)
+{
+
+ malloc_mutex_postfork_parent(tsdn, &ctl_mtx);
+}
+
+void
+ctl_postfork_child(tsdn_t *tsdn)
+{
+
+ malloc_mutex_postfork_child(tsdn, &ctl_mtx);
+}
+
+/******************************************************************************/
+/* *_ctl() functions. */
+
+#define READONLY() do { \
+ if (newp != NULL || newlen != 0) { \
+ ret = EPERM; \
+ goto label_return; \
+ } \
+} while (0)
+
+#define WRITEONLY() do { \
+ if (oldp != NULL || oldlenp != NULL) { \
+ ret = EPERM; \
+ goto label_return; \
+ } \
+} while (0)
+
+#define READ_XOR_WRITE() do { \
+ if ((oldp != NULL && oldlenp != NULL) && (newp != NULL || \
+ newlen != 0)) { \
+ ret = EPERM; \
+ goto label_return; \
+ } \
+} while (0)
+
+#define READ(v, t) do { \
+ if (oldp != NULL && oldlenp != NULL) { \
+ if (*oldlenp != sizeof(t)) { \
+ size_t copylen = (sizeof(t) <= *oldlenp) \
+ ? sizeof(t) : *oldlenp; \
+ memcpy(oldp, (void *)&(v), copylen); \
+ ret = EINVAL; \
+ goto label_return; \
+ } \
+ *(t *)oldp = (v); \
+ } \
+} while (0)
+
+#define WRITE(v, t) do { \
+ if (newp != NULL) { \
+ if (newlen != sizeof(t)) { \
+ ret = EINVAL; \
+ goto label_return; \
+ } \
+ (v) = *(t *)newp; \
+ } \
+} while (0)
+
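+/*
+ * Semantics sketch (illustration): READ() tolerates a mismatched *oldlenp by
+ * copying the smaller of sizeof(t) and *oldlenp bytes before failing with
+ * EINVAL, e.g. reading a size_t statistic into a 4-byte buffer on a 64-bit
+ * build copies 4 bytes and returns EINVAL.  WRITE() is stricter: newlen must
+ * equal sizeof(t) exactly.
+ */
+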
+/*
+ * There's a lot of code duplication in the following macros due to limitations
+ * in how nested cpp macros are expanded.
+ */
+#define CTL_RO_CLGEN(c, l, n, v, t) \
+static int \
+n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
+ size_t *oldlenp, void *newp, size_t newlen) \
+{ \
+ int ret; \
+ t oldval; \
+ \
+ if (!(c)) \
+ return (ENOENT); \
+ if (l) \
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
+ READONLY(); \
+ oldval = (v); \
+ READ(oldval, t); \
+ \
+ ret = 0; \
+label_return: \
+ if (l) \
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
+ return (ret); \
+}
+
+#define CTL_RO_CGEN(c, n, v, t) \
+static int \
+n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
+ size_t *oldlenp, void *newp, size_t newlen) \
+{ \
+ int ret; \
+ t oldval; \
+ \
+ if (!(c)) \
+ return (ENOENT); \
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
+ READONLY(); \
+ oldval = (v); \
+ READ(oldval, t); \
+ \
+ ret = 0; \
+label_return: \
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
+ return (ret); \
+}
+
+#define CTL_RO_GEN(n, v, t) \
+static int \
+n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
+ size_t *oldlenp, void *newp, size_t newlen) \
+{ \
+ int ret; \
+ t oldval; \
+ \
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
+ READONLY(); \
+ oldval = (v); \
+ READ(oldval, t); \
+ \
+ ret = 0; \
+label_return: \
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
+ return (ret); \
+}
+
+/*
+ * ctl_mtx is not acquired, under the assumption that no pertinent data will
+ * mutate during the call.
+ */
+#define CTL_RO_NL_CGEN(c, n, v, t) \
+static int \
+n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
+ size_t *oldlenp, void *newp, size_t newlen) \
+{ \
+ int ret; \
+ t oldval; \
+ \
+ if (!(c)) \
+ return (ENOENT); \
+ READONLY(); \
+ oldval = (v); \
+ READ(oldval, t); \
+ \
+ ret = 0; \
+label_return: \
+ return (ret); \
+}
+
+#define CTL_RO_NL_GEN(n, v, t) \
+static int \
+n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
+ size_t *oldlenp, void *newp, size_t newlen) \
+{ \
+ int ret; \
+ t oldval; \
+ \
+ READONLY(); \
+ oldval = (v); \
+ READ(oldval, t); \
+ \
+ ret = 0; \
+label_return: \
+ return (ret); \
+}
+
+#define CTL_TSD_RO_NL_CGEN(c, n, m, t) \
+static int \
+n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
+ size_t *oldlenp, void *newp, size_t newlen) \
+{ \
+ int ret; \
+ t oldval; \
+ \
+ if (!(c)) \
+ return (ENOENT); \
+ READONLY(); \
+ oldval = (m(tsd)); \
+ READ(oldval, t); \
+ \
+ ret = 0; \
+label_return: \
+ return (ret); \
+}
+
+#define CTL_RO_CONFIG_GEN(n, t) \
+static int \
+n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
+ size_t *oldlenp, void *newp, size_t newlen) \
+{ \
+ int ret; \
+ t oldval; \
+ \
+ READONLY(); \
+ oldval = n; \
+ READ(oldval, t); \
+ \
+ ret = 0; \
+label_return: \
+ return (ret); \
+}
+
+/******************************************************************************/
+
+CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *)
+
+static int
+epoch_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ UNUSED uint64_t newval;
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+ WRITE(newval, uint64_t);
+ if (newp != NULL)
+ ctl_refresh(tsd_tsdn(tsd));
+ READ(ctl_epoch, uint64_t);
+
+ ret = 0;
+label_return:
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+ return (ret);
+}
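+
+/*
+ * Usage sketch (illustration, from the application side via the public
+ * mallctl() wrapper): writing any value to "epoch" forces ctl_refresh(), so
+ * the usual idiom before reading stats is
+ *
+ *	uint64_t epoch = 1;
+ *	size_t sz = sizeof(epoch);
+ *	mallctl("epoch", &epoch, &sz, &epoch, sz);
+ *
+ * after which epoch holds the post-refresh value of ctl_epoch.
+ */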
+
+/******************************************************************************/
+
+CTL_RO_CONFIG_GEN(config_cache_oblivious, bool)
+CTL_RO_CONFIG_GEN(config_debug, bool)
+CTL_RO_CONFIG_GEN(config_fill, bool)
+CTL_RO_CONFIG_GEN(config_lazy_lock, bool)
+CTL_RO_CONFIG_GEN(config_malloc_conf, const char *)
+CTL_RO_CONFIG_GEN(config_munmap, bool)
+CTL_RO_CONFIG_GEN(config_prof, bool)
+CTL_RO_CONFIG_GEN(config_prof_libgcc, bool)
+CTL_RO_CONFIG_GEN(config_prof_libunwind, bool)
+CTL_RO_CONFIG_GEN(config_stats, bool)
+CTL_RO_CONFIG_GEN(config_tcache, bool)
+CTL_RO_CONFIG_GEN(config_tls, bool)
+CTL_RO_CONFIG_GEN(config_utrace, bool)
+CTL_RO_CONFIG_GEN(config_valgrind, bool)
+CTL_RO_CONFIG_GEN(config_xmalloc, bool)
+
+/******************************************************************************/
+
+CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
+CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
+CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t)
+CTL_RO_NL_GEN(opt_narenas, opt_narenas, unsigned)
+CTL_RO_NL_GEN(opt_purge, purge_mode_names[opt_purge], const char *)
+CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t)
+CTL_RO_NL_GEN(opt_decay_time, opt_decay_time, ssize_t)
+CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
+CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *)
+CTL_RO_NL_CGEN(config_fill, opt_quarantine, opt_quarantine, size_t)
+CTL_RO_NL_CGEN(config_fill, opt_redzone, opt_redzone, bool)
+CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
+CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool)
+CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
+CTL_RO_NL_CGEN(config_tcache, opt_tcache, opt_tcache, bool)
+CTL_RO_NL_CGEN(config_tcache, opt_lg_tcache_max, opt_lg_tcache_max, ssize_t)
+CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
+CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
+CTL_RO_NL_CGEN(config_prof, opt_prof_active, opt_prof_active, bool)
+CTL_RO_NL_CGEN(config_prof, opt_prof_thread_active_init,
+ opt_prof_thread_active_init, bool)
+CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t)
+CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool)
+CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
+CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool)
+CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool)
+CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
+
+/******************************************************************************/
+
+static int
+thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ arena_t *oldarena;
+ unsigned newind, oldind;
+
+ oldarena = arena_choose(tsd, NULL);
+ if (oldarena == NULL)
+ return (EAGAIN);
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+ newind = oldind = oldarena->ind;
+ WRITE(newind, unsigned);
+ READ(oldind, unsigned);
+ if (newind != oldind) {
+ arena_t *newarena;
+
+ if (newind >= ctl_stats.narenas) {
+ /* New arena index is out of range. */
+ ret = EFAULT;
+ goto label_return;
+ }
+
+ /* Initialize arena if necessary. */
+ newarena = arena_get(tsd_tsdn(tsd), newind, true);
+ if (newarena == NULL) {
+ ret = EAGAIN;
+ goto label_return;
+ }
+ /* Set new arena/tcache associations. */
+ arena_migrate(tsd, oldind, newind);
+ if (config_tcache) {
+ tcache_t *tcache = tsd_tcache_get(tsd);
+ if (tcache != NULL) {
+ tcache_arena_reassociate(tsd_tsdn(tsd), tcache,
+ oldarena, newarena);
+ }
+ }
+ }
+
+ ret = 0;
+label_return:
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+ return (ret);
+}
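+
+/*
+ * Usage sketch (illustration): a thread can migrate itself to arena 1 and
+ * learn which arena it previously used in a single call:
+ *
+ *	unsigned new_ind = 1, old_ind;
+ *	size_t sz = sizeof(unsigned);
+ *	mallctl("thread.arena", &old_ind, &sz, &new_ind, sz);
+ *
+ * Indices at or beyond ctl_stats.narenas fail with EFAULT.
+ */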
+
+CTL_TSD_RO_NL_CGEN(config_stats, thread_allocated, tsd_thread_allocated_get,
+ uint64_t)
+CTL_TSD_RO_NL_CGEN(config_stats, thread_allocatedp, tsd_thread_allocatedp_get,
+ uint64_t *)
+CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocated, tsd_thread_deallocated_get,
+ uint64_t)
+CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocatedp,
+ tsd_thread_deallocatedp_get, uint64_t *)
+
+static int
+thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ bool oldval;
+
+ if (!config_tcache)
+ return (ENOENT);
+
+ oldval = tcache_enabled_get();
+ if (newp != NULL) {
+ if (newlen != sizeof(bool)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ tcache_enabled_set(*(bool *)newp);
+ }
+ READ(oldval, bool);
+
+ ret = 0;
+label_return:
+ return (ret);
+}
+
+static int
+thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+
+ if (!config_tcache)
+ return (ENOENT);
+
+ READONLY();
+ WRITEONLY();
+
+ tcache_flush();
+
+ ret = 0;
+label_return:
+ return (ret);
+}
+
+static int
+thread_prof_name_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+
+ if (!config_prof)
+ return (ENOENT);
+
+ READ_XOR_WRITE();
+
+ if (newp != NULL) {
+ if (newlen != sizeof(const char *)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+
+ if ((ret = prof_thread_name_set(tsd, *(const char **)newp)) !=
+ 0)
+ goto label_return;
+ } else {
+ const char *oldname = prof_thread_name_get(tsd);
+ READ(oldname, const char *);
+ }
+
+ ret = 0;
+label_return:
+ return (ret);
+}
+
+static int
+thread_prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ bool oldval;
+
+ if (!config_prof)
+ return (ENOENT);
+
+ oldval = prof_thread_active_get(tsd);
+ if (newp != NULL) {
+ if (newlen != sizeof(bool)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ if (prof_thread_active_set(tsd, *(bool *)newp)) {
+ ret = EAGAIN;
+ goto label_return;
+ }
+ }
+ READ(oldval, bool);
+
+ ret = 0;
+label_return:
+ return (ret);
+}
+
+/******************************************************************************/
+
+static int
+tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ unsigned tcache_ind;
+
+ if (!config_tcache)
+ return (ENOENT);
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+ READONLY();
+ if (tcaches_create(tsd, &tcache_ind)) {
+ ret = EFAULT;
+ goto label_return;
+ }
+ READ(tcache_ind, unsigned);
+
+ ret = 0;
+label_return:
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+ return (ret);
+}
+
+static int
+tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ unsigned tcache_ind;
+
+ if (!config_tcache)
+ return (ENOENT);
+
+ WRITEONLY();
+ tcache_ind = UINT_MAX;
+ WRITE(tcache_ind, unsigned);
+ if (tcache_ind == UINT_MAX) {
+ ret = EFAULT;
+ goto label_return;
+ }
+ tcaches_flush(tsd, tcache_ind);
+
+ ret = 0;
+label_return:
+ return (ret);
+}
+
+static int
+tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ unsigned tcache_ind;
+
+ if (!config_tcache)
+ return (ENOENT);
+
+ WRITEONLY();
+ tcache_ind = UINT_MAX;
+ WRITE(tcache_ind, unsigned);
+ if (tcache_ind == UINT_MAX) {
+ ret = EFAULT;
+ goto label_return;
+ }
+ tcaches_destroy(tsd, tcache_ind);
+
+ ret = 0;
+label_return:
+ return (ret);
+}
+
+/******************************************************************************/
+
+static void
+arena_i_purge(tsdn_t *tsdn, unsigned arena_ind, bool all)
+{
+
+ malloc_mutex_lock(tsdn, &ctl_mtx);
+ {
+ unsigned narenas = ctl_stats.narenas;
+
+ if (arena_ind == narenas) {
+ unsigned i;
+ VARIABLE_ARRAY(arena_t *, tarenas, narenas);
+
+ for (i = 0; i < narenas; i++)
+ tarenas[i] = arena_get(tsdn, i, false);
+
+ /*
+ * No further need to hold ctl_mtx, since narenas and
+ * tarenas contain everything needed below.
+ */
+ malloc_mutex_unlock(tsdn, &ctl_mtx);
+
+ for (i = 0; i < narenas; i++) {
+ if (tarenas[i] != NULL)
+ arena_purge(tsdn, tarenas[i], all);
+ }
+ } else {
+ arena_t *tarena;
+
+ assert(arena_ind < narenas);
+
+ tarena = arena_get(tsdn, arena_ind, false);
+
+ /* No further need to hold ctl_mtx. */
+ malloc_mutex_unlock(tsdn, &ctl_mtx);
+
+ if (tarena != NULL)
+ arena_purge(tsdn, tarena, all);
+ }
+ }
+}
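+
+/*
+ * Note (illustration): an arena_ind equal to ctl_stats.narenas is the
+ * "all arenas" pseudo-index, so from the application side
+ *
+ *	unsigned narenas;
+ *	size_t sz = sizeof(unsigned);
+ *	char buf[64];
+ *	mallctl("arenas.narenas", &narenas, &sz, NULL, 0);
+ *	snprintf(buf, sizeof(buf), "arena.%u.purge", narenas);
+ *	mallctl(buf, NULL, NULL, NULL, 0);
+ *
+ * purges every initialized arena, while a concrete index purges just one.
+ */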
+
+static int
+arena_i_purge_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+
+ READONLY();
+ WRITEONLY();
+ arena_i_purge(tsd_tsdn(tsd), (unsigned)mib[1], true);
+
+ ret = 0;
+label_return:
+ return (ret);
+}
+
+static int
+arena_i_decay_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+
+ READONLY();
+ WRITEONLY();
+ arena_i_purge(tsd_tsdn(tsd), (unsigned)mib[1], false);
+
+ ret = 0;
+label_return:
+ return (ret);
+}
+
+static int
+arena_i_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ unsigned arena_ind;
+ arena_t *arena;
+
+ READONLY();
+ WRITEONLY();
+
+ if ((config_valgrind && unlikely(in_valgrind)) || (config_fill &&
+ unlikely(opt_quarantine))) {
+ ret = EFAULT;
+ goto label_return;
+ }
+
+ arena_ind = (unsigned)mib[1];
+ if (config_debug) {
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+ assert(arena_ind < ctl_stats.narenas);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+ }
+ assert(arena_ind >= opt_narenas);
+
+ arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
+
+ arena_reset(tsd, arena);
+
+ ret = 0;
+label_return:
+ return (ret);
+}
+
+static int
+arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ const char *dss = NULL;
+ unsigned arena_ind = (unsigned)mib[1];
+ dss_prec_t dss_prec_old = dss_prec_limit;
+ dss_prec_t dss_prec = dss_prec_limit;
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+ WRITE(dss, const char *);
+ if (dss != NULL) {
+ int i;
+ bool match = false;
+
+ for (i = 0; i < dss_prec_limit; i++) {
+ if (strcmp(dss_prec_names[i], dss) == 0) {
+ dss_prec = i;
+ match = true;
+ break;
+ }
+ }
+
+ if (!match) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ }
+
+ if (arena_ind < ctl_stats.narenas) {
+ arena_t *arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
+ if (arena == NULL || (dss_prec != dss_prec_limit &&
+ arena_dss_prec_set(tsd_tsdn(tsd), arena, dss_prec))) {
+ ret = EFAULT;
+ goto label_return;
+ }
+ dss_prec_old = arena_dss_prec_get(tsd_tsdn(tsd), arena);
+ } else {
+ if (dss_prec != dss_prec_limit &&
+ chunk_dss_prec_set(dss_prec)) {
+ ret = EFAULT;
+ goto label_return;
+ }
+ dss_prec_old = chunk_dss_prec_get();
+ }
+
+ dss = dss_prec_names[dss_prec_old];
+ READ(dss, const char *);
+
+ ret = 0;
+label_return:
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+ return (ret);
+}
+
+static int
+arena_i_lg_dirty_mult_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ unsigned arena_ind = (unsigned)mib[1];
+ arena_t *arena;
+
+ arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
+ if (arena == NULL) {
+ ret = EFAULT;
+ goto label_return;
+ }
+
+ if (oldp != NULL && oldlenp != NULL) {
+ ssize_t oldval = arena_lg_dirty_mult_get(tsd_tsdn(tsd), arena);
+ READ(oldval, ssize_t);
+ }
+ if (newp != NULL) {
+ if (newlen != sizeof(ssize_t)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ if (arena_lg_dirty_mult_set(tsd_tsdn(tsd), arena,
+ *(ssize_t *)newp)) {
+ ret = EFAULT;
+ goto label_return;
+ }
+ }
+
+ ret = 0;
+label_return:
+ return (ret);
+}
+
+static int
+arena_i_decay_time_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ unsigned arena_ind = (unsigned)mib[1];
+ arena_t *arena;
+
+ arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
+ if (arena == NULL) {
+ ret = EFAULT;
+ goto label_return;
+ }
+
+ if (oldp != NULL && oldlenp != NULL) {
+ ssize_t oldval = arena_decay_time_get(tsd_tsdn(tsd), arena);
+ READ(oldval, ssize_t);
+ }
+ if (newp != NULL) {
+ if (newlen != sizeof(ssize_t)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ if (arena_decay_time_set(tsd_tsdn(tsd), arena,
+ *(ssize_t *)newp)) {
+ ret = EFAULT;
+ goto label_return;
+ }
+ }
+
+ ret = 0;
+label_return:
+ return (ret);
+}
+
+static int
+arena_i_chunk_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ unsigned arena_ind = (unsigned)mib[1];
+ arena_t *arena;
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+ if (arena_ind < narenas_total_get() && (arena =
+ arena_get(tsd_tsdn(tsd), arena_ind, false)) != NULL) {
+ if (newp != NULL) {
+ chunk_hooks_t old_chunk_hooks, new_chunk_hooks;
+ WRITE(new_chunk_hooks, chunk_hooks_t);
+ old_chunk_hooks = chunk_hooks_set(tsd_tsdn(tsd), arena,
+ &new_chunk_hooks);
+ READ(old_chunk_hooks, chunk_hooks_t);
+ } else {
+ chunk_hooks_t old_chunk_hooks =
+ chunk_hooks_get(tsd_tsdn(tsd), arena);
+ READ(old_chunk_hooks, chunk_hooks_t);
+ }
+ } else {
+ ret = EFAULT;
+ goto label_return;
+ }
+ ret = 0;
+label_return:
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+ return (ret);
+}
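+
+/*
+ * Usage sketch (illustration): custom chunk hooks are installed with a
+ * read-modify-write through this mallctl:
+ *
+ *	chunk_hooks_t hooks;
+ *	size_t sz = sizeof(hooks);
+ *	mallctl("arena.0.chunk_hooks", &hooks, &sz, NULL, 0);
+ *	(* ...replace selected members of hooks with custom callbacks... *)
+ *	mallctl("arena.0.chunk_hooks", NULL, NULL, &hooks, sizeof(hooks));
+ *
+ * Passing both oldp and newp captures the previous hooks in the same call,
+ * as the READ()/WRITE() pair above allows.
+ */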
+
+static const ctl_named_node_t *
+arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
+{
+ const ctl_named_node_t *ret;
+
+ malloc_mutex_lock(tsdn, &ctl_mtx);
+ if (i > ctl_stats.narenas) {
+ ret = NULL;
+ goto label_return;
+ }
+
+ ret = super_arena_i_node;
+label_return:
+ malloc_mutex_unlock(tsdn, &ctl_mtx);
+ return (ret);
+}
+
+/******************************************************************************/
+
+static int
+arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ unsigned narenas;
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+ READONLY();
+ if (*oldlenp != sizeof(unsigned)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ narenas = ctl_stats.narenas;
+ READ(narenas, unsigned);
+
+ ret = 0;
+label_return:
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+ return (ret);
+}
+
+static int
+arenas_initialized_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ unsigned nread, i;
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+ READONLY();
+ if (*oldlenp != ctl_stats.narenas * sizeof(bool)) {
+ ret = EINVAL;
+ nread = (*oldlenp < ctl_stats.narenas * sizeof(bool))
+ ? (unsigned)(*oldlenp / sizeof(bool)) : ctl_stats.narenas;
+ } else {
+ ret = 0;
+ nread = ctl_stats.narenas;
+ }
+
+ for (i = 0; i < nread; i++)
+ ((bool *)oldp)[i] = ctl_stats.arenas[i].initialized;
+
+label_return:
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+ return (ret);
+}
+
+static int
+arenas_lg_dirty_mult_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+
+ if (oldp != NULL && oldlenp != NULL) {
+ ssize_t oldval = arena_lg_dirty_mult_default_get();
+ READ(oldval, ssize_t);
+ }
+ if (newp != NULL) {
+ if (newlen != sizeof(ssize_t)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ if (arena_lg_dirty_mult_default_set(*(ssize_t *)newp)) {
+ ret = EFAULT;
+ goto label_return;
+ }
+ }
+
+ ret = 0;
+label_return:
+ return (ret);
+}
+
+static int
+arenas_decay_time_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+
+ if (oldp != NULL && oldlenp != NULL) {
+ ssize_t oldval = arena_decay_time_default_get();
+ READ(oldval, ssize_t);
+ }
+ if (newp != NULL) {
+ if (newlen != sizeof(ssize_t)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ if (arena_decay_time_default_set(*(ssize_t *)newp)) {
+ ret = EFAULT;
+ goto label_return;
+ }
+ }
+
+ ret = 0;
+label_return:
+ return (ret);
+}
+
+CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t)
+CTL_RO_NL_GEN(arenas_page, PAGE, size_t)
+CTL_RO_NL_CGEN(config_tcache, arenas_tcache_max, tcache_maxclass, size_t)
+CTL_RO_NL_GEN(arenas_nbins, NBINS, unsigned)
+CTL_RO_NL_CGEN(config_tcache, arenas_nhbins, nhbins, unsigned)
+CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t)
+CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t)
+CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t)
+static const ctl_named_node_t *
+arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
+{
+
+ if (i > NBINS)
+ return (NULL);
+ return (super_arenas_bin_i_node);
+}
+
+CTL_RO_NL_GEN(arenas_nlruns, nlclasses, unsigned)
+CTL_RO_NL_GEN(arenas_lrun_i_size, index2size(NBINS+(szind_t)mib[2]), size_t)
+static const ctl_named_node_t *
+arenas_lrun_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
+{
+
+ if (i > nlclasses)
+ return (NULL);
+ return (super_arenas_lrun_i_node);
+}
+
+CTL_RO_NL_GEN(arenas_nhchunks, nhclasses, unsigned)
+CTL_RO_NL_GEN(arenas_hchunk_i_size, index2size(NBINS+nlclasses+(szind_t)mib[2]),
+ size_t)
+static const ctl_named_node_t *
+arenas_hchunk_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
+{
+
+ if (i > nhclasses)
+ return (NULL);
+ return (super_arenas_hchunk_i_node);
+}
+
+static int
+arenas_extend_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ unsigned narenas;
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+ READONLY();
+ if (ctl_grow(tsd_tsdn(tsd))) {
+ ret = EAGAIN;
+ goto label_return;
+ }
+ narenas = ctl_stats.narenas - 1;
+ READ(narenas, unsigned);
+
+ ret = 0;
+label_return:
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+ return (ret);
+}
+
+/******************************************************************************/
+
+static int
+prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ bool oldval;
+
+ if (!config_prof)
+ return (ENOENT);
+
+ if (newp != NULL) {
+ if (newlen != sizeof(bool)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ oldval = prof_thread_active_init_set(tsd_tsdn(tsd),
+ *(bool *)newp);
+ } else
+ oldval = prof_thread_active_init_get(tsd_tsdn(tsd));
+ READ(oldval, bool);
+
+ ret = 0;
+label_return:
+ return (ret);
+}
+
+static int
+prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ bool oldval;
+
+ if (!config_prof)
+ return (ENOENT);
+
+ if (newp != NULL) {
+ if (newlen != sizeof(bool)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ oldval = prof_active_set(tsd_tsdn(tsd), *(bool *)newp);
+ } else
+ oldval = prof_active_get(tsd_tsdn(tsd));
+ READ(oldval, bool);
+
+ ret = 0;
+label_return:
+ return (ret);
+}
+
+static int
+prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ const char *filename = NULL;
+
+ if (!config_prof)
+ return (ENOENT);
+
+ WRITEONLY();
+ WRITE(filename, const char *);
+
+ if (prof_mdump(tsd, filename)) {
+ ret = EFAULT;
+ goto label_return;
+ }
+
+ ret = 0;
+label_return:
+ return (ret);
+}
+
+static int
+prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ bool oldval;
+
+ if (!config_prof)
+ return (ENOENT);
+
+ if (newp != NULL) {
+ if (newlen != sizeof(bool)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ oldval = prof_gdump_set(tsd_tsdn(tsd), *(bool *)newp);
+ } else
+ oldval = prof_gdump_get(tsd_tsdn(tsd));
+ READ(oldval, bool);
+
+ ret = 0;
+label_return:
+ return (ret);
+}
+
+static int
+prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ size_t lg_sample = lg_prof_sample;
+
+ if (!config_prof)
+ return (ENOENT);
+
+ WRITEONLY();
+ WRITE(lg_sample, size_t);
+ if (lg_sample >= (sizeof(uint64_t) << 3))
+ lg_sample = (sizeof(uint64_t) << 3) - 1;
+
+ prof_reset(tsd, lg_sample);
+
+ ret = 0;
+label_return:
+ return (ret);
+}
+
+CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t)
+CTL_RO_NL_CGEN(config_prof, lg_prof_sample, lg_prof_sample, size_t)
+
+/******************************************************************************/
+
+CTL_RO_CGEN(config_stats, stats_cactive, &stats_cactive, size_t *)
+CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats.allocated, size_t)
+CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t)
+CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats.metadata, size_t)
+CTL_RO_CGEN(config_stats, stats_resident, ctl_stats.resident, size_t)
+CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t)
+CTL_RO_CGEN(config_stats, stats_retained, ctl_stats.retained, size_t)
+
+CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *)
+CTL_RO_GEN(stats_arenas_i_lg_dirty_mult, ctl_stats.arenas[mib[2]].lg_dirty_mult,
+ ssize_t)
+CTL_RO_GEN(stats_arenas_i_decay_time, ctl_stats.arenas[mib[2]].decay_time,
+ ssize_t)
+CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned)
+CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t)
+CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
+ ctl_stats.arenas[mib[2]].astats.mapped, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_retained,
+ ctl_stats.arenas[mib[2]].astats.retained, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_npurge,
+ ctl_stats.arenas[mib[2]].astats.npurge, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise,
+ ctl_stats.arenas[mib[2]].astats.nmadvise, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_purged,
+ ctl_stats.arenas[mib[2]].astats.purged, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_mapped,
+ ctl_stats.arenas[mib[2]].astats.metadata_mapped, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_allocated,
+ ctl_stats.arenas[mib[2]].astats.metadata_allocated, size_t)
+
+CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated,
+ ctl_stats.arenas[mib[2]].allocated_small, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc,
+ ctl_stats.arenas[mib[2]].nmalloc_small, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc,
+ ctl_stats.arenas[mib[2]].ndalloc_small, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests,
+ ctl_stats.arenas[mib[2]].nrequests_small, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated,
+ ctl_stats.arenas[mib[2]].astats.allocated_large, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc,
+ ctl_stats.arenas[mib[2]].astats.nmalloc_large, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc,
+ ctl_stats.arenas[mib[2]].astats.ndalloc_large, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests,
+ ctl_stats.arenas[mib[2]].astats.nrequests_large, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_huge_allocated,
+ ctl_stats.arenas[mib[2]].astats.allocated_huge, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_huge_nmalloc,
+ ctl_stats.arenas[mib[2]].astats.nmalloc_huge, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_huge_ndalloc,
+ ctl_stats.arenas[mib[2]].astats.ndalloc_huge, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_huge_nrequests,
+ ctl_stats.arenas[mib[2]].astats.nmalloc_huge, uint64_t) /* Intentional. */
+
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc,
+ ctl_stats.arenas[mib[2]].bstats[mib[4]].nmalloc, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc,
+ ctl_stats.arenas[mib[2]].bstats[mib[4]].ndalloc, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests,
+ ctl_stats.arenas[mib[2]].bstats[mib[4]].nrequests, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curregs,
+ ctl_stats.arenas[mib[2]].bstats[mib[4]].curregs, size_t)
+CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nfills,
+ ctl_stats.arenas[mib[2]].bstats[mib[4]].nfills, uint64_t)
+CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nflushes,
+ ctl_stats.arenas[mib[2]].bstats[mib[4]].nflushes, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nruns,
+ ctl_stats.arenas[mib[2]].bstats[mib[4]].nruns, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreruns,
+ ctl_stats.arenas[mib[2]].bstats[mib[4]].reruns, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curruns,
+ ctl_stats.arenas[mib[2]].bstats[mib[4]].curruns, size_t)
+
+static const ctl_named_node_t *
+stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
+ size_t j)
+{
+
+ if (j > NBINS)
+ return (NULL);
+ return (super_stats_arenas_i_bins_j_node);
+}
+
+CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nmalloc,
+ ctl_stats.arenas[mib[2]].lstats[mib[4]].nmalloc, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_ndalloc,
+ ctl_stats.arenas[mib[2]].lstats[mib[4]].ndalloc, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nrequests,
+ ctl_stats.arenas[mib[2]].lstats[mib[4]].nrequests, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_curruns,
+ ctl_stats.arenas[mib[2]].lstats[mib[4]].curruns, size_t)
+
+static const ctl_named_node_t *
+stats_arenas_i_lruns_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
+ size_t j)
+{
+
+ if (j > nlclasses)
+ return (NULL);
+ return (super_stats_arenas_i_lruns_j_node);
+}
+
+CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_nmalloc,
+ ctl_stats.arenas[mib[2]].hstats[mib[4]].nmalloc, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_ndalloc,
+ ctl_stats.arenas[mib[2]].hstats[mib[4]].ndalloc, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_nrequests,
+ ctl_stats.arenas[mib[2]].hstats[mib[4]].nmalloc, /* Intentional. */
+ uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_curhchunks,
+ ctl_stats.arenas[mib[2]].hstats[mib[4]].curhchunks, size_t)
+
+static const ctl_named_node_t *
+stats_arenas_i_hchunks_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
+ size_t j)
+{
+
+ if (j > nhclasses)
+ return (NULL);
+ return (super_stats_arenas_i_hchunks_j_node);
+}
+
+static const ctl_named_node_t *
+stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
+{
+	const ctl_named_node_t *ret;
+
+ malloc_mutex_lock(tsdn, &ctl_mtx);
+ if (i > ctl_stats.narenas || !ctl_stats.arenas[i].initialized) {
+ ret = NULL;
+ goto label_return;
+ }
+
+ ret = super_stats_arenas_i_node;
+label_return:
+ malloc_mutex_unlock(tsdn, &ctl_mtx);
+ return (ret);
+}
diff --git a/deps/jemalloc/src/extent.c b/deps/jemalloc/src/extent.c
new file mode 100644
index 0000000..218156c
--- /dev/null
+++ b/deps/jemalloc/src/extent.c
@@ -0,0 +1,77 @@
+#define JEMALLOC_EXTENT_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+
+/*
+ * Round down to the nearest chunk size that can actually be requested during
+ * normal huge allocation.
+ */
+JEMALLOC_INLINE_C size_t
+extent_quantize(size_t size)
+{
+ size_t ret;
+ szind_t ind;
+
+ assert(size > 0);
+
+ ind = size2index(size + 1);
+ if (ind == 0) {
+ /* Avoid underflow. */
+ return (index2size(0));
+ }
+ ret = index2size(ind - 1);
+ assert(ret <= size);
+ return (ret);
+}
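+
+/*
+ * Aside from the underflow case handled above, the result is the largest size
+ * class that is <= size: an exact class size maps to itself, and a size that
+ * falls between two classes is rounded down to the smaller one.
+ */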
+
+JEMALLOC_INLINE_C int
+extent_sz_comp(const extent_node_t *a, const extent_node_t *b)
+{
+ size_t a_qsize = extent_quantize(extent_node_size_get(a));
+ size_t b_qsize = extent_quantize(extent_node_size_get(b));
+
+ return ((a_qsize > b_qsize) - (a_qsize < b_qsize));
+}
+
+JEMALLOC_INLINE_C int
+extent_sn_comp(const extent_node_t *a, const extent_node_t *b)
+{
+ size_t a_sn = extent_node_sn_get(a);
+ size_t b_sn = extent_node_sn_get(b);
+
+ return ((a_sn > b_sn) - (a_sn < b_sn));
+}
+
+JEMALLOC_INLINE_C int
+extent_ad_comp(const extent_node_t *a, const extent_node_t *b)
+{
+ uintptr_t a_addr = (uintptr_t)extent_node_addr_get(a);
+ uintptr_t b_addr = (uintptr_t)extent_node_addr_get(b);
+
+ return ((a_addr > b_addr) - (a_addr < b_addr));
+}
+
+JEMALLOC_INLINE_C int
+extent_szsnad_comp(const extent_node_t *a, const extent_node_t *b)
+{
+ int ret;
+
+ ret = extent_sz_comp(a, b);
+ if (ret != 0)
+ return (ret);
+
+ ret = extent_sn_comp(a, b);
+ if (ret != 0)
+ return (ret);
+
+ ret = extent_ad_comp(a, b);
+ return (ret);
+}
+
+/* Generate red-black tree functions. */
+rb_gen(, extent_tree_szsnad_, extent_tree_t, extent_node_t, szsnad_link,
+ extent_szsnad_comp)
+
+/* Generate red-black tree functions. */
+rb_gen(, extent_tree_ad_, extent_tree_t, extent_node_t, ad_link, extent_ad_comp)
diff --git a/deps/jemalloc/src/hash.c b/deps/jemalloc/src/hash.c
new file mode 100644
index 0000000..cfa4da0
--- /dev/null
+++ b/deps/jemalloc/src/hash.c
@@ -0,0 +1,2 @@
+#define JEMALLOC_HASH_C_
+#include "jemalloc/internal/jemalloc_internal.h"
diff --git a/deps/jemalloc/src/huge.c b/deps/jemalloc/src/huge.c
new file mode 100644
index 0000000..8abd8c0
--- /dev/null
+++ b/deps/jemalloc/src/huge.c
@@ -0,0 +1,477 @@
+#define JEMALLOC_HUGE_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+
+static extent_node_t *
+huge_node_get(const void *ptr)
+{
+ extent_node_t *node;
+
+ node = chunk_lookup(ptr, true);
+ assert(!extent_node_achunk_get(node));
+
+ return (node);
+}
+
+static bool
+huge_node_set(tsdn_t *tsdn, const void *ptr, extent_node_t *node)
+{
+
+ assert(extent_node_addr_get(node) == ptr);
+ assert(!extent_node_achunk_get(node));
+ return (chunk_register(tsdn, ptr, node));
+}
+
+static void
+huge_node_reset(tsdn_t *tsdn, const void *ptr, extent_node_t *node)
+{
+ bool err;
+
+ err = huge_node_set(tsdn, ptr, node);
+ assert(!err);
+}
+
+static void
+huge_node_unset(const void *ptr, const extent_node_t *node)
+{
+
+ chunk_deregister(ptr, node);
+}
+
+void *
+huge_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero)
+{
+
+ assert(usize == s2u(usize));
+
+ return (huge_palloc(tsdn, arena, usize, chunksize, zero));
+}
+
+void *
+huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
+ bool zero)
+{
+ void *ret;
+ size_t ausize;
+ arena_t *iarena;
+ extent_node_t *node;
+ size_t sn;
+ bool is_zeroed;
+
+ /* Allocate one or more contiguous chunks for this request. */
+
+ assert(!tsdn_null(tsdn) || arena != NULL);
+
+ ausize = sa2u(usize, alignment);
+ if (unlikely(ausize == 0 || ausize > HUGE_MAXCLASS))
+ return (NULL);
+ assert(ausize >= chunksize);
+
+ /* Allocate an extent node with which to track the chunk. */
+ iarena = (!tsdn_null(tsdn)) ? arena_ichoose(tsdn_tsd(tsdn), NULL) :
+ a0get();
+ node = ipallocztm(tsdn, CACHELINE_CEILING(sizeof(extent_node_t)),
+ CACHELINE, false, NULL, true, iarena);
+ if (node == NULL)
+ return (NULL);
+
+ /*
+ * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
+ * it is possible to make correct junk/zero fill decisions below.
+ */
+ is_zeroed = zero;
+ if (likely(!tsdn_null(tsdn)))
+ arena = arena_choose(tsdn_tsd(tsdn), arena);
+ if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(tsdn,
+ arena, usize, alignment, &sn, &is_zeroed)) == NULL) {
+ idalloctm(tsdn, node, NULL, true, true);
+ return (NULL);
+ }
+
+ extent_node_init(node, arena, ret, usize, sn, is_zeroed, true);
+
+ if (huge_node_set(tsdn, ret, node)) {
+ arena_chunk_dalloc_huge(tsdn, arena, ret, usize, sn);
+ idalloctm(tsdn, node, NULL, true, true);
+ return (NULL);
+ }
+
+ /* Insert node into huge. */
+ malloc_mutex_lock(tsdn, &arena->huge_mtx);
+ ql_elm_new(node, ql_link);
+ ql_tail_insert(&arena->huge, node, ql_link);
+ malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+
+ if (zero || (config_fill && unlikely(opt_zero))) {
+ if (!is_zeroed)
+ memset(ret, 0, usize);
+ } else if (config_fill && unlikely(opt_junk_alloc))
+ memset(ret, JEMALLOC_ALLOC_JUNK, usize);
+
+ arena_decay_tick(tsdn, arena);
+ return (ret);
+}
+
+#ifdef JEMALLOC_JET
+#undef huge_dalloc_junk
+#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl)
+#endif
+static void
+huge_dalloc_junk(void *ptr, size_t usize)
+{
+
+ if (config_fill && have_dss && unlikely(opt_junk_free)) {
+ /*
+ * Only bother junk filling if the chunk isn't about to be
+ * unmapped.
+ */
+ if (!config_munmap || (have_dss && chunk_in_dss(ptr)))
+ memset(ptr, JEMALLOC_FREE_JUNK, usize);
+ }
+}
+#ifdef JEMALLOC_JET
+#undef huge_dalloc_junk
+#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
+huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
+#endif
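+
+/*
+ * Under JEMALLOC_JET the implementation above is compiled as
+ * huge_dalloc_junk_impl, and huge_dalloc_junk becomes a writable function
+ * pointer initialized to it, so the test suite can interpose on junk filling.
+ */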
+
+static void
+huge_ralloc_no_move_similar(tsdn_t *tsdn, void *ptr, size_t oldsize,
+ size_t usize_min, size_t usize_max, bool zero)
+{
+ size_t usize, usize_next;
+ extent_node_t *node;
+ arena_t *arena;
+ chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
+ bool pre_zeroed, post_zeroed;
+
+ /* Increase usize to incorporate extra. */
+ for (usize = usize_min; usize < usize_max && (usize_next = s2u(usize+1))
+ <= oldsize; usize = usize_next)
+ ; /* Do nothing. */
+
+ if (oldsize == usize)
+ return;
+
+ node = huge_node_get(ptr);
+ arena = extent_node_arena_get(node);
+ pre_zeroed = extent_node_zeroed_get(node);
+
+ /* Fill if necessary (shrinking). */
+ if (oldsize > usize) {
+ size_t sdiff = oldsize - usize;
+ if (config_fill && unlikely(opt_junk_free)) {
+ memset((void *)((uintptr_t)ptr + usize),
+ JEMALLOC_FREE_JUNK, sdiff);
+ post_zeroed = false;
+ } else {
+ post_zeroed = !chunk_purge_wrapper(tsdn, arena,
+ &chunk_hooks, ptr, CHUNK_CEILING(oldsize), usize,
+ sdiff);
+ }
+ } else
+ post_zeroed = pre_zeroed;
+
+ malloc_mutex_lock(tsdn, &arena->huge_mtx);
+ /* Update the size of the huge allocation. */
+ huge_node_unset(ptr, node);
+ assert(extent_node_size_get(node) != usize);
+ extent_node_size_set(node, usize);
+ huge_node_reset(tsdn, ptr, node);
+ /* Update zeroed. */
+ extent_node_zeroed_set(node, post_zeroed);
+ malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+
+ arena_chunk_ralloc_huge_similar(tsdn, arena, ptr, oldsize, usize);
+
+ /* Fill if necessary (growing). */
+ if (oldsize < usize) {
+ if (zero || (config_fill && unlikely(opt_zero))) {
+ if (!pre_zeroed) {
+ memset((void *)((uintptr_t)ptr + oldsize), 0,
+ usize - oldsize);
+ }
+ } else if (config_fill && unlikely(opt_junk_alloc)) {
+ memset((void *)((uintptr_t)ptr + oldsize),
+ JEMALLOC_ALLOC_JUNK, usize - oldsize);
+ }
+ }
+}
+
+static bool
+huge_ralloc_no_move_shrink(tsdn_t *tsdn, void *ptr, size_t oldsize,
+ size_t usize)
+{
+ extent_node_t *node;
+ arena_t *arena;
+ chunk_hooks_t chunk_hooks;
+ size_t cdiff;
+ bool pre_zeroed, post_zeroed;
+
+ node = huge_node_get(ptr);
+ arena = extent_node_arena_get(node);
+ pre_zeroed = extent_node_zeroed_get(node);
+ chunk_hooks = chunk_hooks_get(tsdn, arena);
+
+ assert(oldsize > usize);
+
+ /* Split excess chunks. */
+ cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
+ if (cdiff != 0 && chunk_hooks.split(ptr, CHUNK_CEILING(oldsize),
+ CHUNK_CEILING(usize), cdiff, true, arena->ind))
+ return (true);
+
+ if (oldsize > usize) {
+ size_t sdiff = oldsize - usize;
+ if (config_fill && unlikely(opt_junk_free)) {
+ huge_dalloc_junk((void *)((uintptr_t)ptr + usize),
+ sdiff);
+ post_zeroed = false;
+ } else {
+ post_zeroed = !chunk_purge_wrapper(tsdn, arena,
+ &chunk_hooks, CHUNK_ADDR2BASE((uintptr_t)ptr +
+ usize), CHUNK_CEILING(oldsize),
+ CHUNK_ADDR2OFFSET((uintptr_t)ptr + usize), sdiff);
+ }
+ } else
+ post_zeroed = pre_zeroed;
+
+ malloc_mutex_lock(tsdn, &arena->huge_mtx);
+ /* Update the size of the huge allocation. */
+ huge_node_unset(ptr, node);
+ extent_node_size_set(node, usize);
+ huge_node_reset(tsdn, ptr, node);
+ /* Update zeroed. */
+ extent_node_zeroed_set(node, post_zeroed);
+ malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+
+ /* Zap the excess chunks. */
+ arena_chunk_ralloc_huge_shrink(tsdn, arena, ptr, oldsize, usize,
+ extent_node_sn_get(node));
+
+ return (false);
+}
+
+static bool
+huge_ralloc_no_move_expand(tsdn_t *tsdn, void *ptr, size_t oldsize,
+    size_t usize, bool zero)
+{
+ extent_node_t *node;
+ arena_t *arena;
+ bool is_zeroed_subchunk, is_zeroed_chunk;
+
+ node = huge_node_get(ptr);
+ arena = extent_node_arena_get(node);
+ malloc_mutex_lock(tsdn, &arena->huge_mtx);
+ is_zeroed_subchunk = extent_node_zeroed_get(node);
+ malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+
+ /*
+ * Use is_zeroed_chunk to detect whether the trailing memory is zeroed,
+ * update extent's zeroed field, and zero as necessary.
+ */
+ is_zeroed_chunk = false;
+ if (arena_chunk_ralloc_huge_expand(tsdn, arena, ptr, oldsize, usize,
+ &is_zeroed_chunk))
+ return (true);
+
+ malloc_mutex_lock(tsdn, &arena->huge_mtx);
+ huge_node_unset(ptr, node);
+ extent_node_size_set(node, usize);
+ extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
+ is_zeroed_chunk);
+ huge_node_reset(tsdn, ptr, node);
+ malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+
+ if (zero || (config_fill && unlikely(opt_zero))) {
+ if (!is_zeroed_subchunk) {
+ memset((void *)((uintptr_t)ptr + oldsize), 0,
+ CHUNK_CEILING(oldsize) - oldsize);
+ }
+ if (!is_zeroed_chunk) {
+ memset((void *)((uintptr_t)ptr +
+ CHUNK_CEILING(oldsize)), 0, usize -
+ CHUNK_CEILING(oldsize));
+ }
+ } else if (config_fill && unlikely(opt_junk_alloc)) {
+ memset((void *)((uintptr_t)ptr + oldsize), JEMALLOC_ALLOC_JUNK,
+ usize - oldsize);
+ }
+
+ return (false);
+}
+
+bool
+huge_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min,
+ size_t usize_max, bool zero)
+{
+
+ assert(s2u(oldsize) == oldsize);
+ /* The following should have been caught by callers. */
+ assert(usize_min > 0 && usize_max <= HUGE_MAXCLASS);
+
+ /* Both allocations must be huge to avoid a move. */
+ if (oldsize < chunksize || usize_max < chunksize)
+ return (true);
+
+ if (CHUNK_CEILING(usize_max) > CHUNK_CEILING(oldsize)) {
+ /* Attempt to expand the allocation in-place. */
+ if (!huge_ralloc_no_move_expand(tsdn, ptr, oldsize, usize_max,
+ zero)) {
+ arena_decay_tick(tsdn, huge_aalloc(ptr));
+ return (false);
+ }
+ /* Try again, this time with usize_min. */
+ if (usize_min < usize_max && CHUNK_CEILING(usize_min) >
+ CHUNK_CEILING(oldsize) && huge_ralloc_no_move_expand(tsdn,
+ ptr, oldsize, usize_min, zero)) {
+ arena_decay_tick(tsdn, huge_aalloc(ptr));
+ return (false);
+ }
+ }
+
+ /*
+ * Avoid moving the allocation if the existing chunk size accommodates
+ * the new size.
+ */
+ if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize_min)
+ && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(usize_max)) {
+ huge_ralloc_no_move_similar(tsdn, ptr, oldsize, usize_min,
+ usize_max, zero);
+ arena_decay_tick(tsdn, huge_aalloc(ptr));
+ return (false);
+ }
+
+ /* Attempt to shrink the allocation in-place. */
+ if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(usize_max)) {
+ if (!huge_ralloc_no_move_shrink(tsdn, ptr, oldsize,
+ usize_max)) {
+ arena_decay_tick(tsdn, huge_aalloc(ptr));
+ return (false);
+ }
+ }
+ return (true);
+}
+
+static void *
+huge_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
+ size_t alignment, bool zero)
+{
+
+ if (alignment <= chunksize)
+ return (huge_malloc(tsdn, arena, usize, zero));
+ return (huge_palloc(tsdn, arena, usize, alignment, zero));
+}
+
+void *
+huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
+ size_t usize, size_t alignment, bool zero, tcache_t *tcache)
+{
+ void *ret;
+ size_t copysize;
+
+ /* The following should have been caught by callers. */
+ assert(usize > 0 && usize <= HUGE_MAXCLASS);
+
+ /* Try to avoid moving the allocation. */
+ if (!huge_ralloc_no_move(tsd_tsdn(tsd), ptr, oldsize, usize, usize,
+ zero))
+ return (ptr);
+
+ /*
+ * usize and oldsize are different enough that we need to use a
+ * different size class. In that case, fall back to allocating new
+ * space and copying.
+ */
+ ret = huge_ralloc_move_helper(tsd_tsdn(tsd), arena, usize, alignment,
+ zero);
+ if (ret == NULL)
+ return (NULL);
+
+ copysize = (usize < oldsize) ? usize : oldsize;
+ memcpy(ret, ptr, copysize);
+ isqalloc(tsd, ptr, oldsize, tcache, true);
+ return (ret);
+}
+
+void
+huge_dalloc(tsdn_t *tsdn, void *ptr)
+{
+ extent_node_t *node;
+ arena_t *arena;
+
+ node = huge_node_get(ptr);
+ arena = extent_node_arena_get(node);
+ huge_node_unset(ptr, node);
+ malloc_mutex_lock(tsdn, &arena->huge_mtx);
+ ql_remove(&arena->huge, node, ql_link);
+ malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+
+ huge_dalloc_junk(extent_node_addr_get(node),
+ extent_node_size_get(node));
+ arena_chunk_dalloc_huge(tsdn, extent_node_arena_get(node),
+ extent_node_addr_get(node), extent_node_size_get(node),
+ extent_node_sn_get(node));
+ idalloctm(tsdn, node, NULL, true, true);
+
+ arena_decay_tick(tsdn, arena);
+}
+
+arena_t *
+huge_aalloc(const void *ptr)
+{
+
+ return (extent_node_arena_get(huge_node_get(ptr)));
+}
+
+size_t
+huge_salloc(tsdn_t *tsdn, const void *ptr)
+{
+ size_t size;
+ extent_node_t *node;
+ arena_t *arena;
+
+ node = huge_node_get(ptr);
+ arena = extent_node_arena_get(node);
+ malloc_mutex_lock(tsdn, &arena->huge_mtx);
+ size = extent_node_size_get(node);
+ malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+
+ return (size);
+}
+
+prof_tctx_t *
+huge_prof_tctx_get(tsdn_t *tsdn, const void *ptr)
+{
+ prof_tctx_t *tctx;
+ extent_node_t *node;
+ arena_t *arena;
+
+ node = huge_node_get(ptr);
+ arena = extent_node_arena_get(node);
+ malloc_mutex_lock(tsdn, &arena->huge_mtx);
+ tctx = extent_node_prof_tctx_get(node);
+ malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+
+ return (tctx);
+}
+
+void
+huge_prof_tctx_set(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx)
+{
+ extent_node_t *node;
+ arena_t *arena;
+
+ node = huge_node_get(ptr);
+ arena = extent_node_arena_get(node);
+ malloc_mutex_lock(tsdn, &arena->huge_mtx);
+ extent_node_prof_tctx_set(node, tctx);
+ malloc_mutex_unlock(tsdn, &arena->huge_mtx);
+}
+
+void
+huge_prof_tctx_reset(tsdn_t *tsdn, const void *ptr)
+{
+
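+	/* A tctx of (uintptr_t)1U marks the allocation as not sampled. */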
+ huge_prof_tctx_set(tsdn, ptr, (prof_tctx_t *)(uintptr_t)1U);
+}
diff --git a/deps/jemalloc/src/jemalloc.c b/deps/jemalloc/src/jemalloc.c
new file mode 100644
index 0000000..07389ca
--- /dev/null
+++ b/deps/jemalloc/src/jemalloc.c
@@ -0,0 +1,2949 @@
+#define JEMALLOC_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+/* Data. */
+
+/* Runtime configuration options. */
+const char *je_malloc_conf
+#ifndef _WIN32
+ JEMALLOC_ATTR(weak)
+#endif
+ ;
+bool opt_abort =
+#ifdef JEMALLOC_DEBUG
+ true
+#else
+ false
+#endif
+ ;
+const char *opt_junk =
+#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
+ "true"
+#else
+ "false"
+#endif
+ ;
+bool opt_junk_alloc =
+#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
+ true
+#else
+ false
+#endif
+ ;
+bool opt_junk_free =
+#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
+ true
+#else
+ false
+#endif
+ ;
+
+size_t opt_quarantine = ZU(0);
+bool opt_redzone = false;
+bool opt_utrace = false;
+bool opt_xmalloc = false;
+bool opt_zero = false;
+unsigned opt_narenas = 0;
+
+/* Initialized to true if the process is running inside Valgrind. */
+bool in_valgrind;
+
+unsigned ncpus;
+
+/* Protects arenas initialization. */
+static malloc_mutex_t arenas_lock;
+/*
+ * Arenas that are used to service external requests. Not all elements of the
+ * arenas array are necessarily used; arenas are created lazily as needed.
+ *
+ * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
+ * arenas. arenas[narenas_auto..narenas_total) are only used if the application
+ * takes some action to create them and allocate from them.
+ */
+arena_t **arenas;
+static unsigned narenas_total; /* Use narenas_total_*(). */
+static arena_t *a0; /* arenas[0]; read-only after initialization. */
+unsigned narenas_auto; /* Read-only after initialization. */
+
+typedef enum {
+ malloc_init_uninitialized = 3,
+ malloc_init_a0_initialized = 2,
+ malloc_init_recursible = 1,
+ malloc_init_initialized = 0 /* Common case --> jnz. */
+} malloc_init_t;
+static malloc_init_t malloc_init_state = malloc_init_uninitialized;
+
+/* False should be the common case. Set to true to trigger initialization. */
+static bool malloc_slow = true;
+
+/* When malloc_slow is true, set the corresponding bits for sanity check. */
+enum {
+ flag_opt_junk_alloc = (1U),
+ flag_opt_junk_free = (1U << 1),
+ flag_opt_quarantine = (1U << 2),
+ flag_opt_zero = (1U << 3),
+ flag_opt_utrace = (1U << 4),
+ flag_in_valgrind = (1U << 5),
+ flag_opt_xmalloc = (1U << 6)
+};
+static uint8_t malloc_slow_flags;
+
+JEMALLOC_ALIGNED(CACHELINE)
+const size_t pind2sz_tab[NPSIZES] = {
+#define PSZ_yes(lg_grp, ndelta, lg_delta) \
+ (((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))),
+#define PSZ_no(lg_grp, ndelta, lg_delta)
+#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \
+ PSZ_##psz(lg_grp, ndelta, lg_delta)
+ SIZE_CLASSES
+#undef PSZ_yes
+#undef PSZ_no
+#undef SC
+};
+
+JEMALLOC_ALIGNED(CACHELINE)
+const size_t index2size_tab[NSIZES] = {
+#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \
+ ((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)),
+ SIZE_CLASSES
+#undef SC
+};
+
+JEMALLOC_ALIGNED(CACHELINE)
+const uint8_t size2index_tab[] = {
+#if LG_TINY_MIN == 0
+#warning "Dangerous LG_TINY_MIN"
+#define S2B_0(i) i,
+#elif LG_TINY_MIN == 1
+#warning "Dangerous LG_TINY_MIN"
+#define S2B_1(i) i,
+#elif LG_TINY_MIN == 2
+#warning "Dangerous LG_TINY_MIN"
+#define S2B_2(i) i,
+#elif LG_TINY_MIN == 3
+#define S2B_3(i) i,
+#elif LG_TINY_MIN == 4
+#define S2B_4(i) i,
+#elif LG_TINY_MIN == 5
+#define S2B_5(i) i,
+#elif LG_TINY_MIN == 6
+#define S2B_6(i) i,
+#elif LG_TINY_MIN == 7
+#define S2B_7(i) i,
+#elif LG_TINY_MIN == 8
+#define S2B_8(i) i,
+#elif LG_TINY_MIN == 9
+#define S2B_9(i) i,
+#elif LG_TINY_MIN == 10
+#define S2B_10(i) i,
+#elif LG_TINY_MIN == 11
+#define S2B_11(i) i,
+#else
+#error "Unsupported LG_TINY_MIN"
+#endif
+#if LG_TINY_MIN < 1
+#define S2B_1(i) S2B_0(i) S2B_0(i)
+#endif
+#if LG_TINY_MIN < 2
+#define S2B_2(i) S2B_1(i) S2B_1(i)
+#endif
+#if LG_TINY_MIN < 3
+#define S2B_3(i) S2B_2(i) S2B_2(i)
+#endif
+#if LG_TINY_MIN < 4
+#define S2B_4(i) S2B_3(i) S2B_3(i)
+#endif
+#if LG_TINY_MIN < 5
+#define S2B_5(i) S2B_4(i) S2B_4(i)
+#endif
+#if LG_TINY_MIN < 6
+#define S2B_6(i) S2B_5(i) S2B_5(i)
+#endif
+#if LG_TINY_MIN < 7
+#define S2B_7(i) S2B_6(i) S2B_6(i)
+#endif
+#if LG_TINY_MIN < 8
+#define S2B_8(i) S2B_7(i) S2B_7(i)
+#endif
+#if LG_TINY_MIN < 9
+#define S2B_9(i) S2B_8(i) S2B_8(i)
+#endif
+#if LG_TINY_MIN < 10
+#define S2B_10(i) S2B_9(i) S2B_9(i)
+#endif
+#if LG_TINY_MIN < 11
+#define S2B_11(i) S2B_10(i) S2B_10(i)
+#endif
+#define S2B_no(i)
+#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \
+ S2B_##lg_delta_lookup(index)
+ SIZE_CLASSES
+#undef S2B_3
+#undef S2B_4
+#undef S2B_5
+#undef S2B_6
+#undef S2B_7
+#undef S2B_8
+#undef S2B_9
+#undef S2B_10
+#undef S2B_11
+#undef S2B_no
+#undef SC
+};
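+
+/*
+ * Each S2B_n(i) above expands to 2^(n - LG_TINY_MIN) copies of i, so a size
+ * class whose lg_delta_lookup is not "no" contributes one table entry per
+ * 2^LG_TINY_MIN-byte step it covers, while classes marked "no" are left out
+ * of the lookup table entirely.
+ */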
+
+#ifdef JEMALLOC_THREADED_INIT
+/* Used to let the initializing thread recursively allocate. */
+# define NO_INITIALIZER ((unsigned long)0)
+# define INITIALIZER pthread_self()
+# define IS_INITIALIZER (malloc_initializer == pthread_self())
+static pthread_t malloc_initializer = NO_INITIALIZER;
+#else
+# define NO_INITIALIZER false
+# define INITIALIZER true
+# define IS_INITIALIZER malloc_initializer
+static bool malloc_initializer = NO_INITIALIZER;
+#endif
+
+/* Used to avoid initialization races. */
+#ifdef _WIN32
+#if _WIN32_WINNT >= 0x0600
+static malloc_mutex_t init_lock = SRWLOCK_INIT;
+#else
+static malloc_mutex_t init_lock;
+static bool init_lock_initialized = false;
+
+JEMALLOC_ATTR(constructor)
+static void WINAPI
+_init_init_lock(void)
+{
+
+	/* If another constructor in the same binary is using mallctl to
+	 * e.g. set up chunk hooks, it may end up running before this one,
+	 * and malloc_init_hard will crash trying to lock the uninitialized
+	 * lock.  So we force an initialization of the lock in
+	 * malloc_init_hard as well.  We don't bother making accesses to the
+	 * init_lock_initialized boolean atomic, since it really only matters
+	 * early in process creation, before any separate thread normally
+	 * starts doing anything. */
+ if (!init_lock_initialized)
+ malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT);
+ init_lock_initialized = true;
+}
+
+#ifdef _MSC_VER
+# pragma section(".CRT$XCU", read)
+JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
+static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
+#endif
+#endif
+#else
+static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER;
+#endif
+
+typedef struct {
+ void *p; /* Input pointer (as in realloc(p, s)). */
+ size_t s; /* Request size. */
+ void *r; /* Result pointer. */
+} malloc_utrace_t;
+
+#ifdef JEMALLOC_UTRACE
+# define UTRACE(a, b, c) do { \
+ if (unlikely(opt_utrace)) { \
+ int utrace_serrno = errno; \
+ malloc_utrace_t ut; \
+ ut.p = (a); \
+ ut.s = (b); \
+ ut.r = (c); \
+ utrace(&ut, sizeof(ut)); \
+ errno = utrace_serrno; \
+ } \
+} while (0)
+#else
+# define UTRACE(a, b, c)
+#endif
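+
+/*
+ * When opt_utrace is set, UTRACE(a, b, c) records one malloc_utrace_t per
+ * allocator call via utrace(2) while preserving errno; e.g. the slow path of
+ * je_malloc() below issues UTRACE(0, size, ret), logging {NULL, size, ret}.
+ */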
+
+/******************************************************************************/
+/*
+ * Function prototypes for static functions that are referenced prior to
+ * definition.
+ */
+
+static bool malloc_init_hard_a0(void);
+static bool malloc_init_hard(void);
+
+/******************************************************************************/
+/*
+ * Begin miscellaneous support functions.
+ */
+
+JEMALLOC_ALWAYS_INLINE_C bool
+malloc_initialized(void)
+{
+
+ return (malloc_init_state == malloc_init_initialized);
+}
+
+JEMALLOC_ALWAYS_INLINE_C void
+malloc_thread_init(void)
+{
+
+ /*
+ * TSD initialization can't be safely done as a side effect of
+ * deallocation, because it is possible for a thread to do nothing but
+ * deallocate its TLS data via free(), in which case writing to TLS
+ * would cause write-after-free memory corruption. The quarantine
+ * facility *only* gets used as a side effect of deallocation, so make
+ * a best effort attempt at initializing its TSD by hooking all
+ * allocation events.
+ */
+ if (config_fill && unlikely(opt_quarantine))
+ quarantine_alloc_hook();
+}
+
+JEMALLOC_ALWAYS_INLINE_C bool
+malloc_init_a0(void)
+{
+
+ if (unlikely(malloc_init_state == malloc_init_uninitialized))
+ return (malloc_init_hard_a0());
+ return (false);
+}
+
+JEMALLOC_ALWAYS_INLINE_C bool
+malloc_init(void)
+{
+
+ if (unlikely(!malloc_initialized()) && malloc_init_hard())
+ return (true);
+ malloc_thread_init();
+
+ return (false);
+}
+
+/*
+ * The a0*() functions are used instead of i{d,}alloc() in situations that
+ * cannot tolerate TLS variable access.
+ */
+
+static void *
+a0ialloc(size_t size, bool zero, bool is_metadata)
+{
+
+ if (unlikely(malloc_init_a0()))
+ return (NULL);
+
+ return (iallocztm(TSDN_NULL, size, size2index(size), zero, NULL,
+ is_metadata, arena_get(TSDN_NULL, 0, true), true));
+}
+
+static void
+a0idalloc(void *ptr, bool is_metadata)
+{
+
+ idalloctm(TSDN_NULL, ptr, false, is_metadata, true);
+}
+
+arena_t *
+a0get(void)
+{
+
+ return (a0);
+}
+
+void *
+a0malloc(size_t size)
+{
+
+ return (a0ialloc(size, false, true));
+}
+
+void
+a0dalloc(void *ptr)
+{
+
+ a0idalloc(ptr, true);
+}
+
+/*
+ * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-sensitive
+ * situations that cannot tolerate TLS variable access (TLS allocation and very
+ * early internal data structure initialization).
+ */
+
+void *
+bootstrap_malloc(size_t size)
+{
+
+ if (unlikely(size == 0))
+ size = 1;
+
+ return (a0ialloc(size, false, false));
+}
+
+void *
+bootstrap_calloc(size_t num, size_t size)
+{
+ size_t num_size;
+
+ num_size = num * size;
+ if (unlikely(num_size == 0)) {
+ assert(num == 0 || size == 0);
+ num_size = 1;
+ }
+
+ return (a0ialloc(num_size, true, false));
+}
+
+void
+bootstrap_free(void *ptr)
+{
+
+ if (unlikely(ptr == NULL))
+ return;
+
+ a0idalloc(ptr, false);
+}
+
+static void
+arena_set(unsigned ind, arena_t *arena)
+{
+
+ atomic_write_p((void **)&arenas[ind], arena);
+}
+
+static void
+narenas_total_set(unsigned narenas)
+{
+
+ atomic_write_u(&narenas_total, narenas);
+}
+
+static void
+narenas_total_inc(void)
+{
+
+ atomic_add_u(&narenas_total, 1);
+}
+
+unsigned
+narenas_total_get(void)
+{
+
+ return (atomic_read_u(&narenas_total));
+}
+
+/* Create a new arena and insert it into the arenas array at index ind. */
+static arena_t *
+arena_init_locked(tsdn_t *tsdn, unsigned ind)
+{
+ arena_t *arena;
+
+ assert(ind <= narenas_total_get());
+ if (ind > MALLOCX_ARENA_MAX)
+ return (NULL);
+ if (ind == narenas_total_get())
+ narenas_total_inc();
+
+ /*
+ * Another thread may have already initialized arenas[ind] if it's an
+ * auto arena.
+ */
+ arena = arena_get(tsdn, ind, false);
+ if (arena != NULL) {
+ assert(ind < narenas_auto);
+ return (arena);
+ }
+
+ /* Actually initialize the arena. */
+ arena = arena_new(tsdn, ind);
+ arena_set(ind, arena);
+ return (arena);
+}
+
+arena_t *
+arena_init(tsdn_t *tsdn, unsigned ind)
+{
+ arena_t *arena;
+
+ malloc_mutex_lock(tsdn, &arenas_lock);
+ arena = arena_init_locked(tsdn, ind);
+ malloc_mutex_unlock(tsdn, &arenas_lock);
+ return (arena);
+}
+
+static void
+arena_bind(tsd_t *tsd, unsigned ind, bool internal)
+{
+ arena_t *arena;
+
+ if (!tsd_nominal(tsd))
+ return;
+
+ arena = arena_get(tsd_tsdn(tsd), ind, false);
+ arena_nthreads_inc(arena, internal);
+
+ if (internal)
+ tsd_iarena_set(tsd, arena);
+ else
+ tsd_arena_set(tsd, arena);
+}
+
+void
+arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind)
+{
+ arena_t *oldarena, *newarena;
+
+ oldarena = arena_get(tsd_tsdn(tsd), oldind, false);
+ newarena = arena_get(tsd_tsdn(tsd), newind, false);
+ arena_nthreads_dec(oldarena, false);
+ arena_nthreads_inc(newarena, false);
+ tsd_arena_set(tsd, newarena);
+}
+
+static void
+arena_unbind(tsd_t *tsd, unsigned ind, bool internal)
+{
+ arena_t *arena;
+
+ arena = arena_get(tsd_tsdn(tsd), ind, false);
+ arena_nthreads_dec(arena, internal);
+ if (internal)
+ tsd_iarena_set(tsd, NULL);
+ else
+ tsd_arena_set(tsd, NULL);
+}
+
+arena_tdata_t *
+arena_tdata_get_hard(tsd_t *tsd, unsigned ind)
+{
+ arena_tdata_t *tdata, *arenas_tdata_old;
+ arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
+ unsigned narenas_tdata_old, i;
+ unsigned narenas_tdata = tsd_narenas_tdata_get(tsd);
+ unsigned narenas_actual = narenas_total_get();
+
+ /*
+ * Dissociate old tdata array (and set up for deallocation upon return)
+ * if it's too small.
+ */
+ if (arenas_tdata != NULL && narenas_tdata < narenas_actual) {
+ arenas_tdata_old = arenas_tdata;
+ narenas_tdata_old = narenas_tdata;
+ arenas_tdata = NULL;
+ narenas_tdata = 0;
+ tsd_arenas_tdata_set(tsd, arenas_tdata);
+ tsd_narenas_tdata_set(tsd, narenas_tdata);
+ } else {
+ arenas_tdata_old = NULL;
+ narenas_tdata_old = 0;
+ }
+
+ /* Allocate tdata array if it's missing. */
+ if (arenas_tdata == NULL) {
+ bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd);
+ narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1;
+
+ if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) {
+ *arenas_tdata_bypassp = true;
+ arenas_tdata = (arena_tdata_t *)a0malloc(
+ sizeof(arena_tdata_t) * narenas_tdata);
+ *arenas_tdata_bypassp = false;
+ }
+ if (arenas_tdata == NULL) {
+ tdata = NULL;
+ goto label_return;
+ }
+ assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp);
+ tsd_arenas_tdata_set(tsd, arenas_tdata);
+ tsd_narenas_tdata_set(tsd, narenas_tdata);
+ }
+
+ /*
+ * Copy to tdata array. It's possible that the actual number of arenas
+ * has increased since narenas_total_get() was called above, but that
+ * causes no correctness issues unless two threads concurrently execute
+ * the arenas.extend mallctl, which we trust mallctl synchronization to
+ * prevent.
+ */
+
+ /* Copy/initialize tickers. */
+ for (i = 0; i < narenas_actual; i++) {
+ if (i < narenas_tdata_old) {
+ ticker_copy(&arenas_tdata[i].decay_ticker,
+ &arenas_tdata_old[i].decay_ticker);
+ } else {
+ ticker_init(&arenas_tdata[i].decay_ticker,
+ DECAY_NTICKS_PER_UPDATE);
+ }
+ }
+ if (narenas_tdata > narenas_actual) {
+ memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t)
+ * (narenas_tdata - narenas_actual));
+ }
+
+ /* Read the refreshed tdata array. */
+ tdata = &arenas_tdata[ind];
+label_return:
+ if (arenas_tdata_old != NULL)
+ a0dalloc(arenas_tdata_old);
+ return (tdata);
+}
+
+/* Slow path, called only by arena_choose(). */
+arena_t *
+arena_choose_hard(tsd_t *tsd, bool internal)
+{
+ arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL);
+
+ if (narenas_auto > 1) {
+ unsigned i, j, choose[2], first_null;
+
+ /*
+ * Determine binding for both non-internal and internal
+ * allocation.
+ *
+ * choose[0]: For application allocation.
+ * choose[1]: For internal metadata allocation.
+ */
+
+ for (j = 0; j < 2; j++)
+ choose[j] = 0;
+
+ first_null = narenas_auto;
+ malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock);
+ assert(arena_get(tsd_tsdn(tsd), 0, false) != NULL);
+ for (i = 1; i < narenas_auto; i++) {
+ if (arena_get(tsd_tsdn(tsd), i, false) != NULL) {
+ /*
+ * Choose the first arena that has the lowest
+ * number of threads assigned to it.
+ */
+ for (j = 0; j < 2; j++) {
+ if (arena_nthreads_get(arena_get(
+ tsd_tsdn(tsd), i, false), !!j) <
+ arena_nthreads_get(arena_get(
+ tsd_tsdn(tsd), choose[j], false),
+ !!j))
+ choose[j] = i;
+ }
+ } else if (first_null == narenas_auto) {
+ /*
+ * Record the index of the first uninitialized
+ * arena, in case all extant arenas are in use.
+ *
+ * NB: It is possible for there to be
+ * discontinuities in terms of initialized
+ * versus uninitialized arenas, due to the
+ * "thread.arena" mallctl.
+ */
+ first_null = i;
+ }
+ }
+
+ for (j = 0; j < 2; j++) {
+ if (arena_nthreads_get(arena_get(tsd_tsdn(tsd),
+ choose[j], false), !!j) == 0 || first_null ==
+ narenas_auto) {
+ /*
+ * Use an unloaded arena, or the least loaded
+ * arena if all arenas are already initialized.
+ */
+ if (!!j == internal) {
+ ret = arena_get(tsd_tsdn(tsd),
+ choose[j], false);
+ }
+ } else {
+ arena_t *arena;
+
+ /* Initialize a new arena. */
+ choose[j] = first_null;
+ arena = arena_init_locked(tsd_tsdn(tsd),
+ choose[j]);
+ if (arena == NULL) {
+ malloc_mutex_unlock(tsd_tsdn(tsd),
+ &arenas_lock);
+ return (NULL);
+ }
+ if (!!j == internal)
+ ret = arena;
+ }
+ arena_bind(tsd, choose[j], !!j);
+ }
+ malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock);
+ } else {
+ ret = arena_get(tsd_tsdn(tsd), 0, false);
+ arena_bind(tsd, 0, false);
+ arena_bind(tsd, 0, true);
+ }
+
+ return (ret);
+}
+
+void
+thread_allocated_cleanup(tsd_t *tsd)
+{
+
+ /* Do nothing. */
+}
+
+void
+thread_deallocated_cleanup(tsd_t *tsd)
+{
+
+ /* Do nothing. */
+}
+
+void
+iarena_cleanup(tsd_t *tsd)
+{
+ arena_t *iarena;
+
+ iarena = tsd_iarena_get(tsd);
+ if (iarena != NULL)
+ arena_unbind(tsd, iarena->ind, true);
+}
+
+void
+arena_cleanup(tsd_t *tsd)
+{
+ arena_t *arena;
+
+ arena = tsd_arena_get(tsd);
+ if (arena != NULL)
+ arena_unbind(tsd, arena->ind, false);
+}
+
+void
+arenas_tdata_cleanup(tsd_t *tsd)
+{
+ arena_tdata_t *arenas_tdata;
+
+ /* Prevent tsd->arenas_tdata from being (re)created. */
+ *tsd_arenas_tdata_bypassp_get(tsd) = true;
+
+ arenas_tdata = tsd_arenas_tdata_get(tsd);
+ if (arenas_tdata != NULL) {
+ tsd_arenas_tdata_set(tsd, NULL);
+ a0dalloc(arenas_tdata);
+ }
+}
+
+void
+narenas_tdata_cleanup(tsd_t *tsd)
+{
+
+ /* Do nothing. */
+}
+
+void
+arenas_tdata_bypass_cleanup(tsd_t *tsd)
+{
+
+ /* Do nothing. */
+}
+
+static void
+stats_print_atexit(void)
+{
+
+ if (config_tcache && config_stats) {
+ tsdn_t *tsdn;
+ unsigned narenas, i;
+
+ tsdn = tsdn_fetch();
+
+ /*
+ * Merge stats from extant threads. This is racy, since
+ * individual threads do not lock when recording tcache stats
+ * events. As a consequence, the final stats may be slightly
+ * out of date by the time they are reported, if other threads
+ * continue to allocate.
+ */
+ for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
+ arena_t *arena = arena_get(tsdn, i, false);
+ if (arena != NULL) {
+ tcache_t *tcache;
+
+ /*
+ * tcache_stats_merge() locks bins, so if any
+ * code is introduced that acquires both arena
+ * and bin locks in the opposite order,
+ * deadlocks may result.
+ */
+ malloc_mutex_lock(tsdn, &arena->lock);
+ ql_foreach(tcache, &arena->tcache_ql, link) {
+ tcache_stats_merge(tsdn, tcache, arena);
+ }
+ malloc_mutex_unlock(tsdn, &arena->lock);
+ }
+ }
+ }
+ je_malloc_stats_print(NULL, NULL, NULL);
+}
+
+/*
+ * End miscellaneous support functions.
+ */
+/******************************************************************************/
+/*
+ * Begin initialization functions.
+ */
+
+#ifndef JEMALLOC_HAVE_SECURE_GETENV
+static char *
+secure_getenv(const char *name)
+{
+
+# ifdef JEMALLOC_HAVE_ISSETUGID
+ if (issetugid() != 0)
+ return (NULL);
+# endif
+ return (getenv(name));
+}
+#endif
+
+static unsigned
+malloc_ncpus(void)
+{
+ long result;
+
+#ifdef _WIN32
+ SYSTEM_INFO si;
+ GetSystemInfo(&si);
+ result = si.dwNumberOfProcessors;
+#elif defined(JEMALLOC_GLIBC_MALLOC_HOOK) && defined(CPU_COUNT)
+ /*
+ * glibc >= 2.6 has the CPU_COUNT macro.
+ *
+ * glibc's sysconf() uses isspace(). glibc allocates for the first time
+ * *before* setting up the isspace tables. Therefore we need a
+ * different method to get the number of CPUs.
+ */
+ {
+ cpu_set_t set;
+
+ pthread_getaffinity_np(pthread_self(), sizeof(set), &set);
+ result = CPU_COUNT(&set);
+ }
+#else
+ result = sysconf(_SC_NPROCESSORS_ONLN);
+#endif
+ return ((result == -1) ? 1 : (unsigned)result);
+}
+
+static bool
+malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
+ char const **v_p, size_t *vlen_p)
+{
+ bool accept;
+ const char *opts = *opts_p;
+
+ *k_p = opts;
+
+ for (accept = false; !accept;) {
+ switch (*opts) {
+ case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
+ case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
+ case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
+ case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
+ case 'Y': case 'Z':
+ case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
+ case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
+ case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
+ case 's': case 't': case 'u': case 'v': case 'w': case 'x':
+ case 'y': case 'z':
+ case '0': case '1': case '2': case '3': case '4': case '5':
+ case '6': case '7': case '8': case '9':
+ case '_':
+ opts++;
+ break;
+ case ':':
+ opts++;
+ *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
+ *v_p = opts;
+ accept = true;
+ break;
+ case '\0':
+ if (opts != *opts_p) {
+ malloc_write("<jemalloc>: Conf string ends "
+ "with key\n");
+ }
+ return (true);
+ default:
+ malloc_write("<jemalloc>: Malformed conf string\n");
+ return (true);
+ }
+ }
+
+ for (accept = false; !accept;) {
+ switch (*opts) {
+ case ',':
+ opts++;
+ /*
+ * Look ahead one character here, because the next time
+ * this function is called, it will assume that end of
+ * input has been cleanly reached if no input remains,
+ * but we have optimistically already consumed the
+ * comma if one exists.
+ */
+ if (*opts == '\0') {
+ malloc_write("<jemalloc>: Conf string ends "
+ "with comma\n");
+ }
+ *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
+ accept = true;
+ break;
+ case '\0':
+ *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
+ accept = true;
+ break;
+ default:
+ opts++;
+ break;
+ }
+ }
+
+ *opts_p = opts;
+ return (false);
+}
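+
+/*
+ * The accepted grammar is a comma-separated list of key:value pairs with
+ * [A-Za-z0-9_]+ keys, e.g. "narenas:4,lg_chunk:22,stats_print:true".  The same
+ * format applies to je_malloc_conf, the /etc/malloc.conf symlink target, and
+ * the MALLOC_CONF environment variable processed by malloc_conf_init() below.
+ */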
+
+static void
+malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
+ size_t vlen)
+{
+
+ malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
+ (int)vlen, v);
+}
+
+static void
+malloc_slow_flag_init(void)
+{
+ /*
+ * Combine the runtime options into malloc_slow for fast path. Called
+ * after processing all the options.
+ */
+ malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0)
+ | (opt_junk_free ? flag_opt_junk_free : 0)
+ | (opt_quarantine ? flag_opt_quarantine : 0)
+ | (opt_zero ? flag_opt_zero : 0)
+ | (opt_utrace ? flag_opt_utrace : 0)
+ | (opt_xmalloc ? flag_opt_xmalloc : 0);
+
+ if (config_valgrind)
+ malloc_slow_flags |= (in_valgrind ? flag_in_valgrind : 0);
+
+ malloc_slow = (malloc_slow_flags != 0);
+}
+
+static void
+malloc_conf_init(void)
+{
+ unsigned i;
+ char buf[PATH_MAX + 1];
+ const char *opts, *k, *v;
+ size_t klen, vlen;
+
+ /*
+ * Automatically configure valgrind before processing options. The
+ * valgrind option remains in jemalloc 3.x for compatibility reasons.
+ */
+ if (config_valgrind) {
+ in_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false;
+ if (config_fill && unlikely(in_valgrind)) {
+ opt_junk = "false";
+ opt_junk_alloc = false;
+ opt_junk_free = false;
+ assert(!opt_zero);
+ opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
+ opt_redzone = true;
+ }
+ if (config_tcache && unlikely(in_valgrind))
+ opt_tcache = false;
+ }
+
+ for (i = 0; i < 4; i++) {
+ /* Get runtime configuration. */
+ switch (i) {
+ case 0:
+ opts = config_malloc_conf;
+ break;
+ case 1:
+ if (je_malloc_conf != NULL) {
+ /*
+ * Use options that were compiled into the
+ * program.
+ */
+ opts = je_malloc_conf;
+ } else {
+ /* No configuration specified. */
+ buf[0] = '\0';
+ opts = buf;
+ }
+ break;
+ case 2: {
+ ssize_t linklen = 0;
+#ifndef _WIN32
+ int saved_errno = errno;
+ const char *linkname =
+# ifdef JEMALLOC_PREFIX
+ "/etc/"JEMALLOC_PREFIX"malloc.conf"
+# else
+ "/etc/malloc.conf"
+# endif
+ ;
+
+ /*
+ * Try to use the contents of the "/etc/malloc.conf"
+ * symbolic link's name.
+ */
+ linklen = readlink(linkname, buf, sizeof(buf) - 1);
+ if (linklen == -1) {
+ /* No configuration specified. */
+ linklen = 0;
+ /* Restore errno. */
+ set_errno(saved_errno);
+ }
+#endif
+ buf[linklen] = '\0';
+ opts = buf;
+ break;
+ } case 3: {
+ const char *envname =
+#ifdef JEMALLOC_PREFIX
+ JEMALLOC_CPREFIX"MALLOC_CONF"
+#else
+ "MALLOC_CONF"
+#endif
+ ;
+
+ if ((opts = secure_getenv(envname)) != NULL) {
+ /*
+ * Do nothing; opts is already initialized to
+ * the value of the MALLOC_CONF environment
+ * variable.
+ */
+ } else {
+ /* No configuration specified. */
+ buf[0] = '\0';
+ opts = buf;
+ }
+ break;
+ } default:
+ not_reached();
+ buf[0] = '\0';
+ opts = buf;
+ }
+
+ while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v,
+ &vlen)) {
+#define CONF_MATCH(n) \
+ (sizeof(n)-1 == klen && strncmp(n, k, klen) == 0)
+#define CONF_MATCH_VALUE(n) \
+ (sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0)
+#define CONF_HANDLE_BOOL(o, n, cont) \
+ if (CONF_MATCH(n)) { \
+ if (CONF_MATCH_VALUE("true")) \
+ o = true; \
+ else if (CONF_MATCH_VALUE("false")) \
+ o = false; \
+ else { \
+ malloc_conf_error( \
+ "Invalid conf value", \
+ k, klen, v, vlen); \
+ } \
+ if (cont) \
+ continue; \
+ }
+#define CONF_MIN_no(um, min) false
+#define CONF_MIN_yes(um, min) ((um) < (min))
+#define CONF_MAX_no(um, max) false
+#define CONF_MAX_yes(um, max) ((um) > (max))
+#define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip) \
+ if (CONF_MATCH(n)) { \
+ uintmax_t um; \
+ char *end; \
+ \
+ set_errno(0); \
+ um = malloc_strtoumax(v, &end, 0); \
+ if (get_errno() != 0 || (uintptr_t)end -\
+ (uintptr_t)v != vlen) { \
+ malloc_conf_error( \
+ "Invalid conf value", \
+ k, klen, v, vlen); \
+ } else if (clip) { \
+ if (CONF_MIN_##check_min(um, \
+ (min))) \
+ o = (t)(min); \
+ else if (CONF_MAX_##check_max( \
+ um, (max))) \
+ o = (t)(max); \
+ else \
+ o = (t)um; \
+ } else { \
+ if (CONF_MIN_##check_min(um, \
+ (min)) || \
+ CONF_MAX_##check_max(um, \
+ (max))) { \
+ malloc_conf_error( \
+ "Out-of-range " \
+ "conf value", \
+ k, klen, v, vlen); \
+ } else \
+ o = (t)um; \
+ } \
+ continue; \
+ }
+#define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max, \
+ clip) \
+ CONF_HANDLE_T_U(unsigned, o, n, min, max, \
+ check_min, check_max, clip)
+#define CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip) \
+ CONF_HANDLE_T_U(size_t, o, n, min, max, \
+ check_min, check_max, clip)
+#define CONF_HANDLE_SSIZE_T(o, n, min, max) \
+ if (CONF_MATCH(n)) { \
+ long l; \
+ char *end; \
+ \
+ set_errno(0); \
+ l = strtol(v, &end, 0); \
+ if (get_errno() != 0 || (uintptr_t)end -\
+ (uintptr_t)v != vlen) { \
+ malloc_conf_error( \
+ "Invalid conf value", \
+ k, klen, v, vlen); \
+ } else if (l < (ssize_t)(min) || l > \
+ (ssize_t)(max)) { \
+ malloc_conf_error( \
+ "Out-of-range conf value", \
+ k, klen, v, vlen); \
+ } else \
+ o = l; \
+ continue; \
+ }
+#define CONF_HANDLE_CHAR_P(o, n, d) \
+ if (CONF_MATCH(n)) { \
+ size_t cpylen = (vlen <= \
+ sizeof(o)-1) ? vlen : \
+ sizeof(o)-1; \
+ strncpy(o, v, cpylen); \
+ o[cpylen] = '\0'; \
+ continue; \
+ }
+
+ CONF_HANDLE_BOOL(opt_abort, "abort", true)
+ /*
+ * Chunks always require at least one header page,
+ * as many as 2^(LG_SIZE_CLASS_GROUP+1) data pages, and
+ * possibly an additional page in the presence of
+ * redzones. In order to simplify options processing,
+ * use a conservative bound that accommodates all these
+ * constraints.
+ */
+ CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
+ LG_SIZE_CLASS_GROUP + (config_fill ? 2 : 1),
+ (sizeof(size_t) << 3) - 1, yes, yes, true)
+ if (strncmp("dss", k, klen) == 0) {
+ int i;
+ bool match = false;
+ for (i = 0; i < dss_prec_limit; i++) {
+ if (strncmp(dss_prec_names[i], v, vlen)
+ == 0) {
+ if (chunk_dss_prec_set(i)) {
+ malloc_conf_error(
+ "Error setting dss",
+ k, klen, v, vlen);
+ } else {
+ opt_dss =
+ dss_prec_names[i];
+ match = true;
+ break;
+ }
+ }
+ }
+ if (!match) {
+ malloc_conf_error("Invalid conf value",
+ k, klen, v, vlen);
+ }
+ continue;
+ }
+ CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1,
+ UINT_MAX, yes, no, false)
+ if (strncmp("purge", k, klen) == 0) {
+ int i;
+ bool match = false;
+ for (i = 0; i < purge_mode_limit; i++) {
+ if (strncmp(purge_mode_names[i], v,
+ vlen) == 0) {
+ opt_purge = (purge_mode_t)i;
+ match = true;
+ break;
+ }
+ }
+ if (!match) {
+ malloc_conf_error("Invalid conf value",
+ k, klen, v, vlen);
+ }
+ continue;
+ }
+ CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
+ -1, (sizeof(size_t) << 3) - 1)
+ CONF_HANDLE_SSIZE_T(opt_decay_time, "decay_time", -1,
+ NSTIME_SEC_MAX);
+ CONF_HANDLE_BOOL(opt_stats_print, "stats_print", true)
+ if (config_fill) {
+ if (CONF_MATCH("junk")) {
+ if (CONF_MATCH_VALUE("true")) {
+ if (config_valgrind &&
+ unlikely(in_valgrind)) {
+ malloc_conf_error(
+ "Deallocation-time "
+ "junk filling cannot "
+ "be enabled while "
+ "running inside "
+ "Valgrind", k, klen, v,
+ vlen);
+ } else {
+ opt_junk = "true";
+ opt_junk_alloc = true;
+ opt_junk_free = true;
+ }
+ } else if (CONF_MATCH_VALUE("false")) {
+ opt_junk = "false";
+ opt_junk_alloc = opt_junk_free =
+ false;
+ } else if (CONF_MATCH_VALUE("alloc")) {
+ opt_junk = "alloc";
+ opt_junk_alloc = true;
+ opt_junk_free = false;
+ } else if (CONF_MATCH_VALUE("free")) {
+ if (config_valgrind &&
+ unlikely(in_valgrind)) {
+ malloc_conf_error(
+ "Deallocation-time "
+ "junk filling cannot "
+ "be enabled while "
+ "running inside "
+ "Valgrind", k, klen, v,
+ vlen);
+ } else {
+ opt_junk = "free";
+ opt_junk_alloc = false;
+ opt_junk_free = true;
+ }
+ } else {
+ malloc_conf_error(
+ "Invalid conf value", k,
+ klen, v, vlen);
+ }
+ continue;
+ }
+ CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
+ 0, SIZE_T_MAX, no, no, false)
+ CONF_HANDLE_BOOL(opt_redzone, "redzone", true)
+ CONF_HANDLE_BOOL(opt_zero, "zero", true)
+ }
+ if (config_utrace) {
+ CONF_HANDLE_BOOL(opt_utrace, "utrace", true)
+ }
+ if (config_xmalloc) {
+ CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc", true)
+ }
+ if (config_tcache) {
+ CONF_HANDLE_BOOL(opt_tcache, "tcache",
+ !config_valgrind || !in_valgrind)
+ if (CONF_MATCH("tcache")) {
+ assert(config_valgrind && in_valgrind);
+ if (opt_tcache) {
+ opt_tcache = false;
+ malloc_conf_error(
+ "tcache cannot be enabled "
+ "while running inside Valgrind",
+ k, klen, v, vlen);
+ }
+ continue;
+ }
+ CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
+ "lg_tcache_max", -1,
+ (sizeof(size_t) << 3) - 1)
+ }
+ if (config_prof) {
+ CONF_HANDLE_BOOL(opt_prof, "prof", true)
+ CONF_HANDLE_CHAR_P(opt_prof_prefix,
+ "prof_prefix", "jeprof")
+ CONF_HANDLE_BOOL(opt_prof_active, "prof_active",
+ true)
+ CONF_HANDLE_BOOL(opt_prof_thread_active_init,
+ "prof_thread_active_init", true)
+ CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
+ "lg_prof_sample", 0, (sizeof(uint64_t) << 3)
+ - 1, no, yes, true)
+ CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum",
+ true)
+ CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
+ "lg_prof_interval", -1,
+ (sizeof(uint64_t) << 3) - 1)
+ CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump",
+ true)
+ CONF_HANDLE_BOOL(opt_prof_final, "prof_final",
+ true)
+ CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak",
+ true)
+ }
+ malloc_conf_error("Invalid conf pair", k, klen, v,
+ vlen);
+#undef CONF_MATCH
+#undef CONF_MATCH_VALUE
+#undef CONF_HANDLE_BOOL
+#undef CONF_MIN_no
+#undef CONF_MIN_yes
+#undef CONF_MAX_no
+#undef CONF_MAX_yes
+#undef CONF_HANDLE_T_U
+#undef CONF_HANDLE_UNSIGNED
+#undef CONF_HANDLE_SIZE_T
+#undef CONF_HANDLE_SSIZE_T
+#undef CONF_HANDLE_CHAR_P
+ }
+ }
+}
+
+static bool
+malloc_init_hard_needed(void)
+{
+
+ if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state ==
+ malloc_init_recursible)) {
+ /*
+ * Another thread initialized the allocator before this one
+ * acquired init_lock, or this thread is the initializing
+ * thread, and it is recursively allocating.
+ */
+ return (false);
+ }
+#ifdef JEMALLOC_THREADED_INIT
+ if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) {
+ spin_t spinner;
+
+ /* Busy-wait until the initializing thread completes. */
+ spin_init(&spinner);
+ do {
+ malloc_mutex_unlock(TSDN_NULL, &init_lock);
+ spin_adaptive(&spinner);
+ malloc_mutex_lock(TSDN_NULL, &init_lock);
+ } while (!malloc_initialized());
+ return (false);
+ }
+#endif
+ return (true);
+}
+
+static bool
+malloc_init_hard_a0_locked(void)
+{
+
+ malloc_initializer = INITIALIZER;
+
+ if (config_prof)
+ prof_boot0();
+ malloc_conf_init();
+ if (opt_stats_print) {
+ /* Print statistics at exit. */
+ if (atexit(stats_print_atexit) != 0) {
+ malloc_write("<jemalloc>: Error in atexit()\n");
+ if (opt_abort)
+ abort();
+ }
+ }
+ pages_boot();
+ if (base_boot())
+ return (true);
+ if (chunk_boot())
+ return (true);
+ if (ctl_boot())
+ return (true);
+ if (config_prof)
+ prof_boot1();
+ arena_boot();
+ if (config_tcache && tcache_boot(TSDN_NULL))
+ return (true);
+ if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS))
+ return (true);
+ /*
+ * Create enough scaffolding to allow recursive allocation in
+ * malloc_ncpus().
+ */
+ narenas_auto = 1;
+ narenas_total_set(narenas_auto);
+ arenas = &a0;
+ memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
+ /*
+ * Initialize one arena here. The rest are lazily created in
+ * arena_choose_hard().
+ */
+ if (arena_init(TSDN_NULL, 0) == NULL)
+ return (true);
+
+ malloc_init_state = malloc_init_a0_initialized;
+
+ return (false);
+}
+
+static bool
+malloc_init_hard_a0(void)
+{
+ bool ret;
+
+ malloc_mutex_lock(TSDN_NULL, &init_lock);
+ ret = malloc_init_hard_a0_locked();
+ malloc_mutex_unlock(TSDN_NULL, &init_lock);
+ return (ret);
+}
+
+/* Initialize data structures which may trigger recursive allocation. */
+static bool
+malloc_init_hard_recursible(void)
+{
+
+ malloc_init_state = malloc_init_recursible;
+
+ ncpus = malloc_ncpus();
+
+#if (defined(JEMALLOC_HAVE_PTHREAD_ATFORK) && !defined(JEMALLOC_MUTEX_INIT_CB) \
+ && !defined(JEMALLOC_ZONE) && !defined(_WIN32) && \
+ !defined(__native_client__))
+ /* LinuxThreads' pthread_atfork() allocates. */
+ if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
+ jemalloc_postfork_child) != 0) {
+ malloc_write("<jemalloc>: Error in pthread_atfork()\n");
+ if (opt_abort)
+ abort();
+ return (true);
+ }
+#endif
+
+ return (false);
+}
+
+static bool
+malloc_init_hard_finish(tsdn_t *tsdn)
+{
+
+ if (malloc_mutex_boot())
+ return (true);
+
+ if (opt_narenas == 0) {
+ /*
+ * For SMP systems, create more than one arena per CPU by
+ * default.
+ */
+ if (ncpus > 1)
+ opt_narenas = ncpus << 2;
+ else
+ opt_narenas = 1;
+ }
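+	/* E.g. with ncpus == 8 the default above is 8 << 2 == 32 arenas. */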
+ narenas_auto = opt_narenas;
+ /*
+ * Limit the number of arenas to the indexing range of MALLOCX_ARENA().
+ */
+ if (narenas_auto > MALLOCX_ARENA_MAX) {
+ narenas_auto = MALLOCX_ARENA_MAX;
+ malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
+ narenas_auto);
+ }
+ narenas_total_set(narenas_auto);
+
+ /* Allocate and initialize arenas. */
+ arenas = (arena_t **)base_alloc(tsdn, sizeof(arena_t *) *
+ (MALLOCX_ARENA_MAX+1));
+ if (arenas == NULL)
+ return (true);
+ /* Copy the pointer to the one arena that was already initialized. */
+ arena_set(0, a0);
+
+ malloc_init_state = malloc_init_initialized;
+ malloc_slow_flag_init();
+
+ return (false);
+}
+
+static bool
+malloc_init_hard(void)
+{
+ tsd_t *tsd;
+
+#if defined(_WIN32) && _WIN32_WINNT < 0x0600
+ _init_init_lock();
+#endif
+ malloc_mutex_lock(TSDN_NULL, &init_lock);
+ if (!malloc_init_hard_needed()) {
+ malloc_mutex_unlock(TSDN_NULL, &init_lock);
+ return (false);
+ }
+
+ if (malloc_init_state != malloc_init_a0_initialized &&
+ malloc_init_hard_a0_locked()) {
+ malloc_mutex_unlock(TSDN_NULL, &init_lock);
+ return (true);
+ }
+
+ malloc_mutex_unlock(TSDN_NULL, &init_lock);
+ /* Recursive allocation relies on functional tsd. */
+ tsd = malloc_tsd_boot0();
+ if (tsd == NULL)
+ return (true);
+ if (malloc_init_hard_recursible())
+ return (true);
+ malloc_mutex_lock(tsd_tsdn(tsd), &init_lock);
+
+ if (config_prof && prof_boot2(tsd)) {
+ malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
+ return (true);
+ }
+
+ if (malloc_init_hard_finish(tsd_tsdn(tsd))) {
+ malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
+ return (true);
+ }
+
+ malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
+ malloc_tsd_boot1();
+ return (false);
+}
+
+/*
+ * End initialization functions.
+ */
+/******************************************************************************/
+/*
+ * Begin malloc(3)-compatible functions.
+ */
+
+static void *
+ialloc_prof_sample(tsd_t *tsd, size_t usize, szind_t ind, bool zero,
+ prof_tctx_t *tctx, bool slow_path)
+{
+ void *p;
+
+ if (tctx == NULL)
+ return (NULL);
+ if (usize <= SMALL_MAXCLASS) {
+ szind_t ind_large = size2index(LARGE_MINCLASS);
+ p = ialloc(tsd, LARGE_MINCLASS, ind_large, zero, slow_path);
+ if (p == NULL)
+ return (NULL);
+ arena_prof_promoted(tsd_tsdn(tsd), p, usize);
+ } else
+ p = ialloc(tsd, usize, ind, zero, slow_path);
+
+ return (p);
+}
+
+JEMALLOC_ALWAYS_INLINE_C void *
+ialloc_prof(tsd_t *tsd, size_t usize, szind_t ind, bool zero, bool slow_path)
+{
+ void *p;
+ prof_tctx_t *tctx;
+
+ tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
+ if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
+ p = ialloc_prof_sample(tsd, usize, ind, zero, tctx, slow_path);
+ else
+ p = ialloc(tsd, usize, ind, zero, slow_path);
+ if (unlikely(p == NULL)) {
+ prof_alloc_rollback(tsd, tctx, true);
+ return (NULL);
+ }
+ prof_malloc(tsd_tsdn(tsd), p, usize, tctx);
+
+ return (p);
+}
+
+/*
+ * ialloc_body() is inlined so that fast and slow paths are generated separately
+ * with statically known slow_path.
+ *
+ * This function guarantees that *tsdn is non-NULL on success.
+ */
+JEMALLOC_ALWAYS_INLINE_C void *
+ialloc_body(size_t size, bool zero, tsdn_t **tsdn, size_t *usize,
+ bool slow_path)
+{
+ tsd_t *tsd;
+ szind_t ind;
+
+ if (slow_path && unlikely(malloc_init())) {
+ *tsdn = NULL;
+ return (NULL);
+ }
+
+ tsd = tsd_fetch();
+ *tsdn = tsd_tsdn(tsd);
+ witness_assert_lockless(tsd_tsdn(tsd));
+
+ ind = size2index(size);
+ if (unlikely(ind >= NSIZES))
+ return (NULL);
+
+ if (config_stats || (config_prof && opt_prof) || (slow_path &&
+ config_valgrind && unlikely(in_valgrind))) {
+ *usize = index2size(ind);
+ assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
+ }
+
+ if (config_prof && opt_prof)
+ return (ialloc_prof(tsd, *usize, ind, zero, slow_path));
+
+ return (ialloc(tsd, size, ind, zero, slow_path));
+}
+
+JEMALLOC_ALWAYS_INLINE_C void
+ialloc_post_check(void *ret, tsdn_t *tsdn, size_t usize, const char *func,
+ bool update_errno, bool slow_path)
+{
+
+ assert(!tsdn_null(tsdn) || ret == NULL);
+
+ if (unlikely(ret == NULL)) {
+ if (slow_path && config_xmalloc && unlikely(opt_xmalloc)) {
+ malloc_printf("<jemalloc>: Error in %s(): out of "
+ "memory\n", func);
+ abort();
+ }
+ if (update_errno)
+ set_errno(ENOMEM);
+ }
+ if (config_stats && likely(ret != NULL)) {
+ assert(usize == isalloc(tsdn, ret, config_prof));
+ *tsd_thread_allocatedp_get(tsdn_tsd(tsdn)) += usize;
+ }
+ witness_assert_lockless(tsdn);
+}
+
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
+je_malloc(size_t size)
+{
+ void *ret;
+ tsdn_t *tsdn;
+ size_t usize JEMALLOC_CC_SILENCE_INIT(0);
+
+ if (size == 0)
+ size = 1;
+
+ if (likely(!malloc_slow)) {
+ ret = ialloc_body(size, false, &tsdn, &usize, false);
+ ialloc_post_check(ret, tsdn, usize, "malloc", true, false);
+ } else {
+ ret = ialloc_body(size, false, &tsdn, &usize, true);
+ ialloc_post_check(ret, tsdn, usize, "malloc", true, true);
+ UTRACE(0, size, ret);
+ JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, false);
+ }
+
+ return (ret);
+}
+
+static void *
+imemalign_prof_sample(tsd_t *tsd, size_t alignment, size_t usize,
+ prof_tctx_t *tctx)
+{
+ void *p;
+
+ if (tctx == NULL)
+ return (NULL);
+ if (usize <= SMALL_MAXCLASS) {
+ assert(sa2u(LARGE_MINCLASS, alignment) == LARGE_MINCLASS);
+ p = ipalloc(tsd, LARGE_MINCLASS, alignment, false);
+ if (p == NULL)
+ return (NULL);
+ arena_prof_promoted(tsd_tsdn(tsd), p, usize);
+ } else
+ p = ipalloc(tsd, usize, alignment, false);
+
+ return (p);
+}
+
+JEMALLOC_ALWAYS_INLINE_C void *
+imemalign_prof(tsd_t *tsd, size_t alignment, size_t usize)
+{
+ void *p;
+ prof_tctx_t *tctx;
+
+ tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
+ if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
+ p = imemalign_prof_sample(tsd, alignment, usize, tctx);
+ else
+ p = ipalloc(tsd, usize, alignment, false);
+ if (unlikely(p == NULL)) {
+ prof_alloc_rollback(tsd, tctx, true);
+ return (NULL);
+ }
+ prof_malloc(tsd_tsdn(tsd), p, usize, tctx);
+
+ return (p);
+}
+
+JEMALLOC_ATTR(nonnull(1))
+static int
+imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
+{
+ int ret;
+ tsd_t *tsd;
+ size_t usize;
+ void *result;
+
+ assert(min_alignment != 0);
+
+ if (unlikely(malloc_init())) {
+ tsd = NULL;
+ result = NULL;
+ goto label_oom;
+ }
+ tsd = tsd_fetch();
+ witness_assert_lockless(tsd_tsdn(tsd));
+ if (size == 0)
+ size = 1;
+
+ /* Make sure that alignment is a large enough power of 2. */
+ if (unlikely(((alignment - 1) & alignment) != 0
+ || (alignment < min_alignment))) {
+ if (config_xmalloc && unlikely(opt_xmalloc)) {
+ malloc_write("<jemalloc>: Error allocating "
+ "aligned memory: invalid alignment\n");
+ abort();
+ }
+ result = NULL;
+ ret = EINVAL;
+ goto label_return;
+ }
+
+ usize = sa2u(size, alignment);
+ if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) {
+ result = NULL;
+ goto label_oom;
+ }
+
+ if (config_prof && opt_prof)
+ result = imemalign_prof(tsd, alignment, usize);
+ else
+ result = ipalloc(tsd, usize, alignment, false);
+ if (unlikely(result == NULL))
+ goto label_oom;
+ assert(((uintptr_t)result & (alignment - 1)) == ZU(0));
+
+ *memptr = result;
+ ret = 0;
+label_return:
+ if (config_stats && likely(result != NULL)) {
+ assert(usize == isalloc(tsd_tsdn(tsd), result, config_prof));
+ *tsd_thread_allocatedp_get(tsd) += usize;
+ }
+ UTRACE(0, size, result);
+ JEMALLOC_VALGRIND_MALLOC(result != NULL, tsd_tsdn(tsd), result, usize,
+ false);
+ witness_assert_lockless(tsd_tsdn(tsd));
+ return (ret);
+label_oom:
+ assert(result == NULL);
+ if (config_xmalloc && unlikely(opt_xmalloc)) {
+ malloc_write("<jemalloc>: Error allocating aligned memory: "
+ "out of memory\n");
+ abort();
+ }
+ ret = ENOMEM;
+ witness_assert_lockless(tsd_tsdn(tsd));
+ goto label_return;
+}
+
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW
+JEMALLOC_ATTR(nonnull(1))
+je_posix_memalign(void **memptr, size_t alignment, size_t size)
+{
+ int ret;
+
+ ret = imemalign(memptr, alignment, size, sizeof(void *));
+
+ return (ret);
+}
+
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2)
+je_aligned_alloc(size_t alignment, size_t size)
+{
+ void *ret;
+ int err;
+
+ if (unlikely((err = imemalign(&ret, alignment, size, 1)) != 0)) {
+ ret = NULL;
+ set_errno(err);
+ }
+
+ return (ret);
+}
+
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2)
+je_calloc(size_t num, size_t size)
+{
+ void *ret;
+ tsdn_t *tsdn;
+ size_t num_size;
+ size_t usize JEMALLOC_CC_SILENCE_INIT(0);
+
+ num_size = num * size;
+ if (unlikely(num_size == 0)) {
+ if (num == 0 || size == 0)
+ num_size = 1;
+ else
+ num_size = HUGE_MAXCLASS + 1; /* Trigger OOM. */
+ /*
+ * Try to avoid division here. We know that it isn't possible to
+ * overflow during multiplication if neither operand uses any of the
+ * most significant half of the bits in a size_t.
+ */
+ } else if (unlikely(((num | size) & (SIZE_T_MAX << (sizeof(size_t) <<
+ 2))) && (num_size / size != num)))
+ num_size = HUGE_MAXCLASS + 1; /* size_t overflow. */
+
+ if (likely(!malloc_slow)) {
+ ret = ialloc_body(num_size, true, &tsdn, &usize, false);
+ ialloc_post_check(ret, tsdn, usize, "calloc", true, false);
+ } else {
+ ret = ialloc_body(num_size, true, &tsdn, &usize, true);
+ ialloc_post_check(ret, tsdn, usize, "calloc", true, true);
+ UTRACE(0, num_size, ret);
+ JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, true);
+ }
+
+ return (ret);
+}
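The overflow guard in je_calloc() above skips the division on the common path: if neither operand has any bit set in the upper half of size_t, the product cannot overflow. A minimal standalone restatement of that test follows; the function name and the use of SIZE_MAX instead of the internal SIZE_T_MAX are illustrative and not part of the patch.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Return true iff num * size overflows size_t; *prod receives the (possibly
 * wrapped) product. The division only runs when an upper-half bit is set. */
static bool
mul_overflows(size_t num, size_t size, size_t *prod)
{
	const size_t half_mask = SIZE_MAX << (sizeof(size_t) << 2);

	*prod = num * size;
	if (((num | size) & half_mask) == 0)
		return (false);	/* Both operands fit in half a size_t. */
	return (size != 0 && *prod / size != num);
}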
+
+static void *
+irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
+ prof_tctx_t *tctx)
+{
+ void *p;
+
+ if (tctx == NULL)
+ return (NULL);
+ if (usize <= SMALL_MAXCLASS) {
+ p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false);
+ if (p == NULL)
+ return (NULL);
+ arena_prof_promoted(tsd_tsdn(tsd), p, usize);
+ } else
+ p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
+
+ return (p);
+}
+
+JEMALLOC_ALWAYS_INLINE_C void *
+irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize)
+{
+ void *p;
+ bool prof_active;
+ prof_tctx_t *old_tctx, *tctx;
+
+ prof_active = prof_active_get_unlocked();
+ old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr);
+ tctx = prof_alloc_prep(tsd, usize, prof_active, true);
+ if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
+ p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx);
+ else
+ p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
+ if (unlikely(p == NULL)) {
+ prof_alloc_rollback(tsd, tctx, true);
+ return (NULL);
+ }
+ prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize,
+ old_tctx);
+
+ return (p);
+}
+
+JEMALLOC_INLINE_C void
+ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
+{
+ size_t usize;
+ UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
+
+ witness_assert_lockless(tsd_tsdn(tsd));
+
+ assert(ptr != NULL);
+ assert(malloc_initialized() || IS_INITIALIZER);
+
+ if (config_prof && opt_prof) {
+ usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
+ prof_free(tsd, ptr, usize);
+ } else if (config_stats || config_valgrind)
+ usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
+ if (config_stats)
+ *tsd_thread_deallocatedp_get(tsd) += usize;
+
+ if (likely(!slow_path))
+ iqalloc(tsd, ptr, tcache, false);
+ else {
+ if (config_valgrind && unlikely(in_valgrind))
+ rzsize = p2rz(tsd_tsdn(tsd), ptr);
+ iqalloc(tsd, ptr, tcache, true);
+ JEMALLOC_VALGRIND_FREE(ptr, rzsize);
+ }
+}
+
+JEMALLOC_INLINE_C void
+isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path)
+{
+ UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
+
+ witness_assert_lockless(tsd_tsdn(tsd));
+
+ assert(ptr != NULL);
+ assert(malloc_initialized() || IS_INITIALIZER);
+
+ if (config_prof && opt_prof)
+ prof_free(tsd, ptr, usize);
+ if (config_stats)
+ *tsd_thread_deallocatedp_get(tsd) += usize;
+ if (config_valgrind && unlikely(in_valgrind))
+ rzsize = p2rz(tsd_tsdn(tsd), ptr);
+ isqalloc(tsd, ptr, usize, tcache, slow_path);
+ JEMALLOC_VALGRIND_FREE(ptr, rzsize);
+}
+
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ALLOC_SIZE(2)
+je_realloc(void *ptr, size_t size)
+{
+ void *ret;
+ tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL);
+ size_t usize JEMALLOC_CC_SILENCE_INIT(0);
+ size_t old_usize = 0;
+ UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
+
+ if (unlikely(size == 0)) {
+ if (ptr != NULL) {
+ tsd_t *tsd;
+
+ /* realloc(ptr, 0) is equivalent to free(ptr). */
+ UTRACE(ptr, 0, 0);
+ tsd = tsd_fetch();
+ ifree(tsd, ptr, tcache_get(tsd, false), true);
+ return (NULL);
+ }
+ size = 1;
+ }
+
+ if (likely(ptr != NULL)) {
+ tsd_t *tsd;
+
+ assert(malloc_initialized() || IS_INITIALIZER);
+ malloc_thread_init();
+ tsd = tsd_fetch();
+
+ witness_assert_lockless(tsd_tsdn(tsd));
+
+ old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
+ if (config_valgrind && unlikely(in_valgrind)) {
+ old_rzsize = config_prof ? p2rz(tsd_tsdn(tsd), ptr) :
+ u2rz(old_usize);
+ }
+
+ if (config_prof && opt_prof) {
+ usize = s2u(size);
+ ret = unlikely(usize == 0 || usize > HUGE_MAXCLASS) ?
+ NULL : irealloc_prof(tsd, ptr, old_usize, usize);
+ } else {
+ if (config_stats || (config_valgrind &&
+ unlikely(in_valgrind)))
+ usize = s2u(size);
+ ret = iralloc(tsd, ptr, old_usize, size, 0, false);
+ }
+ tsdn = tsd_tsdn(tsd);
+ } else {
+ /* realloc(NULL, size) is equivalent to malloc(size). */
+ if (likely(!malloc_slow))
+ ret = ialloc_body(size, false, &tsdn, &usize, false);
+ else
+ ret = ialloc_body(size, false, &tsdn, &usize, true);
+ assert(!tsdn_null(tsdn) || ret == NULL);
+ }
+
+ if (unlikely(ret == NULL)) {
+ if (config_xmalloc && unlikely(opt_xmalloc)) {
+ malloc_write("<jemalloc>: Error in realloc(): "
+ "out of memory\n");
+ abort();
+ }
+ set_errno(ENOMEM);
+ }
+ if (config_stats && likely(ret != NULL)) {
+ tsd_t *tsd;
+
+ assert(usize == isalloc(tsdn, ret, config_prof));
+ tsd = tsdn_tsd(tsdn);
+ *tsd_thread_allocatedp_get(tsd) += usize;
+ *tsd_thread_deallocatedp_get(tsd) += old_usize;
+ }
+ UTRACE(ptr, size, ret);
+ JEMALLOC_VALGRIND_REALLOC(maybe, tsdn, ret, usize, maybe, ptr,
+ old_usize, old_rzsize, maybe, false);
+ witness_assert_lockless(tsdn);
+ return (ret);
+}
+
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW
+je_free(void *ptr)
+{
+
+ UTRACE(ptr, 0, 0);
+ if (likely(ptr != NULL)) {
+ tsd_t *tsd = tsd_fetch();
+ witness_assert_lockless(tsd_tsdn(tsd));
+ if (likely(!malloc_slow))
+ ifree(tsd, ptr, tcache_get(tsd, false), false);
+ else
+ ifree(tsd, ptr, tcache_get(tsd, false), true);
+ witness_assert_lockless(tsd_tsdn(tsd));
+ }
+}
+
+/*
+ * End malloc(3)-compatible functions.
+ */
+/******************************************************************************/
+/*
+ * Begin non-standard override functions.
+ */
+
+#ifdef JEMALLOC_OVERRIDE_MEMALIGN
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ATTR(malloc)
+je_memalign(size_t alignment, size_t size)
+{
+ void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
+ if (unlikely(imemalign(&ret, alignment, size, 1) != 0))
+ ret = NULL;
+ return (ret);
+}
+#endif
+
+#ifdef JEMALLOC_OVERRIDE_VALLOC
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ATTR(malloc)
+je_valloc(size_t size)
+{
+ void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
+ if (unlikely(imemalign(&ret, PAGE, size, 1) != 0))
+ ret = NULL;
+ return (ret);
+}
+#endif
+
+/*
+ * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
+ * #define je_malloc malloc
+ */
+#define malloc_is_malloc 1
+#define is_malloc_(a) malloc_is_ ## a
+#define is_malloc(a) is_malloc_(a)
+
+#if ((is_malloc(je_malloc) == 1) && defined(JEMALLOC_GLIBC_MALLOC_HOOK))
+/*
+ * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
+ * to inconsistently reference libc's malloc(3)-compatible functions
+ * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
+ *
+ * These definitions interpose hooks in glibc. The functions are actually
+ * passed an extra argument for the caller return address, which will be
+ * ignored.
+ */
+JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free;
+JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc;
+JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc;
+# ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK
+JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
+ je_memalign;
+# endif
+
+#ifdef CPU_COUNT
+/*
+ * To enable static linking with glibc, the libc-specific malloc interface must
+ * also be implemented, so that none of glibc's malloc.o functions are pulled
+ * into the link.
+ */
+#define ALIAS(je_fn) __attribute__((alias (#je_fn), used))
+/* To force macro expansion of je_ prefix before stringification. */
+#define PREALIAS(je_fn) ALIAS(je_fn)
+void *__libc_malloc(size_t size) PREALIAS(je_malloc);
+void __libc_free(void* ptr) PREALIAS(je_free);
+void *__libc_realloc(void* ptr, size_t size) PREALIAS(je_realloc);
+void *__libc_calloc(size_t n, size_t size) PREALIAS(je_calloc);
+void *__libc_memalign(size_t align, size_t s) PREALIAS(je_memalign);
+void *__libc_valloc(size_t size) PREALIAS(je_valloc);
+int __posix_memalign(void** r, size_t a, size_t s)
+ PREALIAS(je_posix_memalign);
+#undef PREALIAS
+#undef ALIAS
+
+#endif
+
+#endif
+
+/*
+ * End non-standard override functions.
+ */
+/******************************************************************************/
+/*
+ * Begin non-standard functions.
+ */
+
+JEMALLOC_ALWAYS_INLINE_C bool
+imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize,
+ size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena)
+{
+
+ if ((flags & MALLOCX_LG_ALIGN_MASK) == 0) {
+ *alignment = 0;
+ *usize = s2u(size);
+ } else {
+ *alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
+ *usize = sa2u(size, *alignment);
+ }
+ if (unlikely(*usize == 0 || *usize > HUGE_MAXCLASS))
+ return (true);
+ *zero = MALLOCX_ZERO_GET(flags);
+ if ((flags & MALLOCX_TCACHE_MASK) != 0) {
+ if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
+ *tcache = NULL;
+ else
+ *tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
+ } else
+ *tcache = tcache_get(tsd, true);
+ if ((flags & MALLOCX_ARENA_MASK) != 0) {
+ unsigned arena_ind = MALLOCX_ARENA_GET(flags);
+ *arena = arena_get(tsd_tsdn(tsd), arena_ind, true);
+ if (unlikely(*arena == NULL))
+ return (true);
+ } else
+ *arena = NULL;
+ return (false);
+}
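imallocx_flags_decode() above unpacks the public MALLOCX_* flag bits into alignment, zeroing, tcache, and arena choices. A hedged sketch of how a caller composes those bits; the flag macros are the public ones from jemalloc's headers, and arena_ind is assumed to come from an earlier "arenas.extend" mallctl call.

/* Illustrative only: 64-byte-aligned, zeroed allocation from a specific
 * arena, bypassing the thread cache entirely. */
static void *
example_mallocx(size_t size, unsigned arena_ind)
{
	int flags = MALLOCX_ALIGN(64) | MALLOCX_ZERO | MALLOCX_TCACHE_NONE |
	    MALLOCX_ARENA(arena_ind);

	return (je_mallocx(size, flags));
}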
+
+JEMALLOC_ALWAYS_INLINE_C void *
+imallocx_flags(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
+ tcache_t *tcache, arena_t *arena, bool slow_path)
+{
+ szind_t ind;
+
+ if (unlikely(alignment != 0))
+ return (ipalloct(tsdn, usize, alignment, zero, tcache, arena));
+ ind = size2index(usize);
+ assert(ind < NSIZES);
+ return (iallocztm(tsdn, usize, ind, zero, tcache, false, arena,
+ slow_path));
+}
+
+static void *
+imallocx_prof_sample(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
+ tcache_t *tcache, arena_t *arena, bool slow_path)
+{
+ void *p;
+
+ if (usize <= SMALL_MAXCLASS) {
+ assert(((alignment == 0) ? s2u(LARGE_MINCLASS) :
+ sa2u(LARGE_MINCLASS, alignment)) == LARGE_MINCLASS);
+ p = imallocx_flags(tsdn, LARGE_MINCLASS, alignment, zero,
+ tcache, arena, slow_path);
+ if (p == NULL)
+ return (NULL);
+ arena_prof_promoted(tsdn, p, usize);
+ } else {
+ p = imallocx_flags(tsdn, usize, alignment, zero, tcache, arena,
+ slow_path);
+ }
+
+ return (p);
+}
+
+JEMALLOC_ALWAYS_INLINE_C void *
+imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize, bool slow_path)
+{
+ void *p;
+ size_t alignment;
+ bool zero;
+ tcache_t *tcache;
+ arena_t *arena;
+ prof_tctx_t *tctx;
+
+ if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment,
+ &zero, &tcache, &arena)))
+ return (NULL);
+ tctx = prof_alloc_prep(tsd, *usize, prof_active_get_unlocked(), true);
+ if (likely((uintptr_t)tctx == (uintptr_t)1U)) {
+ p = imallocx_flags(tsd_tsdn(tsd), *usize, alignment, zero,
+ tcache, arena, slow_path);
+ } else if ((uintptr_t)tctx > (uintptr_t)1U) {
+ p = imallocx_prof_sample(tsd_tsdn(tsd), *usize, alignment, zero,
+ tcache, arena, slow_path);
+ } else
+ p = NULL;
+ if (unlikely(p == NULL)) {
+ prof_alloc_rollback(tsd, tctx, true);
+ return (NULL);
+ }
+ prof_malloc(tsd_tsdn(tsd), p, *usize, tctx);
+
+ assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
+ return (p);
+}
+
+JEMALLOC_ALWAYS_INLINE_C void *
+imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize,
+ bool slow_path)
+{
+ void *p;
+ size_t alignment;
+ bool zero;
+ tcache_t *tcache;
+ arena_t *arena;
+
+ if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment,
+ &zero, &tcache, &arena)))
+ return (NULL);
+ p = imallocx_flags(tsd_tsdn(tsd), *usize, alignment, zero, tcache,
+ arena, slow_path);
+ assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
+ return (p);
+}
+
+/* This function guarantees that *tsdn is non-NULL on success. */
+JEMALLOC_ALWAYS_INLINE_C void *
+imallocx_body(size_t size, int flags, tsdn_t **tsdn, size_t *usize,
+ bool slow_path)
+{
+ tsd_t *tsd;
+
+ if (slow_path && unlikely(malloc_init())) {
+ *tsdn = NULL;
+ return (NULL);
+ }
+
+ tsd = tsd_fetch();
+ *tsdn = tsd_tsdn(tsd);
+ witness_assert_lockless(tsd_tsdn(tsd));
+
+ if (likely(flags == 0)) {
+ szind_t ind = size2index(size);
+ if (unlikely(ind >= NSIZES))
+ return (NULL);
+ if (config_stats || (config_prof && opt_prof) || (slow_path &&
+ config_valgrind && unlikely(in_valgrind))) {
+ *usize = index2size(ind);
+ assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
+ }
+
+ if (config_prof && opt_prof) {
+ return (ialloc_prof(tsd, *usize, ind, false,
+ slow_path));
+ }
+
+ return (ialloc(tsd, size, ind, false, slow_path));
+ }
+
+ if (config_prof && opt_prof)
+ return (imallocx_prof(tsd, size, flags, usize, slow_path));
+
+ return (imallocx_no_prof(tsd, size, flags, usize, slow_path));
+}
+
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
+je_mallocx(size_t size, int flags)
+{
+ tsdn_t *tsdn;
+ void *p;
+ size_t usize;
+
+ assert(size != 0);
+
+ if (likely(!malloc_slow)) {
+ p = imallocx_body(size, flags, &tsdn, &usize, false);
+ ialloc_post_check(p, tsdn, usize, "mallocx", false, false);
+ } else {
+ p = imallocx_body(size, flags, &tsdn, &usize, true);
+ ialloc_post_check(p, tsdn, usize, "mallocx", false, true);
+ UTRACE(0, size, p);
+ JEMALLOC_VALGRIND_MALLOC(p != NULL, tsdn, p, usize,
+ MALLOCX_ZERO_GET(flags));
+ }
+
+ return (p);
+}
+
+static void *
+irallocx_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize,
+ size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena,
+ prof_tctx_t *tctx)
+{
+ void *p;
+
+ if (tctx == NULL)
+ return (NULL);
+ if (usize <= SMALL_MAXCLASS) {
+ p = iralloct(tsd, old_ptr, old_usize, LARGE_MINCLASS, alignment,
+ zero, tcache, arena);
+ if (p == NULL)
+ return (NULL);
+ arena_prof_promoted(tsd_tsdn(tsd), p, usize);
+ } else {
+ p = iralloct(tsd, old_ptr, old_usize, usize, alignment, zero,
+ tcache, arena);
+ }
+
+ return (p);
+}
+
+JEMALLOC_ALWAYS_INLINE_C void *
+irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
+ size_t alignment, size_t *usize, bool zero, tcache_t *tcache,
+ arena_t *arena)
+{
+ void *p;
+ bool prof_active;
+ prof_tctx_t *old_tctx, *tctx;
+
+ prof_active = prof_active_get_unlocked();
+ old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr);
+ tctx = prof_alloc_prep(tsd, *usize, prof_active, false);
+ if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
+ p = irallocx_prof_sample(tsd, old_ptr, old_usize, *usize,
+ alignment, zero, tcache, arena, tctx);
+ } else {
+ p = iralloct(tsd, old_ptr, old_usize, size, alignment, zero,
+ tcache, arena);
+ }
+ if (unlikely(p == NULL)) {
+ prof_alloc_rollback(tsd, tctx, false);
+ return (NULL);
+ }
+
+ if (p == old_ptr && alignment != 0) {
+ /*
+ * The allocation did not move, so it is possible that the size
+ * class is smaller than would guarantee the requested
+ * alignment, and that the alignment constraint was
+ * serendipitously satisfied. Additionally, old_usize may not
+ * be the same as the current usize because of in-place large
+ * reallocation. Therefore, query the actual value of usize.
+ */
+ *usize = isalloc(tsd_tsdn(tsd), p, config_prof);
+ }
+ prof_realloc(tsd, p, *usize, tctx, prof_active, false, old_ptr,
+ old_usize, old_tctx);
+
+ return (p);
+}
+
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ALLOC_SIZE(2)
+je_rallocx(void *ptr, size_t size, int flags)
+{
+ void *p;
+ tsd_t *tsd;
+ size_t usize;
+ size_t old_usize;
+ UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
+ size_t alignment = MALLOCX_ALIGN_GET(flags);
+ bool zero = flags & MALLOCX_ZERO;
+ arena_t *arena;
+ tcache_t *tcache;
+
+ assert(ptr != NULL);
+ assert(size != 0);
+ assert(malloc_initialized() || IS_INITIALIZER);
+ malloc_thread_init();
+ tsd = tsd_fetch();
+ witness_assert_lockless(tsd_tsdn(tsd));
+
+ if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
+ unsigned arena_ind = MALLOCX_ARENA_GET(flags);
+ arena = arena_get(tsd_tsdn(tsd), arena_ind, true);
+ if (unlikely(arena == NULL))
+ goto label_oom;
+ } else
+ arena = NULL;
+
+ if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
+ if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
+ tcache = NULL;
+ else
+ tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
+ } else
+ tcache = tcache_get(tsd, true);
+
+ old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
+ if (config_valgrind && unlikely(in_valgrind))
+ old_rzsize = u2rz(old_usize);
+
+ if (config_prof && opt_prof) {
+ usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
+ if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
+ goto label_oom;
+ p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
+ zero, tcache, arena);
+ if (unlikely(p == NULL))
+ goto label_oom;
+ } else {
+ p = iralloct(tsd, ptr, old_usize, size, alignment, zero,
+ tcache, arena);
+ if (unlikely(p == NULL))
+ goto label_oom;
+ if (config_stats || (config_valgrind && unlikely(in_valgrind)))
+ usize = isalloc(tsd_tsdn(tsd), p, config_prof);
+ }
+ assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
+
+ if (config_stats) {
+ *tsd_thread_allocatedp_get(tsd) += usize;
+ *tsd_thread_deallocatedp_get(tsd) += old_usize;
+ }
+ UTRACE(ptr, size, p);
+ JEMALLOC_VALGRIND_REALLOC(maybe, tsd_tsdn(tsd), p, usize, no, ptr,
+ old_usize, old_rzsize, no, zero);
+ witness_assert_lockless(tsd_tsdn(tsd));
+ return (p);
+label_oom:
+ if (config_xmalloc && unlikely(opt_xmalloc)) {
+ malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
+ abort();
+ }
+ UTRACE(ptr, size, 0);
+ witness_assert_lockless(tsd_tsdn(tsd));
+ return (NULL);
+}
+
+JEMALLOC_ALWAYS_INLINE_C size_t
+ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
+ size_t extra, size_t alignment, bool zero)
+{
+ size_t usize;
+
+ if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero))
+ return (old_usize);
+ usize = isalloc(tsdn, ptr, config_prof);
+
+ return (usize);
+}
+
+static size_t
+ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
+ size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx)
+{
+ size_t usize;
+
+ if (tctx == NULL)
+ return (old_usize);
+ usize = ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment,
+ zero);
+
+ return (usize);
+}
+
+JEMALLOC_ALWAYS_INLINE_C size_t
+ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
+ size_t extra, size_t alignment, bool zero)
+{
+ size_t usize_max, usize;
+ bool prof_active;
+ prof_tctx_t *old_tctx, *tctx;
+
+ prof_active = prof_active_get_unlocked();
+ old_tctx = prof_tctx_get(tsd_tsdn(tsd), ptr);
+ /*
+ * usize isn't knowable before ixalloc() returns when extra is non-zero.
+ * Therefore, compute its maximum possible value and use that in
+ * prof_alloc_prep() to decide whether to capture a backtrace.
+ * prof_realloc() will use the actual usize to decide whether to sample.
+ */
+ if (alignment == 0) {
+ usize_max = s2u(size+extra);
+ assert(usize_max > 0 && usize_max <= HUGE_MAXCLASS);
+ } else {
+ usize_max = sa2u(size+extra, alignment);
+ if (unlikely(usize_max == 0 || usize_max > HUGE_MAXCLASS)) {
+ /*
+ * usize_max is out of range, and chances are that
+ * allocation will fail, but use the maximum possible
+ * value and carry on with prof_alloc_prep(), just in
+ * case allocation succeeds.
+ */
+ usize_max = HUGE_MAXCLASS;
+ }
+ }
+ tctx = prof_alloc_prep(tsd, usize_max, prof_active, false);
+
+ if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
+ usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize,
+ size, extra, alignment, zero, tctx);
+ } else {
+ usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
+ extra, alignment, zero);
+ }
+ if (usize == old_usize) {
+ prof_alloc_rollback(tsd, tctx, false);
+ return (usize);
+ }
+ prof_realloc(tsd, ptr, usize, tctx, prof_active, false, ptr, old_usize,
+ old_tctx);
+
+ return (usize);
+}
+
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
+je_xallocx(void *ptr, size_t size, size_t extra, int flags)
+{
+ tsd_t *tsd;
+ size_t usize, old_usize;
+ UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
+ size_t alignment = MALLOCX_ALIGN_GET(flags);
+ bool zero = flags & MALLOCX_ZERO;
+
+ assert(ptr != NULL);
+ assert(size != 0);
+ assert(SIZE_T_MAX - size >= extra);
+ assert(malloc_initialized() || IS_INITIALIZER);
+ malloc_thread_init();
+ tsd = tsd_fetch();
+ witness_assert_lockless(tsd_tsdn(tsd));
+
+ old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
+
+ /*
+ * The API explicitly absolves itself of protecting against (size +
+ * extra) numerical overflow, but we may need to clamp extra to avoid
+ * exceeding HUGE_MAXCLASS.
+ *
+ * Ordinarily, size limit checking is handled deeper down, but here we
+ * have to check as part of (size + extra) clamping, since we need the
+ * clamped value in the above helper functions.
+ */
+ if (unlikely(size > HUGE_MAXCLASS)) {
+ usize = old_usize;
+ goto label_not_resized;
+ }
+ if (unlikely(HUGE_MAXCLASS - size < extra))
+ extra = HUGE_MAXCLASS - size;
+
+ if (config_valgrind && unlikely(in_valgrind))
+ old_rzsize = u2rz(old_usize);
+
+ if (config_prof && opt_prof) {
+ usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
+ alignment, zero);
+ } else {
+ usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
+ extra, alignment, zero);
+ }
+ if (unlikely(usize == old_usize))
+ goto label_not_resized;
+
+ if (config_stats) {
+ *tsd_thread_allocatedp_get(tsd) += usize;
+ *tsd_thread_deallocatedp_get(tsd) += old_usize;
+ }
+ JEMALLOC_VALGRIND_REALLOC(no, tsd_tsdn(tsd), ptr, usize, no, ptr,
+ old_usize, old_rzsize, no, zero);
+label_not_resized:
+ UTRACE(ptr, size, ptr);
+ witness_assert_lockless(tsd_tsdn(tsd));
+ return (usize);
+}
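Because je_xallocx() never moves the allocation and simply reports the old usable size when it cannot resize, a caller can attempt in-place growth first and fall back to a potentially moving je_rallocx() only when needed. A hedged sketch; the helper name is illustrative and not part of the patch.

/* Illustrative only: grow ptr to at least new_size, in place if possible. */
static void *
grow(void *ptr, size_t new_size, int flags)
{
	if (je_xallocx(ptr, new_size, 0, flags) >= new_size)
		return (ptr);	/* Resized (or already large enough) in place. */
	return (je_rallocx(ptr, new_size, flags));	/* May move; NULL on OOM. */
}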
+
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
+JEMALLOC_ATTR(pure)
+je_sallocx(const void *ptr, int flags)
+{
+ size_t usize;
+ tsdn_t *tsdn;
+
+ assert(malloc_initialized() || IS_INITIALIZER);
+ malloc_thread_init();
+
+ tsdn = tsdn_fetch();
+ witness_assert_lockless(tsdn);
+
+ if (config_ivsalloc)
+ usize = ivsalloc(tsdn, ptr, config_prof);
+ else
+ usize = isalloc(tsdn, ptr, config_prof);
+
+ witness_assert_lockless(tsdn);
+ return (usize);
+}
+
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW
+je_dallocx(void *ptr, int flags)
+{
+ tsd_t *tsd;
+ tcache_t *tcache;
+
+ assert(ptr != NULL);
+ assert(malloc_initialized() || IS_INITIALIZER);
+
+ tsd = tsd_fetch();
+ witness_assert_lockless(tsd_tsdn(tsd));
+ if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
+ if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
+ tcache = NULL;
+ else
+ tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
+ } else
+ tcache = tcache_get(tsd, false);
+
+ UTRACE(ptr, 0, 0);
+ if (likely(!malloc_slow))
+ ifree(tsd, ptr, tcache, false);
+ else
+ ifree(tsd, ptr, tcache, true);
+ witness_assert_lockless(tsd_tsdn(tsd));
+}
+
+JEMALLOC_ALWAYS_INLINE_C size_t
+inallocx(tsdn_t *tsdn, size_t size, int flags)
+{
+ size_t usize;
+
+ witness_assert_lockless(tsdn);
+
+ if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0))
+ usize = s2u(size);
+ else
+ usize = sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
+ witness_assert_lockless(tsdn);
+ return (usize);
+}
+
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW
+je_sdallocx(void *ptr, size_t size, int flags)
+{
+ tsd_t *tsd;
+ tcache_t *tcache;
+ size_t usize;
+
+ assert(ptr != NULL);
+ assert(malloc_initialized() || IS_INITIALIZER);
+ tsd = tsd_fetch();
+ usize = inallocx(tsd_tsdn(tsd), size, flags);
+ assert(usize == isalloc(tsd_tsdn(tsd), ptr, config_prof));
+
+ witness_assert_lockless(tsd_tsdn(tsd));
+ if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
+ if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
+ tcache = NULL;
+ else
+ tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
+ } else
+ tcache = tcache_get(tsd, false);
+
+ UTRACE(ptr, 0, 0);
+ if (likely(!malloc_slow))
+ isfree(tsd, ptr, usize, tcache, false);
+ else
+ isfree(tsd, ptr, usize, tcache, true);
+ witness_assert_lockless(tsd_tsdn(tsd));
+}
+
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
+JEMALLOC_ATTR(pure)
+je_nallocx(size_t size, int flags)
+{
+ size_t usize;
+ tsdn_t *tsdn;
+
+ assert(size != 0);
+
+ if (unlikely(malloc_init()))
+ return (0);
+
+ tsdn = tsdn_fetch();
+ witness_assert_lockless(tsdn);
+
+ usize = inallocx(tsdn, size, flags);
+ if (unlikely(usize > HUGE_MAXCLASS))
+ return (0);
+
+ witness_assert_lockless(tsdn);
+ return (usize);
+}
+
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW
+je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
+ size_t newlen)
+{
+ int ret;
+ tsd_t *tsd;
+
+ if (unlikely(malloc_init()))
+ return (EAGAIN);
+
+ tsd = tsd_fetch();
+ witness_assert_lockless(tsd_tsdn(tsd));
+ ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen);
+ witness_assert_lockless(tsd_tsdn(tsd));
+ return (ret);
+}
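je_mallctl() follows the sysctl-style convention: oldp/oldlenp receive the current value and newp/newlen optionally install a new one. A hedged sketch of the common read pattern, using the real "arenas.narenas" control; the helper name is illustrative.

/* Illustrative only: query how many arenas exist; returns 0 on error. */
static unsigned
read_narenas(void)
{
	unsigned narenas = 0;
	size_t sz = sizeof(narenas);

	if (je_mallctl("arenas.narenas", &narenas, &sz, NULL, 0) != 0)
		return (0);
	return (narenas);
}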
+
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW
+je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
+{
+ int ret;
+ tsdn_t *tsdn;
+
+ if (unlikely(malloc_init()))
+ return (EAGAIN);
+
+ tsdn = tsdn_fetch();
+ witness_assert_lockless(tsdn);
+ ret = ctl_nametomib(tsdn, name, mibp, miblenp);
+ witness_assert_lockless(tsdn);
+ return (ret);
+}
+
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW
+je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
+{
+ int ret;
+ tsd_t *tsd;
+
+ if (unlikely(malloc_init()))
+ return (EAGAIN);
+
+ tsd = tsd_fetch();
+ witness_assert_lockless(tsd_tsdn(tsd));
+ ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
+ witness_assert_lockless(tsd_tsdn(tsd));
+ return (ret);
+}
+
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW
+je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
+ const char *opts)
+{
+ tsdn_t *tsdn;
+
+ tsdn = tsdn_fetch();
+ witness_assert_lockless(tsdn);
+ stats_print(write_cb, cbopaque, opts);
+ witness_assert_lockless(tsdn);
+}
+
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
+je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
+{
+ size_t ret;
+ tsdn_t *tsdn;
+
+ assert(malloc_initialized() || IS_INITIALIZER);
+ malloc_thread_init();
+
+ tsdn = tsdn_fetch();
+ witness_assert_lockless(tsdn);
+
+ if (config_ivsalloc)
+ ret = ivsalloc(tsdn, ptr, config_prof);
+ else
+ ret = (ptr == NULL) ? 0 : isalloc(tsdn, ptr, config_prof);
+
+ witness_assert_lockless(tsdn);
+ return (ret);
+}
+
+/*
+ * End non-standard functions.
+ */
+/******************************************************************************/
+/*
+ * The following functions are used by threading libraries for protection of
+ * malloc during fork().
+ */
+
+/*
+ * If an application creates a thread before doing any allocation in the main
+ * thread, then calls fork(2) in the main thread followed by memory allocation
+ * in the child process, a race can occur that results in deadlock within the
+ * child: the main thread may have forked while the created thread had
+ * partially initialized the allocator. Ordinarily jemalloc prevents
+ * fork/malloc races via the following functions it registers during
+ * initialization using pthread_atfork(), but of course that does no good if
+ * the allocator isn't fully initialized at fork time. The following library
+ * constructor is a partial solution to this problem. It may still be possible
+ * to trigger the deadlock described above, but doing so would involve forking
+ * via a library constructor that runs before jemalloc's runs.
+ */
+#ifndef JEMALLOC_JET
+JEMALLOC_ATTR(constructor)
+static void
+jemalloc_constructor(void)
+{
+
+ malloc_init();
+}
+#endif
+
+#ifndef JEMALLOC_MUTEX_INIT_CB
+void
+jemalloc_prefork(void)
+#else
+JEMALLOC_EXPORT void
+_malloc_prefork(void)
+#endif
+{
+ tsd_t *tsd;
+ unsigned i, j, narenas;
+ arena_t *arena;
+
+#ifdef JEMALLOC_MUTEX_INIT_CB
+ if (!malloc_initialized())
+ return;
+#endif
+ assert(malloc_initialized());
+
+ tsd = tsd_fetch();
+
+ narenas = narenas_total_get();
+
+ witness_prefork(tsd);
+ /* Acquire all mutexes in a safe order. */
+ ctl_prefork(tsd_tsdn(tsd));
+ malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock);
+ prof_prefork0(tsd_tsdn(tsd));
+ for (i = 0; i < 3; i++) {
+ for (j = 0; j < narenas; j++) {
+ if ((arena = arena_get(tsd_tsdn(tsd), j, false)) !=
+ NULL) {
+ switch (i) {
+ case 0:
+ arena_prefork0(tsd_tsdn(tsd), arena);
+ break;
+ case 1:
+ arena_prefork1(tsd_tsdn(tsd), arena);
+ break;
+ case 2:
+ arena_prefork2(tsd_tsdn(tsd), arena);
+ break;
+ default: not_reached();
+ }
+ }
+ }
+ }
+ base_prefork(tsd_tsdn(tsd));
+ for (i = 0; i < narenas; i++) {
+ if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
+ arena_prefork3(tsd_tsdn(tsd), arena);
+ }
+ prof_prefork1(tsd_tsdn(tsd));
+}
+
+#ifndef JEMALLOC_MUTEX_INIT_CB
+void
+jemalloc_postfork_parent(void)
+#else
+JEMALLOC_EXPORT void
+_malloc_postfork(void)
+#endif
+{
+ tsd_t *tsd;
+ unsigned i, narenas;
+
+#ifdef JEMALLOC_MUTEX_INIT_CB
+ if (!malloc_initialized())
+ return;
+#endif
+ assert(malloc_initialized());
+
+ tsd = tsd_fetch();
+
+ witness_postfork_parent(tsd);
+ /* Release all mutexes, now that fork() has completed. */
+ base_postfork_parent(tsd_tsdn(tsd));
+ for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
+ arena_t *arena;
+
+ if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
+ arena_postfork_parent(tsd_tsdn(tsd), arena);
+ }
+ prof_postfork_parent(tsd_tsdn(tsd));
+ malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock);
+ ctl_postfork_parent(tsd_tsdn(tsd));
+}
+
+void
+jemalloc_postfork_child(void)
+{
+ tsd_t *tsd;
+ unsigned i, narenas;
+
+ assert(malloc_initialized());
+
+ tsd = tsd_fetch();
+
+ witness_postfork_child(tsd);
+ /* Release all mutexes, now that fork() has completed. */
+ base_postfork_child(tsd_tsdn(tsd));
+ for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
+ arena_t *arena;
+
+ if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
+ arena_postfork_child(tsd_tsdn(tsd), arena);
+ }
+ prof_postfork_child(tsd_tsdn(tsd));
+ malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock);
+ ctl_postfork_child(tsd_tsdn(tsd));
+}
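The three handlers above are the ones the fork-protection comment earlier in this section refers to; jemalloc registers them during initialization. A hedged sketch of that registration for a plain pthreads build; the helper below is illustrative and not part of the patch.

#include <pthread.h>

/* Illustrative only: hold every allocator mutex across fork() and restore or
 * re-initialize it afterwards via the handlers defined above. */
static void
register_fork_handlers(void)
{
	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
	    jemalloc_postfork_child) != 0)
		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
}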
+
+/******************************************************************************/
+
+/*
+ * Helps the application decide whether a pointer is worth re-allocating in
+ * order to reduce fragmentation.
+ * Returns 0 if the allocation is in the currently active run, or if it cannot
+ * contribute to fragmentation (large or huge allocation); otherwise returns 1
+ * and reports the bin utilization and run utilization, both in 16:16 fixed
+ * point.
+ * If the application decides to re-allocate, it should use MALLOCX_TCACHE_NONE
+ * when doing so.
+ */
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW
+je_get_defrag_hint(void* ptr, int *bin_util, int *run_util) {
+ int defrag = 0;
+ arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ if (likely(chunk != ptr)) { /* indication that this is not a HUGE alloc */
+ size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+ size_t mapbits = arena_mapbits_get(chunk, pageind);
+ if (likely((mapbits & CHUNK_MAP_LARGE) == 0)) { /* indication that this is not a LARGE alloc */
+ arena_t *arena = extent_node_arena_get(&chunk->node);
+ size_t rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
+ arena_run_t *run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run;
+ arena_bin_t *bin = &arena->bins[run->binind];
+ tsd_t *tsd = tsd_fetch();
+ malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
+			/* Runs in the same chunk as the bin's current run (runcur) are likely to become runcur next, so skip them. */
+ if (chunk != (arena_chunk_t *)CHUNK_ADDR2BASE(bin->runcur)) {
+ arena_bin_info_t *bin_info = &arena_bin_info[run->binind];
+ size_t availregs = bin_info->nregs * bin->stats.curruns;
+ *bin_util = (bin->stats.curregs<<16) / availregs;
+ *run_util = ((bin_info->nregs - run->nfree)<<16) / bin_info->nregs;
+ defrag = 1;
+ }
+ malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
+ }
+ }
+ return defrag;
+}
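A hedged sketch of the caller-side pattern the comment above describes: move the allocation only when its run is more lightly used than its bin on average, and perform the copy with MALLOCX_TCACHE_NONE so the vacated region is not immediately recycled from the thread cache. The threshold and helper name are illustrative; size is assumed to be tracked by the application, and string.h is assumed for memcpy.

/* Illustrative only: returns the (possibly new) pointer. */
static void *
defrag_if_worthwhile(void *ptr, size_t size)
{
	int bin_util, run_util;
	void *newptr;

	if (je_get_defrag_hint(ptr, &bin_util, &run_util) == 0)
		return (ptr);	/* Active run, or large/huge: leave it alone. */
	if (run_util > bin_util)
		return (ptr);	/* Run is already better utilized than average. */
	newptr = je_mallocx(size, MALLOCX_TCACHE_NONE);
	if (newptr == NULL)
		return (ptr);
	memcpy(newptr, ptr, size);
	je_dallocx(ptr, MALLOCX_TCACHE_NONE);
	return (newptr);
}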
diff --git a/deps/jemalloc/src/mb.c b/deps/jemalloc/src/mb.c
new file mode 100644
index 0000000..dc2c0a2
--- /dev/null
+++ b/deps/jemalloc/src/mb.c
@@ -0,0 +1,2 @@
+#define JEMALLOC_MB_C_
+#include "jemalloc/internal/jemalloc_internal.h"
diff --git a/deps/jemalloc/src/mutex.c b/deps/jemalloc/src/mutex.c
new file mode 100644
index 0000000..6333e73
--- /dev/null
+++ b/deps/jemalloc/src/mutex.c
@@ -0,0 +1,158 @@
+#define JEMALLOC_MUTEX_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
+#include <dlfcn.h>
+#endif
+
+#ifndef _CRT_SPINCOUNT
+#define _CRT_SPINCOUNT 4000
+#endif
+
+/******************************************************************************/
+/* Data. */
+
+#ifdef JEMALLOC_LAZY_LOCK
+bool isthreaded = false;
+#endif
+#ifdef JEMALLOC_MUTEX_INIT_CB
+static bool postpone_init = true;
+static malloc_mutex_t *postponed_mutexes = NULL;
+#endif
+
+#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
+static void pthread_create_once(void);
+#endif
+
+/******************************************************************************/
+/*
+ * We intercept pthread_create() calls in order to toggle isthreaded if the
+ * process goes multi-threaded.
+ */
+
+#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
+static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *,
+ void *(*)(void *), void *__restrict);
+
+static void
+pthread_create_once(void)
+{
+
+ pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create");
+ if (pthread_create_fptr == NULL) {
+ malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, "
+ "\"pthread_create\")\n");
+ abort();
+ }
+
+ isthreaded = true;
+}
+
+JEMALLOC_EXPORT int
+pthread_create(pthread_t *__restrict thread,
+ const pthread_attr_t *__restrict attr, void *(*start_routine)(void *),
+ void *__restrict arg)
+{
+ static pthread_once_t once_control = PTHREAD_ONCE_INIT;
+
+ pthread_once(&once_control, pthread_create_once);
+
+ return (pthread_create_fptr(thread, attr, start_routine, arg));
+}
+#endif
+
+/******************************************************************************/
+
+#ifdef JEMALLOC_MUTEX_INIT_CB
+JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
+ void *(calloc_cb)(size_t, size_t));
+#endif
+
+bool
+malloc_mutex_init(malloc_mutex_t *mutex, const char *name, witness_rank_t rank)
+{
+
+#ifdef _WIN32
+# if _WIN32_WINNT >= 0x0600
+ InitializeSRWLock(&mutex->lock);
+# else
+ if (!InitializeCriticalSectionAndSpinCount(&mutex->lock,
+ _CRT_SPINCOUNT))
+ return (true);
+# endif
+#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
+ mutex->lock = OS_UNFAIR_LOCK_INIT;
+#elif (defined(JEMALLOC_OSSPIN))
+ mutex->lock = 0;
+#elif (defined(JEMALLOC_MUTEX_INIT_CB))
+ if (postpone_init) {
+ mutex->postponed_next = postponed_mutexes;
+ postponed_mutexes = mutex;
+ } else {
+ if (_pthread_mutex_init_calloc_cb(&mutex->lock,
+ bootstrap_calloc) != 0)
+ return (true);
+ }
+#else
+ pthread_mutexattr_t attr;
+
+ if (pthread_mutexattr_init(&attr) != 0)
+ return (true);
+ pthread_mutexattr_settype(&attr, MALLOC_MUTEX_TYPE);
+ if (pthread_mutex_init(&mutex->lock, &attr) != 0) {
+ pthread_mutexattr_destroy(&attr);
+ return (true);
+ }
+ pthread_mutexattr_destroy(&attr);
+#endif
+ if (config_debug)
+ witness_init(&mutex->witness, name, rank, NULL);
+ return (false);
+}
+
+void
+malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex)
+{
+
+ malloc_mutex_lock(tsdn, mutex);
+}
+
+void
+malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex)
+{
+
+ malloc_mutex_unlock(tsdn, mutex);
+}
+
+void
+malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex)
+{
+
+#ifdef JEMALLOC_MUTEX_INIT_CB
+ malloc_mutex_unlock(tsdn, mutex);
+#else
+ if (malloc_mutex_init(mutex, mutex->witness.name,
+ mutex->witness.rank)) {
+ malloc_printf("<jemalloc>: Error re-initializing mutex in "
+ "child\n");
+ if (opt_abort)
+ abort();
+ }
+#endif
+}
+
+bool
+malloc_mutex_boot(void)
+{
+
+#ifdef JEMALLOC_MUTEX_INIT_CB
+ postpone_init = false;
+ while (postponed_mutexes != NULL) {
+ if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock,
+ bootstrap_calloc) != 0)
+ return (true);
+ postponed_mutexes = postponed_mutexes->postponed_next;
+ }
+#endif
+ return (false);
+}
diff --git a/deps/jemalloc/src/nstime.c b/deps/jemalloc/src/nstime.c
new file mode 100644
index 0000000..0948e29
--- /dev/null
+++ b/deps/jemalloc/src/nstime.c
@@ -0,0 +1,194 @@
+#include "jemalloc/internal/jemalloc_internal.h"
+
+#define BILLION UINT64_C(1000000000)
+
+void
+nstime_init(nstime_t *time, uint64_t ns)
+{
+
+ time->ns = ns;
+}
+
+void
+nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec)
+{
+
+ time->ns = sec * BILLION + nsec;
+}
+
+uint64_t
+nstime_ns(const nstime_t *time)
+{
+
+ return (time->ns);
+}
+
+uint64_t
+nstime_sec(const nstime_t *time)
+{
+
+ return (time->ns / BILLION);
+}
+
+uint64_t
+nstime_nsec(const nstime_t *time)
+{
+
+ return (time->ns % BILLION);
+}
+
+void
+nstime_copy(nstime_t *time, const nstime_t *source)
+{
+
+ *time = *source;
+}
+
+int
+nstime_compare(const nstime_t *a, const nstime_t *b)
+{
+
+ return ((a->ns > b->ns) - (a->ns < b->ns));
+}
+
+void
+nstime_add(nstime_t *time, const nstime_t *addend)
+{
+
+ assert(UINT64_MAX - time->ns >= addend->ns);
+
+ time->ns += addend->ns;
+}
+
+void
+nstime_subtract(nstime_t *time, const nstime_t *subtrahend)
+{
+
+ assert(nstime_compare(time, subtrahend) >= 0);
+
+ time->ns -= subtrahend->ns;
+}
+
+void
+nstime_imultiply(nstime_t *time, uint64_t multiplier)
+{
+
+ assert((((time->ns | multiplier) & (UINT64_MAX << (sizeof(uint64_t) <<
+ 2))) == 0) || ((time->ns * multiplier) / multiplier == time->ns));
+
+ time->ns *= multiplier;
+}
+
+void
+nstime_idivide(nstime_t *time, uint64_t divisor)
+{
+
+ assert(divisor != 0);
+
+ time->ns /= divisor;
+}
+
+uint64_t
+nstime_divide(const nstime_t *time, const nstime_t *divisor)
+{
+
+ assert(divisor->ns != 0);
+
+ return (time->ns / divisor->ns);
+}
+
+#ifdef _WIN32
+# define NSTIME_MONOTONIC true
+static void
+nstime_get(nstime_t *time)
+{
+ FILETIME ft;
+ uint64_t ticks_100ns;
+
+ GetSystemTimeAsFileTime(&ft);
+ ticks_100ns = (((uint64_t)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
+
+ nstime_init(time, ticks_100ns * 100);
+}
+#elif JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
+# define NSTIME_MONOTONIC true
+static void
+nstime_get(nstime_t *time)
+{
+ struct timespec ts;
+
+ clock_gettime(CLOCK_MONOTONIC_COARSE, &ts);
+ nstime_init2(time, ts.tv_sec, ts.tv_nsec);
+}
+#elif JEMALLOC_HAVE_CLOCK_MONOTONIC
+# define NSTIME_MONOTONIC true
+static void
+nstime_get(nstime_t *time)
+{
+ struct timespec ts;
+
+ clock_gettime(CLOCK_MONOTONIC, &ts);
+ nstime_init2(time, ts.tv_sec, ts.tv_nsec);
+}
+#elif JEMALLOC_HAVE_MACH_ABSOLUTE_TIME
+# define NSTIME_MONOTONIC true
+static void
+nstime_get(nstime_t *time)
+{
+
+ nstime_init(time, mach_absolute_time());
+}
+#else
+# define NSTIME_MONOTONIC false
+static void
+nstime_get(nstime_t *time)
+{
+ struct timeval tv;
+
+ gettimeofday(&tv, NULL);
+ nstime_init2(time, tv.tv_sec, tv.tv_usec * 1000);
+}
+#endif
+
+#ifdef JEMALLOC_JET
+#undef nstime_monotonic
+#define nstime_monotonic JEMALLOC_N(n_nstime_monotonic)
+#endif
+bool
+nstime_monotonic(void)
+{
+
+ return (NSTIME_MONOTONIC);
+#undef NSTIME_MONOTONIC
+}
+#ifdef JEMALLOC_JET
+#undef nstime_monotonic
+#define nstime_monotonic JEMALLOC_N(nstime_monotonic)
+nstime_monotonic_t *nstime_monotonic = JEMALLOC_N(n_nstime_monotonic);
+#endif
+
+#ifdef JEMALLOC_JET
+#undef nstime_update
+#define nstime_update JEMALLOC_N(n_nstime_update)
+#endif
+bool
+nstime_update(nstime_t *time)
+{
+ nstime_t old_time;
+
+ nstime_copy(&old_time, time);
+ nstime_get(time);
+
+ /* Handle non-monotonic clocks. */
+ if (unlikely(nstime_compare(&old_time, time) > 0)) {
+ nstime_copy(time, &old_time);
+ return (true);
+ }
+
+ return (false);
+}
+#ifdef JEMALLOC_JET
+#undef nstime_update
+#define nstime_update JEMALLOC_N(nstime_update)
+nstime_update_t *nstime_update = JEMALLOC_N(n_nstime_update);
+#endif
diff --git a/deps/jemalloc/src/pages.c b/deps/jemalloc/src/pages.c
new file mode 100644
index 0000000..5f0c966
--- /dev/null
+++ b/deps/jemalloc/src/pages.c
@@ -0,0 +1,302 @@
+#define JEMALLOC_PAGES_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
+#include <sys/sysctl.h>
+#endif
+
+/******************************************************************************/
+/* Data. */
+
+#ifndef _WIN32
+# define PAGES_PROT_COMMIT (PROT_READ | PROT_WRITE)
+# define PAGES_PROT_DECOMMIT (PROT_NONE)
+static int mmap_flags;
+#endif
+static bool os_overcommits;
+
+/******************************************************************************/
+
+void *
+pages_map(void *addr, size_t size, bool *commit)
+{
+ void *ret;
+
+ assert(size != 0);
+
+ if (os_overcommits)
+ *commit = true;
+
+#ifdef _WIN32
+ /*
+ * If VirtualAlloc can't allocate at the given address when one is
+ * given, it fails and returns NULL.
+ */
+ ret = VirtualAlloc(addr, size, MEM_RESERVE | (*commit ? MEM_COMMIT : 0),
+ PAGE_READWRITE);
+#else
+ /*
+ * We don't use MAP_FIXED here, because it can cause the *replacement*
+ * of existing mappings, and we only want to create new mappings.
+ */
+ {
+ int prot = *commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;
+
+ ret = mmap(addr, size, prot, mmap_flags, -1, 0);
+ }
+ assert(ret != NULL);
+
+ if (ret == MAP_FAILED)
+ ret = NULL;
+ else if (addr != NULL && ret != addr) {
+ /*
+ * We succeeded in mapping memory, but not in the right place.
+ */
+ pages_unmap(ret, size);
+ ret = NULL;
+ }
+#endif
+ assert(ret == NULL || (addr == NULL && ret != addr)
+ || (addr != NULL && ret == addr));
+ return (ret);
+}
+
+void
+pages_unmap(void *addr, size_t size)
+{
+
+#ifdef _WIN32
+ if (VirtualFree(addr, 0, MEM_RELEASE) == 0)
+#else
+ if (munmap(addr, size) == -1)
+#endif
+ {
+ char buf[BUFERROR_BUF];
+
+ buferror(get_errno(), buf, sizeof(buf));
+ malloc_printf("<jemalloc>: Error in "
+#ifdef _WIN32
+ "VirtualFree"
+#else
+ "munmap"
+#endif
+ "(): %s\n", buf);
+ if (opt_abort)
+ abort();
+ }
+}
+
+void *
+pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size,
+ bool *commit)
+{
+ void *ret = (void *)((uintptr_t)addr + leadsize);
+
+ assert(alloc_size >= leadsize + size);
+#ifdef _WIN32
+ {
+ void *new_addr;
+
+ pages_unmap(addr, alloc_size);
+ new_addr = pages_map(ret, size, commit);
+ if (new_addr == ret)
+ return (ret);
+ if (new_addr)
+ pages_unmap(new_addr, size);
+ return (NULL);
+ }
+#else
+ {
+ size_t trailsize = alloc_size - leadsize - size;
+
+ if (leadsize != 0)
+ pages_unmap(addr, leadsize);
+ if (trailsize != 0)
+ pages_unmap((void *)((uintptr_t)ret + size), trailsize);
+ return (ret);
+ }
+#endif
+}
+
+static bool
+pages_commit_impl(void *addr, size_t size, bool commit)
+{
+
+ if (os_overcommits)
+ return (true);
+
+#ifdef _WIN32
+ return (commit ? (addr != VirtualAlloc(addr, size, MEM_COMMIT,
+ PAGE_READWRITE)) : (!VirtualFree(addr, size, MEM_DECOMMIT)));
+#else
+ {
+ int prot = commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;
+ void *result = mmap(addr, size, prot, mmap_flags | MAP_FIXED,
+ -1, 0);
+ if (result == MAP_FAILED)
+ return (true);
+ if (result != addr) {
+ /*
+ * We succeeded in mapping memory, but not in the right
+ * place.
+ */
+ pages_unmap(result, size);
+ return (true);
+ }
+ return (false);
+ }
+#endif
+}
+
+bool
+pages_commit(void *addr, size_t size)
+{
+
+ return (pages_commit_impl(addr, size, true));
+}
+
+bool
+pages_decommit(void *addr, size_t size)
+{
+
+ return (pages_commit_impl(addr, size, false));
+}
+
+bool
+pages_purge(void *addr, size_t size)
+{
+ bool unzeroed;
+
+#ifdef _WIN32
+ VirtualAlloc(addr, size, MEM_RESET, PAGE_READWRITE);
+ unzeroed = true;
+#elif (defined(JEMALLOC_PURGE_MADVISE_FREE) || \
+ defined(JEMALLOC_PURGE_MADVISE_DONTNEED))
+# if defined(JEMALLOC_PURGE_MADVISE_FREE)
+# define JEMALLOC_MADV_PURGE MADV_FREE
+# define JEMALLOC_MADV_ZEROS false
+# elif defined(JEMALLOC_PURGE_MADVISE_DONTNEED)
+# define JEMALLOC_MADV_PURGE MADV_DONTNEED
+# define JEMALLOC_MADV_ZEROS true
+# else
+# error No madvise(2) flag defined for purging unused dirty pages
+# endif
+ int err = madvise(addr, size, JEMALLOC_MADV_PURGE);
+ unzeroed = (!JEMALLOC_MADV_ZEROS || err != 0);
+# undef JEMALLOC_MADV_PURGE
+# undef JEMALLOC_MADV_ZEROS
+#else
+ /* Last resort no-op. */
+ unzeroed = true;
+#endif
+ return (unzeroed);
+}
+
+bool
+pages_huge(void *addr, size_t size)
+{
+
+ assert(PAGE_ADDR2BASE(addr) == addr);
+ assert(PAGE_CEILING(size) == size);
+
+#ifdef JEMALLOC_THP
+ return (madvise(addr, size, MADV_HUGEPAGE) != 0);
+#else
+ return (false);
+#endif
+}
+
+bool
+pages_nohuge(void *addr, size_t size)
+{
+
+ assert(PAGE_ADDR2BASE(addr) == addr);
+ assert(PAGE_CEILING(size) == size);
+
+#ifdef JEMALLOC_THP
+ return (madvise(addr, size, MADV_NOHUGEPAGE) != 0);
+#else
+ return (false);
+#endif
+}
+
+#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
+static bool
+os_overcommits_sysctl(void)
+{
+ int vm_overcommit;
+ size_t sz;
+
+ sz = sizeof(vm_overcommit);
+ if (sysctlbyname("vm.overcommit", &vm_overcommit, &sz, NULL, 0) != 0)
+ return (false); /* Error. */
+
+ return ((vm_overcommit & 0x3) == 0);
+}
+#endif
+
+#ifdef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY
+/*
+ * Use syscall(2) rather than {open,read,close}(2) when possible to avoid
+ * reentry during bootstrapping if another library has interposed system call
+ * wrappers.
+ */
+static bool
+os_overcommits_proc(void)
+{
+ int fd;
+ char buf[1];
+ ssize_t nread;
+
+#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open)
+ fd = (int)syscall(SYS_open, "/proc/sys/vm/overcommit_memory", O_RDONLY);
+#else
+ fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY);
+#endif
+ if (fd == -1)
+ return (false); /* Error. */
+
+#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_read)
+ nread = (ssize_t)syscall(SYS_read, fd, &buf, sizeof(buf));
+#else
+ nread = read(fd, &buf, sizeof(buf));
+#endif
+
+#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_close)
+ syscall(SYS_close, fd);
+#else
+ close(fd);
+#endif
+
+ if (nread < 1)
+ return (false); /* Error. */
+ /*
+ * /proc/sys/vm/overcommit_memory meanings:
+ * 0: Heuristic overcommit.
+ * 1: Always overcommit.
+ * 2: Never overcommit.
+ */
+ return (buf[0] == '0' || buf[0] == '1');
+}
+#endif
+
+void
+pages_boot(void)
+{
+
+#ifndef _WIN32
+ mmap_flags = MAP_PRIVATE | MAP_ANON;
+#endif
+
+#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
+ os_overcommits = os_overcommits_sysctl();
+#elif defined(JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY)
+ os_overcommits = os_overcommits_proc();
+# ifdef MAP_NORESERVE
+ if (os_overcommits)
+ mmap_flags |= MAP_NORESERVE;
+# endif
+#else
+ os_overcommits = false;
+#endif
+}
diff --git a/deps/jemalloc/src/prng.c b/deps/jemalloc/src/prng.c
new file mode 100644
index 0000000..76646a2
--- /dev/null
+++ b/deps/jemalloc/src/prng.c
@@ -0,0 +1,2 @@
+#define JEMALLOC_PRNG_C_
+#include "jemalloc/internal/jemalloc_internal.h"
diff --git a/deps/jemalloc/src/prof.c b/deps/jemalloc/src/prof.c
new file mode 100644
index 0000000..c89dade
--- /dev/null
+++ b/deps/jemalloc/src/prof.c
@@ -0,0 +1,2355 @@
+#define JEMALLOC_PROF_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+/******************************************************************************/
+
+#ifdef JEMALLOC_PROF_LIBUNWIND
+#define UNW_LOCAL_ONLY
+#include <libunwind.h>
+#endif
+
+#ifdef JEMALLOC_PROF_LIBGCC
+#include <unwind.h>
+#endif
+
+/******************************************************************************/
+/* Data. */
+
+bool opt_prof = false;
+bool opt_prof_active = true;
+bool opt_prof_thread_active_init = true;
+size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT;
+ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT;
+bool opt_prof_gdump = false;
+bool opt_prof_final = false;
+bool opt_prof_leak = false;
+bool opt_prof_accum = false;
+char opt_prof_prefix[
+ /* Minimize memory bloat for non-prof builds. */
+#ifdef JEMALLOC_PROF
+ PATH_MAX +
+#endif
+ 1];
+
+/*
+ * Initialized as opt_prof_active, and accessed via
+ * prof_active_[gs]et{_unlocked,}().
+ */
+bool prof_active;
+static malloc_mutex_t prof_active_mtx;
+
+/*
+ * Initialized as opt_prof_thread_active_init, and accessed via
+ * prof_thread_active_init_[gs]et().
+ */
+static bool prof_thread_active_init;
+static malloc_mutex_t prof_thread_active_init_mtx;
+
+/*
+ * Initialized as opt_prof_gdump, and accessed via
+ * prof_gdump_[gs]et{_unlocked,}().
+ */
+bool prof_gdump_val;
+static malloc_mutex_t prof_gdump_mtx;
+
+uint64_t prof_interval = 0;
+
+size_t lg_prof_sample;
+
+/*
+ * Table of mutexes that are shared among gctx's. These are leaf locks, so
+ * there is no problem with using them for more than one gctx at the same time.
+ * The primary motivation for this sharing though is that gctx's are ephemeral,
+ * and destroying mutexes causes complications for systems that allocate when
+ * creating/destroying mutexes.
+ */
+static malloc_mutex_t *gctx_locks;
+static unsigned cum_gctxs; /* Atomic counter. */
+
+/*
+ * Table of mutexes that are shared among tdata's. No operations require
+ * holding multiple tdata locks, so there is no problem with using them for more
+ * than one tdata at the same time, even though a gctx lock may be acquired
+ * while holding a tdata lock.
+ */
+static malloc_mutex_t *tdata_locks;
+
+/*
+ * Global hash of (prof_bt_t *)-->(prof_gctx_t *). This is the master data
+ * structure that knows about all backtraces currently captured.
+ */
+static ckh_t bt2gctx;
+static malloc_mutex_t bt2gctx_mtx;
+
+/*
+ * Tree of all extant prof_tdata_t structures, regardless of state,
+ * {attached,detached,expired}.
+ */
+static prof_tdata_tree_t tdatas;
+static malloc_mutex_t tdatas_mtx;
+
+static uint64_t next_thr_uid;
+static malloc_mutex_t next_thr_uid_mtx;
+
+static malloc_mutex_t prof_dump_seq_mtx;
+static uint64_t prof_dump_seq;
+static uint64_t prof_dump_iseq;
+static uint64_t prof_dump_mseq;
+static uint64_t prof_dump_useq;
+
+/*
+ * This buffer is rather large for stack allocation, so use a single buffer for
+ * all profile dumps.
+ */
+static malloc_mutex_t prof_dump_mtx;
+static char prof_dump_buf[
+ /* Minimize memory bloat for non-prof builds. */
+#ifdef JEMALLOC_PROF
+ PROF_DUMP_BUFSIZE
+#else
+ 1
+#endif
+];
+static size_t prof_dump_buf_end;
+static int prof_dump_fd;
+
+/* Do not dump any profiles until bootstrapping is complete. */
+static bool prof_booted = false;
+
+/******************************************************************************/
+/*
+ * Function prototypes for static functions that are referenced prior to
+ * definition.
+ */
+
+static bool prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx);
+static void prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx);
+static bool prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata,
+ bool even_if_attached);
+static void prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata,
+ bool even_if_attached);
+static char *prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name);
+
+/******************************************************************************/
+/* Red-black trees. */
+
+JEMALLOC_INLINE_C int
+prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b)
+{
+ uint64_t a_thr_uid = a->thr_uid;
+ uint64_t b_thr_uid = b->thr_uid;
+ int ret = (a_thr_uid > b_thr_uid) - (a_thr_uid < b_thr_uid);
+ if (ret == 0) {
+ uint64_t a_thr_discrim = a->thr_discrim;
+ uint64_t b_thr_discrim = b->thr_discrim;
+ ret = (a_thr_discrim > b_thr_discrim) - (a_thr_discrim <
+ b_thr_discrim);
+ if (ret == 0) {
+ uint64_t a_tctx_uid = a->tctx_uid;
+ uint64_t b_tctx_uid = b->tctx_uid;
+ ret = (a_tctx_uid > b_tctx_uid) - (a_tctx_uid <
+ b_tctx_uid);
+ }
+ }
+ return (ret);
+}
+
+rb_gen(static UNUSED, tctx_tree_, prof_tctx_tree_t, prof_tctx_t,
+ tctx_link, prof_tctx_comp)
+
+JEMALLOC_INLINE_C int
+prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b)
+{
+ unsigned a_len = a->bt.len;
+ unsigned b_len = b->bt.len;
+ unsigned comp_len = (a_len < b_len) ? a_len : b_len;
+ int ret = memcmp(a->bt.vec, b->bt.vec, comp_len * sizeof(void *));
+ if (ret == 0)
+ ret = (a_len > b_len) - (a_len < b_len);
+ return (ret);
+}
+
+rb_gen(static UNUSED, gctx_tree_, prof_gctx_tree_t, prof_gctx_t, dump_link,
+ prof_gctx_comp)
+
+JEMALLOC_INLINE_C int
+prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b)
+{
+ int ret;
+ uint64_t a_uid = a->thr_uid;
+ uint64_t b_uid = b->thr_uid;
+
+ ret = ((a_uid > b_uid) - (a_uid < b_uid));
+ if (ret == 0) {
+ uint64_t a_discrim = a->thr_discrim;
+ uint64_t b_discrim = b->thr_discrim;
+
+ ret = ((a_discrim > b_discrim) - (a_discrim < b_discrim));
+ }
+ return (ret);
+}
+
+rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link,
+ prof_tdata_comp)
+
+/******************************************************************************/
+
+void
+prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated)
+{
+ prof_tdata_t *tdata;
+
+ cassert(config_prof);
+
+ if (updated) {
+ /*
+ * Compute a new sample threshold. This isn't very important in
+ * practice, because this function is rarely executed, so the
+ * potential for sample bias is minimal except in contrived
+ * programs.
+ */
+ tdata = prof_tdata_get(tsd, true);
+ if (tdata != NULL)
+ prof_sample_threshold_update(tdata);
+ }
+
+ if ((uintptr_t)tctx > (uintptr_t)1U) {
+ malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
+ tctx->prepared = false;
+ if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx))
+ prof_tctx_destroy(tsd, tctx);
+ else
+ malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
+ }
+}
+
+void
+prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize,
+ prof_tctx_t *tctx)
+{
+
+ prof_tctx_set(tsdn, ptr, usize, tctx);
+
+ malloc_mutex_lock(tsdn, tctx->tdata->lock);
+ tctx->cnts.curobjs++;
+ tctx->cnts.curbytes += usize;
+ if (opt_prof_accum) {
+ tctx->cnts.accumobjs++;
+ tctx->cnts.accumbytes += usize;
+ }
+ tctx->prepared = false;
+ malloc_mutex_unlock(tsdn, tctx->tdata->lock);
+}
+
+void
+prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx)
+{
+
+ malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
+ assert(tctx->cnts.curobjs > 0);
+ assert(tctx->cnts.curbytes >= usize);
+ tctx->cnts.curobjs--;
+ tctx->cnts.curbytes -= usize;
+
+ if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx))
+ prof_tctx_destroy(tsd, tctx);
+ else
+ malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
+}
+
+void
+bt_init(prof_bt_t *bt, void **vec)
+{
+
+ cassert(config_prof);
+
+ bt->vec = vec;
+ bt->len = 0;
+}
+
+JEMALLOC_INLINE_C void
+prof_enter(tsd_t *tsd, prof_tdata_t *tdata)
+{
+
+ cassert(config_prof);
+ assert(tdata == prof_tdata_get(tsd, false));
+
+ if (tdata != NULL) {
+ assert(!tdata->enq);
+ tdata->enq = true;
+ }
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
+}
+
+JEMALLOC_INLINE_C void
+prof_leave(tsd_t *tsd, prof_tdata_t *tdata)
+{
+
+ cassert(config_prof);
+ assert(tdata == prof_tdata_get(tsd, false));
+
+ malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx);
+
+ if (tdata != NULL) {
+ bool idump, gdump;
+
+ assert(tdata->enq);
+ tdata->enq = false;
+ idump = tdata->enq_idump;
+ tdata->enq_idump = false;
+ gdump = tdata->enq_gdump;
+ tdata->enq_gdump = false;
+
+ if (idump)
+ prof_idump(tsd_tsdn(tsd));
+ if (gdump)
+ prof_gdump(tsd_tsdn(tsd));
+ }
+}
+
+#ifdef JEMALLOC_PROF_LIBUNWIND
+void
+prof_backtrace(prof_bt_t *bt)
+{
+ int nframes;
+
+ cassert(config_prof);
+ assert(bt->len == 0);
+ assert(bt->vec != NULL);
+
+ nframes = unw_backtrace(bt->vec, PROF_BT_MAX);
+ if (nframes <= 0)
+ return;
+ bt->len = nframes;
+}
+#elif (defined(JEMALLOC_PROF_LIBGCC))
+static _Unwind_Reason_Code
+prof_unwind_init_callback(struct _Unwind_Context *context, void *arg)
+{
+
+ cassert(config_prof);
+
+ return (_URC_NO_REASON);
+}
+
+static _Unwind_Reason_Code
+prof_unwind_callback(struct _Unwind_Context *context, void *arg)
+{
+ prof_unwind_data_t *data = (prof_unwind_data_t *)arg;
+ void *ip;
+
+ cassert(config_prof);
+
+ ip = (void *)_Unwind_GetIP(context);
+ if (ip == NULL)
+ return (_URC_END_OF_STACK);
+ data->bt->vec[data->bt->len] = ip;
+ data->bt->len++;
+ if (data->bt->len == data->max)
+ return (_URC_END_OF_STACK);
+
+ return (_URC_NO_REASON);
+}
+
+void
+prof_backtrace(prof_bt_t *bt)
+{
+ prof_unwind_data_t data = {bt, PROF_BT_MAX};
+
+ cassert(config_prof);
+
+ _Unwind_Backtrace(prof_unwind_callback, &data);
+}
+#elif (defined(JEMALLOC_PROF_GCC))
+void
+prof_backtrace(prof_bt_t *bt)
+{
+#define BT_FRAME(i) \
+ if ((i) < PROF_BT_MAX) { \
+ void *p; \
+ if (__builtin_frame_address(i) == 0) \
+ return; \
+ p = __builtin_return_address(i); \
+ if (p == NULL) \
+ return; \
+ bt->vec[(i)] = p; \
+ bt->len = (i) + 1; \
+ } else \
+ return;
+
+ cassert(config_prof);
+
+ BT_FRAME(0)
+ BT_FRAME(1)
+ BT_FRAME(2)
+ BT_FRAME(3)
+ BT_FRAME(4)
+ BT_FRAME(5)
+ BT_FRAME(6)
+ BT_FRAME(7)
+ BT_FRAME(8)
+ BT_FRAME(9)
+
+ BT_FRAME(10)
+ BT_FRAME(11)
+ BT_FRAME(12)
+ BT_FRAME(13)
+ BT_FRAME(14)
+ BT_FRAME(15)
+ BT_FRAME(16)
+ BT_FRAME(17)
+ BT_FRAME(18)
+ BT_FRAME(19)
+
+ BT_FRAME(20)
+ BT_FRAME(21)
+ BT_FRAME(22)
+ BT_FRAME(23)
+ BT_FRAME(24)
+ BT_FRAME(25)
+ BT_FRAME(26)
+ BT_FRAME(27)
+ BT_FRAME(28)
+ BT_FRAME(29)
+
+ BT_FRAME(30)
+ BT_FRAME(31)
+ BT_FRAME(32)
+ BT_FRAME(33)
+ BT_FRAME(34)
+ BT_FRAME(35)
+ BT_FRAME(36)
+ BT_FRAME(37)
+ BT_FRAME(38)
+ BT_FRAME(39)
+
+ BT_FRAME(40)
+ BT_FRAME(41)
+ BT_FRAME(42)
+ BT_FRAME(43)
+ BT_FRAME(44)
+ BT_FRAME(45)
+ BT_FRAME(46)
+ BT_FRAME(47)
+ BT_FRAME(48)
+ BT_FRAME(49)
+
+ BT_FRAME(50)
+ BT_FRAME(51)
+ BT_FRAME(52)
+ BT_FRAME(53)
+ BT_FRAME(54)
+ BT_FRAME(55)
+ BT_FRAME(56)
+ BT_FRAME(57)
+ BT_FRAME(58)
+ BT_FRAME(59)
+
+ BT_FRAME(60)
+ BT_FRAME(61)
+ BT_FRAME(62)
+ BT_FRAME(63)
+ BT_FRAME(64)
+ BT_FRAME(65)
+ BT_FRAME(66)
+ BT_FRAME(67)
+ BT_FRAME(68)
+ BT_FRAME(69)
+
+ BT_FRAME(70)
+ BT_FRAME(71)
+ BT_FRAME(72)
+ BT_FRAME(73)
+ BT_FRAME(74)
+ BT_FRAME(75)
+ BT_FRAME(76)
+ BT_FRAME(77)
+ BT_FRAME(78)
+ BT_FRAME(79)
+
+ BT_FRAME(80)
+ BT_FRAME(81)
+ BT_FRAME(82)
+ BT_FRAME(83)
+ BT_FRAME(84)
+ BT_FRAME(85)
+ BT_FRAME(86)
+ BT_FRAME(87)
+ BT_FRAME(88)
+ BT_FRAME(89)
+
+ BT_FRAME(90)
+ BT_FRAME(91)
+ BT_FRAME(92)
+ BT_FRAME(93)
+ BT_FRAME(94)
+ BT_FRAME(95)
+ BT_FRAME(96)
+ BT_FRAME(97)
+ BT_FRAME(98)
+ BT_FRAME(99)
+
+ BT_FRAME(100)
+ BT_FRAME(101)
+ BT_FRAME(102)
+ BT_FRAME(103)
+ BT_FRAME(104)
+ BT_FRAME(105)
+ BT_FRAME(106)
+ BT_FRAME(107)
+ BT_FRAME(108)
+ BT_FRAME(109)
+
+ BT_FRAME(110)
+ BT_FRAME(111)
+ BT_FRAME(112)
+ BT_FRAME(113)
+ BT_FRAME(114)
+ BT_FRAME(115)
+ BT_FRAME(116)
+ BT_FRAME(117)
+ BT_FRAME(118)
+ BT_FRAME(119)
+
+ BT_FRAME(120)
+ BT_FRAME(121)
+ BT_FRAME(122)
+ BT_FRAME(123)
+ BT_FRAME(124)
+ BT_FRAME(125)
+ BT_FRAME(126)
+ BT_FRAME(127)
+#undef BT_FRAME
+}
+#else
+void
+prof_backtrace(prof_bt_t *bt)
+{
+
+ cassert(config_prof);
+ not_reached();
+}
+#endif
+
+static malloc_mutex_t *
+prof_gctx_mutex_choose(void)
+{
+ unsigned ngctxs = atomic_add_u(&cum_gctxs, 1);
+
+ return (&gctx_locks[(ngctxs - 1) % PROF_NCTX_LOCKS]);
+}
+
+static malloc_mutex_t *
+prof_tdata_mutex_choose(uint64_t thr_uid)
+{
+
+ return (&tdata_locks[thr_uid % PROF_NTDATA_LOCKS]);
+}
+
+static prof_gctx_t *
+prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt)
+{
+ /*
+ * Create a single allocation that has space for vec of length bt->len.
+ */
+ size_t size = offsetof(prof_gctx_t, vec) + (bt->len * sizeof(void *));
+ prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsdn, size,
+ size2index(size), false, NULL, true, arena_get(TSDN_NULL, 0, true),
+ true);
+ if (gctx == NULL)
+ return (NULL);
+ gctx->lock = prof_gctx_mutex_choose();
+ /*
+ * Set nlimbo to 1, in order to avoid a race condition with
+ * prof_tctx_destroy()/prof_gctx_try_destroy().
+ */
+ gctx->nlimbo = 1;
+ tctx_tree_new(&gctx->tctxs);
+ /* Duplicate bt. */
+ memcpy(gctx->vec, bt->vec, bt->len * sizeof(void *));
+ gctx->bt.vec = gctx->vec;
+ gctx->bt.len = bt->len;
+ return (gctx);
+}
+
+static void
+prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx,
+ prof_tdata_t *tdata)
+{
+
+ cassert(config_prof);
+
+ /*
+ * Check that gctx is still unused by any thread cache before destroying
+ * it. prof_lookup() increments gctx->nlimbo in order to avoid a race
+ * condition with this function, as does prof_tctx_destroy() in order to
+ * avoid a race between the main body of prof_tctx_destroy() and entry
+ * into this function.
+ */
+ prof_enter(tsd, tdata_self);
+ malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
+ assert(gctx->nlimbo != 0);
+ if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) {
+ /* Remove gctx from bt2gctx. */
+ if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL))
+ not_reached();
+ prof_leave(tsd, tdata_self);
+ /* Destroy gctx. */
+ malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
+ idalloctm(tsd_tsdn(tsd), gctx, NULL, true, true);
+ } else {
+ /*
+ * Compensate for increment in prof_tctx_destroy() or
+ * prof_lookup().
+ */
+ gctx->nlimbo--;
+ malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
+ prof_leave(tsd, tdata_self);
+ }
+}
+
+static bool
+prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx)
+{
+
+ malloc_mutex_assert_owner(tsdn, tctx->tdata->lock);
+
+ if (opt_prof_accum)
+ return (false);
+ if (tctx->cnts.curobjs != 0)
+ return (false);
+ if (tctx->prepared)
+ return (false);
+ return (true);
+}
+
+static bool
+prof_gctx_should_destroy(prof_gctx_t *gctx)
+{
+
+ if (opt_prof_accum)
+ return (false);
+ if (!tctx_tree_empty(&gctx->tctxs))
+ return (false);
+ if (gctx->nlimbo != 0)
+ return (false);
+ return (true);
+}
+
+static void
+prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx)
+{
+ prof_tdata_t *tdata = tctx->tdata;
+ prof_gctx_t *gctx = tctx->gctx;
+ bool destroy_tdata, destroy_tctx, destroy_gctx;
+
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock);
+
+ assert(tctx->cnts.curobjs == 0);
+ assert(tctx->cnts.curbytes == 0);
+ assert(!opt_prof_accum);
+ assert(tctx->cnts.accumobjs == 0);
+ assert(tctx->cnts.accumbytes == 0);
+
+ ckh_remove(tsd, &tdata->bt2tctx, &gctx->bt, NULL, NULL);
+ destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, false);
+ malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
+
+ malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
+ switch (tctx->state) {
+ case prof_tctx_state_nominal:
+ tctx_tree_remove(&gctx->tctxs, tctx);
+ destroy_tctx = true;
+ if (prof_gctx_should_destroy(gctx)) {
+ /*
+ * Increment gctx->nlimbo in order to keep another
+ * thread from winning the race to destroy gctx while
+ * this one has gctx->lock dropped. Without this, it
+ * would be possible for another thread to:
+ *
+ * 1) Sample an allocation associated with gctx.
+ * 2) Deallocate the sampled object.
+ * 3) Successfully prof_gctx_try_destroy(gctx).
+ *
+ * The result would be that gctx no longer exists by the
+ * time this thread accesses it in
+ * prof_gctx_try_destroy().
+ */
+ gctx->nlimbo++;
+ destroy_gctx = true;
+ } else
+ destroy_gctx = false;
+ break;
+ case prof_tctx_state_dumping:
+ /*
+ * A dumping thread needs tctx to remain valid until dumping
+ * has finished. Change state such that the dumping thread will
+ * complete destruction during a late dump iteration phase.
+ */
+ tctx->state = prof_tctx_state_purgatory;
+ destroy_tctx = false;
+ destroy_gctx = false;
+ break;
+ default:
+ not_reached();
+ destroy_tctx = false;
+ destroy_gctx = false;
+ }
+ malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
+ if (destroy_gctx) {
+ prof_gctx_try_destroy(tsd, prof_tdata_get(tsd, false), gctx,
+ tdata);
+ }
+
+ malloc_mutex_assert_not_owner(tsd_tsdn(tsd), tctx->tdata->lock);
+
+ if (destroy_tdata)
+ prof_tdata_destroy(tsd, tdata, false);
+
+ if (destroy_tctx)
+ idalloctm(tsd_tsdn(tsd), tctx, NULL, true, true);
+}
+
+static bool
+prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
+ void **p_btkey, prof_gctx_t **p_gctx, bool *p_new_gctx)
+{
+ union {
+ prof_gctx_t *p;
+ void *v;
+ } gctx;
+ union {
+ prof_bt_t *p;
+ void *v;
+ } btkey;
+ bool new_gctx;
+
+ prof_enter(tsd, tdata);
+ if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) {
+ /* bt has never been seen before. Insert it. */
+ gctx.p = prof_gctx_create(tsd_tsdn(tsd), bt);
+ if (gctx.v == NULL) {
+ prof_leave(tsd, tdata);
+ return (true);
+ }
+ btkey.p = &gctx.p->bt;
+ if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) {
+ /* OOM. */
+ prof_leave(tsd, tdata);
+ idalloctm(tsd_tsdn(tsd), gctx.v, NULL, true, true);
+ return (true);
+ }
+ new_gctx = true;
+ } else {
+ /*
+ * Increment nlimbo, in order to avoid a race condition with
+ * prof_tctx_destroy()/prof_gctx_try_destroy().
+ */
+ malloc_mutex_lock(tsd_tsdn(tsd), gctx.p->lock);
+ gctx.p->nlimbo++;
+ malloc_mutex_unlock(tsd_tsdn(tsd), gctx.p->lock);
+ new_gctx = false;
+ }
+ prof_leave(tsd, tdata);
+
+ *p_btkey = btkey.v;
+ *p_gctx = gctx.p;
+ *p_new_gctx = new_gctx;
+ return (false);
+}
+
+prof_tctx_t *
+prof_lookup(tsd_t *tsd, prof_bt_t *bt)
+{
+ union {
+ prof_tctx_t *p;
+ void *v;
+ } ret;
+ prof_tdata_t *tdata;
+ bool not_found;
+
+ cassert(config_prof);
+
+ tdata = prof_tdata_get(tsd, false);
+ if (tdata == NULL)
+ return (NULL);
+
+ malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
+ not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v);
+ if (!not_found) /* Note double negative! */
+ ret.p->prepared = true;
+ malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
+ if (not_found) {
+ void *btkey;
+ prof_gctx_t *gctx;
+ bool new_gctx, error;
+
+ /*
+ * This thread's cache lacks bt. Look for it in the global
+ * cache.
+ */
+ if (prof_lookup_global(tsd, bt, tdata, &btkey, &gctx,
+ &new_gctx))
+ return (NULL);
+
+ /* Link a prof_tctx_t into gctx for this thread. */
+ ret.v = iallocztm(tsd_tsdn(tsd), sizeof(prof_tctx_t),
+ size2index(sizeof(prof_tctx_t)), false, NULL, true,
+ arena_ichoose(tsd, NULL), true);
+ if (ret.p == NULL) {
+ if (new_gctx)
+ prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
+ return (NULL);
+ }
+ ret.p->tdata = tdata;
+ ret.p->thr_uid = tdata->thr_uid;
+ ret.p->thr_discrim = tdata->thr_discrim;
+ memset(&ret.p->cnts, 0, sizeof(prof_cnt_t));
+ ret.p->gctx = gctx;
+ ret.p->tctx_uid = tdata->tctx_uid_next++;
+ ret.p->prepared = true;
+ ret.p->state = prof_tctx_state_initializing;
+ malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
+ error = ckh_insert(tsd, &tdata->bt2tctx, btkey, ret.v);
+ malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
+ if (error) {
+ if (new_gctx)
+ prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
+ idalloctm(tsd_tsdn(tsd), ret.v, NULL, true, true);
+ return (NULL);
+ }
+ malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
+ ret.p->state = prof_tctx_state_nominal;
+ tctx_tree_insert(&gctx->tctxs, ret.p);
+ gctx->nlimbo--;
+ malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
+ }
+
+ return (ret.p);
+}
+
+/*
+ * The bodies of this function and prof_leakcheck() are compiled out unless heap
+ * profiling is enabled, so that it is possible to compile jemalloc with
+ * floating point support completely disabled. Avoiding floating point code is
+ * important on memory-constrained systems, but it also enables a workaround for
+ * versions of glibc that don't properly save/restore floating point registers
+ * during dynamic lazy symbol loading (which internally calls into whatever
+ * malloc implementation happens to be integrated into the application). Note
+ * that some compilers (e.g. gcc 4.8) may use floating point registers for fast
+ * memory moves, so jemalloc must be compiled with such optimizations disabled
+ * (e.g. -mno-sse) in order for the workaround to be complete.
+ */
+void
+prof_sample_threshold_update(prof_tdata_t *tdata)
+{
+#ifdef JEMALLOC_PROF
+ uint64_t r;
+ double u;
+
+ if (!config_prof)
+ return;
+
+ if (lg_prof_sample == 0) {
+ tdata->bytes_until_sample = 0;
+ return;
+ }
+
+ /*
+ * Compute sample interval as a geometrically distributed random
+ * variable with mean (2^lg_prof_sample).
+ *
+ *   tdata->bytes_until_sample = ceil(log(u) / log(1-p)),
+ *
+ *   where p = 1 / 2^lg_prof_sample.
+ *
+ * For more information on the math, see:
+ *
+ * Non-Uniform Random Variate Generation
+ * Luc Devroye
+ * Springer-Verlag, New York, 1986
+ * pp 500
+ * (http://luc.devroye.org/rnbookindex.html)
+ */
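+ /*
+ * For example, assuming the default lg_prof_sample of 19 (a 512 KiB mean
+ * interval), a uniform draw of u = 0.5 yields the median interval,
+ * ln(2) * 2^19, i.e. roughly 363,409 bytes.
+ */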
+ r = prng_lg_range_u64(&tdata->prng_state, 53);
+ u = (double)r * (1.0/9007199254740992.0L);
+ tdata->bytes_until_sample = (uint64_t)(log(u) /
+ log(1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample))))
+ + (uint64_t)1U;
+#endif
+}
+
+#ifdef JEMALLOC_JET
+static prof_tdata_t *
+prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
+{
+ size_t *tdata_count = (size_t *)arg;
+
+ (*tdata_count)++;
+
+ return (NULL);
+}
+
+size_t
+prof_tdata_count(void)
+{
+ size_t tdata_count = 0;
+ tsdn_t *tsdn;
+
+ tsdn = tsdn_fetch();
+ malloc_mutex_lock(tsdn, &tdatas_mtx);
+ tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter,
+ (void *)&tdata_count);
+ malloc_mutex_unlock(tsdn, &tdatas_mtx);
+
+ return (tdata_count);
+}
+#endif
+
+#ifdef JEMALLOC_JET
+size_t
+prof_bt_count(void)
+{
+ size_t bt_count;
+ tsd_t *tsd;
+ prof_tdata_t *tdata;
+
+ tsd = tsd_fetch();
+ tdata = prof_tdata_get(tsd, false);
+ if (tdata == NULL)
+ return (0);
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
+ bt_count = ckh_count(&bt2gctx);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx);
+
+ return (bt_count);
+}
+#endif
+
+#ifdef JEMALLOC_JET
+#undef prof_dump_open
+#define prof_dump_open JEMALLOC_N(prof_dump_open_impl)
+#endif
+static int
+prof_dump_open(bool propagate_err, const char *filename)
+{
+ int fd;
+
+ fd = creat(filename, 0644);
+ if (fd == -1 && !propagate_err) {
+ malloc_printf("<jemalloc>: creat(\"%s\"), 0644) failed\n",
+ filename);
+ if (opt_abort)
+ abort();
+ }
+
+ return (fd);
+}
+#ifdef JEMALLOC_JET
+#undef prof_dump_open
+#define prof_dump_open JEMALLOC_N(prof_dump_open)
+prof_dump_open_t *prof_dump_open = JEMALLOC_N(prof_dump_open_impl);
+#endif
+
+static bool
+prof_dump_flush(bool propagate_err)
+{
+ bool ret = false;
+ ssize_t err;
+
+ cassert(config_prof);
+
+ err = write(prof_dump_fd, prof_dump_buf, prof_dump_buf_end);
+ if (err == -1) {
+ if (!propagate_err) {
+ malloc_write("<jemalloc>: write() failed during heap "
+ "profile flush\n");
+ if (opt_abort)
+ abort();
+ }
+ ret = true;
+ }
+ prof_dump_buf_end = 0;
+
+ return (ret);
+}
+
+static bool
+prof_dump_close(bool propagate_err)
+{
+ bool ret;
+
+ assert(prof_dump_fd != -1);
+ ret = prof_dump_flush(propagate_err);
+ close(prof_dump_fd);
+ prof_dump_fd = -1;
+
+ return (ret);
+}
+
+static bool
+prof_dump_write(bool propagate_err, const char *s)
+{
+ size_t i, slen, n;
+
+ cassert(config_prof);
+
+ i = 0;
+ slen = strlen(s);
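+ /*
+ * For example, with 80 bytes free in prof_dump_buf and a 100 byte string,
+ * the first iteration copies 80 bytes, the now-full buffer is flushed, and
+ * a second iteration copies the remaining 20 bytes.
+ */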
+ while (i < slen) {
+ /* Flush the buffer if it is full. */
+ if (prof_dump_buf_end == PROF_DUMP_BUFSIZE)
+ if (prof_dump_flush(propagate_err) && propagate_err)
+ return (true);
+
+ if (prof_dump_buf_end + slen - i <= PROF_DUMP_BUFSIZE) {
+ /* Finish writing. */
+ n = slen - i;
+ } else {
+ /* Write as much of s as will fit. */
+ n = PROF_DUMP_BUFSIZE - prof_dump_buf_end;
+ }
+ memcpy(&prof_dump_buf[prof_dump_buf_end], &s[i], n);
+ prof_dump_buf_end += n;
+ i += n;
+ }
+
+ return (false);
+}
+
+JEMALLOC_FORMAT_PRINTF(2, 3)
+static bool
+prof_dump_printf(bool propagate_err, const char *format, ...)
+{
+ bool ret;
+ va_list ap;
+ char buf[PROF_PRINTF_BUFSIZE];
+
+ va_start(ap, format);
+ malloc_vsnprintf(buf, sizeof(buf), format, ap);
+ va_end(ap);
+ ret = prof_dump_write(propagate_err, buf);
+
+ return (ret);
+}
+
+static void
+prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata)
+{
+
+ malloc_mutex_assert_owner(tsdn, tctx->tdata->lock);
+
+ malloc_mutex_lock(tsdn, tctx->gctx->lock);
+
+ switch (tctx->state) {
+ case prof_tctx_state_initializing:
+ malloc_mutex_unlock(tsdn, tctx->gctx->lock);
+ return;
+ case prof_tctx_state_nominal:
+ tctx->state = prof_tctx_state_dumping;
+ malloc_mutex_unlock(tsdn, tctx->gctx->lock);
+
+ memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t));
+
+ tdata->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
+ tdata->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
+ if (opt_prof_accum) {
+ tdata->cnt_summed.accumobjs +=
+ tctx->dump_cnts.accumobjs;
+ tdata->cnt_summed.accumbytes +=
+ tctx->dump_cnts.accumbytes;
+ }
+ break;
+ case prof_tctx_state_dumping:
+ case prof_tctx_state_purgatory:
+ not_reached();
+ }
+}
+
+static void
+prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx)
+{
+
+ malloc_mutex_assert_owner(tsdn, gctx->lock);
+
+ gctx->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
+ gctx->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
+ if (opt_prof_accum) {
+ gctx->cnt_summed.accumobjs += tctx->dump_cnts.accumobjs;
+ gctx->cnt_summed.accumbytes += tctx->dump_cnts.accumbytes;
+ }
+}
+
+static prof_tctx_t *
+prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
+{
+ tsdn_t *tsdn = (tsdn_t *)arg;
+
+ malloc_mutex_assert_owner(tsdn, tctx->gctx->lock);
+
+ switch (tctx->state) {
+ case prof_tctx_state_nominal:
+ /* New since dumping started; ignore. */
+ break;
+ case prof_tctx_state_dumping:
+ case prof_tctx_state_purgatory:
+ prof_tctx_merge_gctx(tsdn, tctx, tctx->gctx);
+ break;
+ default:
+ not_reached();
+ }
+
+ return (NULL);
+}
+
+struct prof_tctx_dump_iter_arg_s {
+ tsdn_t *tsdn;
+ bool propagate_err;
+};
+
+static prof_tctx_t *
+prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque)
+{
+ struct prof_tctx_dump_iter_arg_s *arg =
+ (struct prof_tctx_dump_iter_arg_s *)opaque;
+
+ malloc_mutex_assert_owner(arg->tsdn, tctx->gctx->lock);
+
+ switch (tctx->state) {
+ case prof_tctx_state_initializing:
+ case prof_tctx_state_nominal:
+ /* Not captured by this dump. */
+ break;
+ case prof_tctx_state_dumping:
+ case prof_tctx_state_purgatory:
+ if (prof_dump_printf(arg->propagate_err,
+ " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": "
+ "%"FMTu64"]\n", tctx->thr_uid, tctx->dump_cnts.curobjs,
+ tctx->dump_cnts.curbytes, tctx->dump_cnts.accumobjs,
+ tctx->dump_cnts.accumbytes))
+ return (tctx);
+ break;
+ default:
+ not_reached();
+ }
+ return (NULL);
+}
+
+static prof_tctx_t *
+prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
+{
+ tsdn_t *tsdn = (tsdn_t *)arg;
+ prof_tctx_t *ret;
+
+ malloc_mutex_assert_owner(tsdn, tctx->gctx->lock);
+
+ switch (tctx->state) {
+ case prof_tctx_state_nominal:
+ /* New since dumping started; ignore. */
+ break;
+ case prof_tctx_state_dumping:
+ tctx->state = prof_tctx_state_nominal;
+ break;
+ case prof_tctx_state_purgatory:
+ ret = tctx;
+ goto label_return;
+ default:
+ not_reached();
+ }
+
+ ret = NULL;
+label_return:
+ return (ret);
+}
+
+static void
+prof_dump_gctx_prep(tsdn_t *tsdn, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs)
+{
+
+ cassert(config_prof);
+
+ malloc_mutex_lock(tsdn, gctx->lock);
+
+ /*
+ * Increment nlimbo so that gctx won't go away before dump.
+ * Additionally, link gctx into the dump list so that it is included in
+ * prof_dump()'s second pass.
+ */
+ gctx->nlimbo++;
+ gctx_tree_insert(gctxs, gctx);
+
+ memset(&gctx->cnt_summed, 0, sizeof(prof_cnt_t));
+
+ malloc_mutex_unlock(tsdn, gctx->lock);
+}
+
+struct prof_gctx_merge_iter_arg_s {
+ tsdn_t *tsdn;
+ size_t leak_ngctx;
+};
+
+static prof_gctx_t *
+prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque)
+{
+ struct prof_gctx_merge_iter_arg_s *arg =
+ (struct prof_gctx_merge_iter_arg_s *)opaque;
+
+ malloc_mutex_lock(arg->tsdn, gctx->lock);
+ tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter,
+ (void *)arg->tsdn);
+ if (gctx->cnt_summed.curobjs != 0)
+ arg->leak_ngctx++;
+ malloc_mutex_unlock(arg->tsdn, gctx->lock);
+
+ return (NULL);
+}
+
+static void
+prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs)
+{
+ prof_tdata_t *tdata = prof_tdata_get(tsd, false);
+ prof_gctx_t *gctx;
+
+ /*
+ * Standard tree iteration won't work here, because as soon as we
+ * decrement gctx->nlimbo and unlock gctx, another thread can
+ * concurrently destroy it, which will corrupt the tree. Therefore,
+ * tear down the tree one node at a time during iteration.
+ */
+ while ((gctx = gctx_tree_first(gctxs)) != NULL) {
+ gctx_tree_remove(gctxs, gctx);
+ malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
+ {
+ prof_tctx_t *next;
+
+ next = NULL;
+ do {
+ prof_tctx_t *to_destroy =
+ tctx_tree_iter(&gctx->tctxs, next,
+ prof_tctx_finish_iter,
+ (void *)tsd_tsdn(tsd));
+ if (to_destroy != NULL) {
+ next = tctx_tree_next(&gctx->tctxs,
+ to_destroy);
+ tctx_tree_remove(&gctx->tctxs,
+ to_destroy);
+ idalloctm(tsd_tsdn(tsd), to_destroy,
+ NULL, true, true);
+ } else
+ next = NULL;
+ } while (next != NULL);
+ }
+ gctx->nlimbo--;
+ if (prof_gctx_should_destroy(gctx)) {
+ gctx->nlimbo++;
+ malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
+ prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
+ } else
+ malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
+ }
+}
+
+struct prof_tdata_merge_iter_arg_s {
+ tsdn_t *tsdn;
+ prof_cnt_t cnt_all;
+};
+
+static prof_tdata_t *
+prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
+ void *opaque)
+{
+ struct prof_tdata_merge_iter_arg_s *arg =
+ (struct prof_tdata_merge_iter_arg_s *)opaque;
+
+ malloc_mutex_lock(arg->tsdn, tdata->lock);
+ if (!tdata->expired) {
+ size_t tabind;
+ union {
+ prof_tctx_t *p;
+ void *v;
+ } tctx;
+
+ tdata->dumping = true;
+ memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t));
+ for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL,
+ &tctx.v);)
+ prof_tctx_merge_tdata(arg->tsdn, tctx.p, tdata);
+
+ arg->cnt_all.curobjs += tdata->cnt_summed.curobjs;
+ arg->cnt_all.curbytes += tdata->cnt_summed.curbytes;
+ if (opt_prof_accum) {
+ arg->cnt_all.accumobjs += tdata->cnt_summed.accumobjs;
+ arg->cnt_all.accumbytes += tdata->cnt_summed.accumbytes;
+ }
+ } else
+ tdata->dumping = false;
+ malloc_mutex_unlock(arg->tsdn, tdata->lock);
+
+ return (NULL);
+}
+
+static prof_tdata_t *
+prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
+{
+ bool propagate_err = *(bool *)arg;
+
+ if (!tdata->dumping)
+ return (NULL);
+
+ if (prof_dump_printf(propagate_err,
+ " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]%s%s\n",
+ tdata->thr_uid, tdata->cnt_summed.curobjs,
+ tdata->cnt_summed.curbytes, tdata->cnt_summed.accumobjs,
+ tdata->cnt_summed.accumbytes,
+ (tdata->thread_name != NULL) ? " " : "",
+ (tdata->thread_name != NULL) ? tdata->thread_name : ""))
+ return (tdata);
+ return (NULL);
+}
+
+#ifdef JEMALLOC_JET
+#undef prof_dump_header
+#define prof_dump_header JEMALLOC_N(prof_dump_header_impl)
+#endif
+static bool
+prof_dump_header(tsdn_t *tsdn, bool propagate_err, const prof_cnt_t *cnt_all)
+{
+ bool ret;
+
+ if (prof_dump_printf(propagate_err,
+ "heap_v2/%"FMTu64"\n"
+ " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n",
+ ((uint64_t)1U << lg_prof_sample), cnt_all->curobjs,
+ cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes))
+ return (true);
+
+ malloc_mutex_lock(tsdn, &tdatas_mtx);
+ ret = (tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter,
+ (void *)&propagate_err) != NULL);
+ malloc_mutex_unlock(tsdn, &tdatas_mtx);
+ return (ret);
+}
+#ifdef JEMALLOC_JET
+#undef prof_dump_header
+#define prof_dump_header JEMALLOC_N(prof_dump_header)
+prof_dump_header_t *prof_dump_header = JEMALLOC_N(prof_dump_header_impl);
+#endif
+
+static bool
+prof_dump_gctx(tsdn_t *tsdn, bool propagate_err, prof_gctx_t *gctx,
+ const prof_bt_t *bt, prof_gctx_tree_t *gctxs)
+{
+ bool ret;
+ unsigned i;
+ struct prof_tctx_dump_iter_arg_s prof_tctx_dump_iter_arg;
+
+ cassert(config_prof);
+ malloc_mutex_assert_owner(tsdn, gctx->lock);
+
+ /* Avoid dumping gctx's that have no useful data. */
+ if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) ||
+ (opt_prof_accum && gctx->cnt_summed.accumobjs == 0)) {
+ assert(gctx->cnt_summed.curobjs == 0);
+ assert(gctx->cnt_summed.curbytes == 0);
+ assert(gctx->cnt_summed.accumobjs == 0);
+ assert(gctx->cnt_summed.accumbytes == 0);
+ ret = false;
+ goto label_return;
+ }
+
+ if (prof_dump_printf(propagate_err, "@")) {
+ ret = true;
+ goto label_return;
+ }
+ for (i = 0; i < bt->len; i++) {
+ if (prof_dump_printf(propagate_err, " %#"FMTxPTR,
+ (uintptr_t)bt->vec[i])) {
+ ret = true;
+ goto label_return;
+ }
+ }
+
+ if (prof_dump_printf(propagate_err,
+ "\n"
+ " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n",
+ gctx->cnt_summed.curobjs, gctx->cnt_summed.curbytes,
+ gctx->cnt_summed.accumobjs, gctx->cnt_summed.accumbytes)) {
+ ret = true;
+ goto label_return;
+ }
+
+ prof_tctx_dump_iter_arg.tsdn = tsdn;
+ prof_tctx_dump_iter_arg.propagate_err = propagate_err;
+ if (tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter,
+ (void *)&prof_tctx_dump_iter_arg) != NULL) {
+ ret = true;
+ goto label_return;
+ }
+
+ ret = false;
+label_return:
+ return (ret);
+}
+
+#ifndef _WIN32
+JEMALLOC_FORMAT_PRINTF(1, 2)
+static int
+prof_open_maps(const char *format, ...)
+{
+ int mfd;
+ va_list ap;
+ char filename[PATH_MAX + 1];
+
+ va_start(ap, format);
+ malloc_vsnprintf(filename, sizeof(filename), format, ap);
+ va_end(ap);
+ mfd = open(filename, O_RDONLY);
+
+ return (mfd);
+}
+#endif
+
+static int
+prof_getpid(void)
+{
+
+#ifdef _WIN32
+ return (GetCurrentProcessId());
+#else
+ return (getpid());
+#endif
+}
+
+static bool
+prof_dump_maps(bool propagate_err)
+{
+ bool ret;
+ int mfd;
+
+ cassert(config_prof);
+#ifdef __FreeBSD__
+ mfd = prof_open_maps("/proc/curproc/map");
+#elif defined(_WIN32)
+ mfd = -1; /* Not implemented. */
+#else
+ {
+ int pid = prof_getpid();
+
+ mfd = prof_open_maps("/proc/%d/task/%d/maps", pid, pid);
+ if (mfd == -1)
+ mfd = prof_open_maps("/proc/%d/maps", pid);
+ }
+#endif
+ if (mfd != -1) {
+ ssize_t nread;
+
+ if (prof_dump_write(propagate_err, "\nMAPPED_LIBRARIES:\n") &&
+ propagate_err) {
+ ret = true;
+ goto label_return;
+ }
+ nread = 0;
+ do {
+ prof_dump_buf_end += nread;
+ if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) {
+ /* Make space in prof_dump_buf before read(). */
+ if (prof_dump_flush(propagate_err) &&
+ propagate_err) {
+ ret = true;
+ goto label_return;
+ }
+ }
+ nread = read(mfd, &prof_dump_buf[prof_dump_buf_end],
+ PROF_DUMP_BUFSIZE - prof_dump_buf_end);
+ } while (nread > 0);
+ } else {
+ ret = true;
+ goto label_return;
+ }
+
+ ret = false;
+label_return:
+ if (mfd != -1)
+ close(mfd);
+ return (ret);
+}
+
+/*
+ * See prof_sample_threshold_update() comment for why the body of this function
+ * is conditionally compiled.
+ */
+static void
+prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx,
+ const char *filename)
+{
+
+#ifdef JEMALLOC_PROF
+ /*
+ * Scaling is equivalent to AdjustSamples() in jeprof, but the result may
+ * differ slightly from what jeprof reports, because here we scale the
+ * summary values, whereas jeprof scales each context individually and
+ * reports the sums of the scaled values.
+ */
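+ /*
+ * For example, if the mean size of sampled objects equals the sample
+ * period, ratio is 1.0 and scale_factor is 1/(1 - exp(-1)), roughly 1.58;
+ * smaller objects are sampled less often and therefore scaled up more.
+ */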
+ if (cnt_all->curbytes != 0) {
+ double sample_period = (double)((uint64_t)1 << lg_prof_sample);
+ double ratio = (((double)cnt_all->curbytes) /
+ (double)cnt_all->curobjs) / sample_period;
+ double scale_factor = 1.0 / (1.0 - exp(-ratio));
+ uint64_t curbytes = (uint64_t)round(((double)cnt_all->curbytes)
+ * scale_factor);
+ uint64_t curobjs = (uint64_t)round(((double)cnt_all->curobjs) *
+ scale_factor);
+
+ malloc_printf("<jemalloc>: Leak approximation summary: ~%"FMTu64
+ " byte%s, ~%"FMTu64" object%s, >= %zu context%s\n",
+ curbytes, (curbytes != 1) ? "s" : "", curobjs, (curobjs !=
+ 1) ? "s" : "", leak_ngctx, (leak_ngctx != 1) ? "s" : "");
+ malloc_printf(
+ "<jemalloc>: Run jeprof on \"%s\" for leak detail\n",
+ filename);
+ }
+#endif
+}
+
+struct prof_gctx_dump_iter_arg_s {
+ tsdn_t *tsdn;
+ bool propagate_err;
+};
+
+static prof_gctx_t *
+prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque)
+{
+ prof_gctx_t *ret;
+ struct prof_gctx_dump_iter_arg_s *arg =
+ (struct prof_gctx_dump_iter_arg_s *)opaque;
+
+ malloc_mutex_lock(arg->tsdn, gctx->lock);
+
+ if (prof_dump_gctx(arg->tsdn, arg->propagate_err, gctx, &gctx->bt,
+ gctxs)) {
+ ret = gctx;
+ goto label_return;
+ }
+
+ ret = NULL;
+label_return:
+ malloc_mutex_unlock(arg->tsdn, gctx->lock);
+ return (ret);
+}
+
+static bool
+prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck)
+{
+ prof_tdata_t *tdata;
+ struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg;
+ size_t tabind;
+ union {
+ prof_gctx_t *p;
+ void *v;
+ } gctx;
+ struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg;
+ struct prof_gctx_dump_iter_arg_s prof_gctx_dump_iter_arg;
+ prof_gctx_tree_t gctxs;
+
+ cassert(config_prof);
+
+ tdata = prof_tdata_get(tsd, true);
+ if (tdata == NULL)
+ return (true);
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx);
+ prof_enter(tsd, tdata);
+
+ /*
+ * Put gctx's in limbo and clear their counters in preparation for
+ * summing.
+ */
+ gctx_tree_new(&gctxs);
+ for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);)
+ prof_dump_gctx_prep(tsd_tsdn(tsd), gctx.p, &gctxs);
+
+ /*
+ * Iterate over tdatas, and for the non-expired ones snapshot their tctx
+ * stats and merge them into the associated gctx's.
+ */
+ prof_tdata_merge_iter_arg.tsdn = tsd_tsdn(tsd);
+ memset(&prof_tdata_merge_iter_arg.cnt_all, 0, sizeof(prof_cnt_t));
+ malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
+ tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter,
+ (void *)&prof_tdata_merge_iter_arg);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
+
+ /* Merge tctx stats into gctx's. */
+ prof_gctx_merge_iter_arg.tsdn = tsd_tsdn(tsd);
+ prof_gctx_merge_iter_arg.leak_ngctx = 0;
+ gctx_tree_iter(&gctxs, NULL, prof_gctx_merge_iter,
+ (void *)&prof_gctx_merge_iter_arg);
+
+ prof_leave(tsd, tdata);
+
+ /* Create dump file. */
+ if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1)
+ goto label_open_close_error;
+
+ /* Dump profile header. */
+ if (prof_dump_header(tsd_tsdn(tsd), propagate_err,
+ &prof_tdata_merge_iter_arg.cnt_all))
+ goto label_write_error;
+
+ /* Dump per gctx profile stats. */
+ prof_gctx_dump_iter_arg.tsdn = tsd_tsdn(tsd);
+ prof_gctx_dump_iter_arg.propagate_err = propagate_err;
+ if (gctx_tree_iter(&gctxs, NULL, prof_gctx_dump_iter,
+ (void *)&prof_gctx_dump_iter_arg) != NULL)
+ goto label_write_error;
+
+ /* Dump /proc/<pid>/maps if possible. */
+ if (prof_dump_maps(propagate_err))
+ goto label_write_error;
+
+ if (prof_dump_close(propagate_err))
+ goto label_open_close_error;
+
+ prof_gctx_finish(tsd, &gctxs);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
+
+ if (leakcheck) {
+ prof_leakcheck(&prof_tdata_merge_iter_arg.cnt_all,
+ prof_gctx_merge_iter_arg.leak_ngctx, filename);
+ }
+ return (false);
+label_write_error:
+ prof_dump_close(propagate_err);
+label_open_close_error:
+ prof_gctx_finish(tsd, &gctxs);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
+ return (true);
+}
+
+#define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1)
+#define VSEQ_INVALID UINT64_C(0xffffffffffffffff)
+static void
+prof_dump_filename(char *filename, char v, uint64_t vseq)
+{
+
+ cassert(config_prof);
+
+ if (vseq != VSEQ_INVALID) {
+ /* "<prefix>.<pid>.<seq>.v<vseq>.heap" */
+ malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
+ "%s.%d.%"FMTu64".%c%"FMTu64".heap",
+ opt_prof_prefix, prof_getpid(), prof_dump_seq, v, vseq);
+ } else {
+ /* "<prefix>.<pid>.<seq>.<v>.heap" */
+ malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
+ "%s.%d.%"FMTu64".%c.heap",
+ opt_prof_prefix, prof_getpid(), prof_dump_seq, v);
+ }
+ prof_dump_seq++;
+}
+
+static void
+prof_fdump(void)
+{
+ tsd_t *tsd;
+ char filename[DUMP_FILENAME_BUFSIZE];
+
+ cassert(config_prof);
+ assert(opt_prof_final);
+ assert(opt_prof_prefix[0] != '\0');
+
+ if (!prof_booted)
+ return;
+ tsd = tsd_fetch();
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
+ prof_dump_filename(filename, 'f', VSEQ_INVALID);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
+ prof_dump(tsd, false, filename, opt_prof_leak);
+}
+
+void
+prof_idump(tsdn_t *tsdn)
+{
+ tsd_t *tsd;
+ prof_tdata_t *tdata;
+
+ cassert(config_prof);
+
+ if (!prof_booted || tsdn_null(tsdn))
+ return;
+ tsd = tsdn_tsd(tsdn);
+ tdata = prof_tdata_get(tsd, false);
+ if (tdata == NULL)
+ return;
+ if (tdata->enq) {
+ tdata->enq_idump = true;
+ return;
+ }
+
+ if (opt_prof_prefix[0] != '\0') {
+ char filename[PATH_MAX + 1];
+ malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
+ prof_dump_filename(filename, 'i', prof_dump_iseq);
+ prof_dump_iseq++;
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
+ prof_dump(tsd, false, filename, false);
+ }
+}
+
+bool
+prof_mdump(tsd_t *tsd, const char *filename)
+{
+ char filename_buf[DUMP_FILENAME_BUFSIZE];
+
+ cassert(config_prof);
+
+ if (!opt_prof || !prof_booted)
+ return (true);
+
+ if (filename == NULL) {
+ /* No filename specified, so automatically generate one. */
+ if (opt_prof_prefix[0] == '\0')
+ return (true);
+ malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
+ prof_dump_filename(filename_buf, 'm', prof_dump_mseq);
+ prof_dump_mseq++;
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
+ filename = filename_buf;
+ }
+ return (prof_dump(tsd, true, filename, false));
+}
+
+void
+prof_gdump(tsdn_t *tsdn)
+{
+ tsd_t *tsd;
+ prof_tdata_t *tdata;
+
+ cassert(config_prof);
+
+ if (!prof_booted || tsdn_null(tsdn))
+ return;
+ tsd = tsdn_tsd(tsdn);
+ tdata = prof_tdata_get(tsd, false);
+ if (tdata == NULL)
+ return;
+ if (tdata->enq) {
+ tdata->enq_gdump = true;
+ return;
+ }
+
+ if (opt_prof_prefix[0] != '\0') {
+ char filename[DUMP_FILENAME_BUFSIZE];
+ malloc_mutex_lock(tsdn, &prof_dump_seq_mtx);
+ prof_dump_filename(filename, 'u', prof_dump_useq);
+ prof_dump_useq++;
+ malloc_mutex_unlock(tsdn, &prof_dump_seq_mtx);
+ prof_dump(tsd, false, filename, false);
+ }
+}
+
+static void
+prof_bt_hash(const void *key, size_t r_hash[2])
+{
+ prof_bt_t *bt = (prof_bt_t *)key;
+
+ cassert(config_prof);
+
+ hash(bt->vec, bt->len * sizeof(void *), 0x94122f33U, r_hash);
+}
+
+static bool
+prof_bt_keycomp(const void *k1, const void *k2)
+{
+ const prof_bt_t *bt1 = (prof_bt_t *)k1;
+ const prof_bt_t *bt2 = (prof_bt_t *)k2;
+
+ cassert(config_prof);
+
+ if (bt1->len != bt2->len)
+ return (false);
+ return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0);
+}
+
+JEMALLOC_INLINE_C uint64_t
+prof_thr_uid_alloc(tsdn_t *tsdn)
+{
+ uint64_t thr_uid;
+
+ malloc_mutex_lock(tsdn, &next_thr_uid_mtx);
+ thr_uid = next_thr_uid;
+ next_thr_uid++;
+ malloc_mutex_unlock(tsdn, &next_thr_uid_mtx);
+
+ return (thr_uid);
+}
+
+static prof_tdata_t *
+prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
+ char *thread_name, bool active)
+{
+ prof_tdata_t *tdata;
+
+ cassert(config_prof);
+
+ /* Initialize an empty cache for this thread. */
+ tdata = (prof_tdata_t *)iallocztm(tsd_tsdn(tsd), sizeof(prof_tdata_t),
+ size2index(sizeof(prof_tdata_t)), false, NULL, true,
+ arena_get(TSDN_NULL, 0, true), true);
+ if (tdata == NULL)
+ return (NULL);
+
+ tdata->lock = prof_tdata_mutex_choose(thr_uid);
+ tdata->thr_uid = thr_uid;
+ tdata->thr_discrim = thr_discrim;
+ tdata->thread_name = thread_name;
+ tdata->attached = true;
+ tdata->expired = false;
+ tdata->tctx_uid_next = 0;
+
+ if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS, prof_bt_hash,
+ prof_bt_keycomp)) {
+ idalloctm(tsd_tsdn(tsd), tdata, NULL, true, true);
+ return (NULL);
+ }
+
+ tdata->prng_state = (uint64_t)(uintptr_t)tdata;
+ prof_sample_threshold_update(tdata);
+
+ tdata->enq = false;
+ tdata->enq_idump = false;
+ tdata->enq_gdump = false;
+
+ tdata->dumping = false;
+ tdata->active = active;
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
+ tdata_tree_insert(&tdatas, tdata);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
+
+ return (tdata);
+}
+
+prof_tdata_t *
+prof_tdata_init(tsd_t *tsd)
+{
+
+ return (prof_tdata_init_impl(tsd, prof_thr_uid_alloc(tsd_tsdn(tsd)), 0,
+ NULL, prof_thread_active_init_get(tsd_tsdn(tsd))));
+}
+
+static bool
+prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached)
+{
+
+ if (tdata->attached && !even_if_attached)
+ return (false);
+ if (ckh_count(&tdata->bt2tctx) != 0)
+ return (false);
+ return (true);
+}
+
+static bool
+prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata,
+ bool even_if_attached)
+{
+
+ malloc_mutex_assert_owner(tsdn, tdata->lock);
+
+ return (prof_tdata_should_destroy_unlocked(tdata, even_if_attached));
+}
+
+static void
+prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata,
+ bool even_if_attached)
+{
+
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), &tdatas_mtx);
+
+ tdata_tree_remove(&tdatas, tdata);
+
+ assert(prof_tdata_should_destroy_unlocked(tdata, even_if_attached));
+
+ if (tdata->thread_name != NULL)
+ idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, true, true);
+ ckh_delete(tsd, &tdata->bt2tctx);
+ idalloctm(tsd_tsdn(tsd), tdata, NULL, true, true);
+}
+
+static void
+prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached)
+{
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
+ prof_tdata_destroy_locked(tsd, tdata, even_if_attached);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
+}
+
+static void
+prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata)
+{
+ bool destroy_tdata;
+
+ malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
+ if (tdata->attached) {
+ destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata,
+ true);
+ /*
+ * Only detach if !destroy_tdata, because detaching would allow
+ * another thread to win the race to destroy tdata.
+ */
+ if (!destroy_tdata)
+ tdata->attached = false;
+ tsd_prof_tdata_set(tsd, NULL);
+ } else
+ destroy_tdata = false;
+ malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
+ if (destroy_tdata)
+ prof_tdata_destroy(tsd, tdata, true);
+}
+
+prof_tdata_t *
+prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata)
+{
+ uint64_t thr_uid = tdata->thr_uid;
+ uint64_t thr_discrim = tdata->thr_discrim + 1;
+ char *thread_name = (tdata->thread_name != NULL) ?
+ prof_thread_name_alloc(tsd_tsdn(tsd), tdata->thread_name) : NULL;
+ bool active = tdata->active;
+
+ prof_tdata_detach(tsd, tdata);
+ return (prof_tdata_init_impl(tsd, thr_uid, thr_discrim, thread_name,
+ active));
+}
+
+static bool
+prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata)
+{
+ bool destroy_tdata;
+
+ malloc_mutex_lock(tsdn, tdata->lock);
+ if (!tdata->expired) {
+ tdata->expired = true;
+ destroy_tdata = tdata->attached ? false :
+ prof_tdata_should_destroy(tsdn, tdata, false);
+ } else
+ destroy_tdata = false;
+ malloc_mutex_unlock(tsdn, tdata->lock);
+
+ return (destroy_tdata);
+}
+
+static prof_tdata_t *
+prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
+{
+ tsdn_t *tsdn = (tsdn_t *)arg;
+
+ return (prof_tdata_expire(tsdn, tdata) ? tdata : NULL);
+}
+
+void
+prof_reset(tsd_t *tsd, size_t lg_sample)
+{
+ prof_tdata_t *next;
+
+ assert(lg_sample < (sizeof(uint64_t) << 3));
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx);
+ malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
+
+ lg_prof_sample = lg_sample;
+
+ next = NULL;
+ do {
+ prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next,
+ prof_tdata_reset_iter, (void *)tsd);
+ if (to_destroy != NULL) {
+ next = tdata_tree_next(&tdatas, to_destroy);
+ prof_tdata_destroy_locked(tsd, to_destroy, false);
+ } else
+ next = NULL;
+ } while (next != NULL);
+
+ malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
+}
+
+void
+prof_tdata_cleanup(tsd_t *tsd)
+{
+ prof_tdata_t *tdata;
+
+ if (!config_prof)
+ return;
+
+ tdata = tsd_prof_tdata_get(tsd);
+ if (tdata != NULL)
+ prof_tdata_detach(tsd, tdata);
+}
+
+bool
+prof_active_get(tsdn_t *tsdn)
+{
+ bool prof_active_current;
+
+ malloc_mutex_lock(tsdn, &prof_active_mtx);
+ prof_active_current = prof_active;
+ malloc_mutex_unlock(tsdn, &prof_active_mtx);
+ return (prof_active_current);
+}
+
+bool
+prof_active_set(tsdn_t *tsdn, bool active)
+{
+ bool prof_active_old;
+
+ malloc_mutex_lock(tsdn, &prof_active_mtx);
+ prof_active_old = prof_active;
+ prof_active = active;
+ malloc_mutex_unlock(tsdn, &prof_active_mtx);
+ return (prof_active_old);
+}
+
+const char *
+prof_thread_name_get(tsd_t *tsd)
+{
+ prof_tdata_t *tdata;
+
+ tdata = prof_tdata_get(tsd, true);
+ if (tdata == NULL)
+ return ("");
+ return (tdata->thread_name != NULL ? tdata->thread_name : "");
+}
+
+static char *
+prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name)
+{
+ char *ret;
+ size_t size;
+
+ if (thread_name == NULL)
+ return (NULL);
+
+ size = strlen(thread_name) + 1;
+ if (size == 1)
+ return ("");
+
+ ret = iallocztm(tsdn, size, size2index(size), false, NULL, true,
+ arena_get(TSDN_NULL, 0, true), true);
+ if (ret == NULL)
+ return (NULL);
+ memcpy(ret, thread_name, size);
+ return (ret);
+}
+
+int
+prof_thread_name_set(tsd_t *tsd, const char *thread_name)
+{
+ prof_tdata_t *tdata;
+ unsigned i;
+ char *s;
+
+ tdata = prof_tdata_get(tsd, true);
+ if (tdata == NULL)
+ return (EAGAIN);
+
+ /* Validate input. */
+ if (thread_name == NULL)
+ return (EFAULT);
+ for (i = 0; thread_name[i] != '\0'; i++) {
+ char c = thread_name[i];
+ if (!isgraph(c) && !isblank(c))
+ return (EFAULT);
+ }
+
+ s = prof_thread_name_alloc(tsd_tsdn(tsd), thread_name);
+ if (s == NULL)
+ return (EAGAIN);
+
+ if (tdata->thread_name != NULL) {
+ idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, true, true);
+ tdata->thread_name = NULL;
+ }
+ if (strlen(s) > 0)
+ tdata->thread_name = s;
+ return (0);
+}
+
+bool
+prof_thread_active_get(tsd_t *tsd)
+{
+ prof_tdata_t *tdata;
+
+ tdata = prof_tdata_get(tsd, true);
+ if (tdata == NULL)
+ return (false);
+ return (tdata->active);
+}
+
+bool
+prof_thread_active_set(tsd_t *tsd, bool active)
+{
+ prof_tdata_t *tdata;
+
+ tdata = prof_tdata_get(tsd, true);
+ if (tdata == NULL)
+ return (true);
+ tdata->active = active;
+ return (false);
+}
+
+bool
+prof_thread_active_init_get(tsdn_t *tsdn)
+{
+ bool active_init;
+
+ malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx);
+ active_init = prof_thread_active_init;
+ malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx);
+ return (active_init);
+}
+
+bool
+prof_thread_active_init_set(tsdn_t *tsdn, bool active_init)
+{
+ bool active_init_old;
+
+ malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx);
+ active_init_old = prof_thread_active_init;
+ prof_thread_active_init = active_init;
+ malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx);
+ return (active_init_old);
+}
+
+bool
+prof_gdump_get(tsdn_t *tsdn)
+{
+ bool prof_gdump_current;
+
+ malloc_mutex_lock(tsdn, &prof_gdump_mtx);
+ prof_gdump_current = prof_gdump_val;
+ malloc_mutex_unlock(tsdn, &prof_gdump_mtx);
+ return (prof_gdump_current);
+}
+
+bool
+prof_gdump_set(tsdn_t *tsdn, bool gdump)
+{
+ bool prof_gdump_old;
+
+ malloc_mutex_lock(tsdn, &prof_gdump_mtx);
+ prof_gdump_old = prof_gdump_val;
+ prof_gdump_val = gdump;
+ malloc_mutex_unlock(tsdn, &prof_gdump_mtx);
+ return (prof_gdump_old);
+}
+
+void
+prof_boot0(void)
+{
+
+ cassert(config_prof);
+
+ memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT,
+ sizeof(PROF_PREFIX_DEFAULT));
+}
+
+void
+prof_boot1(void)
+{
+
+ cassert(config_prof);
+
+ /*
+ * opt_prof must be in its final state before any arenas are
+ * initialized, so this function must be executed early.
+ */
+
+ if (opt_prof_leak && !opt_prof) {
+ /*
+ * Enable opt_prof, but in such a way that profiles are never
+ * automatically dumped.
+ */
+ opt_prof = true;
+ opt_prof_gdump = false;
+ } else if (opt_prof) {
+ if (opt_lg_prof_interval >= 0) {
+ prof_interval = (((uint64_t)1U) <<
+ opt_lg_prof_interval);
+ }
+ }
+}
+
+bool
+prof_boot2(tsd_t *tsd)
+{
+
+ cassert(config_prof);
+
+ if (opt_prof) {
+ unsigned i;
+
+ lg_prof_sample = opt_lg_prof_sample;
+
+ prof_active = opt_prof_active;
+ if (malloc_mutex_init(&prof_active_mtx, "prof_active",
+ WITNESS_RANK_PROF_ACTIVE))
+ return (true);
+
+ prof_gdump_val = opt_prof_gdump;
+ if (malloc_mutex_init(&prof_gdump_mtx, "prof_gdump",
+ WITNESS_RANK_PROF_GDUMP))
+ return (true);
+
+ prof_thread_active_init = opt_prof_thread_active_init;
+ if (malloc_mutex_init(&prof_thread_active_init_mtx,
+ "prof_thread_active_init",
+ WITNESS_RANK_PROF_THREAD_ACTIVE_INIT))
+ return (true);
+
+ if (ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash,
+ prof_bt_keycomp))
+ return (true);
+ if (malloc_mutex_init(&bt2gctx_mtx, "prof_bt2gctx",
+ WITNESS_RANK_PROF_BT2GCTX))
+ return (true);
+
+ tdata_tree_new(&tdatas);
+ if (malloc_mutex_init(&tdatas_mtx, "prof_tdatas",
+ WITNESS_RANK_PROF_TDATAS))
+ return (true);
+
+ next_thr_uid = 0;
+ if (malloc_mutex_init(&next_thr_uid_mtx, "prof_next_thr_uid",
+ WITNESS_RANK_PROF_NEXT_THR_UID))
+ return (true);
+
+ if (malloc_mutex_init(&prof_dump_seq_mtx, "prof_dump_seq",
+ WITNESS_RANK_PROF_DUMP_SEQ))
+ return (true);
+ if (malloc_mutex_init(&prof_dump_mtx, "prof_dump",
+ WITNESS_RANK_PROF_DUMP))
+ return (true);
+
+ if (opt_prof_final && opt_prof_prefix[0] != '\0' &&
+ atexit(prof_fdump) != 0) {
+ malloc_write("<jemalloc>: Error in atexit()\n");
+ if (opt_abort)
+ abort();
+ }
+
+ gctx_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd),
+ PROF_NCTX_LOCKS * sizeof(malloc_mutex_t));
+ if (gctx_locks == NULL)
+ return (true);
+ for (i = 0; i < PROF_NCTX_LOCKS; i++) {
+ if (malloc_mutex_init(&gctx_locks[i], "prof_gctx",
+ WITNESS_RANK_PROF_GCTX))
+ return (true);
+ }
+
+ tdata_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd),
+ PROF_NTDATA_LOCKS * sizeof(malloc_mutex_t));
+ if (tdata_locks == NULL)
+ return (true);
+ for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
+ if (malloc_mutex_init(&tdata_locks[i], "prof_tdata",
+ WITNESS_RANK_PROF_TDATA))
+ return (true);
+ }
+ }
+
+#ifdef JEMALLOC_PROF_LIBGCC
+ /*
+ * Cause the backtracing machinery to allocate its internal state
+ * before enabling profiling.
+ */
+ _Unwind_Backtrace(prof_unwind_init_callback, NULL);
+#endif
+
+ prof_booted = true;
+
+ return (false);
+}
+
+void
+prof_prefork0(tsdn_t *tsdn)
+{
+
+ if (opt_prof) {
+ unsigned i;
+
+ malloc_mutex_prefork(tsdn, &prof_dump_mtx);
+ malloc_mutex_prefork(tsdn, &bt2gctx_mtx);
+ malloc_mutex_prefork(tsdn, &tdatas_mtx);
+ for (i = 0; i < PROF_NTDATA_LOCKS; i++)
+ malloc_mutex_prefork(tsdn, &tdata_locks[i]);
+ for (i = 0; i < PROF_NCTX_LOCKS; i++)
+ malloc_mutex_prefork(tsdn, &gctx_locks[i]);
+ }
+}
+
+void
+prof_prefork1(tsdn_t *tsdn)
+{
+
+ if (opt_prof) {
+ malloc_mutex_prefork(tsdn, &prof_active_mtx);
+ malloc_mutex_prefork(tsdn, &prof_dump_seq_mtx);
+ malloc_mutex_prefork(tsdn, &prof_gdump_mtx);
+ malloc_mutex_prefork(tsdn, &next_thr_uid_mtx);
+ malloc_mutex_prefork(tsdn, &prof_thread_active_init_mtx);
+ }
+}
+
+void
+prof_postfork_parent(tsdn_t *tsdn)
+{
+
+ if (opt_prof) {
+ unsigned i;
+
+ malloc_mutex_postfork_parent(tsdn,
+ &prof_thread_active_init_mtx);
+ malloc_mutex_postfork_parent(tsdn, &next_thr_uid_mtx);
+ malloc_mutex_postfork_parent(tsdn, &prof_gdump_mtx);
+ malloc_mutex_postfork_parent(tsdn, &prof_dump_seq_mtx);
+ malloc_mutex_postfork_parent(tsdn, &prof_active_mtx);
+ for (i = 0; i < PROF_NCTX_LOCKS; i++)
+ malloc_mutex_postfork_parent(tsdn, &gctx_locks[i]);
+ for (i = 0; i < PROF_NTDATA_LOCKS; i++)
+ malloc_mutex_postfork_parent(tsdn, &tdata_locks[i]);
+ malloc_mutex_postfork_parent(tsdn, &tdatas_mtx);
+ malloc_mutex_postfork_parent(tsdn, &bt2gctx_mtx);
+ malloc_mutex_postfork_parent(tsdn, &prof_dump_mtx);
+ }
+}
+
+void
+prof_postfork_child(tsdn_t *tsdn)
+{
+
+ if (opt_prof) {
+ unsigned i;
+
+ malloc_mutex_postfork_child(tsdn, &prof_thread_active_init_mtx);
+ malloc_mutex_postfork_child(tsdn, &next_thr_uid_mtx);
+ malloc_mutex_postfork_child(tsdn, &prof_gdump_mtx);
+ malloc_mutex_postfork_child(tsdn, &prof_dump_seq_mtx);
+ malloc_mutex_postfork_child(tsdn, &prof_active_mtx);
+ for (i = 0; i < PROF_NCTX_LOCKS; i++)
+ malloc_mutex_postfork_child(tsdn, &gctx_locks[i]);
+ for (i = 0; i < PROF_NTDATA_LOCKS; i++)
+ malloc_mutex_postfork_child(tsdn, &tdata_locks[i]);
+ malloc_mutex_postfork_child(tsdn, &tdatas_mtx);
+ malloc_mutex_postfork_child(tsdn, &bt2gctx_mtx);
+ malloc_mutex_postfork_child(tsdn, &prof_dump_mtx);
+ }
+}
+
+/******************************************************************************/
diff --git a/deps/jemalloc/src/quarantine.c b/deps/jemalloc/src/quarantine.c
new file mode 100644
index 0000000..18903fb
--- /dev/null
+++ b/deps/jemalloc/src/quarantine.c
@@ -0,0 +1,183 @@
+#define JEMALLOC_QUARANTINE_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/*
+ * Quarantine pointers close to NULL encode state information that is used for
+ * cleanup during thread shutdown.
+ */
+#define QUARANTINE_STATE_REINCARNATED ((quarantine_t *)(uintptr_t)1)
+#define QUARANTINE_STATE_PURGATORY ((quarantine_t *)(uintptr_t)2)
+#define QUARANTINE_STATE_MAX QUARANTINE_STATE_PURGATORY
+
+/******************************************************************************/
+/* Function prototypes for non-inline static functions. */
+
+static quarantine_t *quarantine_grow(tsd_t *tsd, quarantine_t *quarantine);
+static void quarantine_drain_one(tsdn_t *tsdn, quarantine_t *quarantine);
+static void quarantine_drain(tsdn_t *tsdn, quarantine_t *quarantine,
+ size_t upper_bound);
+
+/******************************************************************************/
+
+static quarantine_t *
+quarantine_init(tsdn_t *tsdn, size_t lg_maxobjs)
+{
+ quarantine_t *quarantine;
+ size_t size;
+
+ size = offsetof(quarantine_t, objs) + ((ZU(1) << lg_maxobjs) *
+ sizeof(quarantine_obj_t));
+ quarantine = (quarantine_t *)iallocztm(tsdn, size, size2index(size),
+ false, NULL, true, arena_get(TSDN_NULL, 0, true), true);
+ if (quarantine == NULL)
+ return (NULL);
+ quarantine->curbytes = 0;
+ quarantine->curobjs = 0;
+ quarantine->first = 0;
+ quarantine->lg_maxobjs = lg_maxobjs;
+
+ return (quarantine);
+}
+
+void
+quarantine_alloc_hook_work(tsd_t *tsd)
+{
+ quarantine_t *quarantine;
+
+ if (!tsd_nominal(tsd))
+ return;
+
+ quarantine = quarantine_init(tsd_tsdn(tsd), LG_MAXOBJS_INIT);
+ /*
+ * Check again whether quarantine has been initialized, because
+ * quarantine_init() may have triggered recursive initialization.
+ */
+ if (tsd_quarantine_get(tsd) == NULL)
+ tsd_quarantine_set(tsd, quarantine);
+ else
+ idalloctm(tsd_tsdn(tsd), quarantine, NULL, true, true);
+}
+
+static quarantine_t *
+quarantine_grow(tsd_t *tsd, quarantine_t *quarantine)
+{
+ quarantine_t *ret;
+
+ ret = quarantine_init(tsd_tsdn(tsd), quarantine->lg_maxobjs + 1);
+ if (ret == NULL) {
+ quarantine_drain_one(tsd_tsdn(tsd), quarantine);
+ return (quarantine);
+ }
+
+ ret->curbytes = quarantine->curbytes;
+ ret->curobjs = quarantine->curobjs;
+ if (quarantine->first + quarantine->curobjs <= (ZU(1) <<
+ quarantine->lg_maxobjs)) {
+ /* objs ring buffer data are contiguous. */
+ memcpy(ret->objs, &quarantine->objs[quarantine->first],
+ quarantine->curobjs * sizeof(quarantine_obj_t));
+ } else {
+ /* objs ring buffer data wrap around. */
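+ /*
+ * For example, with lg_maxobjs == 3, first == 6 and curobjs == 4,
+ * slots 6..7 are copied first (ncopy_a == 2), then slots 0..1
+ * (ncopy_b == 2).
+ */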
+ size_t ncopy_a = (ZU(1) << quarantine->lg_maxobjs) -
+ quarantine->first;
+ size_t ncopy_b = quarantine->curobjs - ncopy_a;
+
+ memcpy(ret->objs, &quarantine->objs[quarantine->first], ncopy_a
+ * sizeof(quarantine_obj_t));
+ memcpy(&ret->objs[ncopy_a], quarantine->objs, ncopy_b *
+ sizeof(quarantine_obj_t));
+ }
+ idalloctm(tsd_tsdn(tsd), quarantine, NULL, true, true);
+
+ tsd_quarantine_set(tsd, ret);
+ return (ret);
+}
+
+static void
+quarantine_drain_one(tsdn_t *tsdn, quarantine_t *quarantine)
+{
+ quarantine_obj_t *obj = &quarantine->objs[quarantine->first];
+ assert(obj->usize == isalloc(tsdn, obj->ptr, config_prof));
+ idalloctm(tsdn, obj->ptr, NULL, false, true);
+ quarantine->curbytes -= obj->usize;
+ quarantine->curobjs--;
+ quarantine->first = (quarantine->first + 1) & ((ZU(1) <<
+ quarantine->lg_maxobjs) - 1);
+}
+
+static void
+quarantine_drain(tsdn_t *tsdn, quarantine_t *quarantine, size_t upper_bound)
+{
+
+ while (quarantine->curbytes > upper_bound && quarantine->curobjs > 0)
+ quarantine_drain_one(tsdn, quarantine);
+}
+
+void
+quarantine(tsd_t *tsd, void *ptr)
+{
+ quarantine_t *quarantine;
+ size_t usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
+
+ cassert(config_fill);
+ assert(opt_quarantine);
+
+ if ((quarantine = tsd_quarantine_get(tsd)) == NULL) {
+ idalloctm(tsd_tsdn(tsd), ptr, NULL, false, true);
+ return;
+ }
+ /*
+ * Drain one or more objects if the quarantine size limit would be
+ * exceeded by appending ptr.
+ */
+ if (quarantine->curbytes + usize > opt_quarantine) {
+ size_t upper_bound = (opt_quarantine >= usize) ? opt_quarantine
+ - usize : 0;
+ quarantine_drain(tsd_tsdn(tsd), quarantine, upper_bound);
+ }
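+ /*
+ * For example, with opt_quarantine == 16384 and usize == 4096, objects
+ * are drained until curbytes <= 12288 (or the quarantine is empty), so
+ * that appending ptr cannot push the total past the 16384 byte limit.
+ */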
+ /* Grow the quarantine ring buffer if it's full. */
+ if (quarantine->curobjs == (ZU(1) << quarantine->lg_maxobjs))
+ quarantine = quarantine_grow(tsd, quarantine);
+ /* quarantine_grow() must free a slot if it fails to grow. */
+ assert(quarantine->curobjs < (ZU(1) << quarantine->lg_maxobjs));
+ /* Append ptr if its size doesn't exceed the quarantine size. */
+ if (quarantine->curbytes + usize <= opt_quarantine) {
+ size_t offset = (quarantine->first + quarantine->curobjs) &
+ ((ZU(1) << quarantine->lg_maxobjs) - 1);
+ quarantine_obj_t *obj = &quarantine->objs[offset];
+ obj->ptr = ptr;
+ obj->usize = usize;
+ quarantine->curbytes += usize;
+ quarantine->curobjs++;
+ if (config_fill && unlikely(opt_junk_free)) {
+ /*
+ * Only do redzone validation if Valgrind isn't in
+ * operation.
+ */
+ if ((!config_valgrind || likely(!in_valgrind))
+ && usize <= SMALL_MAXCLASS)
+ arena_quarantine_junk_small(ptr, usize);
+ else
+ memset(ptr, JEMALLOC_FREE_JUNK, usize);
+ }
+ } else {
+ assert(quarantine->curbytes == 0);
+ idalloctm(tsd_tsdn(tsd), ptr, NULL, false, true);
+ }
+}
+
+void
+quarantine_cleanup(tsd_t *tsd)
+{
+ quarantine_t *quarantine;
+
+ if (!config_fill)
+ return;
+
+ quarantine = tsd_quarantine_get(tsd);
+ if (quarantine != NULL) {
+ quarantine_drain(tsd_tsdn(tsd), quarantine, 0);
+ idalloctm(tsd_tsdn(tsd), quarantine, NULL, true, true);
+ tsd_quarantine_set(tsd, NULL);
+ }
+}
diff --git a/deps/jemalloc/src/rtree.c b/deps/jemalloc/src/rtree.c
new file mode 100644
index 0000000..f2e2997
--- /dev/null
+++ b/deps/jemalloc/src/rtree.c
@@ -0,0 +1,132 @@
+#define JEMALLOC_RTREE_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+static unsigned
+hmin(unsigned ha, unsigned hb)
+{
+
+ return (ha < hb ? ha : hb);
+}
+
+/* Only the most significant bits of keys passed to rtree_[gs]et() are used. */
+bool
+rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc,
+ rtree_node_dalloc_t *dalloc)
+{
+ unsigned bits_in_leaf, height, i;
+
+ assert(RTREE_HEIGHT_MAX == ((ZU(1) << (LG_SIZEOF_PTR+3)) /
+ RTREE_BITS_PER_LEVEL));
+ assert(bits > 0 && bits <= (sizeof(uintptr_t) << 3));
+
+ bits_in_leaf = (bits % RTREE_BITS_PER_LEVEL) == 0 ? RTREE_BITS_PER_LEVEL
+ : (bits % RTREE_BITS_PER_LEVEL);
+ if (bits > bits_in_leaf) {
+ height = 1 + (bits - bits_in_leaf) / RTREE_BITS_PER_LEVEL;
+ if ((height-1) * RTREE_BITS_PER_LEVEL + bits_in_leaf != bits)
+ height++;
+ } else
+ height = 1;
+ assert((height-1) * RTREE_BITS_PER_LEVEL + bits_in_leaf == bits);
+
+ rtree->alloc = alloc;
+ rtree->dalloc = dalloc;
+ rtree->height = height;
+
+ /* Root level. */
+ rtree->levels[0].subtree = NULL;
+ rtree->levels[0].bits = (height > 1) ? RTREE_BITS_PER_LEVEL :
+ bits_in_leaf;
+ rtree->levels[0].cumbits = rtree->levels[0].bits;
+ /* Interior levels. */
+ for (i = 1; i < height-1; i++) {
+ rtree->levels[i].subtree = NULL;
+ rtree->levels[i].bits = RTREE_BITS_PER_LEVEL;
+ rtree->levels[i].cumbits = rtree->levels[i-1].cumbits +
+ RTREE_BITS_PER_LEVEL;
+ }
+ /* Leaf level. */
+ if (height > 1) {
+ rtree->levels[height-1].subtree = NULL;
+ rtree->levels[height-1].bits = bits_in_leaf;
+ rtree->levels[height-1].cumbits = bits;
+ }
+
+ /* Compute lookup table to be used by rtree_start_level(). */
+ for (i = 0; i < RTREE_HEIGHT_MAX; i++) {
+ rtree->start_level[i] = hmin(RTREE_HEIGHT_MAX - 1 - i, height -
+ 1);
+ }
+
+ return (false);
+}
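Note on rtree_new(): the key is split into fixed-width chunks, one per tree level, with any remainder assigned to the leaf level so that the per-level bit counts sum exactly to the key width. As an illustrative example (the real constants come from RTREE_BITS_PER_LEVEL and the pointer size), 48-bit keys at 4 bits per level give bits_in_leaf = 4 and height = 1 + (48 - 4) / 4 = 12, with cumbits reaching 48 at the leaf. The same sizing rule as a standalone helper:

    #include <assert.h>

    /* Tree height for `bits`-bit keys with `bits_per_level` bits per level. */
    static unsigned
    rtree_height_for(unsigned bits, unsigned bits_per_level)
    {
        unsigned bits_in_leaf, height;

        bits_in_leaf = (bits % bits_per_level) == 0 ? bits_per_level :
            (bits % bits_per_level);
        if (bits > bits_in_leaf) {
            height = 1 + (bits - bits_in_leaf) / bits_per_level;
            /* Defensive, mirroring the code above; never taken in practice. */
            if ((height - 1) * bits_per_level + bits_in_leaf != bits)
                height++;
        } else
            height = 1;
        assert((height - 1) * bits_per_level + bits_in_leaf == bits);
        return (height);
    }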
+
+static void
+rtree_delete_subtree(rtree_t *rtree, rtree_node_elm_t *node, unsigned level)
+{
+
+ if (level + 1 < rtree->height) {
+ size_t nchildren, i;
+
+ nchildren = ZU(1) << rtree->levels[level].bits;
+ for (i = 0; i < nchildren; i++) {
+ rtree_node_elm_t *child = node[i].child;
+ if (child != NULL)
+ rtree_delete_subtree(rtree, child, level + 1);
+ }
+ }
+ rtree->dalloc(node);
+}
+
+void
+rtree_delete(rtree_t *rtree)
+{
+ unsigned i;
+
+ for (i = 0; i < rtree->height; i++) {
+ rtree_node_elm_t *subtree = rtree->levels[i].subtree;
+ if (subtree != NULL)
+ rtree_delete_subtree(rtree, subtree, i);
+ }
+}
+
+static rtree_node_elm_t *
+rtree_node_init(rtree_t *rtree, unsigned level, rtree_node_elm_t **elmp)
+{
+ rtree_node_elm_t *node;
+
+ if (atomic_cas_p((void **)elmp, NULL, RTREE_NODE_INITIALIZING)) {
+ spin_t spinner;
+
+ /*
+ * Another thread is already in the process of initializing.
+ * Spin-wait until initialization is complete.
+ */
+ spin_init(&spinner);
+ do {
+ spin_adaptive(&spinner);
+ node = atomic_read_p((void **)elmp);
+ } while (node == RTREE_NODE_INITIALIZING);
+ } else {
+ node = rtree->alloc(ZU(1) << rtree->levels[level].bits);
+ if (node == NULL)
+ return (NULL);
+ atomic_write_p((void **)elmp, node);
+ }
+
+ return (node);
+}
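Note on rtree_node_init(): this is a lock-free initialize-once pattern. The first thread to CAS the slot from NULL to a sentinel allocates and publishes the node; any thread that loses the race spins until the sentinel is replaced. A standalone sketch of the pattern using C11 <stdatomic.h> and a plain busy-wait in place of jemalloc's atomic_cas_p()/spin_adaptive() (the create callback and sentinel value are placeholders):

    #include <stdatomic.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Sentinel meaning "another thread is initializing this slot". */
    #define INITIALIZING ((void *)(uintptr_t)1)

    /* Return the object stored in *slot, creating it at most once. */
    static void *
    init_once(_Atomic(void *) *slot, void *(*create)(void))
    {
        void *expected = NULL;

        if (atomic_compare_exchange_strong(slot, &expected, INITIALIZING)) {
            /* This thread won the race: create and publish. */
            void *obj = create();
            /* Publish even on failure so waiters cannot spin forever. */
            atomic_store(slot, obj);
            return (obj);
        }
        if (expected != INITIALIZING)
            return (expected);  /* already initialized: fast path */
        /* Lost the race: spin until the winner publishes a value. */
        for (;;) {
            void *obj = atomic_load(slot);
            if (obj != INITIALIZING)
                return (obj);
        }
    }

One difference worth noting: the code above returns NULL on allocation failure without clearing the sentinel, whereas the sketch publishes NULL so that spinning threads always make progress.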
+
+rtree_node_elm_t *
+rtree_subtree_read_hard(rtree_t *rtree, unsigned level)
+{
+
+ return (rtree_node_init(rtree, level, &rtree->levels[level].subtree));
+}
+
+rtree_node_elm_t *
+rtree_child_read_hard(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level)
+{
+
+ return (rtree_node_init(rtree, level+1, &elm->child));
+}
diff --git a/deps/jemalloc/src/spin.c b/deps/jemalloc/src/spin.c
new file mode 100644
index 0000000..5242d95
--- /dev/null
+++ b/deps/jemalloc/src/spin.c
@@ -0,0 +1,2 @@
+#define JEMALLOC_SPIN_C_
+#include "jemalloc/internal/jemalloc_internal.h"
diff --git a/deps/jemalloc/src/stats.c b/deps/jemalloc/src/stats.c
new file mode 100644
index 0000000..1360f3b
--- /dev/null
+++ b/deps/jemalloc/src/stats.c
@@ -0,0 +1,1154 @@
+#define JEMALLOC_STATS_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+#define CTL_GET(n, v, t) do { \
+ size_t sz = sizeof(t); \
+ xmallctl(n, (void *)v, &sz, NULL, 0); \
+} while (0)
+
+#define CTL_M2_GET(n, i, v, t) do { \
+ size_t mib[6]; \
+ size_t miblen = sizeof(mib) / sizeof(size_t); \
+ size_t sz = sizeof(t); \
+ xmallctlnametomib(n, mib, &miblen); \
+ mib[2] = (i); \
+ xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0); \
+} while (0)
+
+#define CTL_M2_M4_GET(n, i, j, v, t) do { \
+ size_t mib[6]; \
+ size_t miblen = sizeof(mib) / sizeof(size_t); \
+ size_t sz = sizeof(t); \
+ xmallctlnametomib(n, mib, &miblen); \
+ mib[2] = (i); \
+ mib[4] = (j); \
+ xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0); \
+} while (0)
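Note on the CTL_* macros: they wrap the mallctl interface. CTL_GET() reads a control by name, while CTL_M2_GET() and CTL_M2_M4_GET() first translate the dotted name into a MIB array and then patch arena/bin indices into positions 2 and 4 before issuing the query, so one name lookup can be reused for every index; the x-prefixed wrappers are internal variants that abort on error. A rough public-API equivalent of CTL_M2_GET("stats.arenas.0.pactive", i, &v, size_t), using the prefixed mallctl functions this vendored build exposes (error handling omitted; header path depends on the build):

    #include <stddef.h>
    #include "jemalloc/jemalloc.h"  /* generated header; path depends on build */

    static size_t
    arena_pactive(unsigned arena_ind)
    {
        size_t mib[6];
        size_t miblen = sizeof(mib) / sizeof(size_t);
        size_t v, sz = sizeof(v);

        /* Resolve the name once; the literal "0" is a placeholder index. */
        je_mallctlnametomib("stats.arenas.0.pactive", mib, &miblen);
        mib[2] = (size_t)arena_ind;  /* patch in the real arena index */
        je_mallctlbymib(mib, miblen, (void *)&v, &sz, NULL, 0);
        return (v);
    }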
+
+/******************************************************************************/
+/* Data. */
+
+bool opt_stats_print = false;
+
+size_t stats_cactive = 0;
+
+/******************************************************************************/
+
+static void
+stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
+ bool json, bool large, bool huge, unsigned i)
+{
+ size_t page;
+ bool config_tcache, in_gap, in_gap_prev;
+ unsigned nbins, j;
+
+ CTL_GET("arenas.page", &page, size_t);
+
+ CTL_GET("arenas.nbins", &nbins, unsigned);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"bins\": [\n");
+ } else {
+ CTL_GET("config.tcache", &config_tcache, bool);
+ if (config_tcache) {
+ malloc_cprintf(write_cb, cbopaque,
+ "bins: size ind allocated nmalloc"
+ " ndalloc nrequests curregs"
+ " curruns regs pgs util nfills"
+ " nflushes newruns reruns\n");
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "bins: size ind allocated nmalloc"
+ " ndalloc nrequests curregs"
+ " curruns regs pgs util newruns"
+ " reruns\n");
+ }
+ }
+ for (j = 0, in_gap = false; j < nbins; j++) {
+ uint64_t nruns;
+ size_t reg_size, run_size, curregs;
+ size_t curruns;
+ uint32_t nregs;
+ uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes;
+ uint64_t nreruns;
+
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.nruns", i, j, &nruns,
+ uint64_t);
+ in_gap_prev = in_gap;
+ in_gap = (nruns == 0);
+
+ if (!json && in_gap_prev && !in_gap) {
+ malloc_cprintf(write_cb, cbopaque,
+ " ---\n");
+ }
+
+ CTL_M2_GET("arenas.bin.0.size", j, &reg_size, size_t);
+ CTL_M2_GET("arenas.bin.0.nregs", j, &nregs, uint32_t);
+ CTL_M2_GET("arenas.bin.0.run_size", j, &run_size, size_t);
+
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.nmalloc", i, j, &nmalloc,
+ uint64_t);
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.ndalloc", i, j, &ndalloc,
+ uint64_t);
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.curregs", i, j, &curregs,
+ size_t);
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.nrequests", i, j,
+ &nrequests, uint64_t);
+ if (config_tcache) {
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.nfills", i, j,
+ &nfills, uint64_t);
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.nflushes", i, j,
+ &nflushes, uint64_t);
+ }
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.nreruns", i, j, &nreruns,
+ uint64_t);
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.curruns", i, j, &curruns,
+ size_t);
+
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t{\n"
+ "\t\t\t\t\t\t\"nmalloc\": %"FMTu64",\n"
+ "\t\t\t\t\t\t\"ndalloc\": %"FMTu64",\n"
+ "\t\t\t\t\t\t\"curregs\": %zu,\n"
+ "\t\t\t\t\t\t\"nrequests\": %"FMTu64",\n",
+ nmalloc,
+ ndalloc,
+ curregs,
+ nrequests);
+ if (config_tcache) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t\t\"nfills\": %"FMTu64",\n"
+ "\t\t\t\t\t\t\"nflushes\": %"FMTu64",\n",
+ nfills,
+ nflushes);
+ }
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t\t\"nreruns\": %"FMTu64",\n"
+ "\t\t\t\t\t\t\"curruns\": %zu\n"
+ "\t\t\t\t\t}%s\n",
+ nreruns,
+ curruns,
+ (j + 1 < nbins) ? "," : "");
+ } else if (!in_gap) {
+ size_t availregs, milli;
+ char util[6]; /* "x.yyy". */
+
+ availregs = nregs * curruns;
+ milli = (availregs != 0) ? (1000 * curregs) / availregs
+ : 1000;
+ assert(milli <= 1000);
+ if (milli < 10) {
+ malloc_snprintf(util, sizeof(util),
+ "0.00%zu", milli);
+ } else if (milli < 100) {
+ malloc_snprintf(util, sizeof(util), "0.0%zu",
+ milli);
+ } else if (milli < 1000) {
+ malloc_snprintf(util, sizeof(util), "0.%zu",
+ milli);
+ } else
+ malloc_snprintf(util, sizeof(util), "1");
+
+ if (config_tcache) {
+ malloc_cprintf(write_cb, cbopaque,
+ "%20zu %3u %12zu %12"FMTu64
+ " %12"FMTu64" %12"FMTu64" %12zu"
+ " %12zu %4u %3zu %-5s %12"FMTu64
+ " %12"FMTu64" %12"FMTu64" %12"FMTu64"\n",
+ reg_size, j, curregs * reg_size, nmalloc,
+ ndalloc, nrequests, curregs, curruns, nregs,
+ run_size / page, util, nfills, nflushes,
+ nruns, nreruns);
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "%20zu %3u %12zu %12"FMTu64
+ " %12"FMTu64" %12"FMTu64" %12zu"
+ " %12zu %4u %3zu %-5s %12"FMTu64
+ " %12"FMTu64"\n",
+ reg_size, j, curregs * reg_size, nmalloc,
+ ndalloc, nrequests, curregs, curruns, nregs,
+ run_size / page, util, nruns, nreruns);
+ }
+ }
+ }
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t]%s\n", (large || huge) ? "," : "");
+ } else {
+ if (in_gap) {
+ malloc_cprintf(write_cb, cbopaque,
+ " ---\n");
+ }
+ }
+}
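Note on the utilization column: the non-JSON output above prints curregs/availregs with three decimal places, computed in integer "milli" units to avoid floating point; for example curregs = 511 and availregs = 512 gives milli = 1000 * 511 / 512 = 998, printed as "0.998". The formatting step as a standalone helper (hypothetical name):

    #include <assert.h>
    #include <stdio.h>

    /* Format curregs/availregs as "x.yyy" without floating point. */
    static void
    format_util(char *buf, size_t buflen, size_t curregs, size_t availregs)
    {
        size_t milli = (availregs != 0) ? (1000 * curregs) / availregs : 1000;

        assert(milli <= 1000);
        if (milli < 10)
            snprintf(buf, buflen, "0.00%zu", milli);
        else if (milli < 100)
            snprintf(buf, buflen, "0.0%zu", milli);
        else if (milli < 1000)
            snprintf(buf, buflen, "0.%zu", milli);
        else
            snprintf(buf, buflen, "1");
    }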
+
+static void
+stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque,
+ bool json, bool huge, unsigned i)
+{
+ unsigned nbins, nlruns, j;
+ bool in_gap, in_gap_prev;
+
+ CTL_GET("arenas.nbins", &nbins, unsigned);
+ CTL_GET("arenas.nlruns", &nlruns, unsigned);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"lruns\": [\n");
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "large: size ind allocated nmalloc"
+ " ndalloc nrequests curruns\n");
+ }
+ for (j = 0, in_gap = false; j < nlruns; j++) {
+ uint64_t nmalloc, ndalloc, nrequests;
+ size_t run_size, curruns;
+
+ CTL_M2_M4_GET("stats.arenas.0.lruns.0.nmalloc", i, j, &nmalloc,
+ uint64_t);
+ CTL_M2_M4_GET("stats.arenas.0.lruns.0.ndalloc", i, j, &ndalloc,
+ uint64_t);
+ CTL_M2_M4_GET("stats.arenas.0.lruns.0.nrequests", i, j,
+ &nrequests, uint64_t);
+ in_gap_prev = in_gap;
+ in_gap = (nrequests == 0);
+
+ if (!json && in_gap_prev && !in_gap) {
+ malloc_cprintf(write_cb, cbopaque,
+ " ---\n");
+ }
+
+ CTL_M2_GET("arenas.lrun.0.size", j, &run_size, size_t);
+ CTL_M2_M4_GET("stats.arenas.0.lruns.0.curruns", i, j, &curruns,
+ size_t);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t{\n"
+ "\t\t\t\t\t\t\"curruns\": %zu\n"
+ "\t\t\t\t\t}%s\n",
+ curruns,
+ (j + 1 < nlruns) ? "," : "");
+ } else if (!in_gap) {
+ malloc_cprintf(write_cb, cbopaque,
+ "%20zu %3u %12zu %12"FMTu64" %12"FMTu64
+ " %12"FMTu64" %12zu\n",
+ run_size, nbins + j, curruns * run_size, nmalloc,
+ ndalloc, nrequests, curruns);
+ }
+ }
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t]%s\n", huge ? "," : "");
+ } else {
+ if (in_gap) {
+ malloc_cprintf(write_cb, cbopaque,
+ " ---\n");
+ }
+ }
+}
+
+static void
+stats_arena_hchunks_print(void (*write_cb)(void *, const char *),
+ void *cbopaque, bool json, unsigned i)
+{
+ unsigned nbins, nlruns, nhchunks, j;
+ bool in_gap, in_gap_prev;
+
+ CTL_GET("arenas.nbins", &nbins, unsigned);
+ CTL_GET("arenas.nlruns", &nlruns, unsigned);
+ CTL_GET("arenas.nhchunks", &nhchunks, unsigned);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"hchunks\": [\n");
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "huge: size ind allocated nmalloc"
+ " ndalloc nrequests curhchunks\n");
+ }
+ for (j = 0, in_gap = false; j < nhchunks; j++) {
+ uint64_t nmalloc, ndalloc, nrequests;
+ size_t hchunk_size, curhchunks;
+
+ CTL_M2_M4_GET("stats.arenas.0.hchunks.0.nmalloc", i, j,
+ &nmalloc, uint64_t);
+ CTL_M2_M4_GET("stats.arenas.0.hchunks.0.ndalloc", i, j,
+ &ndalloc, uint64_t);
+ CTL_M2_M4_GET("stats.arenas.0.hchunks.0.nrequests", i, j,
+ &nrequests, uint64_t);
+ in_gap_prev = in_gap;
+ in_gap = (nrequests == 0);
+
+ if (!json && in_gap_prev && !in_gap) {
+ malloc_cprintf(write_cb, cbopaque,
+ " ---\n");
+ }
+
+ CTL_M2_GET("arenas.hchunk.0.size", j, &hchunk_size, size_t);
+ CTL_M2_M4_GET("stats.arenas.0.hchunks.0.curhchunks", i, j,
+ &curhchunks, size_t);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t{\n"
+ "\t\t\t\t\t\t\"curhchunks\": %zu\n"
+ "\t\t\t\t\t}%s\n",
+ curhchunks,
+ (j + 1 < nhchunks) ? "," : "");
+ } else if (!in_gap) {
+ malloc_cprintf(write_cb, cbopaque,
+ "%20zu %3u %12zu %12"FMTu64" %12"FMTu64
+ " %12"FMTu64" %12zu\n",
+ hchunk_size, nbins + nlruns + j,
+ curhchunks * hchunk_size, nmalloc, ndalloc,
+ nrequests, curhchunks);
+ }
+ }
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t]\n");
+ } else {
+ if (in_gap) {
+ malloc_cprintf(write_cb, cbopaque,
+ " ---\n");
+ }
+ }
+}
+
+static void
+stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
+ bool json, unsigned i, bool bins, bool large, bool huge)
+{
+ unsigned nthreads;
+ const char *dss;
+ ssize_t lg_dirty_mult, decay_time;
+ size_t page, pactive, pdirty, mapped, retained;
+ size_t metadata_mapped, metadata_allocated;
+ uint64_t npurge, nmadvise, purged;
+ size_t small_allocated;
+ uint64_t small_nmalloc, small_ndalloc, small_nrequests;
+ size_t large_allocated;
+ uint64_t large_nmalloc, large_ndalloc, large_nrequests;
+ size_t huge_allocated;
+ uint64_t huge_nmalloc, huge_ndalloc, huge_nrequests;
+
+ CTL_GET("arenas.page", &page, size_t);
+
+ CTL_M2_GET("stats.arenas.0.nthreads", i, &nthreads, unsigned);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"nthreads\": %u,\n", nthreads);
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "assigned threads: %u\n", nthreads);
+ }
+
+ CTL_M2_GET("stats.arenas.0.dss", i, &dss, const char *);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"dss\": \"%s\",\n", dss);
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "dss allocation precedence: %s\n", dss);
+ }
+
+ CTL_M2_GET("stats.arenas.0.lg_dirty_mult", i, &lg_dirty_mult, ssize_t);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"lg_dirty_mult\": %zd,\n", lg_dirty_mult);
+ } else {
+ if (opt_purge == purge_mode_ratio) {
+ if (lg_dirty_mult >= 0) {
+ malloc_cprintf(write_cb, cbopaque,
+ "min active:dirty page ratio: %u:1\n",
+ (1U << lg_dirty_mult));
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "min active:dirty page ratio: N/A\n");
+ }
+ }
+ }
+
+ CTL_M2_GET("stats.arenas.0.decay_time", i, &decay_time, ssize_t);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"decay_time\": %zd,\n", decay_time);
+ } else {
+ if (opt_purge == purge_mode_decay) {
+ if (decay_time >= 0) {
+ malloc_cprintf(write_cb, cbopaque,
+ "decay time: %zd\n", decay_time);
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "decay time: N/A\n");
+ }
+ }
+ }
+
+ CTL_M2_GET("stats.arenas.0.pactive", i, &pactive, size_t);
+ CTL_M2_GET("stats.arenas.0.pdirty", i, &pdirty, size_t);
+ CTL_M2_GET("stats.arenas.0.npurge", i, &npurge, uint64_t);
+ CTL_M2_GET("stats.arenas.0.nmadvise", i, &nmadvise, uint64_t);
+ CTL_M2_GET("stats.arenas.0.purged", i, &purged, uint64_t);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"pactive\": %zu,\n", pactive);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"pdirty\": %zu,\n", pdirty);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"npurge\": %"FMTu64",\n", npurge);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"nmadvise\": %"FMTu64",\n", nmadvise);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"purged\": %"FMTu64",\n", purged);
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "purging: dirty: %zu, sweeps: %"FMTu64", madvises: %"FMTu64
+ ", purged: %"FMTu64"\n", pdirty, npurge, nmadvise, purged);
+ }
+
+ CTL_M2_GET("stats.arenas.0.small.allocated", i, &small_allocated,
+ size_t);
+ CTL_M2_GET("stats.arenas.0.small.nmalloc", i, &small_nmalloc, uint64_t);
+ CTL_M2_GET("stats.arenas.0.small.ndalloc", i, &small_ndalloc, uint64_t);
+ CTL_M2_GET("stats.arenas.0.small.nrequests", i, &small_nrequests,
+ uint64_t);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"small\": {\n");
+
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t\"allocated\": %zu,\n", small_allocated);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t\"nmalloc\": %"FMTu64",\n", small_nmalloc);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t\"ndalloc\": %"FMTu64",\n", small_ndalloc);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t\"nrequests\": %"FMTu64"\n", small_nrequests);
+
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t},\n");
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ " allocated nmalloc"
+ " ndalloc nrequests\n");
+ malloc_cprintf(write_cb, cbopaque,
+ "small: %12zu %12"FMTu64" %12"FMTu64
+ " %12"FMTu64"\n",
+ small_allocated, small_nmalloc, small_ndalloc,
+ small_nrequests);
+ }
+
+ CTL_M2_GET("stats.arenas.0.large.allocated", i, &large_allocated,
+ size_t);
+ CTL_M2_GET("stats.arenas.0.large.nmalloc", i, &large_nmalloc, uint64_t);
+ CTL_M2_GET("stats.arenas.0.large.ndalloc", i, &large_ndalloc, uint64_t);
+ CTL_M2_GET("stats.arenas.0.large.nrequests", i, &large_nrequests,
+ uint64_t);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"large\": {\n");
+
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t\"allocated\": %zu,\n", large_allocated);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t\"nmalloc\": %"FMTu64",\n", large_nmalloc);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t\"ndalloc\": %"FMTu64",\n", large_ndalloc);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t\"nrequests\": %"FMTu64"\n", large_nrequests);
+
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t},\n");
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "large: %12zu %12"FMTu64" %12"FMTu64
+ " %12"FMTu64"\n",
+ large_allocated, large_nmalloc, large_ndalloc,
+ large_nrequests);
+ }
+
+ CTL_M2_GET("stats.arenas.0.huge.allocated", i, &huge_allocated, size_t);
+ CTL_M2_GET("stats.arenas.0.huge.nmalloc", i, &huge_nmalloc, uint64_t);
+ CTL_M2_GET("stats.arenas.0.huge.ndalloc", i, &huge_ndalloc, uint64_t);
+ CTL_M2_GET("stats.arenas.0.huge.nrequests", i, &huge_nrequests,
+ uint64_t);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"huge\": {\n");
+
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t\"allocated\": %zu,\n", huge_allocated);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t\"nmalloc\": %"FMTu64",\n", huge_nmalloc);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t\"ndalloc\": %"FMTu64",\n", huge_ndalloc);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t\"nrequests\": %"FMTu64"\n", huge_nrequests);
+
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t},\n");
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "huge: %12zu %12"FMTu64" %12"FMTu64
+ " %12"FMTu64"\n",
+ huge_allocated, huge_nmalloc, huge_ndalloc, huge_nrequests);
+ malloc_cprintf(write_cb, cbopaque,
+ "total: %12zu %12"FMTu64" %12"FMTu64
+ " %12"FMTu64"\n",
+ small_allocated + large_allocated + huge_allocated,
+ small_nmalloc + large_nmalloc + huge_nmalloc,
+ small_ndalloc + large_ndalloc + huge_ndalloc,
+ small_nrequests + large_nrequests + huge_nrequests);
+ }
+ if (!json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "active: %12zu\n", pactive * page);
+ }
+
+ CTL_M2_GET("stats.arenas.0.mapped", i, &mapped, size_t);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"mapped\": %zu,\n", mapped);
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "mapped: %12zu\n", mapped);
+ }
+
+ CTL_M2_GET("stats.arenas.0.retained", i, &retained, size_t);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"retained\": %zu,\n", retained);
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "retained: %12zu\n", retained);
+ }
+
+ CTL_M2_GET("stats.arenas.0.metadata.mapped", i, &metadata_mapped,
+ size_t);
+ CTL_M2_GET("stats.arenas.0.metadata.allocated", i, &metadata_allocated,
+ size_t);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"metadata\": {\n");
+
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t\"mapped\": %zu,\n", metadata_mapped);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t\"allocated\": %zu\n", metadata_allocated);
+
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t},\n");
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "metadata: mapped: %zu, allocated: %zu\n",
+ metadata_mapped, metadata_allocated);
+ }
+
+ if (bins) {
+ stats_arena_bins_print(write_cb, cbopaque, json, large, huge,
+ i);
+ }
+ if (large)
+ stats_arena_lruns_print(write_cb, cbopaque, json, huge, i);
+ if (huge)
+ stats_arena_hchunks_print(write_cb, cbopaque, json, i);
+}
+
+static void
+stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque,
+ bool json, bool merged, bool unmerged)
+{
+ const char *cpv;
+ bool bv;
+ unsigned uv;
+ uint32_t u32v;
+ uint64_t u64v;
+ ssize_t ssv;
+ size_t sv, bsz, usz, ssz, sssz, cpsz;
+
+ bsz = sizeof(bool);
+ usz = sizeof(unsigned);
+ ssz = sizeof(size_t);
+ sssz = sizeof(ssize_t);
+ cpsz = sizeof(const char *);
+
+ CTL_GET("version", &cpv, const char *);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\"version\": \"%s\",\n", cpv);
+ } else
+ malloc_cprintf(write_cb, cbopaque, "Version: %s\n", cpv);
+
+ /* config. */
+#define CONFIG_WRITE_BOOL_JSON(n, c) \
+ if (json) { \
+ CTL_GET("config."#n, &bv, bool); \
+ malloc_cprintf(write_cb, cbopaque, \
+ "\t\t\t\""#n"\": %s%s\n", bv ? "true" : "false", \
+ (c)); \
+ }
+
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\"config\": {\n");
+ }
+
+ CONFIG_WRITE_BOOL_JSON(cache_oblivious, ",")
+
+ CTL_GET("config.debug", &bv, bool);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"debug\": %s,\n", bv ? "true" : "false");
+ } else {
+ malloc_cprintf(write_cb, cbopaque, "Assertions %s\n",
+ bv ? "enabled" : "disabled");
+ }
+
+ CONFIG_WRITE_BOOL_JSON(fill, ",")
+ CONFIG_WRITE_BOOL_JSON(lazy_lock, ",")
+
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"malloc_conf\": \"%s\",\n",
+ config_malloc_conf);
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "config.malloc_conf: \"%s\"\n", config_malloc_conf);
+ }
+
+ CONFIG_WRITE_BOOL_JSON(munmap, ",")
+ CONFIG_WRITE_BOOL_JSON(prof, ",")
+ CONFIG_WRITE_BOOL_JSON(prof_libgcc, ",")
+ CONFIG_WRITE_BOOL_JSON(prof_libunwind, ",")
+ CONFIG_WRITE_BOOL_JSON(stats, ",")
+ CONFIG_WRITE_BOOL_JSON(tcache, ",")
+ CONFIG_WRITE_BOOL_JSON(tls, ",")
+ CONFIG_WRITE_BOOL_JSON(utrace, ",")
+ CONFIG_WRITE_BOOL_JSON(valgrind, ",")
+ CONFIG_WRITE_BOOL_JSON(xmalloc, "")
+
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t},\n");
+ }
+#undef CONFIG_WRITE_BOOL_JSON
+
+ /* opt. */
+#define OPT_WRITE_BOOL(n, c) \
+ if (je_mallctl("opt."#n, (void *)&bv, &bsz, NULL, 0) == 0) { \
+ if (json) { \
+ malloc_cprintf(write_cb, cbopaque, \
+ "\t\t\t\""#n"\": %s%s\n", bv ? "true" : \
+ "false", (c)); \
+ } else { \
+ malloc_cprintf(write_cb, cbopaque, \
+ " opt."#n": %s\n", bv ? "true" : "false"); \
+ } \
+ }
+#define OPT_WRITE_BOOL_MUTABLE(n, m, c) { \
+ bool bv2; \
+ if (je_mallctl("opt."#n, (void *)&bv, &bsz, NULL, 0) == 0 && \
+ je_mallctl(#m, (void *)&bv2, &bsz, NULL, 0) == 0) { \
+ if (json) { \
+ malloc_cprintf(write_cb, cbopaque, \
+ "\t\t\t\""#n"\": %s%s\n", bv ? "true" : \
+ "false", (c)); \
+ } else { \
+ malloc_cprintf(write_cb, cbopaque, \
+ " opt."#n": %s ("#m": %s)\n", bv ? "true" \
+ : "false", bv2 ? "true" : "false"); \
+ } \
+ } \
+}
+#define OPT_WRITE_UNSIGNED(n, c) \
+ if (je_mallctl("opt."#n, (void *)&uv, &usz, NULL, 0) == 0) { \
+ if (json) { \
+ malloc_cprintf(write_cb, cbopaque, \
+ "\t\t\t\""#n"\": %u%s\n", uv, (c)); \
+ } else { \
+ malloc_cprintf(write_cb, cbopaque, \
+ " opt."#n": %u\n", uv); \
+ } \
+ }
+#define OPT_WRITE_SIZE_T(n, c) \
+ if (je_mallctl("opt."#n, (void *)&sv, &ssz, NULL, 0) == 0) { \
+ if (json) { \
+ malloc_cprintf(write_cb, cbopaque, \
+ "\t\t\t\""#n"\": %zu%s\n", sv, (c)); \
+ } else { \
+ malloc_cprintf(write_cb, cbopaque, \
+ " opt."#n": %zu\n", sv); \
+ } \
+ }
+#define OPT_WRITE_SSIZE_T(n, c) \
+ if (je_mallctl("opt."#n, (void *)&ssv, &sssz, NULL, 0) == 0) { \
+ if (json) { \
+ malloc_cprintf(write_cb, cbopaque, \
+ "\t\t\t\""#n"\": %zd%s\n", ssv, (c)); \
+ } else { \
+ malloc_cprintf(write_cb, cbopaque, \
+ " opt."#n": %zd\n", ssv); \
+ } \
+ }
+#define OPT_WRITE_SSIZE_T_MUTABLE(n, m, c) { \
+ ssize_t ssv2; \
+ if (je_mallctl("opt."#n, (void *)&ssv, &sssz, NULL, 0) == 0 && \
+ je_mallctl(#m, (void *)&ssv2, &sssz, NULL, 0) == 0) { \
+ if (json) { \
+ malloc_cprintf(write_cb, cbopaque, \
+ "\t\t\t\""#n"\": %zd%s\n", ssv, (c)); \
+ } else { \
+ malloc_cprintf(write_cb, cbopaque, \
+ " opt."#n": %zd ("#m": %zd)\n", \
+ ssv, ssv2); \
+ } \
+ } \
+}
+#define OPT_WRITE_CHAR_P(n, c) \
+ if (je_mallctl("opt."#n, (void *)&cpv, &cpsz, NULL, 0) == 0) { \
+ if (json) { \
+ malloc_cprintf(write_cb, cbopaque, \
+ "\t\t\t\""#n"\": \"%s\"%s\n", cpv, (c)); \
+ } else { \
+ malloc_cprintf(write_cb, cbopaque, \
+ " opt."#n": \"%s\"\n", cpv); \
+ } \
+ }
+
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\"opt\": {\n");
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "Run-time option settings:\n");
+ }
+ OPT_WRITE_BOOL(abort, ",")
+ OPT_WRITE_SIZE_T(lg_chunk, ",")
+ OPT_WRITE_CHAR_P(dss, ",")
+ OPT_WRITE_UNSIGNED(narenas, ",")
+ OPT_WRITE_CHAR_P(purge, ",")
+ if (json || opt_purge == purge_mode_ratio) {
+ OPT_WRITE_SSIZE_T_MUTABLE(lg_dirty_mult,
+ arenas.lg_dirty_mult, ",")
+ }
+ if (json || opt_purge == purge_mode_decay) {
+ OPT_WRITE_SSIZE_T_MUTABLE(decay_time, arenas.decay_time, ",")
+ }
+ OPT_WRITE_CHAR_P(junk, ",")
+ OPT_WRITE_SIZE_T(quarantine, ",")
+ OPT_WRITE_BOOL(redzone, ",")
+ OPT_WRITE_BOOL(zero, ",")
+ OPT_WRITE_BOOL(utrace, ",")
+ OPT_WRITE_BOOL(xmalloc, ",")
+ OPT_WRITE_BOOL(tcache, ",")
+ OPT_WRITE_SSIZE_T(lg_tcache_max, ",")
+ OPT_WRITE_BOOL(prof, ",")
+ OPT_WRITE_CHAR_P(prof_prefix, ",")
+ OPT_WRITE_BOOL_MUTABLE(prof_active, prof.active, ",")
+ OPT_WRITE_BOOL_MUTABLE(prof_thread_active_init, prof.thread_active_init,
+ ",")
+ OPT_WRITE_SSIZE_T_MUTABLE(lg_prof_sample, prof.lg_sample, ",")
+ OPT_WRITE_BOOL(prof_accum, ",")
+ OPT_WRITE_SSIZE_T(lg_prof_interval, ",")
+ OPT_WRITE_BOOL(prof_gdump, ",")
+ OPT_WRITE_BOOL(prof_final, ",")
+ OPT_WRITE_BOOL(prof_leak, ",")
+ /*
+ * stats_print is always emitted, so as long as stats_print comes last
+ * it's safe to unconditionally omit the comma here (rather than having
+ * to conditionally omit it elsewhere depending on configuration).
+ */
+ OPT_WRITE_BOOL(stats_print, "")
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t},\n");
+ }
+
+#undef OPT_WRITE_BOOL
+#undef OPT_WRITE_BOOL_MUTABLE
+#undef OPT_WRITE_UNSIGNED
+#undef OPT_WRITE_SIZE_T
+#undef OPT_WRITE_SSIZE_T
+#undef OPT_WRITE_SSIZE_T_MUTABLE
+#undef OPT_WRITE_CHAR_P
+
+ /* arenas. */
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\"arenas\": {\n");
+ }
+
+ CTL_GET("arenas.narenas", &uv, unsigned);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"narenas\": %u,\n", uv);
+ } else
+ malloc_cprintf(write_cb, cbopaque, "Arenas: %u\n", uv);
+
+ CTL_GET("arenas.lg_dirty_mult", &ssv, ssize_t);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"lg_dirty_mult\": %zd,\n", ssv);
+ } else if (opt_purge == purge_mode_ratio) {
+ if (ssv >= 0) {
+ malloc_cprintf(write_cb, cbopaque,
+ "Min active:dirty page ratio per arena: "
+ "%u:1\n", (1U << ssv));
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "Min active:dirty page ratio per arena: "
+ "N/A\n");
+ }
+ }
+ CTL_GET("arenas.decay_time", &ssv, ssize_t);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"decay_time\": %zd,\n", ssv);
+ } else if (opt_purge == purge_mode_decay) {
+ malloc_cprintf(write_cb, cbopaque,
+ "Unused dirty page decay time: %zd%s\n",
+ ssv, (ssv < 0) ? " (no decay)" : "");
+ }
+
+ CTL_GET("arenas.quantum", &sv, size_t);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"quantum\": %zu,\n", sv);
+ } else
+ malloc_cprintf(write_cb, cbopaque, "Quantum size: %zu\n", sv);
+
+ CTL_GET("arenas.page", &sv, size_t);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"page\": %zu,\n", sv);
+ } else
+ malloc_cprintf(write_cb, cbopaque, "Page size: %zu\n", sv);
+
+ if (je_mallctl("arenas.tcache_max", (void *)&sv, &ssz, NULL, 0) == 0) {
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"tcache_max\": %zu,\n", sv);
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "Maximum thread-cached size class: %zu\n", sv);
+ }
+ }
+
+ if (json) {
+ unsigned nbins, nlruns, nhchunks, i;
+
+ CTL_GET("arenas.nbins", &nbins, unsigned);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"nbins\": %u,\n", nbins);
+
+ CTL_GET("arenas.nhbins", &uv, unsigned);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"nhbins\": %u,\n", uv);
+
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"bin\": [\n");
+ for (i = 0; i < nbins; i++) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t{\n");
+
+ CTL_M2_GET("arenas.bin.0.size", i, &sv, size_t);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t\"size\": %zu,\n", sv);
+
+ CTL_M2_GET("arenas.bin.0.nregs", i, &u32v, uint32_t);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t\"nregs\": %"FMTu32",\n", u32v);
+
+ CTL_M2_GET("arenas.bin.0.run_size", i, &sv, size_t);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t\"run_size\": %zu\n", sv);
+
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t}%s\n", (i + 1 < nbins) ? "," : "");
+ }
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t],\n");
+
+ CTL_GET("arenas.nlruns", &nlruns, unsigned);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"nlruns\": %u,\n", nlruns);
+
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"lrun\": [\n");
+ for (i = 0; i < nlruns; i++) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t{\n");
+
+ CTL_M2_GET("arenas.lrun.0.size", i, &sv, size_t);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t\"size\": %zu\n", sv);
+
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t}%s\n", (i + 1 < nlruns) ? "," : "");
+ }
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t],\n");
+
+ CTL_GET("arenas.nhchunks", &nhchunks, unsigned);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"nhchunks\": %u,\n", nhchunks);
+
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"hchunk\": [\n");
+ for (i = 0; i < nhchunks; i++) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t{\n");
+
+ CTL_M2_GET("arenas.hchunk.0.size", i, &sv, size_t);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t\"size\": %zu\n", sv);
+
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t}%s\n", (i + 1 < nhchunks) ? "," : "");
+ }
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t]\n");
+
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t},\n");
+ }
+
+ /* prof. */
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\"prof\": {\n");
+
+ CTL_GET("prof.thread_active_init", &bv, bool);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"thread_active_init\": %s,\n", bv ? "true" :
+ "false");
+
+ CTL_GET("prof.active", &bv, bool);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"active\": %s,\n", bv ? "true" : "false");
+
+ CTL_GET("prof.gdump", &bv, bool);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"gdump\": %s,\n", bv ? "true" : "false");
+
+ CTL_GET("prof.interval", &u64v, uint64_t);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"interval\": %"FMTu64",\n", u64v);
+
+ CTL_GET("prof.lg_sample", &ssv, ssize_t);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"lg_sample\": %zd\n", ssv);
+
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t}%s\n", (config_stats || merged || unmerged) ? "," :
+ "");
+ }
+}
+
+static void
+stats_print_helper(void (*write_cb)(void *, const char *), void *cbopaque,
+ bool json, bool merged, bool unmerged, bool bins, bool large, bool huge)
+{
+ size_t *cactive;
+ size_t allocated, active, metadata, resident, mapped, retained;
+
+ CTL_GET("stats.cactive", &cactive, size_t *);
+ CTL_GET("stats.allocated", &allocated, size_t);
+ CTL_GET("stats.active", &active, size_t);
+ CTL_GET("stats.metadata", &metadata, size_t);
+ CTL_GET("stats.resident", &resident, size_t);
+ CTL_GET("stats.mapped", &mapped, size_t);
+ CTL_GET("stats.retained", &retained, size_t);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\"stats\": {\n");
+
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"cactive\": %zu,\n", atomic_read_z(cactive));
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"allocated\": %zu,\n", allocated);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"active\": %zu,\n", active);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"metadata\": %zu,\n", metadata);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"resident\": %zu,\n", resident);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"mapped\": %zu,\n", mapped);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"retained\": %zu\n", retained);
+
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t}%s\n", (merged || unmerged) ? "," : "");
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "Allocated: %zu, active: %zu, metadata: %zu,"
+ " resident: %zu, mapped: %zu, retained: %zu\n",
+ allocated, active, metadata, resident, mapped, retained);
+ malloc_cprintf(write_cb, cbopaque,
+ "Current active ceiling: %zu\n",
+ atomic_read_z(cactive));
+ }
+
+ if (merged || unmerged) {
+ unsigned narenas;
+
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\"stats.arenas\": {\n");
+ }
+
+ CTL_GET("arenas.narenas", &narenas, unsigned);
+ {
+ VARIABLE_ARRAY(bool, initialized, narenas);
+ size_t isz;
+ unsigned i, j, ninitialized;
+
+ isz = sizeof(bool) * narenas;
+ xmallctl("arenas.initialized", (void *)initialized,
+ &isz, NULL, 0);
+ for (i = ninitialized = 0; i < narenas; i++) {
+ if (initialized[i])
+ ninitialized++;
+ }
+
+ /* Merged stats. */
+ if (merged && (ninitialized > 1 || !unmerged)) {
+ /* Print merged arena stats. */
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"merged\": {\n");
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "\nMerged arenas stats:\n");
+ }
+ stats_arena_print(write_cb, cbopaque, json,
+ narenas, bins, large, huge);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t}%s\n", (ninitialized > 1) ?
+ "," : "");
+ }
+ }
+
+ /* Unmerged stats. */
+ for (i = j = 0; i < narenas; i++) {
+ if (initialized[i]) {
+ if (json) {
+ j++;
+ malloc_cprintf(write_cb,
+ cbopaque,
+ "\t\t\t\"%u\": {\n", i);
+ } else {
+ malloc_cprintf(write_cb,
+ cbopaque, "\narenas[%u]:\n",
+ i);
+ }
+ stats_arena_print(write_cb, cbopaque,
+ json, i, bins, large, huge);
+ if (json) {
+ malloc_cprintf(write_cb,
+ cbopaque,
+ "\t\t\t}%s\n", (j <
+ ninitialized) ? "," : "");
+ }
+ }
+ }
+ }
+
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t}\n");
+ }
+ }
+}
+
+void
+stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
+ const char *opts)
+{
+ int err;
+ uint64_t epoch;
+ size_t u64sz;
+ bool json = false;
+ bool general = true;
+ bool merged = true;
+ bool unmerged = true;
+ bool bins = true;
+ bool large = true;
+ bool huge = true;
+
+ /*
+ * Refresh stats, in case mallctl() was called by the application.
+ *
+ * Check for OOM here, since refreshing the ctl cache can trigger
+ * allocation. In practice, none of the subsequent mallctl()-related
+ * calls in this function will cause OOM if this one succeeds.
+ */
+ epoch = 1;
+ u64sz = sizeof(uint64_t);
+ err = je_mallctl("epoch", (void *)&epoch, &u64sz, (void *)&epoch,
+ sizeof(uint64_t));
+ if (err != 0) {
+ if (err == EAGAIN) {
+ malloc_write("<jemalloc>: Memory allocation failure in "
+ "mallctl(\"epoch\", ...)\n");
+ return;
+ }
+ malloc_write("<jemalloc>: Failure in mallctl(\"epoch\", "
+ "...)\n");
+ abort();
+ }
+
+ if (opts != NULL) {
+ unsigned i;
+
+ for (i = 0; opts[i] != '\0'; i++) {
+ switch (opts[i]) {
+ case 'J':
+ json = true;
+ break;
+ case 'g':
+ general = false;
+ break;
+ case 'm':
+ merged = false;
+ break;
+ case 'a':
+ unmerged = false;
+ break;
+ case 'b':
+ bins = false;
+ break;
+ case 'l':
+ large = false;
+ break;
+ case 'h':
+ huge = false;
+ break;
+ default:;
+ }
+ }
+ }
+
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "{\n"
+ "\t\"jemalloc\": {\n");
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "___ Begin jemalloc statistics ___\n");
+ }
+
+ if (general)
+ stats_general_print(write_cb, cbopaque, json, merged, unmerged);
+ if (config_stats) {
+ stats_print_helper(write_cb, cbopaque, json, merged, unmerged,
+ bins, large, huge);
+ }
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t}\n"
+ "}\n");
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "--- End jemalloc statistics ---\n");
+ }
+}
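Note on stats_print(): applications reach it through the public malloc_stats_print() entry point, and each character in opts disables one section ('g' general, 'm' merged, 'a' per-arena, 'b' bins, 'l' large, 'h' huge) except 'J', which selects JSON output. A hedged usage example; the je_ prefix and header path reflect how this vendored copy is typically built and may differ elsewhere:

    #include <stdio.h>
    #include "jemalloc/jemalloc.h"

    static void
    write_cb(void *opaque, const char *s)
    {
        fputs(s, (FILE *)opaque);  /* forward output chunks to a FILE * */
    }

    int
    main(void)
    {
        /* Full human-readable report via the default writer (stderr). */
        je_malloc_stats_print(NULL, NULL, NULL);
        /* JSON report, skipping the per-bin/large/huge tables, to stdout. */
        je_malloc_stats_print(write_cb, stdout, "Jblh");
        return (0);
    }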
diff --git a/deps/jemalloc/src/tcache.c b/deps/jemalloc/src/tcache.c
new file mode 100644
index 0000000..21540ff
--- /dev/null
+++ b/deps/jemalloc/src/tcache.c
@@ -0,0 +1,555 @@
+#define JEMALLOC_TCACHE_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+/* Data. */
+
+bool opt_tcache = true;
+ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;
+
+tcache_bin_info_t *tcache_bin_info;
+static unsigned stack_nelms; /* Total stack elms per tcache. */
+
+unsigned nhbins;
+size_t tcache_maxclass;
+
+tcaches_t *tcaches;
+
+/* Index of first element within tcaches that has never been used. */
+static unsigned tcaches_past;
+
+/* Head of singly linked list tracking available tcaches elements. */
+static tcaches_t *tcaches_avail;
+
+/******************************************************************************/
+
+size_t
+tcache_salloc(tsdn_t *tsdn, const void *ptr)
+{
+
+ return (arena_salloc(tsdn, ptr, false));
+}
+
+void
+tcache_event_hard(tsd_t *tsd, tcache_t *tcache)
+{
+ szind_t binind = tcache->next_gc_bin;
+ tcache_bin_t *tbin = &tcache->tbins[binind];
+ tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];
+
+ if (tbin->low_water > 0) {
+ /*
+ * Flush (ceiling) 3/4 of the objects below the low water mark.
+ */
+ if (binind < NBINS) {
+ tcache_bin_flush_small(tsd, tcache, tbin, binind,
+ tbin->ncached - tbin->low_water + (tbin->low_water
+ >> 2));
+ } else {
+ tcache_bin_flush_large(tsd, tbin, binind, tbin->ncached
+ - tbin->low_water + (tbin->low_water >> 2), tcache);
+ }
+ /*
+ * Reduce fill count by 2X. Limit lg_fill_div such that the
+ * fill count is always at least 1.
+ */
+ if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1)) >= 1)
+ tbin->lg_fill_div++;
+ } else if (tbin->low_water < 0) {
+ /*
+ * Increase fill count by 2X. Make sure lg_fill_div stays
+ * greater than 0.
+ */
+ if (tbin->lg_fill_div > 1)
+ tbin->lg_fill_div--;
+ }
+ tbin->low_water = tbin->ncached;
+
+ tcache->next_gc_bin++;
+ if (tcache->next_gc_bin == nhbins)
+ tcache->next_gc_bin = 0;
+}
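Note on tcache_event_hard(): when a bin's low-water mark shows unused objects, roughly three quarters of them (rounded up) are flushed and the refill size is halved by bumping lg_fill_div, while a negative low-water mark triggers the opposite adjustment and doubles the refill size. The number of objects kept is ncached - low_water + (low_water >> 2); a tiny standalone check of that arithmetic (illustrative values only):

    #include <assert.h>

    /* Objects to keep when flushing ~3/4 of the ones that sat idle. */
    static unsigned
    gc_keep(unsigned ncached, unsigned low_water)
    {
        return (ncached - low_water + (low_water >> 2));
    }

    int
    main(void)
    {
        assert(gc_keep(20, 10) == 12);  /* flush 8 == ceil(0.75 * 10) */
        assert(gc_keep(7, 7) == 1);     /* flush 6 == ceil(0.75 * 7)  */
        return (0);
    }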
+
+void *
+tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
+ tcache_bin_t *tbin, szind_t binind, bool *tcache_success)
+{
+ void *ret;
+
+ arena_tcache_fill_small(tsdn, arena, tbin, binind, config_prof ?
+ tcache->prof_accumbytes : 0);
+ if (config_prof)
+ tcache->prof_accumbytes = 0;
+ ret = tcache_alloc_easy(tbin, tcache_success);
+
+ return (ret);
+}
+
+void
+tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
+ szind_t binind, unsigned rem)
+{
+ arena_t *arena;
+ void *ptr;
+ unsigned i, nflush, ndeferred;
+ bool merged_stats = false;
+
+ assert(binind < NBINS);
+ assert(rem <= tbin->ncached);
+
+ arena = arena_choose(tsd, NULL);
+ assert(arena != NULL);
+ for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
+ /* Lock the arena bin associated with the first object. */
+ arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
+ *(tbin->avail - 1));
+ arena_t *bin_arena = extent_node_arena_get(&chunk->node);
+ arena_bin_t *bin = &bin_arena->bins[binind];
+
+ if (config_prof && bin_arena == arena) {
+ if (arena_prof_accum(tsd_tsdn(tsd), arena,
+ tcache->prof_accumbytes))
+ prof_idump(tsd_tsdn(tsd));
+ tcache->prof_accumbytes = 0;
+ }
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
+ if (config_stats && bin_arena == arena) {
+ assert(!merged_stats);
+ merged_stats = true;
+ bin->stats.nflushes++;
+ bin->stats.nrequests += tbin->tstats.nrequests;
+ tbin->tstats.nrequests = 0;
+ }
+ ndeferred = 0;
+ for (i = 0; i < nflush; i++) {
+ ptr = *(tbin->avail - 1 - i);
+ assert(ptr != NULL);
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ if (extent_node_arena_get(&chunk->node) == bin_arena) {
+ size_t pageind = ((uintptr_t)ptr -
+ (uintptr_t)chunk) >> LG_PAGE;
+ arena_chunk_map_bits_t *bitselm =
+ arena_bitselm_get_mutable(chunk, pageind);
+ arena_dalloc_bin_junked_locked(tsd_tsdn(tsd),
+ bin_arena, chunk, ptr, bitselm);
+ } else {
+ /*
+ * This object was allocated via a different
+ * arena bin than the one that is currently
+ * locked. Stash the object, so that it can be
+ * handled in a future pass.
+ */
+ *(tbin->avail - 1 - ndeferred) = ptr;
+ ndeferred++;
+ }
+ }
+ malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
+ arena_decay_ticks(tsd_tsdn(tsd), bin_arena, nflush - ndeferred);
+ }
+ if (config_stats && !merged_stats) {
+ /*
+ * The flush loop didn't happen to flush to this thread's
+ * arena, so the stats didn't get merged. Manually do so now.
+ */
+ arena_bin_t *bin = &arena->bins[binind];
+ malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
+ bin->stats.nflushes++;
+ bin->stats.nrequests += tbin->tstats.nrequests;
+ tbin->tstats.nrequests = 0;
+ malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
+ }
+
+ memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
+ sizeof(void *));
+ tbin->ncached = rem;
+ if ((int)tbin->ncached < tbin->low_water)
+ tbin->low_water = tbin->ncached;
+}
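Note on the flush loop: a thread cache can hold objects owned by several arenas, so each pass locks the bin of the arena that owns the first remaining object, frees everything belonging to that arena, and compacts the rest (ndeferred) to the front of the stack for a later pass, taking exactly one lock per pass; tcache_bin_flush_large() below follows the same shape with the arena lock in place of the bin lock. The partition-and-defer pattern, stripped of jemalloc's types (owner_of() and release() are toy stand-ins):

    #include <stdint.h>
    #include <stdio.h>

    typedef int owner_t;

    /* Toy stand-ins for arena lookup and locked deallocation. */
    static owner_t
    owner_of(void *ptr)
    {
        return ((owner_t)(((uintptr_t)ptr >> 4) & 1));  /* fake owner id */
    }

    static void
    release(owner_t owner, void *ptr)
    {
        printf("owner %d frees %p\n", owner, ptr);
    }

    /* Free `n` pointers, grouping the work so each pass visits one owner. */
    static void
    flush_all(void **items, size_t n)
    {
        while (n > 0) {
            owner_t owner = owner_of(items[0]);
            size_t i, ndeferred = 0;

            for (i = 0; i < n; i++) {
                if (owner_of(items[i]) == owner) {
                    release(owner, items[i]);
                } else {
                    /* Defer to a later pass, like ndeferred above. */
                    items[ndeferred++] = items[i];
                }
            }
            n = ndeferred;
        }
    }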
+
+void
+tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
+ unsigned rem, tcache_t *tcache)
+{
+ arena_t *arena;
+ void *ptr;
+ unsigned i, nflush, ndeferred;
+ bool merged_stats = false;
+
+ assert(binind < nhbins);
+ assert(rem <= tbin->ncached);
+
+ arena = arena_choose(tsd, NULL);
+ assert(arena != NULL);
+ for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
+ /* Lock the arena associated with the first object. */
+ arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
+ *(tbin->avail - 1));
+ arena_t *locked_arena = extent_node_arena_get(&chunk->node);
+ UNUSED bool idump;
+
+ if (config_prof)
+ idump = false;
+ malloc_mutex_lock(tsd_tsdn(tsd), &locked_arena->lock);
+ if ((config_prof || config_stats) && locked_arena == arena) {
+ if (config_prof) {
+ idump = arena_prof_accum_locked(arena,
+ tcache->prof_accumbytes);
+ tcache->prof_accumbytes = 0;
+ }
+ if (config_stats) {
+ merged_stats = true;
+ arena->stats.nrequests_large +=
+ tbin->tstats.nrequests;
+ arena->stats.lstats[binind - NBINS].nrequests +=
+ tbin->tstats.nrequests;
+ tbin->tstats.nrequests = 0;
+ }
+ }
+ ndeferred = 0;
+ for (i = 0; i < nflush; i++) {
+ ptr = *(tbin->avail - 1 - i);
+ assert(ptr != NULL);
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ if (extent_node_arena_get(&chunk->node) ==
+ locked_arena) {
+ arena_dalloc_large_junked_locked(tsd_tsdn(tsd),
+ locked_arena, chunk, ptr);
+ } else {
+ /*
+ * This object was allocated via a different
+ * arena than the one that is currently locked.
+ * Stash the object, so that it can be handled
+ * in a future pass.
+ */
+ *(tbin->avail - 1 - ndeferred) = ptr;
+ ndeferred++;
+ }
+ }
+ malloc_mutex_unlock(tsd_tsdn(tsd), &locked_arena->lock);
+ if (config_prof && idump)
+ prof_idump(tsd_tsdn(tsd));
+ arena_decay_ticks(tsd_tsdn(tsd), locked_arena, nflush -
+ ndeferred);
+ }
+ if (config_stats && !merged_stats) {
+ /*
+ * The flush loop didn't happen to flush to this thread's
+ * arena, so the stats didn't get merged. Manually do so now.
+ */
+ malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
+ arena->stats.nrequests_large += tbin->tstats.nrequests;
+ arena->stats.lstats[binind - NBINS].nrequests +=
+ tbin->tstats.nrequests;
+ tbin->tstats.nrequests = 0;
+ malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
+ }
+
+ memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
+ sizeof(void *));
+ tbin->ncached = rem;
+ if ((int)tbin->ncached < tbin->low_water)
+ tbin->low_water = tbin->ncached;
+}
+
+static void
+tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
+{
+
+ if (config_stats) {
+ /* Link into list of extant tcaches. */
+ malloc_mutex_lock(tsdn, &arena->lock);
+ ql_elm_new(tcache, link);
+ ql_tail_insert(&arena->tcache_ql, tcache, link);
+ malloc_mutex_unlock(tsdn, &arena->lock);
+ }
+}
+
+static void
+tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
+{
+
+ if (config_stats) {
+ /* Unlink from list of extant tcaches. */
+ malloc_mutex_lock(tsdn, &arena->lock);
+ if (config_debug) {
+ bool in_ql = false;
+ tcache_t *iter;
+ ql_foreach(iter, &arena->tcache_ql, link) {
+ if (iter == tcache) {
+ in_ql = true;
+ break;
+ }
+ }
+ assert(in_ql);
+ }
+ ql_remove(&arena->tcache_ql, tcache, link);
+ tcache_stats_merge(tsdn, tcache, arena);
+ malloc_mutex_unlock(tsdn, &arena->lock);
+ }
+}
+
+void
+tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *oldarena,
+ arena_t *newarena)
+{
+
+ tcache_arena_dissociate(tsdn, tcache, oldarena);
+ tcache_arena_associate(tsdn, tcache, newarena);
+}
+
+tcache_t *
+tcache_get_hard(tsd_t *tsd)
+{
+ arena_t *arena;
+
+ if (!tcache_enabled_get()) {
+ if (tsd_nominal(tsd))
+ tcache_enabled_set(false); /* Memoize. */
+ return (NULL);
+ }
+ arena = arena_choose(tsd, NULL);
+ if (unlikely(arena == NULL))
+ return (NULL);
+ return (tcache_create(tsd_tsdn(tsd), arena));
+}
+
+tcache_t *
+tcache_create(tsdn_t *tsdn, arena_t *arena)
+{
+ tcache_t *tcache;
+ size_t size, stack_offset;
+ unsigned i;
+
+ size = offsetof(tcache_t, tbins) + (sizeof(tcache_bin_t) * nhbins);
+ /* Naturally align the pointer stacks. */
+ size = PTR_CEILING(size);
+ stack_offset = size;
+ size += stack_nelms * sizeof(void *);
+ /* Avoid false cacheline sharing. */
+ size = sa2u(size, CACHELINE);
+
+ tcache = ipallocztm(tsdn, size, CACHELINE, true, NULL, true,
+ arena_get(TSDN_NULL, 0, true));
+ if (tcache == NULL)
+ return (NULL);
+
+ tcache_arena_associate(tsdn, tcache, arena);
+
+ ticker_init(&tcache->gc_ticker, TCACHE_GC_INCR);
+
+ assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
+ for (i = 0; i < nhbins; i++) {
+ tcache->tbins[i].lg_fill_div = 1;
+ stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
+ /*
+ * avail points past the available space. Allocations will
+ * access the slots toward higher addresses (for the benefit of
+ * prefetch).
+ */
+ tcache->tbins[i].avail = (void **)((uintptr_t)tcache +
+ (uintptr_t)stack_offset);
+ }
+
+ return (tcache);
+}
+
+static void
+tcache_destroy(tsd_t *tsd, tcache_t *tcache)
+{
+ arena_t *arena;
+ unsigned i;
+
+ arena = arena_choose(tsd, NULL);
+ tcache_arena_dissociate(tsd_tsdn(tsd), tcache, arena);
+
+ for (i = 0; i < NBINS; i++) {
+ tcache_bin_t *tbin = &tcache->tbins[i];
+ tcache_bin_flush_small(tsd, tcache, tbin, i, 0);
+
+ if (config_stats && tbin->tstats.nrequests != 0) {
+ arena_bin_t *bin = &arena->bins[i];
+ malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
+ bin->stats.nrequests += tbin->tstats.nrequests;
+ malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
+ }
+ }
+
+ for (; i < nhbins; i++) {
+ tcache_bin_t *tbin = &tcache->tbins[i];
+ tcache_bin_flush_large(tsd, tbin, i, 0, tcache);
+
+ if (config_stats && tbin->tstats.nrequests != 0) {
+ malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
+ arena->stats.nrequests_large += tbin->tstats.nrequests;
+ arena->stats.lstats[i - NBINS].nrequests +=
+ tbin->tstats.nrequests;
+ malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
+ }
+ }
+
+ if (config_prof && tcache->prof_accumbytes > 0 &&
+ arena_prof_accum(tsd_tsdn(tsd), arena, tcache->prof_accumbytes))
+ prof_idump(tsd_tsdn(tsd));
+
+ idalloctm(tsd_tsdn(tsd), tcache, NULL, true, true);
+}
+
+void
+tcache_cleanup(tsd_t *tsd)
+{
+ tcache_t *tcache;
+
+ if (!config_tcache)
+ return;
+
+ if ((tcache = tsd_tcache_get(tsd)) != NULL) {
+ tcache_destroy(tsd, tcache);
+ tsd_tcache_set(tsd, NULL);
+ }
+}
+
+void
+tcache_enabled_cleanup(tsd_t *tsd)
+{
+
+ /* Do nothing. */
+}
+
+void
+tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
+{
+ unsigned i;
+
+ cassert(config_stats);
+
+ malloc_mutex_assert_owner(tsdn, &arena->lock);
+
+ /* Merge and reset tcache stats. */
+ for (i = 0; i < NBINS; i++) {
+ arena_bin_t *bin = &arena->bins[i];
+ tcache_bin_t *tbin = &tcache->tbins[i];
+ malloc_mutex_lock(tsdn, &bin->lock);
+ bin->stats.nrequests += tbin->tstats.nrequests;
+ malloc_mutex_unlock(tsdn, &bin->lock);
+ tbin->tstats.nrequests = 0;
+ }
+
+ for (; i < nhbins; i++) {
+ malloc_large_stats_t *lstats = &arena->stats.lstats[i - NBINS];
+ tcache_bin_t *tbin = &tcache->tbins[i];
+ arena->stats.nrequests_large += tbin->tstats.nrequests;
+ lstats->nrequests += tbin->tstats.nrequests;
+ tbin->tstats.nrequests = 0;
+ }
+}
+
+bool
+tcaches_create(tsd_t *tsd, unsigned *r_ind)
+{
+ arena_t *arena;
+ tcache_t *tcache;
+ tcaches_t *elm;
+
+ if (tcaches == NULL) {
+ tcaches = base_alloc(tsd_tsdn(tsd), sizeof(tcache_t *) *
+ (MALLOCX_TCACHE_MAX+1));
+ if (tcaches == NULL)
+ return (true);
+ }
+
+ if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX)
+ return (true);
+ arena = arena_ichoose(tsd, NULL);
+ if (unlikely(arena == NULL))
+ return (true);
+ tcache = tcache_create(tsd_tsdn(tsd), arena);
+ if (tcache == NULL)
+ return (true);
+
+ if (tcaches_avail != NULL) {
+ elm = tcaches_avail;
+ tcaches_avail = tcaches_avail->next;
+ elm->tcache = tcache;
+ *r_ind = (unsigned)(elm - tcaches);
+ } else {
+ elm = &tcaches[tcaches_past];
+ elm->tcache = tcache;
+ *r_ind = tcaches_past;
+ tcaches_past++;
+ }
+
+ return (false);
+}
+
+static void
+tcaches_elm_flush(tsd_t *tsd, tcaches_t *elm)
+{
+
+ if (elm->tcache == NULL)
+ return;
+ tcache_destroy(tsd, elm->tcache);
+ elm->tcache = NULL;
+}
+
+void
+tcaches_flush(tsd_t *tsd, unsigned ind)
+{
+
+ tcaches_elm_flush(tsd, &tcaches[ind]);
+}
+
+void
+tcaches_destroy(tsd_t *tsd, unsigned ind)
+{
+ tcaches_t *elm = &tcaches[ind];
+ tcaches_elm_flush(tsd, elm);
+ elm->next = tcaches_avail;
+ tcaches_avail = elm;
+}
+
+bool
+tcache_boot(tsdn_t *tsdn)
+{
+ unsigned i;
+
+ /*
+ * If necessary, clamp opt_lg_tcache_max, now that large_maxclass is
+ * known.
+ */
+ if (opt_lg_tcache_max < 0 || (ZU(1) << opt_lg_tcache_max) < SMALL_MAXCLASS)
+ tcache_maxclass = SMALL_MAXCLASS;
+ else if ((ZU(1) << opt_lg_tcache_max) > large_maxclass)
+ tcache_maxclass = large_maxclass;
+ else
+ tcache_maxclass = (ZU(1) << opt_lg_tcache_max);
+
+ nhbins = size2index(tcache_maxclass) + 1;
+
+ /* Initialize tcache_bin_info. */
+ tcache_bin_info = (tcache_bin_info_t *)base_alloc(tsdn, nhbins *
+ sizeof(tcache_bin_info_t));
+ if (tcache_bin_info == NULL)
+ return (true);
+ stack_nelms = 0;
+ for (i = 0; i < NBINS; i++) {
+ if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MIN) {
+ tcache_bin_info[i].ncached_max =
+ TCACHE_NSLOTS_SMALL_MIN;
+ } else if ((arena_bin_info[i].nregs << 1) <=
+ TCACHE_NSLOTS_SMALL_MAX) {
+ tcache_bin_info[i].ncached_max =
+ (arena_bin_info[i].nregs << 1);
+ } else {
+ tcache_bin_info[i].ncached_max =
+ TCACHE_NSLOTS_SMALL_MAX;
+ }
+ stack_nelms += tcache_bin_info[i].ncached_max;
+ }
+ for (; i < nhbins; i++) {
+ tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE;
+ stack_nelms += tcache_bin_info[i].ncached_max;
+ }
+
+ return (false);
+}
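Note on tcache_boot(): each small bin caches up to twice its regions-per-run, clamped to [TCACHE_NSLOTS_SMALL_MIN, TCACHE_NSLOTS_SMALL_MAX], large bins get TCACHE_NSLOTS_LARGE slots, and stack_nelms accumulates the totals so tcache_create() can carve every per-bin stack out of a single allocation. The small-bin rule is simply a clamp (the min/max arguments stand in for the configured constants):

    /* ncached_max for a small bin with `nregs` regions per run. */
    static unsigned
    small_ncached_max(unsigned nregs, unsigned nslots_min, unsigned nslots_max)
    {
        unsigned want = nregs << 1;

        if (want <= nslots_min)
            return (nslots_min);
        if (want <= nslots_max)
            return (want);
        return (nslots_max);
    }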
diff --git a/deps/jemalloc/src/ticker.c b/deps/jemalloc/src/ticker.c
new file mode 100644
index 0000000..db09024
--- /dev/null
+++ b/deps/jemalloc/src/ticker.c
@@ -0,0 +1,2 @@
+#define JEMALLOC_TICKER_C_
+#include "jemalloc/internal/jemalloc_internal.h"
diff --git a/deps/jemalloc/src/tsd.c b/deps/jemalloc/src/tsd.c
new file mode 100644
index 0000000..ec69a51
--- /dev/null
+++ b/deps/jemalloc/src/tsd.c
@@ -0,0 +1,197 @@
+#define JEMALLOC_TSD_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+/* Data. */
+
+static unsigned ncleanups;
+static malloc_tsd_cleanup_t cleanups[MALLOC_TSD_CLEANUPS_MAX];
+
+malloc_tsd_data(, , tsd_t, TSD_INITIALIZER)
+
+/******************************************************************************/
+
+void *
+malloc_tsd_malloc(size_t size)
+{
+
+ return (a0malloc(CACHELINE_CEILING(size)));
+}
+
+void
+malloc_tsd_dalloc(void *wrapper)
+{
+
+ a0dalloc(wrapper);
+}
+
+void
+malloc_tsd_no_cleanup(void *arg)
+{
+
+ not_reached();
+}
+
+#if defined(JEMALLOC_MALLOC_THREAD_CLEANUP) || defined(_WIN32)
+#ifndef _WIN32
+JEMALLOC_EXPORT
+#endif
+void
+_malloc_thread_cleanup(void)
+{
+ bool pending[MALLOC_TSD_CLEANUPS_MAX], again;
+ unsigned i;
+
+ for (i = 0; i < ncleanups; i++)
+ pending[i] = true;
+
+ do {
+ again = false;
+ for (i = 0; i < ncleanups; i++) {
+ if (pending[i]) {
+ pending[i] = cleanups[i]();
+ if (pending[i])
+ again = true;
+ }
+ }
+ } while (again);
+}
+#endif
+
+void
+malloc_tsd_cleanup_register(bool (*f)(void))
+{
+
+ assert(ncleanups < MALLOC_TSD_CLEANUPS_MAX);
+ cleanups[ncleanups] = f;
+ ncleanups++;
+}
+
+void
+tsd_cleanup(void *arg)
+{
+ tsd_t *tsd = (tsd_t *)arg;
+
+ switch (tsd->state) {
+ case tsd_state_uninitialized:
+ /* Do nothing. */
+ break;
+ case tsd_state_nominal:
+#define O(n, t) \
+ n##_cleanup(tsd);
+MALLOC_TSD
+#undef O
+ tsd->state = tsd_state_purgatory;
+ tsd_set(tsd);
+ break;
+ case tsd_state_purgatory:
+ /*
+ * The previous time this destructor was called, we set the
+ * state to tsd_state_purgatory so that other destructors
+ * wouldn't cause re-creation of the tsd. This time, do
+ * nothing, and do not request another callback.
+ */
+ break;
+ case tsd_state_reincarnated:
+ /*
+ * Another destructor deallocated memory after this destructor
+ * was called. Reset state to tsd_state_purgatory and request
+ * another callback.
+ */
+ tsd->state = tsd_state_purgatory;
+ tsd_set(tsd);
+ break;
+ default:
+ not_reached();
+ }
+}
+
+tsd_t *
+malloc_tsd_boot0(void)
+{
+ tsd_t *tsd;
+
+ ncleanups = 0;
+ if (tsd_boot0())
+ return (NULL);
+ tsd = tsd_fetch();
+ *tsd_arenas_tdata_bypassp_get(tsd) = true;
+ return (tsd);
+}
+
+void
+malloc_tsd_boot1(void)
+{
+
+ tsd_boot1();
+ *tsd_arenas_tdata_bypassp_get(tsd_fetch()) = false;
+}
+
+#ifdef _WIN32
+static BOOL WINAPI
+_tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved)
+{
+
+ switch (fdwReason) {
+#ifdef JEMALLOC_LAZY_LOCK
+ case DLL_THREAD_ATTACH:
+ isthreaded = true;
+ break;
+#endif
+ case DLL_THREAD_DETACH:
+ _malloc_thread_cleanup();
+ break;
+ default:
+ break;
+ }
+ return (true);
+}
+
+#ifdef _MSC_VER
+# ifdef _M_IX86
+# pragma comment(linker, "/INCLUDE:__tls_used")
+# pragma comment(linker, "/INCLUDE:_tls_callback")
+# else
+# pragma comment(linker, "/INCLUDE:_tls_used")
+# pragma comment(linker, "/INCLUDE:tls_callback")
+# endif
+# pragma section(".CRT$XLY",long,read)
+#endif
+JEMALLOC_SECTION(".CRT$XLY") JEMALLOC_ATTR(used)
+BOOL (WINAPI *const tls_callback)(HINSTANCE hinstDLL,
+ DWORD fdwReason, LPVOID lpvReserved) = _tls_callback;
+#endif
+
+#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
+ !defined(_WIN32))
+void *
+tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block)
+{
+ pthread_t self = pthread_self();
+ tsd_init_block_t *iter;
+
+ /* Check whether this thread has already inserted into the list. */
+ malloc_mutex_lock(TSDN_NULL, &head->lock);
+ ql_foreach(iter, &head->blocks, link) {
+ if (iter->thread == self) {
+ malloc_mutex_unlock(TSDN_NULL, &head->lock);
+ return (iter->data);
+ }
+ }
+ /* Insert block into list. */
+ ql_elm_new(block, link);
+ block->thread = self;
+ ql_tail_insert(&head->blocks, block, link);
+ malloc_mutex_unlock(TSDN_NULL, &head->lock);
+ return (NULL);
+}
+
+void
+tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block)
+{
+
+ malloc_mutex_lock(TSDN_NULL, &head->lock);
+ ql_remove(&head->blocks, block, link);
+ malloc_mutex_unlock(TSDN_NULL, &head->lock);
+}
+#endif
diff --git a/deps/jemalloc/src/util.c b/deps/jemalloc/src/util.c
new file mode 100644
index 0000000..dd8c236
--- /dev/null
+++ b/deps/jemalloc/src/util.c
@@ -0,0 +1,666 @@
+/*
+ * Define simple versions of assertion macros that won't recurse in case
+ * of assertion failures in malloc_*printf().
+ */
+#define assert(e) do { \
+ if (config_debug && !(e)) { \
+ malloc_write("<jemalloc>: Failed assertion\n"); \
+ abort(); \
+ } \
+} while (0)
+
+#define not_reached() do { \
+ if (config_debug) { \
+ malloc_write("<jemalloc>: Unreachable code reached\n"); \
+ abort(); \
+ } \
+ unreachable(); \
+} while (0)
+
+#define not_implemented() do { \
+ if (config_debug) { \
+ malloc_write("<jemalloc>: Not implemented\n"); \
+ abort(); \
+ } \
+} while (0)
+
+#define JEMALLOC_UTIL_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+/* Function prototypes for non-inline static functions. */
+
+static void wrtmessage(void *cbopaque, const char *s);
+#define U2S_BUFSIZE ((1U << (LG_SIZEOF_INTMAX_T + 3)) + 1)
+static char *u2s(uintmax_t x, unsigned base, bool uppercase, char *s,
+ size_t *slen_p);
+#define D2S_BUFSIZE (1 + U2S_BUFSIZE)
+static char *d2s(intmax_t x, char sign, char *s, size_t *slen_p);
+#define O2S_BUFSIZE (1 + U2S_BUFSIZE)
+static char *o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p);
+#define X2S_BUFSIZE (2 + U2S_BUFSIZE)
+static char *x2s(uintmax_t x, bool alt_form, bool uppercase, char *s,
+ size_t *slen_p);
+
+/******************************************************************************/
+
+/* malloc_message() setup. */
+static void
+wrtmessage(void *cbopaque, const char *s)
+{
+
+#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_write)
+ /*
+ * Use syscall(2) rather than write(2) when possible in order to avoid
+ * the possibility of memory allocation within libc. This is necessary
+ * on FreeBSD; most operating systems do not have this problem though.
+ *
+ * syscall() returns long or int, depending on platform, so capture the
+ * unused result in the widest plausible type to avoid compiler
+ * warnings.
+ */
+ UNUSED long result = syscall(SYS_write, STDERR_FILENO, s, strlen(s));
+#else
+ UNUSED ssize_t result = write(STDERR_FILENO, s, strlen(s));
+#endif
+}
+
+JEMALLOC_EXPORT void (*je_malloc_message)(void *, const char *s);
+
+/*
+ * Wrapper around malloc_message() that avoids the need for
+ * je_malloc_message(...) throughout the code.
+ */
+void
+malloc_write(const char *s)
+{
+
+ if (je_malloc_message != NULL)
+ je_malloc_message(NULL, s);
+ else
+ wrtmessage(NULL, s);
+}
+
+/*
+ * glibc provides a non-standard strerror_r() when _GNU_SOURCE is defined, so
+ * provide a wrapper.
+ */
+int
+buferror(int err, char *buf, size_t buflen)
+{
+
+#ifdef _WIN32
+ FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, err, 0,
+ (LPSTR)buf, (DWORD)buflen, NULL);
+ return (0);
+#elif defined(__GLIBC__) && defined(_GNU_SOURCE)
+ char *b = strerror_r(err, buf, buflen);
+ if (b != buf) {
+ strncpy(buf, b, buflen);
+ buf[buflen-1] = '\0';
+ }
+ return (0);
+#else
+ return (strerror_r(err, buf, buflen));
+#endif
+}
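
buferror() papers over the difference between the GNU and POSIX strerror_r() variants so callers see one interface. A hedged usage sketch (report_open_failure is hypothetical; get_errno() and malloc_printf() are assumed from the internal headers):

        /* Sketch: report a failed open(2) through buferror(). */
        #include <fcntl.h>

        static void
        report_open_failure(const char *path)
        {
                char errbuf[128];

                if (open(path, O_RDONLY) == -1) {
                        buferror(get_errno(), errbuf, sizeof(errbuf));
                        malloc_printf("<example>: cannot open %s: %s\n", path,
                            errbuf);
                }
        }
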
+
+uintmax_t
+malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base)
+{
+ uintmax_t ret, digit;
+ unsigned b;
+ bool neg;
+ const char *p, *ns;
+
+ p = nptr;
+ if (base < 0 || base == 1 || base > 36) {
+ ns = p;
+ set_errno(EINVAL);
+ ret = UINTMAX_MAX;
+ goto label_return;
+ }
+ b = base;
+
+ /* Swallow leading whitespace and get sign, if any. */
+ neg = false;
+ while (true) {
+ switch (*p) {
+ case '\t': case '\n': case '\v': case '\f': case '\r': case ' ':
+ p++;
+ break;
+ case '-':
+ neg = true;
+ /* Fall through. */
+ case '+':
+ p++;
+ /* Fall through. */
+ default:
+ goto label_prefix;
+ }
+ }
+
+ /* Get prefix, if any. */
+ label_prefix:
+ /*
+ * Note where the first non-whitespace/sign character is so that it is
+ * possible to tell whether any digits are consumed (e.g., " 0" vs.
+ * " -x").
+ */
+ ns = p;
+ if (*p == '0') {
+ switch (p[1]) {
+ case '0': case '1': case '2': case '3': case '4': case '5':
+ case '6': case '7':
+ if (b == 0)
+ b = 8;
+ if (b == 8)
+ p++;
+ break;
+ case 'X': case 'x':
+ switch (p[2]) {
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ case 'A': case 'B': case 'C': case 'D': case 'E':
+ case 'F':
+ case 'a': case 'b': case 'c': case 'd': case 'e':
+ case 'f':
+ if (b == 0)
+ b = 16;
+ if (b == 16)
+ p += 2;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ p++;
+ ret = 0;
+ goto label_return;
+ }
+ }
+ if (b == 0)
+ b = 10;
+
+ /* Convert. */
+ ret = 0;
+ while ((*p >= '0' && *p <= '9' && (digit = *p - '0') < b)
+ || (*p >= 'A' && *p <= 'Z' && (digit = 10 + *p - 'A') < b)
+ || (*p >= 'a' && *p <= 'z' && (digit = 10 + *p - 'a') < b)) {
+ uintmax_t pret = ret;
+ ret *= b;
+ ret += digit;
+ if (ret < pret) {
+ /* Overflow. */
+ set_errno(ERANGE);
+ ret = UINTMAX_MAX;
+ goto label_return;
+ }
+ p++;
+ }
+ if (neg)
+ ret = (uintmax_t)(-((intmax_t)ret));
+
+ if (p == ns) {
+ /* No conversion performed. */
+ set_errno(EINVAL);
+ ret = UINTMAX_MAX;
+ goto label_return;
+ }
+
+label_return:
+ if (endptr != NULL) {
+ if (p == ns) {
+ /* No characters were converted. */
+ *endptr = (char *)nptr;
+ } else
+ *endptr = (char *)p;
+ }
+ return (ret);
+}
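
malloc_strtoumax() mirrors strtoumax(3), including base auto-detection from leading "0"/"0x" and the endptr contract when no digits are consumed. A short sketch of the resulting behavior (strtoumax_example is hypothetical; expected results shown in comments):

        /* Sketch: base auto-detection and endptr behavior. */
        static void
        strtoumax_example(void)
        {
                char *end;
                uintmax_t v;

                v = malloc_strtoumax("0x1f", &end, 0);  /* v == 31, *end == '\0' */
                v = malloc_strtoumax("0755", &end, 0);  /* v == 493 (octal) */
                v = malloc_strtoumax("  -x", &end, 10); /* no digits: EINVAL,
                                                           end == input start */
                (void)v;
        }
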
+
+static char *
+u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p)
+{
+ unsigned i;
+
+ i = U2S_BUFSIZE - 1;
+ s[i] = '\0';
+ switch (base) {
+ case 10:
+ do {
+ i--;
+ s[i] = "0123456789"[x % (uint64_t)10];
+ x /= (uint64_t)10;
+ } while (x > 0);
+ break;
+ case 16: {
+ const char *digits = (uppercase)
+ ? "0123456789ABCDEF"
+ : "0123456789abcdef";
+
+ do {
+ i--;
+ s[i] = digits[x & 0xf];
+ x >>= 4;
+ } while (x > 0);
+ break;
+ } default: {
+ const char *digits = (uppercase)
+ ? "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ : "0123456789abcdefghijklmnopqrstuvwxyz";
+
+ assert(base >= 2 && base <= 36);
+ do {
+ i--;
+ s[i] = digits[x % (uint64_t)base];
+ x /= (uint64_t)base;
+ } while (x > 0);
+ }}
+
+ *slen_p = U2S_BUFSIZE - 1 - i;
+ return (&s[i]);
+}
+
+static char *
+d2s(intmax_t x, char sign, char *s, size_t *slen_p)
+{
+ bool neg;
+
+ if ((neg = (x < 0)))
+ x = -x;
+ s = u2s(x, 10, false, s, slen_p);
+ if (neg)
+ sign = '-';
+ switch (sign) {
+ case '-':
+ if (!neg)
+ break;
+ /* Fall through. */
+ case ' ':
+ case '+':
+ s--;
+ (*slen_p)++;
+ *s = sign;
+ break;
+ default: not_reached();
+ }
+ return (s);
+}
+
+static char *
+o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p)
+{
+
+ s = u2s(x, 8, false, s, slen_p);
+ if (alt_form && *s != '0') {
+ s--;
+ (*slen_p)++;
+ *s = '0';
+ }
+ return (s);
+}
+
+static char *
+x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p)
+{
+
+ s = u2s(x, 16, uppercase, s, slen_p);
+ if (alt_form) {
+ s -= 2;
+ (*slen_p) += 2;
+ memcpy(s, uppercase ? "0X" : "0x", 2);
+ }
+ return (s);
+}
+
+size_t
+malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
+{
+ size_t i;
+ const char *f;
+
+#define APPEND_C(c) do { \
+ if (i < size) \
+ str[i] = (c); \
+ i++; \
+} while (0)
+#define APPEND_S(s, slen) do { \
+ if (i < size) { \
+ size_t cpylen = (slen <= size - i) ? slen : size - i; \
+ memcpy(&str[i], s, cpylen); \
+ } \
+ i += slen; \
+} while (0)
+#define APPEND_PADDED_S(s, slen, width, left_justify) do { \
+ /* Left padding. */ \
+ size_t pad_len = (width == -1) ? 0 : ((slen < (size_t)width) ? \
+ (size_t)width - slen : 0); \
+ if (!left_justify && pad_len != 0) { \
+ size_t j; \
+ for (j = 0; j < pad_len; j++) \
+ APPEND_C(' '); \
+ } \
+ /* Value. */ \
+ APPEND_S(s, slen); \
+ /* Right padding. */ \
+ if (left_justify && pad_len != 0) { \
+ size_t j; \
+ for (j = 0; j < pad_len; j++) \
+ APPEND_C(' '); \
+ } \
+} while (0)
+#define GET_ARG_NUMERIC(val, len) do { \
+ switch (len) { \
+ case '?': \
+ val = va_arg(ap, int); \
+ break; \
+ case '?' | 0x80: \
+ val = va_arg(ap, unsigned int); \
+ break; \
+ case 'l': \
+ val = va_arg(ap, long); \
+ break; \
+ case 'l' | 0x80: \
+ val = va_arg(ap, unsigned long); \
+ break; \
+ case 'q': \
+ val = va_arg(ap, long long); \
+ break; \
+ case 'q' | 0x80: \
+ val = va_arg(ap, unsigned long long); \
+ break; \
+ case 'j': \
+ val = va_arg(ap, intmax_t); \
+ break; \
+ case 'j' | 0x80: \
+ val = va_arg(ap, uintmax_t); \
+ break; \
+ case 't': \
+ val = va_arg(ap, ptrdiff_t); \
+ break; \
+ case 'z': \
+ val = va_arg(ap, ssize_t); \
+ break; \
+ case 'z' | 0x80: \
+ val = va_arg(ap, size_t); \
+ break; \
+ case 'p': /* Synthetic; used for %p. */ \
+ val = va_arg(ap, uintptr_t); \
+ break; \
+ default: \
+ not_reached(); \
+ val = 0; \
+ } \
+} while (0)
+
+ i = 0;
+ f = format;
+ while (true) {
+ switch (*f) {
+ case '\0': goto label_out;
+ case '%': {
+ bool alt_form = false;
+ bool left_justify = false;
+ bool plus_space = false;
+ bool plus_plus = false;
+ int prec = -1;
+ int width = -1;
+ unsigned char len = '?';
+ char *s;
+ size_t slen;
+
+ f++;
+ /* Flags. */
+ while (true) {
+ switch (*f) {
+ case '#':
+ assert(!alt_form);
+ alt_form = true;
+ break;
+ case '-':
+ assert(!left_justify);
+ left_justify = true;
+ break;
+ case ' ':
+ assert(!plus_space);
+ plus_space = true;
+ break;
+ case '+':
+ assert(!plus_plus);
+ plus_plus = true;
+ break;
+ default: goto label_width;
+ }
+ f++;
+ }
+ /* Width. */
+ label_width:
+ switch (*f) {
+ case '*':
+ width = va_arg(ap, int);
+ f++;
+ if (width < 0) {
+ left_justify = true;
+ width = -width;
+ }
+ break;
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9': {
+ uintmax_t uwidth;
+ set_errno(0);
+ uwidth = malloc_strtoumax(f, (char **)&f, 10);
+ assert(uwidth != UINTMAX_MAX || get_errno() !=
+ ERANGE);
+ width = (int)uwidth;
+ break;
+ } default:
+ break;
+ }
+ /* Width/precision separator. */
+ if (*f == '.')
+ f++;
+ else
+ goto label_length;
+ /* Precision. */
+ switch (*f) {
+ case '*':
+ prec = va_arg(ap, int);
+ f++;
+ break;
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9': {
+ uintmax_t uprec;
+ set_errno(0);
+ uprec = malloc_strtoumax(f, (char **)&f, 10);
+ assert(uprec != UINTMAX_MAX || get_errno() !=
+ ERANGE);
+ prec = (int)uprec;
+ break;
+ }
+ default: break;
+ }
+ /* Length. */
+ label_length:
+ switch (*f) {
+ case 'l':
+ f++;
+ if (*f == 'l') {
+ len = 'q';
+ f++;
+ } else
+ len = 'l';
+ break;
+ case 'q': case 'j': case 't': case 'z':
+ len = *f;
+ f++;
+ break;
+ default: break;
+ }
+ /* Conversion specifier. */
+ switch (*f) {
+ case '%':
+ /* %% */
+ APPEND_C(*f);
+ f++;
+ break;
+ case 'd': case 'i': {
+ intmax_t val JEMALLOC_CC_SILENCE_INIT(0);
+ char buf[D2S_BUFSIZE];
+
+ GET_ARG_NUMERIC(val, len);
+ s = d2s(val, (plus_plus ? '+' : (plus_space ?
+ ' ' : '-')), buf, &slen);
+ APPEND_PADDED_S(s, slen, width, left_justify);
+ f++;
+ break;
+ } case 'o': {
+ uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
+ char buf[O2S_BUFSIZE];
+
+ GET_ARG_NUMERIC(val, len | 0x80);
+ s = o2s(val, alt_form, buf, &slen);
+ APPEND_PADDED_S(s, slen, width, left_justify);
+ f++;
+ break;
+ } case 'u': {
+ uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
+ char buf[U2S_BUFSIZE];
+
+ GET_ARG_NUMERIC(val, len | 0x80);
+ s = u2s(val, 10, false, buf, &slen);
+ APPEND_PADDED_S(s, slen, width, left_justify);
+ f++;
+ break;
+ } case 'x': case 'X': {
+ uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
+ char buf[X2S_BUFSIZE];
+
+ GET_ARG_NUMERIC(val, len | 0x80);
+ s = x2s(val, alt_form, *f == 'X', buf, &slen);
+ APPEND_PADDED_S(s, slen, width, left_justify);
+ f++;
+ break;
+ } case 'c': {
+ unsigned char val;
+ char buf[2];
+
+ assert(len == '?' || len == 'l');
+ assert_not_implemented(len != 'l');
+ val = va_arg(ap, int);
+ buf[0] = val;
+ buf[1] = '\0';
+ APPEND_PADDED_S(buf, 1, width, left_justify);
+ f++;
+ break;
+ } case 's':
+ assert(len == '?' || len == 'l');
+ assert_not_implemented(len != 'l');
+ s = va_arg(ap, char *);
+ slen = (prec < 0) ? strlen(s) : (size_t)prec;
+ APPEND_PADDED_S(s, slen, width, left_justify);
+ f++;
+ break;
+ case 'p': {
+ uintmax_t val;
+ char buf[X2S_BUFSIZE];
+
+ GET_ARG_NUMERIC(val, 'p');
+ s = x2s(val, true, false, buf, &slen);
+ APPEND_PADDED_S(s, slen, width, left_justify);
+ f++;
+ break;
+ } default: not_reached();
+ }
+ break;
+ } default: {
+ APPEND_C(*f);
+ f++;
+ break;
+ }}
+ }
+ label_out:
+ if (i < size)
+ str[i] = '\0';
+ else
+ str[size - 1] = '\0';
+
+#undef APPEND_C
+#undef APPEND_S
+#undef APPEND_PADDED_S
+#undef GET_ARG_NUMERIC
+ return (i);
+}
+
+JEMALLOC_FORMAT_PRINTF(3, 4)
+size_t
+malloc_snprintf(char *str, size_t size, const char *format, ...)
+{
+ size_t ret;
+ va_list ap;
+
+ va_start(ap, format);
+ ret = malloc_vsnprintf(str, size, format, ap);
+ va_end(ap);
+
+ return (ret);
+}
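
malloc_vsnprintf() implements the subset of printf(3) that jemalloc needs (d/i/u/o/x/X/c/s/p, flags, width, precision, and the length modifiers decoded by GET_ARG_NUMERIC), and like snprintf(3) it returns the untruncated length. A small usage sketch via the malloc_snprintf() wrapper (snprintf_example is hypothetical):

        /* Sketch: the size_t, pointer, and alt-form cases jemalloc relies on. */
        static void
        snprintf_example(size_t nbytes, void *ptr)
        {
                char buf[64];
                size_t n;

                n = malloc_snprintf(buf, sizeof(buf), "%zu bytes at %p (%#x)\n",
                    nbytes, ptr, 0xbeef);
                /* As with snprintf(3), n is the length before truncation. */
                (void)n;
        }
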
+
+void
+malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
+ const char *format, va_list ap)
+{
+ char buf[MALLOC_PRINTF_BUFSIZE];
+
+ if (write_cb == NULL) {
+ /*
+ * The caller did not provide an alternate write_cb callback
+ * function, so use the default one. malloc_write() is an
+ * inline function, so use malloc_message() directly here.
+ */
+ write_cb = (je_malloc_message != NULL) ? je_malloc_message :
+ wrtmessage;
+ cbopaque = NULL;
+ }
+
+ malloc_vsnprintf(buf, sizeof(buf), format, ap);
+ write_cb(cbopaque, buf);
+}
+
+/*
+ * Print to a callback function in such a way as to (hopefully) avoid memory
+ * allocation.
+ */
+JEMALLOC_FORMAT_PRINTF(3, 4)
+void
+malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque,
+ const char *format, ...)
+{
+ va_list ap;
+
+ va_start(ap, format);
+ malloc_vcprintf(write_cb, cbopaque, format, ap);
+ va_end(ap);
+}
+
+/* Print to stderr in such a way as to avoid memory allocation. */
+JEMALLOC_FORMAT_PRINTF(1, 2)
+void
+malloc_printf(const char *format, ...)
+{
+ va_list ap;
+
+ va_start(ap, format);
+ malloc_vcprintf(NULL, NULL, format, ap);
+ va_end(ap);
+}
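
malloc_cprintf() routes formatted output through an arbitrary callback rather than stderr, which is how the stats code emits text without allocating. A sketch with a hypothetical bounded-buffer callback (struct sink, sink_write, and cprintf_example are not part of the source):

        /* Sketch: capture formatted output via a caller-supplied write callback. */
        struct sink { char buf[256]; size_t len; };

        static void
        sink_write(void *cbopaque, const char *s)
        {
                struct sink *sink = (struct sink *)cbopaque;
                size_t slen = strlen(s);

                if (slen > sizeof(sink->buf) - 1 - sink->len)
                        slen = sizeof(sink->buf) - 1 - sink->len;
                memcpy(sink->buf + sink->len, s, slen);
                sink->len += slen;
                sink->buf[sink->len] = '\0';
        }

        static void
        cprintf_example(void)
        {
                struct sink sink = {{0}, 0};

                malloc_cprintf(sink_write, &sink, "arenas: %u\n", 4);
        }
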
+
+/*
+ * Restore normal assertion macros, in order to make it possible to compile all
+ * C files as a single concatenation.
+ */
+#undef assert
+#undef not_reached
+#undef not_implemented
+#include "jemalloc/internal/assert.h"
diff --git a/deps/jemalloc/src/valgrind.c b/deps/jemalloc/src/valgrind.c
new file mode 100644
index 0000000..8e7ef3a
--- /dev/null
+++ b/deps/jemalloc/src/valgrind.c
@@ -0,0 +1,34 @@
+#include "jemalloc/internal/jemalloc_internal.h"
+#ifndef JEMALLOC_VALGRIND
+# error "This source file is for Valgrind integration."
+#endif
+
+#include <valgrind/memcheck.h>
+
+void
+valgrind_make_mem_noaccess(void *ptr, size_t usize)
+{
+
+ VALGRIND_MAKE_MEM_NOACCESS(ptr, usize);
+}
+
+void
+valgrind_make_mem_undefined(void *ptr, size_t usize)
+{
+
+ VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize);
+}
+
+void
+valgrind_make_mem_defined(void *ptr, size_t usize)
+{
+
+ VALGRIND_MAKE_MEM_DEFINED(ptr, usize);
+}
+
+void
+valgrind_freelike_block(void *ptr, size_t usize)
+{
+
+ VALGRIND_FREELIKE_BLOCK(ptr, usize);
+}
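
These wrappers keep <valgrind/memcheck.h> out of the rest of the tree while still letting the allocator poison and unpoison memory. A sketch of the intended lifecycle around a recycled allocation (valgrind_example and its arguments are hypothetical):

        /* Sketch: typical poison/unpoison lifecycle for a recycled buffer. */
        static void
        valgrind_example(void *buf, size_t usize)
        {
                /* Freshly carved from a chunk: addressable but undefined. */
                valgrind_make_mem_undefined(buf, usize);

                /* ... caller fills and uses the buffer ... */

                /* Returned to the allocator: further access is an error. */
                valgrind_make_mem_noaccess(buf, usize);
        }
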
diff --git a/deps/jemalloc/src/witness.c b/deps/jemalloc/src/witness.c
new file mode 100644
index 0000000..23753f2
--- /dev/null
+++ b/deps/jemalloc/src/witness.c
@@ -0,0 +1,136 @@
+#define JEMALLOC_WITNESS_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+void
+witness_init(witness_t *witness, const char *name, witness_rank_t rank,
+ witness_comp_t *comp)
+{
+
+ witness->name = name;
+ witness->rank = rank;
+ witness->comp = comp;
+}
+
+#ifdef JEMALLOC_JET
+#undef witness_lock_error
+#define witness_lock_error JEMALLOC_N(n_witness_lock_error)
+#endif
+void
+witness_lock_error(const witness_list_t *witnesses, const witness_t *witness)
+{
+ witness_t *w;
+
+ malloc_printf("<jemalloc>: Lock rank order reversal:");
+ ql_foreach(w, witnesses, link) {
+ malloc_printf(" %s(%u)", w->name, w->rank);
+ }
+ malloc_printf(" %s(%u)\n", witness->name, witness->rank);
+ abort();
+}
+#ifdef JEMALLOC_JET
+#undef witness_lock_error
+#define witness_lock_error JEMALLOC_N(witness_lock_error)
+witness_lock_error_t *witness_lock_error = JEMALLOC_N(n_witness_lock_error);
+#endif
+
+#ifdef JEMALLOC_JET
+#undef witness_owner_error
+#define witness_owner_error JEMALLOC_N(n_witness_owner_error)
+#endif
+void
+witness_owner_error(const witness_t *witness)
+{
+
+ malloc_printf("<jemalloc>: Should own %s(%u)\n", witness->name,
+ witness->rank);
+ abort();
+}
+#ifdef JEMALLOC_JET
+#undef witness_owner_error
+#define witness_owner_error JEMALLOC_N(witness_owner_error)
+witness_owner_error_t *witness_owner_error = JEMALLOC_N(n_witness_owner_error);
+#endif
+
+#ifdef JEMALLOC_JET
+#undef witness_not_owner_error
+#define witness_not_owner_error JEMALLOC_N(n_witness_not_owner_error)
+#endif
+void
+witness_not_owner_error(const witness_t *witness)
+{
+
+ malloc_printf("<jemalloc>: Should not own %s(%u)\n", witness->name,
+ witness->rank);
+ abort();
+}
+#ifdef JEMALLOC_JET
+#undef witness_not_owner_error
+#define witness_not_owner_error JEMALLOC_N(witness_not_owner_error)
+witness_not_owner_error_t *witness_not_owner_error =
+ JEMALLOC_N(n_witness_not_owner_error);
+#endif
+
+#ifdef JEMALLOC_JET
+#undef witness_lockless_error
+#define witness_lockless_error JEMALLOC_N(n_witness_lockless_error)
+#endif
+void
+witness_lockless_error(const witness_list_t *witnesses)
+{
+ witness_t *w;
+
+ malloc_printf("<jemalloc>: Should not own any locks:");
+ ql_foreach(w, witnesses, link) {
+ malloc_printf(" %s(%u)", w->name, w->rank);
+ }
+ malloc_printf("\n");
+ abort();
+}
+#ifdef JEMALLOC_JET
+#undef witness_lockless_error
+#define witness_lockless_error JEMALLOC_N(witness_lockless_error)
+witness_lockless_error_t *witness_lockless_error =
+ JEMALLOC_N(n_witness_lockless_error);
+#endif
+
+void
+witnesses_cleanup(tsd_t *tsd)
+{
+
+ witness_assert_lockless(tsd_tsdn(tsd));
+
+ /* Do nothing. */
+}
+
+void
+witness_fork_cleanup(tsd_t *tsd)
+{
+
+ /* Do nothing. */
+}
+
+void
+witness_prefork(tsd_t *tsd)
+{
+
+ tsd_witness_fork_set(tsd, true);
+}
+
+void
+witness_postfork_parent(tsd_t *tsd)
+{
+
+ tsd_witness_fork_set(tsd, false);
+}
+
+void
+witness_postfork_child(tsd_t *tsd)
+{
+#ifndef JEMALLOC_MUTEX_INIT_CB
+ witness_list_t *witnesses;
+
+ witnesses = tsd_witnessesp_get(tsd);
+ ql_new(witnesses);
+#endif
+ tsd_witness_fork_set(tsd, false);
+}
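
The reporters above fire when lock acquisition violates the rank order established by witness_init(); the check itself lives in the internal headers. The following standalone toy (not jemalloc's witness_lock()) illustrates the rule, ignoring the comp hook that permits ordering among equal-rank witnesses:

        /* Toy illustration of the rank rule the witness reporters enforce. */
        #include <stdbool.h>
        #include <stddef.h>

        typedef struct { const char *name; unsigned rank; } toy_witness_t;

        /* A lock may only be acquired if its rank exceeds every held rank. */
        static bool
        toy_lock_order_ok(const toy_witness_t *held, size_t nheld,
            const toy_witness_t *newly_acquired)
        {
                size_t i;

                for (i = 0; i < nheld; i++) {
                        if (held[i].rank >= newly_acquired->rank)
                                return (false); /* Rank order reversal. */
                }
                return (true);
        }
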
diff --git a/deps/jemalloc/src/zone.c b/deps/jemalloc/src/zone.c
new file mode 100644
index 0000000..0571920
--- /dev/null
+++ b/deps/jemalloc/src/zone.c
@@ -0,0 +1,330 @@
+#include "jemalloc/internal/jemalloc_internal.h"
+#ifndef JEMALLOC_ZONE
+# error "This source file is for zones on Darwin (OS X)."
+#endif
+
+/*
+ * The malloc_default_purgeable_zone() function is only available on >= 10.6.
+ * We need to check whether it is present at runtime, thus the weak_import.
+ */
+extern malloc_zone_t *malloc_default_purgeable_zone(void)
+JEMALLOC_ATTR(weak_import);
+
+/******************************************************************************/
+/* Data. */
+
+static malloc_zone_t *default_zone, *purgeable_zone;
+static malloc_zone_t jemalloc_zone;
+static struct malloc_introspection_t jemalloc_zone_introspect;
+
+/******************************************************************************/
+/* Function prototypes for non-inline static functions. */
+
+static size_t zone_size(malloc_zone_t *zone, void *ptr);
+static void *zone_malloc(malloc_zone_t *zone, size_t size);
+static void *zone_calloc(malloc_zone_t *zone, size_t num, size_t size);
+static void *zone_valloc(malloc_zone_t *zone, size_t size);
+static void zone_free(malloc_zone_t *zone, void *ptr);
+static void *zone_realloc(malloc_zone_t *zone, void *ptr, size_t size);
+#if (JEMALLOC_ZONE_VERSION >= 5)
+static void *zone_memalign(malloc_zone_t *zone, size_t alignment,
+ size_t size);
+#endif
+#if (JEMALLOC_ZONE_VERSION >= 6)
+static void zone_free_definite_size(malloc_zone_t *zone, void *ptr,
+ size_t size);
+#endif
+static void *zone_destroy(malloc_zone_t *zone);
+static size_t zone_good_size(malloc_zone_t *zone, size_t size);
+static void zone_force_lock(malloc_zone_t *zone);
+static void zone_force_unlock(malloc_zone_t *zone);
+
+/******************************************************************************/
+/*
+ * Functions.
+ */
+
+static size_t
+zone_size(malloc_zone_t *zone, void *ptr)
+{
+
+ /*
+ * There appear to be places within Darwin (such as setenv(3)) that
+ * cause calls to this function with pointers that *no* zone owns. If
+ * we knew that all pointers were owned by *some* zone, we could split
+ * our zone into two parts, and use one as the default allocator and
+ * the other as the default deallocator/reallocator. Since that will
+ * not work in practice, we must check all pointers to assure that they
+ * reside within a mapped chunk before determining size.
+ */
+ return (ivsalloc(tsdn_fetch(), ptr, config_prof));
+}
+
+static void *
+zone_malloc(malloc_zone_t *zone, size_t size)
+{
+
+ return (je_malloc(size));
+}
+
+static void *
+zone_calloc(malloc_zone_t *zone, size_t num, size_t size)
+{
+
+ return (je_calloc(num, size));
+}
+
+static void *
+zone_valloc(malloc_zone_t *zone, size_t size)
+{
+ void *ret = NULL; /* Assignment avoids useless compiler warning. */
+
+ je_posix_memalign(&ret, PAGE, size);
+
+ return (ret);
+}
+
+static void
+zone_free(malloc_zone_t *zone, void *ptr)
+{
+
+ if (ivsalloc(tsdn_fetch(), ptr, config_prof) != 0) {
+ je_free(ptr);
+ return;
+ }
+
+ free(ptr);
+}
+
+static void *
+zone_realloc(malloc_zone_t *zone, void *ptr, size_t size)
+{
+
+ if (ivsalloc(tsdn_fetch(), ptr, config_prof) != 0)
+ return (je_realloc(ptr, size));
+
+ return (realloc(ptr, size));
+}
+
+#if (JEMALLOC_ZONE_VERSION >= 5)
+static void *
+zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size)
+{
+ void *ret = NULL; /* Assignment avoids useless compiler warning. */
+
+ je_posix_memalign(&ret, alignment, size);
+
+ return (ret);
+}
+#endif
+
+#if (JEMALLOC_ZONE_VERSION >= 6)
+static void
+zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size)
+{
+ size_t alloc_size;
+
+ alloc_size = ivsalloc(tsdn_fetch(), ptr, config_prof);
+ if (alloc_size != 0) {
+ assert(alloc_size == size);
+ je_free(ptr);
+ return;
+ }
+
+ free(ptr);
+}
+#endif
+
+static void *
+zone_destroy(malloc_zone_t *zone)
+{
+
+ /* This function should never be called. */
+ not_reached();
+ return (NULL);
+}
+
+static size_t
+zone_good_size(malloc_zone_t *zone, size_t size)
+{
+
+ if (size == 0)
+ size = 1;
+ return (s2u(size));
+}
+
+static void
+zone_force_lock(malloc_zone_t *zone)
+{
+
+ if (isthreaded)
+ jemalloc_prefork();
+}
+
+static void
+zone_force_unlock(malloc_zone_t *zone)
+{
+
+ /*
+ * Call jemalloc_postfork_child() rather than
+ * jemalloc_postfork_parent(), because this function is executed by both
+ * parent and child. The parent can tolerate having state
+ * reinitialized, but the child cannot unlock mutexes that were locked
+ * by the parent.
+ */
+ if (isthreaded)
+ jemalloc_postfork_child();
+}
+
+static void
+zone_init(void)
+{
+
+ jemalloc_zone.size = (void *)zone_size;
+ jemalloc_zone.malloc = (void *)zone_malloc;
+ jemalloc_zone.calloc = (void *)zone_calloc;
+ jemalloc_zone.valloc = (void *)zone_valloc;
+ jemalloc_zone.free = (void *)zone_free;
+ jemalloc_zone.realloc = (void *)zone_realloc;
+ jemalloc_zone.destroy = (void *)zone_destroy;
+ jemalloc_zone.zone_name = "jemalloc_zone";
+ jemalloc_zone.batch_malloc = NULL;
+ jemalloc_zone.batch_free = NULL;
+ jemalloc_zone.introspect = &jemalloc_zone_introspect;
+ jemalloc_zone.version = JEMALLOC_ZONE_VERSION;
+#if (JEMALLOC_ZONE_VERSION >= 5)
+ jemalloc_zone.memalign = zone_memalign;
+#endif
+#if (JEMALLOC_ZONE_VERSION >= 6)
+ jemalloc_zone.free_definite_size = zone_free_definite_size;
+#endif
+#if (JEMALLOC_ZONE_VERSION >= 8)
+ jemalloc_zone.pressure_relief = NULL;
+#endif
+
+ jemalloc_zone_introspect.enumerator = NULL;
+ jemalloc_zone_introspect.good_size = (void *)zone_good_size;
+ jemalloc_zone_introspect.check = NULL;
+ jemalloc_zone_introspect.print = NULL;
+ jemalloc_zone_introspect.log = NULL;
+ jemalloc_zone_introspect.force_lock = (void *)zone_force_lock;
+ jemalloc_zone_introspect.force_unlock = (void *)zone_force_unlock;
+ jemalloc_zone_introspect.statistics = NULL;
+#if (JEMALLOC_ZONE_VERSION >= 6)
+ jemalloc_zone_introspect.zone_locked = NULL;
+#endif
+#if (JEMALLOC_ZONE_VERSION >= 7)
+ jemalloc_zone_introspect.enable_discharge_checking = NULL;
+ jemalloc_zone_introspect.disable_discharge_checking = NULL;
+ jemalloc_zone_introspect.discharge = NULL;
+# ifdef __BLOCKS__
+ jemalloc_zone_introspect.enumerate_discharged_pointers = NULL;
+# else
+ jemalloc_zone_introspect.enumerate_unavailable_without_blocks = NULL;
+# endif
+#endif
+}
+
+static malloc_zone_t *
+zone_default_get(void)
+{
+ malloc_zone_t **zones = NULL;
+ unsigned int num_zones = 0;
+
+ /*
+ * On OSX 10.12, malloc_default_zone returns a special zone that is not
+ * present in the list of registered zones. That zone uses a "lite zone"
+ * if one is present (apparently enabled when malloc stack logging is
+ * enabled), or the first registered zone otherwise. In practice this
+ * means unless malloc stack logging is enabled, the first registered
+ * zone is the default. So get the list of zones to get the first one,
+ * instead of relying on malloc_default_zone.
+ */
+ if (KERN_SUCCESS != malloc_get_all_zones(0, NULL,
+ (vm_address_t**)&zones, &num_zones)) {
+ /*
+ * Reset the value in case the failure happened after it was
+ * set.
+ */
+ num_zones = 0;
+ }
+
+ if (num_zones)
+ return (zones[0]);
+
+ return (malloc_default_zone());
+}
+
+/* As written, this function can only promote jemalloc_zone. */
+static void
+zone_promote(void)
+{
+ malloc_zone_t *zone;
+
+ do {
+ /*
+ * Unregister and reregister the default zone. On OSX >= 10.6,
+ * unregistering takes the last registered zone and places it
+ * at the location of the specified zone. Unregistering the
+ * default zone thus makes the last registered one the default.
+ * On OSX < 10.6, unregistering shifts all registered zones.
+ * The first registered zone then becomes the default.
+ */
+ malloc_zone_unregister(default_zone);
+ malloc_zone_register(default_zone);
+
+ /*
+ * On OSX 10.6, having the default purgeable zone appear before
+ * the default zone makes some things crash because it thinks it
+ * owns the default zone allocated pointers. We thus
+ * unregister/re-register it in order to ensure it's always
+ * after the default zone. On OSX < 10.6, there is no purgeable
+ * zone, so this does nothing. On OSX >= 10.6, unregistering
+ * replaces the purgeable zone with the last registered zone
+ * above, i.e. the default zone. Registering it again then puts
+ * it at the end, obviously after the default zone.
+ */
+ if (purgeable_zone != NULL) {
+ malloc_zone_unregister(purgeable_zone);
+ malloc_zone_register(purgeable_zone);
+ }
+
+ zone = zone_default_get();
+ } while (zone != &jemalloc_zone);
+}
+
+JEMALLOC_ATTR(constructor)
+void
+zone_register(void)
+{
+
+ /*
+ * If something else replaced the system default zone allocator, don't
+ * register jemalloc's.
+ */
+ default_zone = zone_default_get();
+ if (!default_zone->zone_name || strcmp(default_zone->zone_name,
+ "DefaultMallocZone") != 0)
+ return;
+
+ /*
+ * The default purgeable zone is created lazily by OSX's libc. It uses
+ * the default zone when it is created for "small" allocations
+ * (< 15 KiB), but assumes the default zone is a scalable_zone. This
+ * obviously fails when the default zone is the jemalloc zone, so
+ * malloc_default_purgeable_zone() is called beforehand so that the
+ * default purgeable zone is created when the default zone is still
+ * a scalable_zone. As purgeable zones only exist on >= 10.6, we need
+ * to check for the existence of malloc_default_purgeable_zone() at
+ * run time.
+ */
+ purgeable_zone = (malloc_default_purgeable_zone == NULL) ? NULL :
+ malloc_default_purgeable_zone();
+
+ /* Register the custom zone. At this point it won't be the default. */
+ zone_init();
+ malloc_zone_register(&jemalloc_zone);
+
+ /* Promote the custom zone to be default. */
+ zone_promote();
+}
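
Once zone_register() returns, the first registered zone should be jemalloc's, so malloc(3) and free(3) route through it. A hedged sanity-check sketch using the same malloc_get_all_zones() probe as zone_default_get() (jemalloc_zone_is_default is hypothetical, Darwin only):

        /* Sketch: verify that promotion succeeded. */
        #include <mach/mach.h>
        #include <malloc/malloc.h>
        #include <stdbool.h>
        #include <string.h>

        static bool
        jemalloc_zone_is_default(void)
        {
                malloc_zone_t **zones = NULL;
                unsigned num_zones = 0;

                if (malloc_get_all_zones(0, NULL, (vm_address_t **)&zones,
                    &num_zones) != KERN_SUCCESS || num_zones == 0)
                        return (false);
                return (zones[0]->zone_name != NULL &&
                    strcmp(zones[0]->zone_name, "jemalloc_zone") == 0);
        }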